diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 0000000000000000000000000000000000000000..a6344aac8c09253b3b630fb776ae94478aa0275b
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1,35 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..bd43f4d8d262f67f23dd3b294b4fb43e55d2d230
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,3 @@
+*.pt
+*.pth
+*.st
\ No newline at end of file
diff --git a/README.md b/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..a8457ea3f35dbb3a47b5573aff671c56b57d1f9c
--- /dev/null
+++ b/README.md
@@ -0,0 +1,13 @@
+---
+title: UniControl Demo
+emoji: 📚
+colorFrom: green
+colorTo: blue
+sdk: gradio
+sdk_version: 3.36.1
+app_file: app.py
+pinned: false
+license: apache-2.0
+---
+
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/__pycache__/config.cpython-310.pyc b/__pycache__/config.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ca0687b783c79c4b266a3dbc096bf9723094d488
Binary files /dev/null and b/__pycache__/config.cpython-310.pyc differ
diff --git a/__pycache__/model.cpython-310.pyc b/__pycache__/model.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..74c3d5e93081b8df522a6a198b6281f90e695a0b
Binary files /dev/null and b/__pycache__/model.cpython-310.pyc differ
diff --git a/__pycache__/utils.cpython-310.pyc b/__pycache__/utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..aeef551f26630b22e1fcb18a3e88d2f08db4b822
Binary files /dev/null and b/__pycache__/utils.cpython-310.pyc differ
diff --git a/_app.py b/_app.py
new file mode 100644
index 0000000000000000000000000000000000000000..bef9a839a75af02c064ee684692814f7e32056d2
--- /dev/null
+++ b/_app.py
@@ -0,0 +1,1360 @@
+'''
+ * Copyright (c) 2023 Salesforce, Inc.
+ * All rights reserved.
+ * SPDX-License-Identifier: Apache License 2.0
+ * For full license text, see LICENSE.txt file in the repo root or http://www.apache.org/licenses/
+ * By Can Qin
+ * Modified from ControlNet repo: https://github.com/lllyasviel/ControlNet
+ * Copyright (c) 2023 Lvmin Zhang and Maneesh Agrawala
+'''
+
+import config
+
+import cv2
+import einops
+import gradio as gr
+import numpy as np
+import torch
+import random
+import os
+
+from pytorch_lightning import seed_everything
+from annotator.util import resize_image, HWC3
+from annotator.uniformer_base import UniformerDetector
+from annotator.hed import HEDdetector
+from annotator.canny import CannyDetector
+from annotator.midas import MidasDetector
+from annotator.outpainting import Outpainter
+from annotator.openpose import OpenposeDetector
+from annotator.inpainting import Inpainter
+from annotator.grayscale import GrayscaleConverter
+from annotator.blur import Blurrer
+import cvlib as cv
+
+from utils import create_model, load_state_dict
+from lib.ddim_hacked import DDIMSampler
+
+from safetensors.torch import load_file as stload
+from collections import OrderedDict
+
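+# Pre-load the per-task condition extractors (segmentation, depth, edge, pose, outpainting,
+# grayscale, blur, inpainting); the process_* handlers below use them to turn an uploaded
+# RGB image into the corresponding control map.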
+apply_uniformer = UniformerDetector()
+apply_midas = MidasDetector()
+apply_canny = CannyDetector()
+apply_hed = HEDdetector()
+model_outpainting = Outpainter()
+apply_openpose = OpenposeDetector()
+model_grayscale = GrayscaleConverter()
+model_blur = Blurrer()
+model_inpainting = Inpainter()
+
+
+def midas(img, res):
+ img = resize_image(HWC3(img), res)
+ results = apply_midas(img)
+ return results
+
+
+def outpainting(img, res, height_top_extended, height_down_extended, width_left_extended, width_right_extended):
+ img = resize_image(HWC3(img), res)
+ result = model_outpainting(img, height_top_extended, height_down_extended, width_left_extended, width_right_extended)
+ return result
+
+
+def grayscale(img, res):
+ img = resize_image(HWC3(img), res)
+ result = model_grayscale(img)
+ return result
+
+
+def blur(img, res, ksize):
+ img = resize_image(HWC3(img), res)
+ result = model_blur(img, ksize)
+ return result
+
+
+def inpainting(img, res, height_top_mask, height_down_mask, width_left_mask, width_right_mask):
+ img = resize_image(HWC3(img), res)
+ result = model_inpainting(img, height_top_mask, height_down_mask, width_left_mask, width_right_mask)
+ return result
+
+model = create_model('./models/cldm_v15_unicontrol.yaml').cpu()
+# model_url = 'https://huggingface.co/Robert001/UniControl-Model/resolve/main/unicontrol_v1.1.ckpt'
+model_url = 'https://huggingface.co/Robert001/UniControl-Model/resolve/main/unicontrol_v1.1.st'
+
+ckpts_path = './'
+# model_path = os.path.join(ckpts_path, "unicontrol_v1.1.ckpt")
+model_path = os.path.join(ckpts_path, "unicontrol_v1.1.st")
+
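+# Download the UniControl checkpoint (safetensors) on first run, load the weights on CPU,
+# then move the model to the GPU and wrap it in a DDIM sampler.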
+if not os.path.exists(model_path):
+ from basicsr.utils.download_util import load_file_from_url
+ load_file_from_url(model_url, model_dir=ckpts_path)
+
+model_dict = OrderedDict(stload(model_path, device='cpu'))
+model.load_state_dict(model_dict, strict=False)
+# model.load_state_dict(load_state_dict(model_path, location='cuda'), strict=False)
+model = model.cuda()
+ddim_sampler = DDIMSampler(model)
+
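+# task_to_name maps each demo task to its control name; name_to_instruction maps that name
+# to the text instruction that model.get_learned_conditioning encodes into the task feature
+# (task_dic['feature']) attached to the sampler conditioning.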
+task_to_name = {'hed': 'control_hed', 'canny': 'control_canny', 'seg': 'control_seg', 'segbase': 'control_seg',
+                'depth': 'control_depth', 'normal': 'control_normal', 'openpose': 'control_openpose',
+                'bbox': 'control_bbox', 'grayscale': 'control_grayscale', 'outpainting': 'control_outpainting',
+                'hedsketch': 'control_hedsketch', 'inpainting': 'control_inpainting', 'blur': 'control_blur'}
+
+name_to_instruction = {"control_hed": "hed edge to image", "control_canny": "canny edge to image",
+ "control_seg": "segmentation map to image", "control_depth": "depth map to image",
+ "control_normal": "normal surface map to image", "control_img": "image editing",
+ "control_openpose": "human pose skeleton to image", "control_hedsketch": "sketch to image",
+ "control_bbox": "bounding box to image", "control_outpainting": "image outpainting",
+ "control_grayscale": "gray image to color image", "control_blur": "deblur image to clean image",
+ "control_inpainting": "image inpainting"}
+
+
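+# Each process_* handler follows the same recipe: build (or pass through) a condition map,
+# stack it into the c_concat control tensor, encode the prompt and negative prompt into
+# c_crossattn, attach the task feature, run DDIM sampling, and decode the latents to uint8 images.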
+def process_canny(input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution, ddim_steps, guess_mode,
+ strength, scale, seed, eta, low_threshold, high_threshold, condition_mode):
+ with torch.no_grad():
+ img = resize_image(HWC3(input_image), image_resolution)
+ H, W, C = img.shape
+        if condition_mode:
+ detected_map = apply_canny(img, low_threshold, high_threshold)
+ detected_map = HWC3(detected_map)
+ else:
+ detected_map = 255 - img
+
+ control = torch.from_numpy(detected_map.copy()).float().cuda() / 255.0
+ control = torch.stack([control for _ in range(num_samples)], dim=0)
+ control = einops.rearrange(control, 'b h w c -> b c h w').clone()
+
+ if seed == -1:
+ seed = random.randint(0, 65535)
+ seed_everything(seed)
+
+ if config.save_memory:
+ model.low_vram_shift(is_diffusing=False)
+ task = 'canny'
+ task_dic = {}
+ task_dic['name'] = task_to_name[task]
+ task_instruction = name_to_instruction[task_dic['name']]
+ task_dic['feature'] = model.get_learned_conditioning(task_instruction)[:, :1, :]
+
+ cond = {"c_concat": [control],
+ "c_crossattn": [model.get_learned_conditioning([prompt + ', ' + a_prompt] * num_samples)],
+ "task": task_dic}
+
+ un_cond = {"c_concat": [control * 0] if guess_mode else [control],
+ "c_crossattn": [model.get_learned_conditioning([n_prompt] * num_samples)]}
+ shape = (4, H // 8, W // 8)
+
+ if config.save_memory:
+ model.low_vram_shift(is_diffusing=True)
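+        # In guess mode the 13 control scales decay as strength * 0.825**(12 - i);
+        # otherwise a uniform strength is applied to every injection level.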
+ model.control_scales = [strength * (0.825 ** float(12 - i)) for i in range(13)] if guess_mode else ([strength] * 13)
+ samples, intermediates = ddim_sampler.sample(ddim_steps, num_samples,
+ shape, cond, verbose=False, eta=eta,
+ unconditional_guidance_scale=scale,
+ unconditional_conditioning=un_cond)
+
+ if config.save_memory:
+ model.low_vram_shift(is_diffusing=False)
+
+ x_samples = model.decode_first_stage(samples)
+        x_samples = (einops.rearrange(x_samples, 'b c h w -> b h w c') * 127.5 + 127.5).cpu().numpy().clip(0, 255).astype(np.uint8)
+
+ results = [x_samples[i] for i in range(num_samples)]
+ return [255 - detected_map] + results
+
+
+def process_hed(input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution, detect_resolution, ddim_steps,
+ guess_mode, strength, scale, seed, eta, condition_mode):
+ with torch.no_grad():
+ input_image = HWC3(input_image)
+ img = resize_image(input_image, image_resolution)
+ H, W, C = img.shape
+        if condition_mode:
+ detected_map = apply_hed(resize_image(input_image, detect_resolution))
+ detected_map = HWC3(detected_map)
+ else:
+ detected_map = img
+
+ detected_map = cv2.resize(detected_map, (W, H), interpolation=cv2.INTER_LINEAR)
+
+ control = torch.from_numpy(detected_map.copy()).float().cuda() / 255.0
+ control = torch.stack([control for _ in range(num_samples)], dim=0)
+ control = einops.rearrange(control, 'b h w c -> b c h w').clone()
+
+ if seed == -1:
+ seed = random.randint(0, 65535)
+ seed_everything(seed)
+
+ if config.save_memory:
+ model.low_vram_shift(is_diffusing=False)
+
+ task = 'hed'
+ task_dic = {}
+ task_dic['name'] = task_to_name[task]
+ task_instruction = name_to_instruction[task_dic['name']]
+ task_dic['feature'] = model.get_learned_conditioning(task_instruction)[:, :1, :]
+
+ cond = {"c_concat": [control],
+ "c_crossattn": [model.get_learned_conditioning([prompt + ', ' + a_prompt] * num_samples)],
+ "task": task_dic}
+
+ un_cond = {"c_concat": [control * 0] if guess_mode else [control],
+ "c_crossattn": [model.get_learned_conditioning([n_prompt] * num_samples)]}
+ shape = (4, H // 8, W // 8)
+
+ if config.save_memory:
+ model.low_vram_shift(is_diffusing=True)
+ model.control_scales = [strength * (0.825 ** float(12 - i)) for i in range(13)] if guess_mode else ([strength] * 13)
+ samples, intermediates = ddim_sampler.sample(ddim_steps, num_samples,
+ shape, cond, verbose=False, eta=eta,
+ unconditional_guidance_scale=scale,
+ unconditional_conditioning=un_cond)
+
+ if config.save_memory:
+ model.low_vram_shift(is_diffusing=False)
+
+ x_samples = model.decode_first_stage(samples)
+        x_samples = (einops.rearrange(x_samples, 'b c h w -> b h w c') * 127.5 + 127.5).cpu().numpy().clip(0, 255).astype(np.uint8)
+
+ results = [x_samples[i] for i in range(num_samples)]
+ return [detected_map] + results
+
+
+def process_depth(input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution, detect_resolution, ddim_steps,
+ guess_mode, strength, scale, seed, eta, condition_mode):
+ with torch.no_grad():
+ input_image = HWC3(input_image)
+ img = resize_image(input_image, image_resolution)
+ H, W, C = img.shape
+        if condition_mode:
+ detected_map, _ = apply_midas(resize_image(input_image, detect_resolution))
+ detected_map = HWC3(detected_map)
+ else:
+ detected_map = img
+
+ detected_map = cv2.resize(detected_map, (W, H), interpolation=cv2.INTER_LINEAR)
+
+ control = torch.from_numpy(detected_map.copy()).float().cuda() / 255.0
+ control = torch.stack([control for _ in range(num_samples)], dim=0)
+ control = einops.rearrange(control, 'b h w c -> b c h w').clone()
+
+ if seed == -1:
+ seed = random.randint(0, 65535)
+ seed_everything(seed)
+
+ if config.save_memory:
+ model.low_vram_shift(is_diffusing=False)
+ task = 'depth'
+ task_dic = {}
+ task_dic['name'] = task_to_name[task]
+ task_instruction = name_to_instruction[task_dic['name']]
+ task_dic['feature'] = model.get_learned_conditioning(task_instruction)[:, :1, :]
+ cond = {"c_concat": [control],
+ "c_crossattn": [model.get_learned_conditioning([prompt + ', ' + a_prompt] * num_samples)],
+ "task": task_dic}
+
+ un_cond = {"c_concat": [control * 0] if guess_mode else [control],
+ "c_crossattn": [model.get_learned_conditioning([n_prompt] * num_samples)]}
+ shape = (4, H // 8, W // 8)
+
+ if config.save_memory:
+ model.low_vram_shift(is_diffusing=True)
+ model.control_scales = [strength * (0.825 ** float(12 - i)) for i in range(13)] if guess_mode else (
+ [strength] * 13)
+ samples, intermediates = ddim_sampler.sample(ddim_steps, num_samples,
+ shape, cond, verbose=False, eta=eta,
+ unconditional_guidance_scale=scale,
+ unconditional_conditioning=un_cond)
+
+ if config.save_memory:
+ model.low_vram_shift(is_diffusing=False)
+
+ x_samples = model.decode_first_stage(samples)
+        x_samples = (einops.rearrange(x_samples, 'b c h w -> b h w c') * 127.5 + 127.5).cpu().numpy().clip(0, 255).astype(np.uint8)
+
+ results = [x_samples[i] for i in range(num_samples)]
+ return [detected_map] + results
+
+
+def process_normal(input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution, detect_resolution,
+ ddim_steps, guess_mode, strength, scale, seed, eta, condition_mode):
+ with torch.no_grad():
+
+ input_image = HWC3(input_image)
+ img = resize_image(input_image, image_resolution)
+ H, W, C = img.shape
+        if condition_mode:
+ _, detected_map = apply_midas(resize_image(input_image, detect_resolution))
+ detected_map = HWC3(detected_map)
+ else:
+ detected_map = img
+
+ detected_map = cv2.resize(detected_map, (W, H), interpolation=cv2.INTER_LINEAR)
+
+ control = torch.from_numpy(detected_map.copy()).float().cuda() / 255.0
+ control = torch.stack([control for _ in range(num_samples)], dim=0)
+ control = einops.rearrange(control, 'b h w c -> b c h w').clone()
+
+ if seed == -1:
+ seed = random.randint(0, 65535)
+ seed_everything(seed)
+
+ if config.save_memory:
+ model.low_vram_shift(is_diffusing=False)
+ task = 'normal'
+ task_dic = {}
+ task_dic['name'] = task_to_name[task]
+ task_instruction = name_to_instruction[task_dic['name']]
+ task_dic['feature'] = model.get_learned_conditioning(task_instruction)[:, :1, :]
+ cond = {"c_concat": [control],
+ "c_crossattn": [model.get_learned_conditioning([prompt + ', ' + a_prompt] * num_samples)],
+ "task": task_dic}
+
+ un_cond = {"c_concat": [control * 0] if guess_mode else [control],
+ "c_crossattn": [model.get_learned_conditioning([n_prompt] * num_samples)]}
+ shape = (4, H // 8, W // 8)
+
+ if config.save_memory:
+ model.low_vram_shift(is_diffusing=True)
+ model.control_scales = [strength * (0.825 ** float(12 - i)) for i in range(13)] if guess_mode else (
+ [strength] * 13)
+ samples, intermediates = ddim_sampler.sample(ddim_steps, num_samples,
+ shape, cond, verbose=False, eta=eta,
+ unconditional_guidance_scale=scale,
+ unconditional_conditioning=un_cond)
+
+ if config.save_memory:
+ model.low_vram_shift(is_diffusing=False)
+
+ x_samples = model.decode_first_stage(samples)
+        x_samples = (einops.rearrange(x_samples, 'b c h w -> b h w c') * 127.5 + 127.5).cpu().numpy().clip(0, 255).astype(np.uint8)
+
+ results = [x_samples[i] for i in range(num_samples)]
+ return [detected_map] + results
+
+
+def process_pose(input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution, detect_resolution, ddim_steps,
+ guess_mode, strength, scale, seed, eta, condition_mode):
+ with torch.no_grad():
+ input_image = HWC3(input_image)
+ img = resize_image(input_image, image_resolution)
+ H, W, C = img.shape
+        if condition_mode:
+ detected_map, _ = apply_openpose(resize_image(input_image, detect_resolution))
+ detected_map = HWC3(detected_map)
+ else:
+ detected_map = img
+
+ detected_map = cv2.resize(detected_map, (W, H), interpolation=cv2.INTER_NEAREST)
+
+ control = torch.from_numpy(detected_map.copy()).float().cuda() / 255.0
+ control = torch.stack([control for _ in range(num_samples)], dim=0)
+ control = einops.rearrange(control, 'b h w c -> b c h w').clone()
+
+ if seed == -1:
+ seed = random.randint(0, 65535)
+ seed_everything(seed)
+
+ if config.save_memory:
+ model.low_vram_shift(is_diffusing=False)
+ task = 'openpose'
+ task_dic = {}
+ task_dic['name'] = task_to_name[task]
+ task_instruction = name_to_instruction[task_dic['name']]
+ task_dic['feature'] = model.get_learned_conditioning(task_instruction)[:, :1, :]
+ cond = {"c_concat": [control],
+ "c_crossattn": [model.get_learned_conditioning([prompt + ', ' + a_prompt] * num_samples)],
+ "task": task_dic}
+
+ un_cond = {"c_concat": [control * 0] if guess_mode else [control],
+ "c_crossattn": [model.get_learned_conditioning([n_prompt] * num_samples)]}
+ shape = (4, H // 8, W // 8)
+
+ if config.save_memory:
+ model.low_vram_shift(is_diffusing=True)
+ model.control_scales = [strength * (0.825 ** float(12 - i)) for i in range(13)] if guess_mode else (
+ [strength] * 13)
+ samples, intermediates = ddim_sampler.sample(ddim_steps, num_samples,
+ shape, cond, verbose=False, eta=eta,
+ unconditional_guidance_scale=scale,
+ unconditional_conditioning=un_cond)
+
+ if config.save_memory:
+ model.low_vram_shift(is_diffusing=False)
+
+ x_samples = model.decode_first_stage(samples)
+        x_samples = (einops.rearrange(x_samples, 'b c h w -> b h w c') * 127.5 + 127.5).cpu().numpy().clip(0, 255).astype(np.uint8)
+
+ results = [x_samples[i] for i in range(num_samples)]
+ return [detected_map] + results
+
+
+def process_seg(input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution, detect_resolution, ddim_steps,
+ guess_mode, strength, scale, seed, eta, condition_mode):
+ with torch.no_grad():
+ input_image = HWC3(input_image)
+ img = resize_image(input_image, image_resolution)
+ H, W, C = img.shape
+
+        if condition_mode:
+ detected_map = apply_uniformer(resize_image(input_image, detect_resolution))
+ else:
+ detected_map = img
+
+ detected_map = cv2.resize(detected_map, (W, H), interpolation=cv2.INTER_NEAREST)
+
+ control = torch.from_numpy(detected_map.copy()).float().cuda() / 255.0
+ control = torch.stack([control for _ in range(num_samples)], dim=0)
+ control = einops.rearrange(control, 'b h w c -> b c h w').clone()
+
+ if seed == -1:
+ seed = random.randint(0, 65535)
+ seed_everything(seed)
+
+ if config.save_memory:
+ model.low_vram_shift(is_diffusing=False)
+ task = 'seg'
+ task_dic = {}
+ task_dic['name'] = task_to_name[task]
+ task_instruction = name_to_instruction[task_dic['name']]
+ task_dic['feature'] = model.get_learned_conditioning(task_instruction)[:, :1, :]
+
+ cond = {"c_concat": [control],
+ "c_crossattn": [model.get_learned_conditioning([prompt + ', ' + a_prompt] * num_samples)],
+ "task": task_dic}
+ un_cond = {"c_concat": [control * 0] if guess_mode else [control],
+ "c_crossattn": [model.get_learned_conditioning([n_prompt] * num_samples)]}
+ shape = (4, H // 8, W // 8)
+
+ if config.save_memory:
+ model.low_vram_shift(is_diffusing=True)
+ model.control_scales = [strength * (0.825 ** float(12 - i)) for i in range(13)] if guess_mode else (
+ [strength] * 13)
+ samples, intermediates = ddim_sampler.sample(ddim_steps, num_samples,
+ shape, cond, verbose=False, eta=eta,
+ unconditional_guidance_scale=scale,
+ unconditional_conditioning=un_cond)
+
+ if config.save_memory:
+ model.low_vram_shift(is_diffusing=False)
+
+ x_samples = model.decode_first_stage(samples)
+        x_samples = (einops.rearrange(x_samples, 'b c h w -> b h w c') * 127.5 + 127.5).cpu().numpy().clip(0, 255).astype(np.uint8)
+
+ results = [x_samples[i] for i in range(num_samples)]
+ return [detected_map] + results
+
+
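+# Fixed RGB color per detectable MS-COCO class (plus background); process_bbox fills each
+# detected box with its class color to build the bounding-box condition map.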
+color_dict = {
+ 'background': (0, 0, 100),
+ 'person': (255, 0, 0),
+ 'bicycle': (0, 255, 0),
+ 'car': (0, 0, 255),
+ 'motorcycle': (255, 255, 0),
+ 'airplane': (255, 0, 255),
+ 'bus': (0, 255, 255),
+ 'train': (128, 128, 0),
+ 'truck': (128, 0, 128),
+ 'boat': (0, 128, 128),
+ 'traffic light': (128, 128, 128),
+ 'fire hydrant': (64, 0, 0),
+ 'stop sign': (0, 64, 0),
+ 'parking meter': (0, 0, 64),
+ 'bench': (64, 64, 0),
+ 'bird': (64, 0, 64),
+ 'cat': (0, 64, 64),
+ 'dog': (192, 192, 192),
+ 'horse': (32, 32, 32),
+ 'sheep': (96, 96, 96),
+ 'cow': (160, 160, 160),
+ 'elephant': (224, 224, 224),
+ 'bear': (32, 0, 0),
+ 'zebra': (0, 32, 0),
+ 'giraffe': (0, 0, 32),
+ 'backpack': (32, 32, 0),
+ 'umbrella': (32, 0, 32),
+ 'handbag': (0, 32, 32),
+ 'tie': (96, 0, 0),
+ 'suitcase': (0, 96, 0),
+ 'frisbee': (0, 0, 96),
+ 'skis': (96, 96, 0),
+ 'snowboard': (96, 0, 96),
+ 'sports ball': (0, 96, 96),
+ 'kite': (160, 0, 0),
+ 'baseball bat': (0, 160, 0),
+ 'baseball glove': (0, 0, 160),
+ 'skateboard': (160, 160, 0),
+ 'surfboard': (160, 0, 160),
+ 'tennis racket': (0, 160, 160),
+ 'bottle': (224, 0, 0),
+ 'wine glass': (0, 224, 0),
+ 'cup': (0, 0, 224),
+ 'fork': (224, 224, 0),
+ 'knife': (224, 0, 224),
+ 'spoon': (0, 224, 224),
+ 'bowl': (64, 64, 64),
+ 'banana': (128, 64, 64),
+ 'apple': (64, 128, 64),
+ 'sandwich': (64, 64, 128),
+ 'orange': (128, 128, 64),
+ 'broccoli': (128, 64, 128),
+ 'carrot': (64, 128, 128),
+ 'hot dog': (192, 64, 64),
+ 'pizza': (64, 192, 64),
+ 'donut': (64, 64, 192),
+ 'cake': (192, 192, 64),
+ 'chair': (192, 64, 192),
+ 'couch': (64, 192, 192),
+ 'potted plant': (96, 32, 32),
+ 'bed': (32, 96, 32),
+ 'dining table': (32, 32, 96),
+ 'toilet': (96, 96, 32),
+ 'tv': (96, 32, 96),
+ 'laptop': (32, 96, 96),
+ 'mouse': (160, 32, 32),
+ 'remote': (32, 160, 32),
+ 'keyboard': (32, 32, 160),
+ 'cell phone': (160, 160, 32),
+ 'microwave': (160, 32, 160),
+ 'oven': (32, 160, 160),
+ 'toaster': (224, 32, 32),
+ 'sink': (32, 224, 32),
+ 'refrigerator': (32, 32, 224),
+ 'book': (224, 224, 32),
+ 'clock': (224, 32, 224),
+ 'vase': (32, 224, 224),
+ 'scissors': (64, 96, 96),
+ 'teddy bear': (96, 64, 96),
+ 'hair drier': (96, 96, 64),
+ 'toothbrush': (160, 96, 96)
+}
+
+
+def process_bbox(input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution, ddim_steps, guess_mode,
+ strength, scale, seed, eta, confidence, nms_thresh, condition_mode):
+ with torch.no_grad():
+ input_image = HWC3(input_image)
+ img = resize_image(input_image, image_resolution)
+ H, W, C = img.shape
+
+        if condition_mode:
+ bbox, label, conf = cv.detect_common_objects(input_image, confidence=confidence, nms_thresh=nms_thresh)
+ mask = np.zeros((input_image.shape), np.uint8)
+ if len(bbox) > 0:
+ order_area = np.zeros(len(bbox))
+ # order_final = np.arange(len(bbox))
+ area_all = 0
+ for idx_mask, box in enumerate(bbox):
+ x_1, y_1, x_2, y_2 = box
+
+ x_1 = 0 if x_1 < 0 else x_1
+ y_1 = 0 if y_1 < 0 else y_1
+ x_2 = input_image.shape[1] if x_2 < 0 else x_2
+ y_2 = input_image.shape[0] if y_2 < 0 else y_2
+
+ area = (x_2 - x_1) * (y_2 - y_1)
+ order_area[idx_mask] = area
+ area_all += area
+ ordered_area = np.argsort(-order_area)
+
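+                # Paint boxes from largest to smallest so that smaller, overlapping
+                # objects are drawn on top and remain visible in the condition map.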
+ for idx_mask in ordered_area:
+ box = bbox[idx_mask]
+ x_1, y_1, x_2, y_2 = box
+ x_1 = 0 if x_1 < 0 else x_1
+ y_1 = 0 if y_1 < 0 else y_1
+ x_2 = input_image.shape[1] if x_2 < 0 else x_2
+ y_2 = input_image.shape[0] if y_2 < 0 else y_2
+
+ mask[y_1:y_2, x_1:x_2, :] = color_dict[label[idx_mask]]
+ detected_map = mask
+ else:
+ detected_map = img
+
+ detected_map = cv2.resize(detected_map, (W, H), interpolation=cv2.INTER_LINEAR)
+
+ control = torch.from_numpy(detected_map.copy()).float().cuda() / 255.0
+ control = torch.stack([control for _ in range(num_samples)], dim=0)
+ control = einops.rearrange(control, 'b h w c -> b c h w').clone()
+
+ if seed == -1:
+ seed = random.randint(0, 65535)
+ seed_everything(seed)
+
+ if config.save_memory:
+ model.low_vram_shift(is_diffusing=False)
+
+ task = 'bbox'
+ task_dic = {}
+ task_dic['name'] = task_to_name[task]
+ task_instruction = name_to_instruction[task_dic['name']]
+ task_dic['feature'] = model.get_learned_conditioning(task_instruction)[:, :1, :]
+
+ cond = {"c_concat": [control],
+ "c_crossattn": [model.get_learned_conditioning([prompt + ', ' + a_prompt] * num_samples)],
+ "task": task_dic}
+
+ un_cond = {"c_concat": [control * 0] if guess_mode else [control],
+ "c_crossattn": [model.get_learned_conditioning([n_prompt] * num_samples)]}
+ shape = (4, H // 8, W // 8)
+
+ if config.save_memory:
+ model.low_vram_shift(is_diffusing=True)
+ model.control_scales = [strength * (0.825 ** float(12 - i)) for i in range(13)] if guess_mode else (
+ [strength] * 13)
+ samples, intermediates = ddim_sampler.sample(ddim_steps, num_samples,
+ shape, cond, verbose=False, eta=eta,
+ unconditional_guidance_scale=scale,
+ unconditional_conditioning=un_cond)
+
+ if config.save_memory:
+ model.low_vram_shift(is_diffusing=False)
+
+ x_samples = model.decode_first_stage(samples)
+        x_samples = (einops.rearrange(x_samples, 'b c h w -> b h w c') * 127.5 + 127.5).cpu().numpy().clip(0, 255).astype(np.uint8)
+
+ results = [x_samples[i] for i in range(num_samples)]
+ return [detected_map] + results
+
+
+def process_outpainting(input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution, ddim_steps, guess_mode,
+ strength, scale, seed, eta, height_top_extended, height_down_extended, width_left_extended, width_right_extended, condition_mode):
+ with torch.no_grad():
+ input_image = HWC3(input_image)
+ img = resize_image(input_image, image_resolution)
+ H, W, C = img.shape
+        if condition_mode:
+ detected_map = outpainting(input_image, image_resolution, height_top_extended, height_down_extended, width_left_extended, width_right_extended)
+ else:
+ detected_map = img
+
+ detected_map = cv2.resize(detected_map, (W, H), interpolation=cv2.INTER_LINEAR)
+
+ control = torch.from_numpy(detected_map.copy()).float().cuda() / 255.0
+ control = torch.stack([control for _ in range(num_samples)], dim=0)
+ control = einops.rearrange(control, 'b h w c -> b c h w').clone()
+
+ if seed == -1:
+ seed = random.randint(0, 65535)
+ seed_everything(seed)
+
+ if config.save_memory:
+ model.low_vram_shift(is_diffusing=False)
+
+ task = 'outpainting'
+ task_dic = {}
+ task_dic['name'] = task_to_name[task]
+ task_instruction = name_to_instruction[task_dic['name']]
+ task_dic['feature'] = model.get_learned_conditioning(task_instruction)[:, :1, :]
+
+ cond = {"c_concat": [control],
+ "c_crossattn": [model.get_learned_conditioning([prompt + ', ' + a_prompt] * num_samples)],
+ "task": task_dic}
+
+ un_cond = {"c_concat": [control * 0] if guess_mode else [control],
+ "c_crossattn": [model.get_learned_conditioning([n_prompt] * num_samples)]}
+ shape = (4, H // 8, W // 8)
+
+ if config.save_memory:
+ model.low_vram_shift(is_diffusing=True)
+ model.control_scales = [strength * (0.825 ** float(12 - i)) for i in range(13)] if guess_mode else (
+ [strength] * 13)
+ samples, intermediates = ddim_sampler.sample(ddim_steps, num_samples,
+ shape, cond, verbose=False, eta=eta,
+ unconditional_guidance_scale=scale,
+ unconditional_conditioning=un_cond)
+
+ if config.save_memory:
+ model.low_vram_shift(is_diffusing=False)
+
+ x_samples = model.decode_first_stage(samples)
+        x_samples = (einops.rearrange(x_samples, 'b c h w -> b h w c') * 127.5 + 127.5).cpu().numpy().clip(0, 255).astype(np.uint8)
+
+ results = [x_samples[i] for i in range(num_samples)]
+ return [detected_map] + results
+
+
+def process_sketch(input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution, detect_resolution,
+ ddim_steps, guess_mode, strength, scale, seed, eta, condition_mode):
+ with torch.no_grad():
+ input_image = HWC3(input_image)
+ img = resize_image(input_image, image_resolution)
+ H, W, C = img.shape
+
+        if condition_mode:
+ detected_map = apply_hed(resize_image(input_image, detect_resolution))
+ detected_map = HWC3(detected_map)
+
+            # Convert the HED map into a sketch: threshold at a random value, invert, blur,
+            # and boost contrast; retry (up to 5 times) until enough dark strokes remain.
+ retry = 0
+ cnt = 0
+ while retry == 0:
+ threshold_value = np.random.randint(110, 160)
+ kernel_size = 3
+ alpha = 1.5
+ beta = 50
+ binary_image = cv2.threshold(detected_map, threshold_value, 255, cv2.THRESH_BINARY)[1]
+ inverted_image = cv2.bitwise_not(binary_image)
+ smoothed_image = cv2.GaussianBlur(inverted_image, (kernel_size, kernel_size), 0)
+ sketch_image = cv2.convertScaleAbs(smoothed_image, alpha=alpha, beta=beta)
+ if np.sum(sketch_image < 5) > 0.005 * sketch_image.shape[0] * sketch_image.shape[1] or cnt == 5:
+ retry = 1
+ else:
+ cnt += 1
+ detected_map = sketch_image
+ else:
+ detected_map = img
+
+ detected_map = cv2.resize(detected_map, (W, H), interpolation=cv2.INTER_LINEAR)
+
+ control = torch.from_numpy(detected_map.copy()).float().cuda() / 255.0
+ control = torch.stack([control for _ in range(num_samples)], dim=0)
+ control = einops.rearrange(control, 'b h w c -> b c h w').clone()
+
+ if seed == -1:
+ seed = random.randint(0, 65535)
+ seed_everything(seed)
+
+ if config.save_memory:
+ model.low_vram_shift(is_diffusing=False)
+
+ task = 'hedsketch'
+ task_dic = {}
+ task_dic['name'] = task_to_name[task]
+ task_instruction = name_to_instruction[task_dic['name']]
+ task_dic['feature'] = model.get_learned_conditioning(task_instruction)[:, :1, :]
+
+ cond = {"c_concat": [control],
+ "c_crossattn": [model.get_learned_conditioning([prompt + ', ' + a_prompt] * num_samples)],
+ "task": task_dic}
+
+ un_cond = {"c_concat": [control * 0] if guess_mode else [control],
+ "c_crossattn": [model.get_learned_conditioning([n_prompt] * num_samples)]}
+ shape = (4, H // 8, W // 8)
+
+ if config.save_memory:
+ model.low_vram_shift(is_diffusing=True)
+ model.control_scales = [strength * (0.825 ** float(12 - i)) for i in range(13)] if guess_mode else (
+ [strength] * 13)
+ samples, intermediates = ddim_sampler.sample(ddim_steps, num_samples,
+ shape, cond, verbose=False, eta=eta,
+ unconditional_guidance_scale=scale,
+ unconditional_conditioning=un_cond)
+
+ if config.save_memory:
+ model.low_vram_shift(is_diffusing=False)
+
+ x_samples = model.decode_first_stage(samples)
+        x_samples = (einops.rearrange(x_samples, 'b c h w -> b h w c') * 127.5 + 127.5).cpu().numpy().clip(0, 255).astype(np.uint8)
+
+ results = [x_samples[i] for i in range(num_samples)]
+ return [detected_map] + results
+
+
+def process_colorization(input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution, ddim_steps, guess_mode,
+ strength, scale, seed, eta, condition_mode):
+ with torch.no_grad():
+ input_image = HWC3(input_image)
+ img = resize_image(input_image, image_resolution)
+ H, W, C = img.shape
+        if condition_mode:
+ detected_map = grayscale(input_image, image_resolution)
+ detected_map = cv2.resize(detected_map, (W, H), interpolation=cv2.INTER_LINEAR)
+ detected_map = detected_map[:, :, np.newaxis]
+ detected_map = detected_map.repeat(3, axis=2)
+ else:
+ detected_map = img
+
+ control = torch.from_numpy(detected_map.copy()).float().cuda() / 255.0
+ control = torch.stack([control for _ in range(num_samples)], dim=0)
+ control = einops.rearrange(control, 'b h w c -> b c h w').clone()
+
+ if seed == -1:
+ seed = random.randint(0, 65535)
+ seed_everything(seed)
+
+ if config.save_memory:
+ model.low_vram_shift(is_diffusing=False)
+
+ task = 'grayscale'
+ task_dic = {}
+ task_dic['name'] = task_to_name[task]
+ task_instruction = name_to_instruction[task_dic['name']]
+ task_dic['feature'] = model.get_learned_conditioning(task_instruction)[:, :1, :]
+
+ cond = {"c_concat": [control],
+ "c_crossattn": [model.get_learned_conditioning([prompt + ', ' + a_prompt] * num_samples)],
+ "task": task_dic}
+
+ un_cond = {"c_concat": [control * 0] if guess_mode else [control],
+ "c_crossattn": [model.get_learned_conditioning([n_prompt] * num_samples)]}
+ shape = (4, H // 8, W // 8)
+
+ if config.save_memory:
+ model.low_vram_shift(is_diffusing=True)
+ model.control_scales = [strength * (0.825 ** float(12 - i)) for i in range(13)] if guess_mode else (
+ [strength] * 13)
+ samples, intermediates = ddim_sampler.sample(ddim_steps, num_samples,
+ shape, cond, verbose=False, eta=eta,
+ unconditional_guidance_scale=scale,
+ unconditional_conditioning=un_cond)
+
+ if config.save_memory:
+ model.low_vram_shift(is_diffusing=False)
+
+ x_samples = model.decode_first_stage(samples)
+        x_samples = (einops.rearrange(x_samples, 'b c h w -> b h w c') * 127.5 + 127.5).cpu().numpy().clip(0, 255).astype(np.uint8)
+
+ results = [x_samples[i] for i in range(num_samples)]
+ return [detected_map] + results
+
+
+def process_deblur(input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution, ddim_steps, guess_mode,
+ strength, scale, seed, eta, ksize, condition_mode):
+ with torch.no_grad():
+ input_image = HWC3(input_image)
+ img = resize_image(input_image, image_resolution)
+ H, W, C = img.shape
+        if condition_mode:
+ detected_map = blur(input_image, image_resolution, ksize)
+ else:
+ detected_map = img
+
+ detected_map = cv2.resize(detected_map, (W, H), interpolation=cv2.INTER_LINEAR)
+
+ control = torch.from_numpy(detected_map.copy()).float().cuda() / 255.0
+ control = torch.stack([control for _ in range(num_samples)], dim=0)
+ control = einops.rearrange(control, 'b h w c -> b c h w').clone()
+
+ if seed == -1:
+ seed = random.randint(0, 65535)
+ seed_everything(seed)
+
+ if config.save_memory:
+ model.low_vram_shift(is_diffusing=False)
+
+ task = 'blur'
+ task_dic = {}
+ task_dic['name'] = task_to_name[task]
+ task_instruction = name_to_instruction[task_dic['name']]
+ task_dic['feature'] = model.get_learned_conditioning(task_instruction)[:, :1, :]
+
+ cond = {"c_concat": [control],
+ "c_crossattn": [model.get_learned_conditioning([prompt + ', ' + a_prompt] * num_samples)],
+ "task": task_dic}
+ un_cond = {"c_concat": [control * 0] if guess_mode else [control],
+ "c_crossattn": [model.get_learned_conditioning([n_prompt] * num_samples)]}
+ shape = (4, H // 8, W // 8)
+
+ if config.save_memory:
+ model.low_vram_shift(is_diffusing=True)
+ model.control_scales = [strength * (0.825 ** float(12 - i)) for i in range(13)] if guess_mode else (
+ [strength] * 13)
+ samples, intermediates = ddim_sampler.sample(ddim_steps, num_samples,
+ shape, cond, verbose=False, eta=eta,
+ unconditional_guidance_scale=scale,
+ unconditional_conditioning=un_cond)
+
+ if config.save_memory:
+ model.low_vram_shift(is_diffusing=False)
+
+ x_samples = model.decode_first_stage(samples)
+        x_samples = (einops.rearrange(x_samples, 'b c h w -> b h w c') * 127.5 + 127.5).cpu().numpy().clip(0, 255).astype(np.uint8)
+
+ results = [x_samples[i] for i in range(num_samples)]
+ return [detected_map] + results
+
+
+def process_inpainting(input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution, ddim_steps, guess_mode,
+ strength, scale, seed, eta, h_ratio_t, h_ratio_d, w_ratio_l, w_ratio_r, condition_mode):
+ with torch.no_grad():
+ input_image = HWC3(input_image)
+ img = resize_image(input_image, image_resolution)
+ H, W, C = img.shape
+        if condition_mode:
+ detected_map = inpainting(input_image, image_resolution, h_ratio_t, h_ratio_d, w_ratio_l, w_ratio_r)
+ else:
+ detected_map = img
+ detected_map = cv2.resize(detected_map, (W, H), interpolation=cv2.INTER_LINEAR)
+
+ control = torch.from_numpy(detected_map.copy()).float().cuda() / 255.0
+ control = torch.stack([control for _ in range(num_samples)], dim=0)
+ control = einops.rearrange(control, 'b h w c -> b c h w').clone()
+
+ if seed == -1:
+ seed = random.randint(0, 65535)
+ seed_everything(seed)
+
+ if config.save_memory:
+ model.low_vram_shift(is_diffusing=False)
+
+ task = 'inpainting'
+ task_dic = {}
+ task_dic['name'] = task_to_name[task]
+ task_instruction = name_to_instruction[task_dic['name']]
+ task_dic['feature'] = model.get_learned_conditioning(task_instruction)[:, :1, :]
+
+ cond = {"c_concat": [control],
+ "c_crossattn": [model.get_learned_conditioning([prompt + ', ' + a_prompt] * num_samples)],
+ "task": task_dic}
+ un_cond = {"c_concat": [control * 0] if guess_mode else [control],
+ "c_crossattn": [model.get_learned_conditioning([n_prompt] * num_samples)]}
+ shape = (4, H // 8, W // 8)
+
+ if config.save_memory:
+ model.low_vram_shift(is_diffusing=True)
+ model.control_scales = [strength * (0.825 ** float(12 - i)) for i in range(13)] if guess_mode else (
+ [strength] * 13)
+ samples, intermediates = ddim_sampler.sample(ddim_steps, num_samples,
+ shape, cond, verbose=False, eta=eta,
+ unconditional_guidance_scale=scale,
+ unconditional_conditioning=un_cond)
+
+ if config.save_memory:
+ model.low_vram_shift(is_diffusing=False)
+
+ x_samples = model.decode_first_stage(samples)
+        x_samples = (einops.rearrange(x_samples, 'b c h w -> b h w c') * 127.5 + 127.5).cpu().numpy().clip(0, 255).astype(np.uint8)
+
+ results = [x_samples[i] for i in range(num_samples)]
+ return [detected_map] + results
+
+
+############################################################################################################
+
+
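+# Gradio UI: one tab per task. Each tab collects its task-specific options, packs them into
+# the ips list in the order its process_* handler expects, and wires the Run button to it.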
+demo = gr.Blocks()
+with demo:
+ #gr.Markdown("UniControl Stable Diffusion Demo")
+ gr.HTML(
+ """
+
+
+ UniControl Stable Diffusion Demo
+
+
+ Can Qin 1,2, Shu Zhang1, Ning Yu 1, Yihao Feng1, Xinyi Yang1, Yingbo Zhou 1, Huan Wang 1, Juan Carlos Niebles1, Caiming Xiong 1, Silvio Savarese 1, Stefano Ermon 3, Yun Fu 2, Ran Xu 1
+
+
+ 1 Salesforce AI 2 Northeastern University 3 Stanford University
+
+
+ Work done when Can Qin was an intern at Salesforce AI Research.
+
+
+ ONE compact model for ALL visual-condition-to-image generation tasks!
+ [Github]
+ [Website]
+ [arXiv]
+
+
+ """)
+
+ with gr.Tabs():
+ with gr.TabItem("Canny"):
+ with gr.Row():
+ gr.Markdown("## UniControl Stable Diffusion with Canny Edge Maps")
+ with gr.Row():
+ with gr.Column():
+ input_image = gr.Image(source='upload', type="numpy")
+ prompt = gr.Textbox(label="Prompt")
+ run_button = gr.Button(label="Run")
+ with gr.Accordion("Advanced options", open=False):
+ num_samples = gr.Slider(label="Images", minimum=1, maximum=12, value=1, step=1)
+ image_resolution = gr.Slider(label="Image Resolution", minimum=256, maximum=768, value=512,
+ step=64)
+ strength = gr.Slider(label="Control Strength", minimum=0.0, maximum=2.0, value=1.0, step=0.01)
+ condition_mode = gr.Checkbox(label='Condition Extraction: RGB -> Canny', value=True)
+ guess_mode = gr.Checkbox(label='Guess Mode', value=False)
+ low_threshold = gr.Slider(label="Canny low threshold", minimum=1, maximum=255, value=40, step=1)
+ high_threshold = gr.Slider(label="Canny high threshold", minimum=1, maximum=255, value=200,
+ step=1)
+ ddim_steps = gr.Slider(label="Steps", minimum=1, maximum=100, value=20, step=1)
+ scale = gr.Slider(label="Guidance Scale", minimum=0.1, maximum=30.0, value=9.0, step=0.1)
+ seed = gr.Slider(label="Seed", minimum=-1, maximum=2147483647, step=1, randomize=True)
+ eta = gr.Number(label="eta (DDIM)", value=0.0)
+ a_prompt = gr.Textbox(label="Added Prompt", value='best quality, extremely detailed, bright')
+ n_prompt = gr.Textbox(label="Negative Prompt", value='longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality')
+ with gr.Column():
+ result_gallery = gr.Gallery(label='Output', show_label=False, elem_id="gallery").style(grid=2,
+ height='auto')
+ ips = [input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution, ddim_steps, guess_mode,
+ strength, scale, seed, eta, low_threshold, high_threshold, condition_mode]
+ run_button.click(fn=process_canny, inputs=ips, outputs=[result_gallery])
+
+ with gr.TabItem("HED"):
+ with gr.Row():
+ gr.Markdown("## UniControl Stable Diffusion with HED Maps")
+ with gr.Row():
+ with gr.Column():
+ input_image = gr.Image(source='upload', type="numpy")
+ prompt = gr.Textbox(label="Prompt")
+ run_button = gr.Button(label="Run")
+ with gr.Accordion("Advanced options", open=False):
+ num_samples = gr.Slider(label="Images", minimum=1, maximum=12, value=1, step=1)
+ image_resolution = gr.Slider(label="Image Resolution", minimum=256, maximum=768, value=512,
+ step=64)
+ strength = gr.Slider(label="Control Strength", minimum=0.0, maximum=2.0, value=1.0, step=0.01)
+ condition_mode = gr.Checkbox(label='Condition Extraction: RGB -> HED', value=True)
+ guess_mode = gr.Checkbox(label='Guess Mode', value=False)
+ detect_resolution = gr.Slider(label="HED Resolution", minimum=128, maximum=1024, value=512,
+ step=1)
+ ddim_steps = gr.Slider(label="Steps", minimum=1, maximum=100, value=20, step=1)
+ scale = gr.Slider(label="Guidance Scale", minimum=0.1, maximum=30.0, value=9.0, step=0.1)
+ seed = gr.Slider(label="Seed", minimum=-1, maximum=2147483647, step=1, randomize=True)
+ eta = gr.Number(label="eta (DDIM)", value=0.0)
+ a_prompt = gr.Textbox(label="Added Prompt", value='best quality, extremely detailed, bright')
+ n_prompt = gr.Textbox(label="Negative Prompt", value='longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality')
+ with gr.Column():
+ result_gallery = gr.Gallery(label='Output', show_label=False, elem_id="gallery").style(grid=2,
+ height='auto')
+ ips = [input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution, detect_resolution,
+ ddim_steps, guess_mode, strength, scale, seed, eta, condition_mode]
+ run_button.click(fn=process_hed, inputs=ips, outputs=[result_gallery])
+
+ with gr.TabItem("Sketch"):
+ with gr.Row():
+ gr.Markdown("## UniControl Stable Diffusion with Sketch Maps")
+ with gr.Row():
+ with gr.Column():
+ input_image = gr.Image(source='upload', type="numpy")
+ prompt = gr.Textbox(label="Prompt")
+ run_button = gr.Button(label="Run")
+ with gr.Accordion("Advanced options", open=False):
+ num_samples = gr.Slider(label="Images", minimum=1, maximum=12, value=1, step=1)
+ image_resolution = gr.Slider(label="Image Resolution", minimum=256, maximum=768, value=512,
+ step=64)
+ strength = gr.Slider(label="Control Strength", minimum=0.0, maximum=2.0, value=1.0, step=0.01)
+ condition_mode = gr.Checkbox(label='Condition Extraction: RGB -> Sketch', value=False)
+ guess_mode = gr.Checkbox(label='Guess Mode', value=False)
+ detect_resolution = gr.Slider(label="HED Resolution", minimum=128, maximum=1024, value=512,
+ step=1)
+ ddim_steps = gr.Slider(label="Steps", minimum=1, maximum=100, value=20, step=1)
+ scale = gr.Slider(label="Guidance Scale", minimum=0.1, maximum=30.0, value=9.0, step=0.1)
+ seed = gr.Slider(label="Seed", minimum=-1, maximum=2147483647, step=1, randomize=True)
+ eta = gr.Number(label="eta (DDIM)", value=0.0)
+ a_prompt = gr.Textbox(label="Added Prompt", value='best quality, extremely detailed')
+ n_prompt = gr.Textbox(label="Negative Prompt", value='longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality')
+ with gr.Column():
+ result_gallery = gr.Gallery(label='Output', show_label=False, elem_id="gallery").style(grid=2,
+ height='auto')
+ ips = [input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution, detect_resolution,
+ ddim_steps, guess_mode, strength, scale, seed, eta, condition_mode]
+ run_button.click(fn=process_sketch, inputs=ips, outputs=[result_gallery])
+
+ with gr.TabItem("Depth"):
+ with gr.Row():
+ gr.Markdown("## UniControl Stable Diffusion with Depth Maps")
+ with gr.Row():
+ with gr.Column():
+ input_image = gr.Image(source='upload', type="numpy")
+ prompt = gr.Textbox(label="Prompt")
+ run_button = gr.Button(label="Run")
+ with gr.Accordion("Advanced options", open=False):
+ num_samples = gr.Slider(label="Images", minimum=1, maximum=12, value=1, step=1)
+ image_resolution = gr.Slider(label="Image Resolution", minimum=256, maximum=768, value=512,
+ step=64)
+ strength = gr.Slider(label="Control Strength", minimum=0.0, maximum=2.0, value=1.0, step=0.01)
+ condition_mode = gr.Checkbox(label='Condition Extraction: RGB -> Depth', value=True)
+ guess_mode = gr.Checkbox(label='Guess Mode', value=False)
+ detect_resolution = gr.Slider(label="Depth Resolution", minimum=128, maximum=1024, value=384,
+ step=1)
+ ddim_steps = gr.Slider(label="Steps", minimum=1, maximum=100, value=20, step=1)
+ scale = gr.Slider(label="Guidance Scale", minimum=0.1, maximum=30.0, value=9.0, step=0.1)
+ seed = gr.Slider(label="Seed", minimum=-1, maximum=2147483647, step=1, randomize=True)
+ eta = gr.Number(label="eta (DDIM)", value=0.0)
+ a_prompt = gr.Textbox(label="Added Prompt", value='best quality, extremely detailed, bright')
+ n_prompt = gr.Textbox(label="Negative Prompt", value='longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality')
+ with gr.Column():
+ result_gallery = gr.Gallery(label='Output', show_label=False, elem_id="gallery").style(grid=2,
+ height='auto')
+ ips = [input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution, detect_resolution,
+ ddim_steps, guess_mode, strength, scale, seed, eta, condition_mode]
+ run_button.click(fn=process_depth, inputs=ips, outputs=[result_gallery])
+
+ with gr.TabItem("Normal"):
+ with gr.Row():
+ gr.Markdown("## UniControl Stable Diffusion with Normal Surface")
+ with gr.Row():
+ with gr.Column():
+ input_image = gr.Image(source='upload', type="numpy")
+ prompt = gr.Textbox(label="Prompt")
+ run_button = gr.Button(label="Run")
+ with gr.Accordion("Advanced options", open=False):
+ num_samples = gr.Slider(label="Images", minimum=1, maximum=12, value=1, step=1)
+ image_resolution = gr.Slider(label="Image Resolution", minimum=256, maximum=768, value=512,
+ step=64)
+ strength = gr.Slider(label="Control Strength", minimum=0.0, maximum=2.0, value=1.0, step=0.01)
+ condition_mode = gr.Checkbox(label='Condition Extraction: RGB -> Normal', value=True)
+ guess_mode = gr.Checkbox(label='Guess Mode', value=False)
+ detect_resolution = gr.Slider(label="Depth Resolution", minimum=128, maximum=1024, value=384,
+ step=1)
+ ddim_steps = gr.Slider(label="Steps", minimum=1, maximum=100, value=20, step=1)
+ scale = gr.Slider(label="Guidance Scale", minimum=0.1, maximum=30.0, value=9.0, step=0.1)
+ seed = gr.Slider(label="Seed", minimum=-1, maximum=2147483647, step=1, randomize=True)
+ eta = gr.Number(label="eta (DDIM)", value=0.0)
+ a_prompt = gr.Textbox(label="Added Prompt", value='best quality, extremely detailed, bright')
+ n_prompt = gr.Textbox(label="Negative Prompt", value='longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality')
+ with gr.Column():
+ result_gallery = gr.Gallery(label='Output', show_label=False, elem_id="gallery").style(grid=2,
+ height='auto')
+ ips = [input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution, detect_resolution,
+ ddim_steps, guess_mode, strength, scale, seed, eta, condition_mode]
+ run_button.click(fn=process_normal, inputs=ips, outputs=[result_gallery])
+
+ with gr.TabItem("Human Pose"):
+ with gr.Row():
+ gr.Markdown("## UniControl Stable Diffusion with Human Pose")
+ with gr.Row():
+ with gr.Column():
+ input_image = gr.Image(source='upload', type="numpy")
+ prompt = gr.Textbox(label="Prompt")
+ run_button = gr.Button(label="Run")
+ with gr.Accordion("Advanced options", open=False):
+ num_samples = gr.Slider(label="Images", minimum=1, maximum=12, value=1, step=1)
+ image_resolution = gr.Slider(label="Image Resolution", minimum=256, maximum=768, value=512,
+ step=64)
+ strength = gr.Slider(label="Control Strength", minimum=0.0, maximum=2.0, value=1.0, step=0.01)
+ condition_mode = gr.Checkbox(label='Condition Extraction: RGB -> Skeleton', value=True)
+ guess_mode = gr.Checkbox(label='Guess Mode', value=False)
+ detect_resolution = gr.Slider(label="OpenPose Resolution", minimum=128, maximum=1024, value=512,
+ step=1)
+ ddim_steps = gr.Slider(label="Steps", minimum=1, maximum=100, value=20, step=1)
+ scale = gr.Slider(label="Guidance Scale", minimum=0.1, maximum=30.0, value=9.0, step=0.1)
+ seed = gr.Slider(label="Seed", minimum=-1, maximum=2147483647, step=1, randomize=True)
+ eta = gr.Number(label="eta (DDIM)", value=0.0)
+ a_prompt = gr.Textbox(label="Added Prompt", value='best quality, extremely detailed, bright')
+ n_prompt = gr.Textbox(label="Negative Prompt", value='longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality')
+ with gr.Column():
+ result_gallery = gr.Gallery(label='Output', show_label=False, elem_id="gallery").style(grid=2,
+ height='auto')
+ ips = [input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution, detect_resolution,
+ ddim_steps, guess_mode, strength, scale, seed, eta, condition_mode]
+ run_button.click(fn=process_pose, inputs=ips, outputs=[result_gallery])
+
+ with gr.TabItem("Segmentation"):
+ with gr.Row():
+ gr.Markdown("## UniControl Stable Diffusion with Segmentation Maps (ADE20K)")
+ with gr.Row():
+ with gr.Column():
+ input_image = gr.Image(source='upload', type="numpy")
+ prompt = gr.Textbox(label="Prompt")
+ run_button = gr.Button(label="Run")
+ with gr.Accordion("Advanced options", open=False):
+ num_samples = gr.Slider(label="Images", minimum=1, maximum=12, value=1, step=1)
+ image_resolution = gr.Slider(label="Image Resolution", minimum=256, maximum=768, value=512,
+ step=64)
+ strength = gr.Slider(label="Control Strength", minimum=0.0, maximum=2.0, value=1.0, step=0.01)
+ condition_mode = gr.Checkbox(label='Condition Extraction: RGB -> Seg', value=True)
+ guess_mode = gr.Checkbox(label='Guess Mode', value=False)
+ detect_resolution = gr.Slider(label="Segmentation Resolution", minimum=128, maximum=1024,
+ value=512, step=1)
+ ddim_steps = gr.Slider(label="Steps", minimum=1, maximum=100, value=20, step=1)
+ scale = gr.Slider(label="Guidance Scale", minimum=0.1, maximum=30.0, value=9.0, step=0.1)
+ seed = gr.Slider(label="Seed", minimum=-1, maximum=2147483647, step=1, randomize=True)
+ eta = gr.Number(label="eta (DDIM)", value=0.0)
+ a_prompt = gr.Textbox(label="Added Prompt", value='best quality, extremely detailed, bright')
+ n_prompt = gr.Textbox(label="Negative Prompt", value='longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality')
+ with gr.Column():
+ result_gallery = gr.Gallery(label='Output', show_label=False, elem_id="gallery").style(grid=2,
+ height='auto')
+ ips = [input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution, detect_resolution,
+ ddim_steps, guess_mode, strength, scale, seed, eta, condition_mode]
+ run_button.click(fn=process_seg, inputs=ips, outputs=[result_gallery])
+
+ with gr.TabItem("Bbox"):
+ with gr.Row():
+ gr.Markdown("## UniControl Stable Diffusion with Object Bounding Boxes (MS-COCO)")
+ with gr.Row():
+ with gr.Column():
+ input_image = gr.Image(source='upload', type="numpy")
+ prompt = gr.Textbox(label="Prompt")
+ run_button = gr.Button(label="Run")
+ with gr.Accordion("Advanced options", open=False):
+ num_samples = gr.Slider(label="Images", minimum=1, maximum=12, value=1, step=1)
+ image_resolution = gr.Slider(label="Image Resolution", minimum=256, maximum=768, value=512,
+ step=64)
+ strength = gr.Slider(label="Control Strength", minimum=0.0, maximum=2.0, value=1.0, step=0.01)
+ condition_mode = gr.Checkbox(label='Condition Extraction: RGB -> Bbox', value=True)
+ guess_mode = gr.Checkbox(label='Guess Mode', value=False)
+ confidence = gr.Slider(label="Confidence of Detection", minimum=0.1, maximum=1.0, value=0.4,
+ step=0.1)
+ nms_thresh = gr.Slider(label="Nms Threshold", minimum=0.1, maximum=1.0, value=0.5, step=0.1)
+ ddim_steps = gr.Slider(label="Steps", minimum=1, maximum=100, value=20, step=1)
+ scale = gr.Slider(label="Guidance Scale", minimum=0.1, maximum=30.0, value=9.0, step=0.1)
+ seed = gr.Slider(label="Seed", minimum=-1, maximum=2147483647, step=1, randomize=True)
+ eta = gr.Number(label="eta (DDIM)", value=0.0)
+ a_prompt = gr.Textbox(label="Added Prompt", value='best quality, extremely detailed, bright')
+ n_prompt = gr.Textbox(label="Negative Prompt", value='longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality')
+ with gr.Column():
+ result_gallery = gr.Gallery(label='Output', show_label=False, elem_id="gallery").style(grid=2,
+ height='auto')
+ ips = [input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution, ddim_steps, guess_mode,
+ strength, scale, seed, eta, confidence, nms_thresh, condition_mode]
+ run_button.click(fn=process_bbox, inputs=ips, outputs=[result_gallery])
+
+ with gr.TabItem("Outpainting"):
+ with gr.Row():
+ gr.Markdown("## UniControl Stable Diffusion with Image Outpainting")
+ with gr.Row():
+ with gr.Column():
+ input_image = gr.Image(source='upload', type="numpy")
+ prompt = gr.Textbox(label="Prompt")
+ run_button = gr.Button(label="Run")
+ with gr.Accordion("Advanced options", open=False):
+ num_samples = gr.Slider(label="Images", minimum=1, maximum=12, value=1, step=1)
+ image_resolution = gr.Slider(label="Image Resolution", minimum=256, maximum=768, value=512,
+ step=64)
+ strength = gr.Slider(label="Control Strength", minimum=0.0, maximum=2.0, value=1.0, step=0.01)
+ condition_mode = gr.Checkbox(label='Condition Extraction: Extending', value=False)
+ guess_mode = gr.Checkbox(label='Guess Mode', value=False)
+
+ height_top_extended = gr.Slider(label="Top Extended Ratio (%)", minimum=1, maximum=200,
+ value=50, step=1)
+ height_down_extended = gr.Slider(label="Down Extended Ratio (%)", minimum=1, maximum=200,
+ value=50, step=1)
+
+ width_left_extended = gr.Slider(label="Left Extended Ratio (%)", minimum=1, maximum=200,
+ value=50, step=1)
+ width_right_extended = gr.Slider(label="Right Extended Ratio (%)", minimum=1, maximum=200,
+ value=50, step=1)
+
+ ddim_steps = gr.Slider(label="Steps", minimum=1, maximum=100, value=20, step=1)
+ scale = gr.Slider(label="Guidance Scale", minimum=0.1, maximum=30.0, value=9.0, step=0.1)
+ seed = gr.Slider(label="Seed", minimum=-1, maximum=2147483647, step=1, randomize=True)
+ eta = gr.Number(label="eta (DDIM)", value=0.0)
+ a_prompt = gr.Textbox(label="Added Prompt", value='best quality, extremely detailed')
+ n_prompt = gr.Textbox(label="Negative Prompt", value='')
+ with gr.Column():
+ result_gallery = gr.Gallery(label='Output', show_label=False, elem_id="gallery").style(grid=2,
+ height='auto')
+ ips = [input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution, ddim_steps, guess_mode,
+ strength, scale, seed, eta, height_top_extended, height_down_extended, width_left_extended, width_right_extended, condition_mode]
+ run_button.click(fn=process_outpainting, inputs=ips, outputs=[result_gallery])
+
+ with gr.TabItem("Inpainting"):
+ with gr.Row():
+ gr.Markdown("## UniControl Stable Diffusion with Image Inpainting")
+ with gr.Row():
+ with gr.Column():
+ input_image = gr.Image(source='upload', type="numpy")
+ prompt = gr.Textbox(label="Prompt")
+ run_button = gr.Button(label="Run")
+ with gr.Accordion("Advanced options", open=False):
+ num_samples = gr.Slider(label="Images", minimum=1, maximum=12, value=1, step=1)
+ image_resolution = gr.Slider(label="Image Resolution", minimum=256, maximum=768, value=512,
+ step=64)
+ strength = gr.Slider(label="Control Strength", minimum=0.0, maximum=2.0, value=1.0, step=0.01)
+ condition_mode = gr.Checkbox(label='Condition Extraction: Cropped Masking', value=False)
+ guess_mode = gr.Checkbox(label='Guess Mode', value=False)
+ h_ratio_t = gr.Slider(label="Top Masking Ratio (%)", minimum=0, maximum=100, value=30,
+ step=1)
+ h_ratio_d = gr.Slider(label="Down Masking Ratio (%)", minimum=0, maximum=100, value=60,
+ step=1)
+ w_ratio_l = gr.Slider(label="Left Masking Ratio (%)", minimum=0, maximum=100, value=30,
+ step=1)
+ w_ratio_r = gr.Slider(label="Right Masking Ratio (%)", minimum=0, maximum=100, value=60,
+ step=1)
+ ddim_steps = gr.Slider(label="Steps", minimum=1, maximum=100, value=20, step=1)
+ scale = gr.Slider(label="Guidance Scale", minimum=0.1, maximum=30.0, value=9.0, step=0.1)
+ seed = gr.Slider(label="Seed", minimum=-1, maximum=2147483647, step=1, randomize=True)
+ eta = gr.Number(label="eta (DDIM)", value=0.0)
+ a_prompt = gr.Textbox(label="Added Prompt", value='best quality, extremely detailed')
+ n_prompt = gr.Textbox(label="Negative Prompt", value='')
+ with gr.Column():
+ result_gallery = gr.Gallery(label='Output', show_label=False, elem_id="gallery").style(grid=2,
+ height='auto')
+ ips = [input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution, ddim_steps, guess_mode,
+ strength, scale, seed, eta, h_ratio_t, h_ratio_d, w_ratio_l, w_ratio_r, condition_mode]
+ run_button.click(fn=process_inpainting, inputs=ips, outputs=[result_gallery])
+
+ with gr.TabItem("Colorization"):
+ with gr.Row():
+ gr.Markdown("## UniControl Stable Diffusion with Gray Image Colorization")
+ with gr.Row():
+ with gr.Column():
+ input_image = gr.Image(source='upload', type="numpy")
+ prompt = gr.Textbox(label="Prompt")
+ run_button = gr.Button(label="Run")
+ with gr.Accordion("Advanced options", open=False):
+ num_samples = gr.Slider(label="Images", minimum=1, maximum=12, value=1, step=1)
+ image_resolution = gr.Slider(label="Image Resolution", minimum=256, maximum=768, value=512,
+ step=64)
+ strength = gr.Slider(label="Control Strength", minimum=0.0, maximum=2.0, value=1.0, step=0.01)
+ condition_mode = gr.Checkbox(label='Condition Extraction: RGB -> Gray', value=False)
+ guess_mode = gr.Checkbox(label='Guess Mode', value=False)
+ ddim_steps = gr.Slider(label="Steps", minimum=1, maximum=100, value=20, step=1)
+ scale = gr.Slider(label="Guidance Scale", minimum=0.1, maximum=30.0, value=9.0, step=0.1)
+ seed = gr.Slider(label="Seed", minimum=-1, maximum=2147483647, step=1, randomize=True)
+ eta = gr.Number(label="eta (DDIM)", value=0.0)
+ a_prompt = gr.Textbox(label="Added Prompt", value='best quality, extremely detailed, colorful')
+ n_prompt = gr.Textbox(label="Negative Prompt", value='')
+ with gr.Column():
+ result_gallery = gr.Gallery(label='Output', show_label=False, elem_id="gallery").style(grid=2,
+ height='auto')
+ ips = [input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution, ddim_steps, guess_mode,
+ strength, scale, seed, eta, condition_mode]
+ run_button.click(fn=process_colorization, inputs=ips, outputs=[result_gallery])
+
+ with gr.TabItem("Deblurring"):
+ with gr.Row():
+ gr.Markdown("## UniControl Stable Diffusion with Image Deblurring")
+ with gr.Row():
+ with gr.Column():
+ input_image = gr.Image(source='upload', type="numpy")
+ prompt = gr.Textbox(label="Prompt")
+ run_button = gr.Button(label="Run")
+ with gr.Accordion("Advanced options", open=False):
+ num_samples = gr.Slider(label="Images", minimum=1, maximum=12, value=1, step=1)
+ image_resolution = gr.Slider(label="Image Resolution", minimum=256, maximum=768, value=512,
+ step=64)
+ strength = gr.Slider(label="Control Strength", minimum=0.0, maximum=2.0, value=1.0, step=0.01)
+ condition_mode = gr.Checkbox(label='Condition Extraction: RGB -> Blur', value=False)
+ guess_mode = gr.Checkbox(label='Guess Mode', value=False)
+ ksize = gr.Slider(label="Kernel Size", minimum=11, maximum=101, value=51, step=2)
+ ddim_steps = gr.Slider(label="Steps", minimum=1, maximum=100, value=20, step=1)
+ scale = gr.Slider(label="Guidance Scale", minimum=0.1, maximum=30.0, value=9.0, step=0.1)
+ seed = gr.Slider(label="Seed", minimum=-1, maximum=2147483647, step=1, randomize=True)
+ eta = gr.Number(label="eta (DDIM)", value=0.0)
+ a_prompt = gr.Textbox(label="Added Prompt", value='best quality, extremely detailed')
+ n_prompt = gr.Textbox(label="Negative Prompt", value='')
+ with gr.Column():
+ result_gallery = gr.Gallery(label='Output', show_label=False, elem_id="gallery").style(grid=2,
+ height='auto')
+ ips = [input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution, ddim_steps, guess_mode,
+ strength, scale, seed, eta, ksize, condition_mode]
+ run_button.click(fn=process_deblur, inputs=ips, outputs=[result_gallery])
+
+
+ gr.Markdown('''### Tips
+    - Please pay attention to the Condition Extraction option.
+    - The Added Prompt and Negative Prompt are often very helpful.
+ ''')
+ gr.Markdown('''### Related Spaces
+ - https://huggingface.co/spaces/hysts/ControlNet
+ - https://huggingface.co/spaces/shi-labs/Prompt-Free-Diffusion
+ ''')
+demo.launch()
diff --git a/annotator/__pycache__/util.cpython-310.pyc b/annotator/__pycache__/util.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b02ccc896bf50a987f3d5de6eb67ec69385cbce5
Binary files /dev/null and b/annotator/__pycache__/util.cpython-310.pyc differ
diff --git a/annotator/__pycache__/util.cpython-38.pyc b/annotator/__pycache__/util.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..200bf5166ad0db0400ee7d62e4be4349c2cd4b86
Binary files /dev/null and b/annotator/__pycache__/util.cpython-38.pyc differ
diff --git a/annotator/blur/__init__.py b/annotator/blur/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..2ecb3183424a080ac1763fad4de81974908ea532
--- /dev/null
+++ b/annotator/blur/__init__.py
@@ -0,0 +1,7 @@
+import cv2
+
+class Blurrer:
+ def __call__(self, img, ksize):
+ img_new = cv2.GaussianBlur(img, (ksize, ksize), cv2.BORDER_DEFAULT)
+ img_new = img_new.astype('ubyte')
+ return img_new
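+
+# --- Illustrative usage sketch (not part of the upstream module) ---
+# cv2.GaussianBlur requires an odd, positive kernel size, which is why the demo's
+# "Kernel Size" slider starts at 11 and steps by 2. Note that cv2.BORDER_DEFAULT (== 4)
+# is passed positionally as sigmaX above, so the blur effectively uses a Gaussian sigma of 4.
+if __name__ == "__main__":
+    import numpy as np
+    blurrer = Blurrer()
+    dummy = (np.random.rand(64, 64, 3) * 255).astype('ubyte')
+    blurred = blurrer(dummy, ksize=51)  # ksize must be odd
+    print(blurred.shape, blurred.dtype)  # (64, 64, 3) uint8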
diff --git a/annotator/blur/__pycache__/__init__.cpython-310.pyc b/annotator/blur/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..56a8f63b309ac4e91caeaba9d89abcc03fbc7c3c
Binary files /dev/null and b/annotator/blur/__pycache__/__init__.cpython-310.pyc differ
diff --git a/annotator/blur/__pycache__/__init__.cpython-38.pyc b/annotator/blur/__pycache__/__init__.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..860f16c258679270e99fa5ebcf752f33fbb6b51e
Binary files /dev/null and b/annotator/blur/__pycache__/__init__.cpython-38.pyc differ
diff --git a/annotator/canny/__init__.py b/annotator/canny/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..1bcdaf9e72d29bd86d0965e051366381633a5003
--- /dev/null
+++ b/annotator/canny/__init__.py
@@ -0,0 +1,16 @@
+'''
+ * Copyright (c) 2023 Salesforce, Inc.
+ * All rights reserved.
+ * SPDX-License-Identifier: Apache License 2.0
+ * For full license text, see LICENSE.txt file in the repo root or http://www.apache.org/licenses/
+ * By Can Qin
+ * Modified from ControlNet repo: https://github.com/lllyasviel/ControlNet
+ * Copyright (c) 2023 Lvmin Zhang and Maneesh Agrawala
+'''
+
+import cv2
+
+
+class CannyDetector:
+ def __call__(self, img, low_threshold, high_threshold):
+ return cv2.Canny(img, low_threshold, high_threshold)
diff --git a/annotator/canny/__pycache__/__init__.cpython-310.pyc b/annotator/canny/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6fabb59cd2353ae778cebc8d5c2b962d74badf8a
Binary files /dev/null and b/annotator/canny/__pycache__/__init__.cpython-310.pyc differ
diff --git a/annotator/canny/__pycache__/__init__.cpython-38.pyc b/annotator/canny/__pycache__/__init__.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6fe438df524dbe125e7bf29bb2b5d96ecf172673
Binary files /dev/null and b/annotator/canny/__pycache__/__init__.cpython-38.pyc differ
diff --git a/annotator/ckpts/ckpts.txt b/annotator/ckpts/ckpts.txt
new file mode 100644
index 0000000000000000000000000000000000000000..1978551fb2a9226814eaf58459f414fcfac4e69b
--- /dev/null
+++ b/annotator/ckpts/ckpts.txt
@@ -0,0 +1 @@
+Weights here.
\ No newline at end of file
diff --git a/annotator/grayscale/__init__.py b/annotator/grayscale/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..5964787b692e0c662ed3f98a648a2c79fe8a219b
--- /dev/null
+++ b/annotator/grayscale/__init__.py
@@ -0,0 +1,5 @@
+from skimage import color
+
+class GrayscaleConverter:
+ def __call__(self, img):
+ return (color.rgb2gray(img) * 255.0).astype('ubyte')
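+
+# --- Illustrative usage sketch (not part of the upstream module) ---
+# skimage.color.rgb2gray returns a float image in [0, 1], so the converter yields a
+# single-channel uint8 map; a caller would typically stack it back to 3 channels
+# before using it as a condition image.
+if __name__ == "__main__":
+    import numpy as np
+    converter = GrayscaleConverter()
+    dummy = (np.random.rand(64, 64, 3) * 255).astype('ubyte')
+    gray = converter(dummy)
+    print(gray.shape, gray.dtype)  # (64, 64) uint8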
diff --git a/annotator/grayscale/__pycache__/__init__.cpython-310.pyc b/annotator/grayscale/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..be1f10dffda17c67d795dd39e7317d649b25af09
Binary files /dev/null and b/annotator/grayscale/__pycache__/__init__.cpython-310.pyc differ
diff --git a/annotator/grayscale/__pycache__/__init__.cpython-38.pyc b/annotator/grayscale/__pycache__/__init__.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e4998b6926b1386952367a9b2b5cfaf7b35c1b2d
Binary files /dev/null and b/annotator/grayscale/__pycache__/__init__.cpython-38.pyc differ
diff --git a/annotator/hed/__init__.py b/annotator/hed/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..edfc2927b50cdfb42f7cbfdc78300238a67bf9df
--- /dev/null
+++ b/annotator/hed/__init__.py
@@ -0,0 +1,107 @@
+'''
+ * Copyright (c) 2023 Salesforce, Inc.
+ * All rights reserved.
+ * SPDX-License-Identifier: Apache License 2.0
+ * For full license text, see LICENSE.txt file in the repo root or http://www.apache.org/licenses/
+ * By Can Qin
+ * Modified from ControlNet repo: https://github.com/lllyasviel/ControlNet
+ * Copyright (c) 2023 Lvmin Zhang and Maneesh Agrawala
+'''
+
+# This is an improved version and model of HED edge detection without GPL contamination.
+# Please use this implementation in your products.
+# This implementation may produce slightly different results from Saining Xie's official implementation,
+# but it generates smoother edges and is better suited to ControlNet and other image-to-image translation tasks.
+# Unlike the official models and other implementations, this model takes RGB input (rather than BGR),
+# which makes it work better with Gradio's RGB convention.
+
+import os
+import cv2
+import torch
+import numpy as np
+
+from einops import rearrange
+from annotator.util import annotator_ckpts_path
+
+
+class DoubleConvBlock(torch.nn.Module):
+ def __init__(self, input_channel, output_channel, layer_number):
+ super().__init__()
+ self.convs = torch.nn.Sequential()
+ self.convs.append(torch.nn.Conv2d(in_channels=input_channel, out_channels=output_channel, kernel_size=(3, 3), stride=(1, 1), padding=1))
+ for i in range(1, layer_number):
+ self.convs.append(torch.nn.Conv2d(in_channels=output_channel, out_channels=output_channel, kernel_size=(3, 3), stride=(1, 1), padding=1))
+ self.projection = torch.nn.Conv2d(in_channels=output_channel, out_channels=1, kernel_size=(1, 1), stride=(1, 1), padding=0)
+
+ def __call__(self, x, down_sampling=False):
+ h = x
+ if down_sampling:
+ h = torch.nn.functional.max_pool2d(h, kernel_size=(2, 2), stride=(2, 2))
+ for conv in self.convs:
+ h = conv(h)
+ h = torch.nn.functional.relu(h)
+ return h, self.projection(h)
+
+
+class ControlNetHED_Apache2(torch.nn.Module):
+ def __init__(self):
+ super().__init__()
+ self.norm = torch.nn.Parameter(torch.zeros(size=(1, 3, 1, 1)))
+ self.block1 = DoubleConvBlock(input_channel=3, output_channel=64, layer_number=2)
+ self.block2 = DoubleConvBlock(input_channel=64, output_channel=128, layer_number=2)
+ self.block3 = DoubleConvBlock(input_channel=128, output_channel=256, layer_number=3)
+ self.block4 = DoubleConvBlock(input_channel=256, output_channel=512, layer_number=3)
+ self.block5 = DoubleConvBlock(input_channel=512, output_channel=512, layer_number=3)
+
+ def __call__(self, x):
+ h = x - self.norm
+ h, projection1 = self.block1(h)
+ h, projection2 = self.block2(h, down_sampling=True)
+ h, projection3 = self.block3(h, down_sampling=True)
+ h, projection4 = self.block4(h, down_sampling=True)
+ h, projection5 = self.block5(h, down_sampling=True)
+ return projection1, projection2, projection3, projection4, projection5
+
+
+class HEDdetector:
+ def __init__(self):
+ remote_model_path = "https://huggingface.co/lllyasviel/Annotators/resolve/main/ControlNetHED.pth"
+        modelpath = os.path.join(annotator_ckpts_path, "ControlNetHED.pth")
+ if not os.path.exists(modelpath):
+ from basicsr.utils.download_util import load_file_from_url
+ load_file_from_url(remote_model_path, model_dir=annotator_ckpts_path)
+ self.netNetwork = ControlNetHED_Apache2().float().cuda().eval()
+ self.netNetwork.load_state_dict(torch.load(modelpath))
+
+ def __call__(self, input_image):
+ assert input_image.ndim == 3
+ H, W, C = input_image.shape
+ with torch.no_grad():
+ image_hed = torch.from_numpy(input_image.copy()).float().cuda()
+ image_hed = rearrange(image_hed, 'h w c -> 1 c h w')
+ edges = self.netNetwork(image_hed)
+ edges = [e.detach().cpu().numpy().astype(np.float32)[0, 0] for e in edges]
+ edges = [cv2.resize(e, (W, H), interpolation=cv2.INTER_LINEAR) for e in edges]
+ edges = np.stack(edges, axis=2)
+ edge = 1 / (1 + np.exp(-np.mean(edges, axis=2).astype(np.float64)))
+ edge = (edge * 255.0).clip(0, 255).astype(np.uint8)
+ return edge
+
+
+def nms(x, t, s):
+ x = cv2.GaussianBlur(x.astype(np.float32), (0, 0), s)
+
+ f1 = np.array([[0, 0, 0], [1, 1, 1], [0, 0, 0]], dtype=np.uint8)
+ f2 = np.array([[0, 1, 0], [0, 1, 0], [0, 1, 0]], dtype=np.uint8)
+ f3 = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype=np.uint8)
+ f4 = np.array([[0, 0, 1], [0, 1, 0], [1, 0, 0]], dtype=np.uint8)
+
+ y = np.zeros_like(x)
+
+ for f in [f1, f2, f3, f4]:
+ np.putmask(y, cv2.dilate(x, kernel=f) == x, x)
+
+ z = np.zeros_like(y, dtype=np.uint8)
+ z[y > t] = 255
+ return z
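+
+
+# --- Illustrative sketch of the scribble-style post-processing (not part of the upstream
+# module; run with the annotator package importable; HEDdetector itself additionally needs
+# a CUDA device and the ControlNetHED.pth checkpoint downloaded above) ---
+# HEDdetector maps an HxWx3 uint8 RGB image to an HxW uint8 soft edge map; nms(x, t, s)
+# then blurs that map with Gaussian sigma s, keeps only pixels that are maxima of a
+# dilation along four line-shaped kernels, and binarizes at threshold t.
+if __name__ == "__main__":
+    demo_edges = np.zeros((64, 64), dtype=np.float32)
+    demo_edges[30:35, :] = 255.0  # a synthetic horizontal edge band
+    thin = nms(demo_edges, t=127, s=1.0)
+    print(thin.shape, thin.dtype)  # (64, 64) uint8, values 0 or 255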
diff --git a/annotator/hed/__pycache__/__init__.cpython-310.pyc b/annotator/hed/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5d1e182e0ae22f6da0a007be483d7264cdc5d9cb
Binary files /dev/null and b/annotator/hed/__pycache__/__init__.cpython-310.pyc differ
diff --git a/annotator/hed/__pycache__/__init__.cpython-38.pyc b/annotator/hed/__pycache__/__init__.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..66bed9a1fb8ca12311a8af0a4c6af19588d48783
Binary files /dev/null and b/annotator/hed/__pycache__/__init__.cpython-38.pyc differ
diff --git a/annotator/inpainting/__init__.py b/annotator/inpainting/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..c84462036d7ad95500d7021035d6fa822ccefbef
--- /dev/null
+++ b/annotator/inpainting/__init__.py
@@ -0,0 +1,16 @@
+import numpy as np
+
+class Inpainter:
+ def __call__(self, img, height_top_mask, height_down_mask, width_left_mask, width_right_mask):
+ h = img.shape[0]
+ w = img.shape[1]
+ h_top_mask = int(float(h) / 100.0 * float(height_top_mask))
+ h_down_mask = int(float(h) / 100.0 * float(height_down_mask))
+
+ w_left_mask = int(float(w) / 100.0 * float(width_left_mask))
+ w_right_mask = int(float(w) / 100.0 * float(width_right_mask))
+
+ img_new = img
+ img_new[h_top_mask:h_down_mask, w_left_mask:w_right_mask] = 0
+ img_new = img_new.astype('ubyte')
+ return img_new
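+
+# --- Illustrative usage sketch (not part of the upstream module) ---
+# The four arguments are percentages of the image height/width, and the rectangle
+# [h*top/100 : h*down/100, w*left/100 : w*right/100] is zeroed out. Note that the input
+# array is modified in place: img_new aliases img, and only the returned array is a copy.
+if __name__ == "__main__":
+    dummy = np.full((100, 200, 3), 255, dtype=np.uint8)
+    masked = Inpainter()(dummy, 30, 60, 30, 60)
+    print(masked[45, 90], masked[10, 10])  # [0 0 0] [255 255 255]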
diff --git a/annotator/inpainting/__pycache__/__init__.cpython-310.pyc b/annotator/inpainting/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e73319d5b8b1f425199dd8067c0f9341842c93ac
Binary files /dev/null and b/annotator/inpainting/__pycache__/__init__.cpython-310.pyc differ
diff --git a/annotator/inpainting/__pycache__/__init__.cpython-38.pyc b/annotator/inpainting/__pycache__/__init__.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4cf235e2abdb2f7bbf498062f05b9484a8e29d47
Binary files /dev/null and b/annotator/inpainting/__pycache__/__init__.cpython-38.pyc differ
diff --git a/annotator/midas/LICENSE b/annotator/midas/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..277b5c11be103f028a8d10985139f1da10c2f08e
--- /dev/null
+++ b/annotator/midas/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2019 Intel ISL (Intel Intelligent Systems Lab)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/annotator/midas/__init__.py b/annotator/midas/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..426c7b2328f9cb475c344e80aeb828c866559aba
--- /dev/null
+++ b/annotator/midas/__init__.py
@@ -0,0 +1,52 @@
+'''
+ * Copyright (c) 2023 Salesforce, Inc.
+ * All rights reserved.
+ * SPDX-License-Identifier: Apache License 2.0
+ * For full license text, see LICENSE.txt file in the repo root or http://www.apache.org/licenses/
+ * By Can Qin
+ * Modified from ControlNet repo: https://github.com/lllyasviel/ControlNet
+ * Copyright (c) 2023 Lvmin Zhang and Maneesh Agrawala
+'''
+
+# Midas Depth Estimation
+# From https://github.com/isl-org/MiDaS
+# MIT LICENSE
+
+import cv2
+import numpy as np
+import torch
+
+from einops import rearrange
+from .api import MiDaSInference
+
+
+class MidasDetector:
+ def __init__(self):
+ self.model = MiDaSInference(model_type="dpt_hybrid").cuda()
+
+ def __call__(self, input_image, a=np.pi * 2.0, bg_th=0.1):
+ assert input_image.ndim == 3
+ image_depth = input_image
+ with torch.no_grad():
+ image_depth = torch.from_numpy(image_depth).float().cuda()
+ image_depth = image_depth / 127.5 - 1.0
+ image_depth = rearrange(image_depth, 'h w c -> 1 c h w')
+ depth = self.model(image_depth)[0]
+
+ depth_pt = depth.clone()
+ depth_pt -= torch.min(depth_pt)
+ depth_pt /= torch.max(depth_pt)
+ depth_pt = depth_pt.cpu().numpy()
+ depth_image = (depth_pt * 255.0).clip(0, 255).astype(np.uint8)
+
+ depth_np = depth.cpu().numpy()
+ x = cv2.Sobel(depth_np, cv2.CV_32F, 1, 0, ksize=3)
+ y = cv2.Sobel(depth_np, cv2.CV_32F, 0, 1, ksize=3)
+ z = np.ones_like(x) * a
+ x[depth_pt < bg_th] = 0
+ y[depth_pt < bg_th] = 0
+ normal = np.stack([x, y, z], axis=2)
+ normal /= np.sum(normal ** 2.0, axis=2, keepdims=True) ** 0.5
+ normal_image = (normal * 127.5 + 127.5).clip(0, 255).astype(np.uint8)
+
+ return depth_image, normal_image
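+
+
+# --- Illustrative sketch of the depth -> normal-map conversion above (not part of the
+# upstream module; run with the annotator package importable; no GPU or checkpoint is
+# needed here because the model call is replaced by a synthetic depth map) ---
+# Sobel gradients of the depth supply the x/y components of the surface normal, a constant
+# z = a controls how "flat" the normals look, background pixels (normalized depth < bg_th)
+# are zeroed, and the unit vectors are mapped into [0, 255].
+if __name__ == "__main__":
+    yy, xx = np.mgrid[0:64, 0:64].astype(np.float32)
+    depth_pt = (xx + yy) / (2 * 63.0)                 # synthetic normalized depth in [0, 1]
+    x = cv2.Sobel(depth_pt, cv2.CV_32F, 1, 0, ksize=3)
+    y = cv2.Sobel(depth_pt, cv2.CV_32F, 0, 1, ksize=3)
+    z = np.ones_like(x) * np.pi * 2.0
+    x[depth_pt < 0.1] = 0
+    y[depth_pt < 0.1] = 0
+    normal = np.stack([x, y, z], axis=2)
+    normal /= np.sum(normal ** 2.0, axis=2, keepdims=True) ** 0.5
+    normal_image = (normal * 127.5 + 127.5).clip(0, 255).astype(np.uint8)
+    print(normal_image.shape, normal_image.dtype)     # (64, 64, 3) uint8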
diff --git a/annotator/midas/__pycache__/__init__.cpython-310.pyc b/annotator/midas/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fd75a5fd0979d574dfc833a9a88ea7d06f417de7
Binary files /dev/null and b/annotator/midas/__pycache__/__init__.cpython-310.pyc differ
diff --git a/annotator/midas/__pycache__/__init__.cpython-38.pyc b/annotator/midas/__pycache__/__init__.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5762ebcd750fad967177a040abe867418387da39
Binary files /dev/null and b/annotator/midas/__pycache__/__init__.cpython-38.pyc differ
diff --git a/annotator/midas/__pycache__/api.cpython-310.pyc b/annotator/midas/__pycache__/api.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..671eaa9db48b64fd5614ac429303f0f5e8c4fc92
Binary files /dev/null and b/annotator/midas/__pycache__/api.cpython-310.pyc differ
diff --git a/annotator/midas/__pycache__/api.cpython-38.pyc b/annotator/midas/__pycache__/api.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5f4b7b54ce2e3179f7f5ab18cfb18747b2233dba
Binary files /dev/null and b/annotator/midas/__pycache__/api.cpython-38.pyc differ
diff --git a/annotator/midas/api.py b/annotator/midas/api.py
new file mode 100644
index 0000000000000000000000000000000000000000..d22ba86fc85ab65281c74c9e315766e401cfd054
--- /dev/null
+++ b/annotator/midas/api.py
@@ -0,0 +1,183 @@
+'''
+ * Copyright (c) 2023 Salesforce, Inc.
+ * All rights reserved.
+ * SPDX-License-Identifier: Apache License 2.0
+ * For full license text, see LICENSE.txt file in the repo root or http://www.apache.org/licenses/
+ * By Can Qin
+ * Modified from ControlNet repo: https://github.com/lllyasviel/ControlNet
+ * Copyright (c) 2023 Lvmin Zhang and Maneesh Agrawala
+'''
+
+# based on https://github.com/isl-org/MiDaS
+
+import cv2
+import os
+import torch
+import torch.nn as nn
+from torchvision.transforms import Compose
+
+from .midas.dpt_depth import DPTDepthModel
+from .midas.midas_net import MidasNet
+from .midas.midas_net_custom import MidasNet_small
+from .midas.transforms import Resize, NormalizeImage, PrepareForNet
+from annotator.util import annotator_ckpts_path
+
+
+ISL_PATHS = {
+ "dpt_large": os.path.join(annotator_ckpts_path, "dpt_large_384.pt"),
+ "dpt_hybrid": os.path.join(annotator_ckpts_path, "dpt_hybrid-midas-501f0c75.pt"),
+ "midas_v21": "",
+ "midas_v21_small": "",
+}
+
+remote_model_path = "https://huggingface.co/lllyasviel/ControlNet/resolve/main/annotator/ckpts/dpt_hybrid-midas-501f0c75.pt"
+# remote_model_path = "https://storage.googleapis.com/sfr-unicontrol-data-research/annotator/ckpts/dpt_large_384.pt" #"https://huggingface.co/Salesforce/UniControl/blob/main/annotator/ckpts/dpt_large_384.pt"
+
+def disabled_train(self, mode=True):
+ """Overwrite model.train with this function to make sure train/eval mode
+ does not change anymore."""
+ return self
+
+
+def load_midas_transform(model_type):
+ # https://github.com/isl-org/MiDaS/blob/master/run.py
+ # load transform only
+ if model_type == "dpt_large": # DPT-Large
+ net_w, net_h = 384, 384
+ resize_mode = "minimal"
+ normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
+
+ elif model_type == "dpt_hybrid": # DPT-Hybrid
+ net_w, net_h = 384, 384
+ resize_mode = "minimal"
+ normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
+
+ elif model_type == "midas_v21":
+ net_w, net_h = 384, 384
+ resize_mode = "upper_bound"
+ normalization = NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+
+ elif model_type == "midas_v21_small":
+ net_w, net_h = 256, 256
+ resize_mode = "upper_bound"
+ normalization = NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+
+ else:
+ assert False, f"model_type '{model_type}' not implemented, use: --model_type large"
+
+ transform = Compose(
+ [
+ Resize(
+ net_w,
+ net_h,
+ resize_target=None,
+ keep_aspect_ratio=True,
+ ensure_multiple_of=32,
+ resize_method=resize_mode,
+ image_interpolation_method=cv2.INTER_CUBIC,
+ ),
+ normalization,
+ PrepareForNet(),
+ ]
+ )
+
+ return transform
+
+
+def load_model(model_type):
+ # https://github.com/isl-org/MiDaS/blob/master/run.py
+ # load network
+ model_path = ISL_PATHS[model_type]
+ if model_type == "dpt_large": # DPT-Large
+ if not os.path.exists(model_path):
+ from basicsr.utils.download_util import load_file_from_url
+ load_file_from_url(remote_model_path, model_dir=annotator_ckpts_path)
+ #model_path = remote_model_path
+ model = DPTDepthModel(
+ path=model_path,
+ backbone="vitl16_384",
+ non_negative=True,
+ )
+ net_w, net_h = 384, 384
+ resize_mode = "minimal"
+ normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
+
+ elif model_type == "dpt_hybrid": # DPT-Hybrid
+ if not os.path.exists(model_path):
+ from basicsr.utils.download_util import load_file_from_url
+ load_file_from_url(remote_model_path, model_dir=annotator_ckpts_path)
+
+ model = DPTDepthModel(
+ path=model_path,
+ backbone="vitb_rn50_384",
+ non_negative=True,
+ )
+ net_w, net_h = 384, 384
+ resize_mode = "minimal"
+ normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
+
+ elif model_type == "midas_v21":
+ model = MidasNet(model_path, non_negative=True)
+ net_w, net_h = 384, 384
+ resize_mode = "upper_bound"
+ normalization = NormalizeImage(
+ mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
+ )
+
+ elif model_type == "midas_v21_small":
+ model = MidasNet_small(model_path, features=64, backbone="efficientnet_lite3", exportable=True,
+ non_negative=True, blocks={'expand': True})
+ net_w, net_h = 256, 256
+ resize_mode = "upper_bound"
+ normalization = NormalizeImage(
+ mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
+ )
+
+ else:
+ print(f"model_type '{model_type}' not implemented, use: --model_type large")
+ assert False
+
+ transform = Compose(
+ [
+ Resize(
+ net_w,
+ net_h,
+ resize_target=None,
+ keep_aspect_ratio=True,
+ ensure_multiple_of=32,
+ resize_method=resize_mode,
+ image_interpolation_method=cv2.INTER_CUBIC,
+ ),
+ normalization,
+ PrepareForNet(),
+ ]
+ )
+
+ return model.eval(), transform
+
+
+class MiDaSInference(nn.Module):
+ MODEL_TYPES_TORCH_HUB = [
+ "DPT_Large",
+ "DPT_Hybrid",
+ "MiDaS_small"
+ ]
+ MODEL_TYPES_ISL = [
+ "dpt_large",
+ "dpt_hybrid",
+ "midas_v21",
+ "midas_v21_small",
+ ]
+
+ def __init__(self, model_type):
+ super().__init__()
+ assert (model_type in self.MODEL_TYPES_ISL)
+ model, _ = load_model(model_type)
+ self.model = model
+ self.model.train = disabled_train
+
+ def forward(self, x):
+ with torch.no_grad():
+ prediction = self.model(x)
+ return prediction
+
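+
+# --- Illustrative usage sketch (not part of the upstream module; run as
+# `python -m annotator.midas.api` from the repo root; the commented-out model call
+# additionally requires the dpt_hybrid checkpoint and a CUDA device) ---
+if __name__ == "__main__":
+    import numpy as np
+    transform = load_midas_transform("dpt_hybrid")
+    sample = {"image": np.random.rand(480, 640, 3).astype(np.float32)}  # RGB in [0, 1]
+    net_input = transform(sample)["image"]   # CHW float32, sides rounded to multiples of 32
+    print(net_input.shape)                   # (3, 384, 512) for a 480x640 input
+    # model = MiDaSInference(model_type="dpt_hybrid").cuda()
+    # depth = model(torch.from_numpy(net_input).unsqueeze(0).cuda())    # (1, 384, 512)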
diff --git a/annotator/midas/midas/__init__.py b/annotator/midas/midas/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/annotator/midas/midas/__pycache__/__init__.cpython-310.pyc b/annotator/midas/midas/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bbc16bb84c1d7fc93656d6df024cbe013434a441
Binary files /dev/null and b/annotator/midas/midas/__pycache__/__init__.cpython-310.pyc differ
diff --git a/annotator/midas/midas/__pycache__/__init__.cpython-38.pyc b/annotator/midas/midas/__pycache__/__init__.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..804fd6db269ce09df24b218e485f1cd6115c4f0a
Binary files /dev/null and b/annotator/midas/midas/__pycache__/__init__.cpython-38.pyc differ
diff --git a/annotator/midas/midas/__pycache__/base_model.cpython-310.pyc b/annotator/midas/midas/__pycache__/base_model.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e96006940501c328feef933c6f0a9f90781fc674
Binary files /dev/null and b/annotator/midas/midas/__pycache__/base_model.cpython-310.pyc differ
diff --git a/annotator/midas/midas/__pycache__/base_model.cpython-38.pyc b/annotator/midas/midas/__pycache__/base_model.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6c94cfa86ea3d63a867db8e58b9c4800063ec604
Binary files /dev/null and b/annotator/midas/midas/__pycache__/base_model.cpython-38.pyc differ
diff --git a/annotator/midas/midas/__pycache__/blocks.cpython-310.pyc b/annotator/midas/midas/__pycache__/blocks.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a77c60703d7b4736c47746e10b2e2f7f22669fd6
Binary files /dev/null and b/annotator/midas/midas/__pycache__/blocks.cpython-310.pyc differ
diff --git a/annotator/midas/midas/__pycache__/blocks.cpython-38.pyc b/annotator/midas/midas/__pycache__/blocks.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..254786aa591824f0d06b49b88509f9d2a1093214
Binary files /dev/null and b/annotator/midas/midas/__pycache__/blocks.cpython-38.pyc differ
diff --git a/annotator/midas/midas/__pycache__/dpt_depth.cpython-310.pyc b/annotator/midas/midas/__pycache__/dpt_depth.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..624a24d5270aee5a93b9b44f96483e0fb373bce4
Binary files /dev/null and b/annotator/midas/midas/__pycache__/dpt_depth.cpython-310.pyc differ
diff --git a/annotator/midas/midas/__pycache__/dpt_depth.cpython-38.pyc b/annotator/midas/midas/__pycache__/dpt_depth.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..30158de42651a2add06e6b5ba2c18ff7ddad3f8c
Binary files /dev/null and b/annotator/midas/midas/__pycache__/dpt_depth.cpython-38.pyc differ
diff --git a/annotator/midas/midas/__pycache__/midas_net.cpython-310.pyc b/annotator/midas/midas/__pycache__/midas_net.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cb24bc6fc320166b12b3390d88ea8ddcc59aec0c
Binary files /dev/null and b/annotator/midas/midas/__pycache__/midas_net.cpython-310.pyc differ
diff --git a/annotator/midas/midas/__pycache__/midas_net.cpython-38.pyc b/annotator/midas/midas/__pycache__/midas_net.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2ecb4fed1bf16d7a3c79ee60b11f174ca7d83a39
Binary files /dev/null and b/annotator/midas/midas/__pycache__/midas_net.cpython-38.pyc differ
diff --git a/annotator/midas/midas/__pycache__/midas_net_custom.cpython-310.pyc b/annotator/midas/midas/__pycache__/midas_net_custom.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..63f0b762e66a1743e7e702ce9fcbac5e64fddbbf
Binary files /dev/null and b/annotator/midas/midas/__pycache__/midas_net_custom.cpython-310.pyc differ
diff --git a/annotator/midas/midas/__pycache__/midas_net_custom.cpython-38.pyc b/annotator/midas/midas/__pycache__/midas_net_custom.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..34bd7d1e3927d56c0f773800be815298f7b6293c
Binary files /dev/null and b/annotator/midas/midas/__pycache__/midas_net_custom.cpython-38.pyc differ
diff --git a/annotator/midas/midas/__pycache__/transforms.cpython-310.pyc b/annotator/midas/midas/__pycache__/transforms.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bd972a30afe4b97910ab560acc0672ee3428d862
Binary files /dev/null and b/annotator/midas/midas/__pycache__/transforms.cpython-310.pyc differ
diff --git a/annotator/midas/midas/__pycache__/transforms.cpython-38.pyc b/annotator/midas/midas/__pycache__/transforms.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9b9ebc138a75ff20a0773834aefbc95c6a12e1d7
Binary files /dev/null and b/annotator/midas/midas/__pycache__/transforms.cpython-38.pyc differ
diff --git a/annotator/midas/midas/__pycache__/vit.cpython-310.pyc b/annotator/midas/midas/__pycache__/vit.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..edd9316e550d213b565005296e094d3841c4da93
Binary files /dev/null and b/annotator/midas/midas/__pycache__/vit.cpython-310.pyc differ
diff --git a/annotator/midas/midas/__pycache__/vit.cpython-38.pyc b/annotator/midas/midas/__pycache__/vit.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..84e0ffcd0720dc1503832a58d270441283c937ee
Binary files /dev/null and b/annotator/midas/midas/__pycache__/vit.cpython-38.pyc differ
diff --git a/annotator/midas/midas/base_model.py b/annotator/midas/midas/base_model.py
new file mode 100644
index 0000000000000000000000000000000000000000..56016ef4310ce43d26a26edc17f360597b7b72ec
--- /dev/null
+++ b/annotator/midas/midas/base_model.py
@@ -0,0 +1,26 @@
+'''
+ * Copyright (c) 2023 Salesforce, Inc.
+ * All rights reserved.
+ * SPDX-License-Identifier: Apache License 2.0
+ * For full license text, see LICENSE.txt file in the repo root or http://www.apache.org/licenses/
+ * By Can Qin
+ * Modified from ControlNet repo: https://github.com/lllyasviel/ControlNet
+ * Copyright (c) 2023 Lvmin Zhang and Maneesh Agrawala
+'''
+
+import torch
+
+
+class BaseModel(torch.nn.Module):
+ def load(self, path):
+ """Load model from file.
+
+ Args:
+ path (str): file path
+ """
+ parameters = torch.load(path, map_location=torch.device('cpu'))
+
+ if "optimizer" in parameters:
+ parameters = parameters["model"]
+
+ self.load_state_dict(parameters)
diff --git a/annotator/midas/midas/blocks.py b/annotator/midas/midas/blocks.py
new file mode 100644
index 0000000000000000000000000000000000000000..62d50a2fde0a44b94271d4329c3934d1d3f2ba1a
--- /dev/null
+++ b/annotator/midas/midas/blocks.py
@@ -0,0 +1,352 @@
+'''
+ * Copyright (c) 2023 Salesforce, Inc.
+ * All rights reserved.
+ * SPDX-License-Identifier: Apache License 2.0
+ * For full license text, see LICENSE.txt file in the repo root or http://www.apache.org/licenses/
+ * By Can Qin
+ * Modified from ControlNet repo: https://github.com/lllyasviel/ControlNet
+ * Copyright (c) 2023 Lvmin Zhang and Maneesh Agrawala
+'''
+
+import torch
+import torch.nn as nn
+
+from .vit import (
+ _make_pretrained_vitb_rn50_384,
+ _make_pretrained_vitl16_384,
+ _make_pretrained_vitb16_384,
+ forward_vit,
+)
+
+def _make_encoder(backbone, features, use_pretrained, groups=1, expand=False, exportable=True, hooks=None, use_vit_only=False, use_readout="ignore",):
+ if backbone == "vitl16_384":
+ pretrained = _make_pretrained_vitl16_384(
+ use_pretrained, hooks=hooks, use_readout=use_readout
+ )
+ scratch = _make_scratch(
+ [256, 512, 1024, 1024], features, groups=groups, expand=expand
+ ) # ViT-L/16 - 85.0% Top1 (backbone)
+ elif backbone == "vitb_rn50_384":
+ pretrained = _make_pretrained_vitb_rn50_384(
+ use_pretrained,
+ hooks=hooks,
+ use_vit_only=use_vit_only,
+ use_readout=use_readout,
+ )
+ scratch = _make_scratch(
+ [256, 512, 768, 768], features, groups=groups, expand=expand
+        ) # ViT-B/16 + ResNet-50 hybrid backbone
+ elif backbone == "vitb16_384":
+ pretrained = _make_pretrained_vitb16_384(
+ use_pretrained, hooks=hooks, use_readout=use_readout
+ )
+ scratch = _make_scratch(
+ [96, 192, 384, 768], features, groups=groups, expand=expand
+ ) # ViT-B/16 - 84.6% Top1 (backbone)
+ elif backbone == "resnext101_wsl":
+ pretrained = _make_pretrained_resnext101_wsl(use_pretrained)
+        scratch = _make_scratch([256, 512, 1024, 2048], features, groups=groups, expand=expand)  # resnext101_wsl
+ elif backbone == "efficientnet_lite3":
+ pretrained = _make_pretrained_efficientnet_lite3(use_pretrained, exportable=exportable)
+ scratch = _make_scratch([32, 48, 136, 384], features, groups=groups, expand=expand) # efficientnet_lite3
+ else:
+ print(f"Backbone '{backbone}' not implemented")
+ assert False
+
+ return pretrained, scratch
+
+
+def _make_scratch(in_shape, out_shape, groups=1, expand=False):
+ scratch = nn.Module()
+
+ out_shape1 = out_shape
+ out_shape2 = out_shape
+ out_shape3 = out_shape
+ out_shape4 = out_shape
+ if expand==True:
+ out_shape1 = out_shape
+ out_shape2 = out_shape*2
+ out_shape3 = out_shape*4
+ out_shape4 = out_shape*8
+
+ scratch.layer1_rn = nn.Conv2d(
+ in_shape[0], out_shape1, kernel_size=3, stride=1, padding=1, bias=False, groups=groups
+ )
+ scratch.layer2_rn = nn.Conv2d(
+ in_shape[1], out_shape2, kernel_size=3, stride=1, padding=1, bias=False, groups=groups
+ )
+ scratch.layer3_rn = nn.Conv2d(
+ in_shape[2], out_shape3, kernel_size=3, stride=1, padding=1, bias=False, groups=groups
+ )
+ scratch.layer4_rn = nn.Conv2d(
+ in_shape[3], out_shape4, kernel_size=3, stride=1, padding=1, bias=False, groups=groups
+ )
+
+ return scratch
+
+
+def _make_pretrained_efficientnet_lite3(use_pretrained, exportable=False):
+ efficientnet = torch.hub.load(
+ "rwightman/gen-efficientnet-pytorch",
+ "tf_efficientnet_lite3",
+ pretrained=use_pretrained,
+ exportable=exportable
+ )
+ return _make_efficientnet_backbone(efficientnet)
+
+
+def _make_efficientnet_backbone(effnet):
+ pretrained = nn.Module()
+
+ pretrained.layer1 = nn.Sequential(
+ effnet.conv_stem, effnet.bn1, effnet.act1, *effnet.blocks[0:2]
+ )
+ pretrained.layer2 = nn.Sequential(*effnet.blocks[2:3])
+ pretrained.layer3 = nn.Sequential(*effnet.blocks[3:5])
+ pretrained.layer4 = nn.Sequential(*effnet.blocks[5:9])
+
+ return pretrained
+
+
+def _make_resnet_backbone(resnet):
+ pretrained = nn.Module()
+ pretrained.layer1 = nn.Sequential(
+ resnet.conv1, resnet.bn1, resnet.relu, resnet.maxpool, resnet.layer1
+ )
+
+ pretrained.layer2 = resnet.layer2
+ pretrained.layer3 = resnet.layer3
+ pretrained.layer4 = resnet.layer4
+
+ return pretrained
+
+
+def _make_pretrained_resnext101_wsl(use_pretrained):
+ resnet = torch.hub.load("facebookresearch/WSL-Images", "resnext101_32x8d_wsl")
+ return _make_resnet_backbone(resnet)
+
+
+
+class Interpolate(nn.Module):
+ """Interpolation module.
+ """
+
+ def __init__(self, scale_factor, mode, align_corners=False):
+ """Init.
+
+ Args:
+ scale_factor (float): scaling
+ mode (str): interpolation mode
+ """
+ super(Interpolate, self).__init__()
+
+ self.interp = nn.functional.interpolate
+ self.scale_factor = scale_factor
+ self.mode = mode
+ self.align_corners = align_corners
+
+ def forward(self, x):
+ """Forward pass.
+
+ Args:
+ x (tensor): input
+
+ Returns:
+ tensor: interpolated data
+ """
+
+ x = self.interp(
+ x, scale_factor=self.scale_factor, mode=self.mode, align_corners=self.align_corners
+ )
+
+ return x
+
+
+class ResidualConvUnit(nn.Module):
+ """Residual convolution module.
+ """
+
+ def __init__(self, features):
+ """Init.
+
+ Args:
+ features (int): number of features
+ """
+ super().__init__()
+
+ self.conv1 = nn.Conv2d(
+ features, features, kernel_size=3, stride=1, padding=1, bias=True
+ )
+
+ self.conv2 = nn.Conv2d(
+ features, features, kernel_size=3, stride=1, padding=1, bias=True
+ )
+
+ self.relu = nn.ReLU(inplace=True)
+
+ def forward(self, x):
+ """Forward pass.
+
+ Args:
+ x (tensor): input
+
+ Returns:
+ tensor: output
+ """
+ out = self.relu(x)
+ out = self.conv1(out)
+ out = self.relu(out)
+ out = self.conv2(out)
+
+ return out + x
+
+
+class FeatureFusionBlock(nn.Module):
+ """Feature fusion block.
+ """
+
+ def __init__(self, features):
+ """Init.
+
+ Args:
+ features (int): number of features
+ """
+ super(FeatureFusionBlock, self).__init__()
+
+ self.resConfUnit1 = ResidualConvUnit(features)
+ self.resConfUnit2 = ResidualConvUnit(features)
+
+ def forward(self, *xs):
+ """Forward pass.
+
+ Returns:
+ tensor: output
+ """
+ output = xs[0]
+
+ if len(xs) == 2:
+ output += self.resConfUnit1(xs[1])
+
+ output = self.resConfUnit2(output)
+
+ output = nn.functional.interpolate(
+ output, scale_factor=2, mode="bilinear", align_corners=True
+ )
+
+ return output
+
+
+
+
+class ResidualConvUnit_custom(nn.Module):
+ """Residual convolution module.
+ """
+
+ def __init__(self, features, activation, bn):
+ """Init.
+
+ Args:
+ features (int): number of features
+ """
+ super().__init__()
+
+ self.bn = bn
+
+ self.groups=1
+
+ self.conv1 = nn.Conv2d(
+ features, features, kernel_size=3, stride=1, padding=1, bias=True, groups=self.groups
+ )
+
+ self.conv2 = nn.Conv2d(
+ features, features, kernel_size=3, stride=1, padding=1, bias=True, groups=self.groups
+ )
+
+ if self.bn==True:
+ self.bn1 = nn.BatchNorm2d(features)
+ self.bn2 = nn.BatchNorm2d(features)
+
+ self.activation = activation
+
+ self.skip_add = nn.quantized.FloatFunctional()
+
+ def forward(self, x):
+ """Forward pass.
+
+ Args:
+ x (tensor): input
+
+ Returns:
+ tensor: output
+ """
+
+ out = self.activation(x)
+ out = self.conv1(out)
+ if self.bn==True:
+ out = self.bn1(out)
+
+ out = self.activation(out)
+ out = self.conv2(out)
+ if self.bn==True:
+ out = self.bn2(out)
+
+ if self.groups > 1:
+ out = self.conv_merge(out)
+
+ return self.skip_add.add(out, x)
+
+ # return out + x
+
+
+class FeatureFusionBlock_custom(nn.Module):
+ """Feature fusion block.
+ """
+
+ def __init__(self, features, activation, deconv=False, bn=False, expand=False, align_corners=True):
+ """Init.
+
+ Args:
+ features (int): number of features
+ """
+ super(FeatureFusionBlock_custom, self).__init__()
+
+ self.deconv = deconv
+ self.align_corners = align_corners
+
+ self.groups=1
+
+ self.expand = expand
+ out_features = features
+ if self.expand==True:
+ out_features = features//2
+
+ self.out_conv = nn.Conv2d(features, out_features, kernel_size=1, stride=1, padding=0, bias=True, groups=1)
+
+ self.resConfUnit1 = ResidualConvUnit_custom(features, activation, bn)
+ self.resConfUnit2 = ResidualConvUnit_custom(features, activation, bn)
+
+ self.skip_add = nn.quantized.FloatFunctional()
+
+ def forward(self, *xs):
+ """Forward pass.
+
+ Returns:
+ tensor: output
+ """
+ output = xs[0]
+
+ if len(xs) == 2:
+ res = self.resConfUnit1(xs[1])
+ output = self.skip_add.add(output, res)
+ # output += res
+
+ output = self.resConfUnit2(output)
+
+ output = nn.functional.interpolate(
+ output, scale_factor=2, mode="bilinear", align_corners=self.align_corners
+ )
+
+ output = self.out_conv(output)
+
+ return output
+
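+
+# --- Illustrative sketch (not part of the upstream module; run with the package importable):
+# the "expand" flag, as used by MidasNet_small, doubles the reassembled feature width at
+# each deeper scratch layer. ---
+if __name__ == "__main__":
+    scratch = _make_scratch([32, 48, 136, 384], 64, expand=True)  # efficientnet_lite3 channels
+    print(scratch.layer1_rn.out_channels, scratch.layer2_rn.out_channels,
+          scratch.layer3_rn.out_channels, scratch.layer4_rn.out_channels)  # 64 128 256 512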
diff --git a/annotator/midas/midas/dpt_depth.py b/annotator/midas/midas/dpt_depth.py
new file mode 100644
index 0000000000000000000000000000000000000000..2cd3ba3b6fbffe6ec0e8bb2bb6e4705229a8713b
--- /dev/null
+++ b/annotator/midas/midas/dpt_depth.py
@@ -0,0 +1,119 @@
+'''
+ * Copyright (c) 2023 Salesforce, Inc.
+ * All rights reserved.
+ * SPDX-License-Identifier: Apache License 2.0
+ * For full license text, see LICENSE.txt file in the repo root or http://www.apache.org/licenses/
+ * By Can Qin
+ * Modified from ControlNet repo: https://github.com/lllyasviel/ControlNet
+ * Copyright (c) 2023 Lvmin Zhang and Maneesh Agrawala
+'''
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+from .base_model import BaseModel
+from .blocks import (
+ FeatureFusionBlock,
+ FeatureFusionBlock_custom,
+ Interpolate,
+ _make_encoder,
+ forward_vit,
+)
+
+
+def _make_fusion_block(features, use_bn):
+ return FeatureFusionBlock_custom(
+ features,
+ nn.ReLU(False),
+ deconv=False,
+ bn=use_bn,
+ expand=False,
+ align_corners=True,
+ )
+
+
+class DPT(BaseModel):
+ def __init__(
+ self,
+ head,
+ features=256,
+ backbone="vitb_rn50_384",
+ readout="project",
+ channels_last=False,
+ use_bn=False,
+ ):
+
+ super(DPT, self).__init__()
+
+ self.channels_last = channels_last
+
+ hooks = {
+ "vitb_rn50_384": [0, 1, 8, 11],
+ "vitb16_384": [2, 5, 8, 11],
+ "vitl16_384": [5, 11, 17, 23],
+ }
+
+ # Instantiate backbone and reassemble blocks
+ self.pretrained, self.scratch = _make_encoder(
+ backbone,
+ features,
+            False,  # Set to True if you want to train from scratch; uses ImageNet weights
+ groups=1,
+ expand=False,
+ exportable=False,
+ hooks=hooks[backbone],
+ use_readout=readout,
+ )
+
+ self.scratch.refinenet1 = _make_fusion_block(features, use_bn)
+ self.scratch.refinenet2 = _make_fusion_block(features, use_bn)
+ self.scratch.refinenet3 = _make_fusion_block(features, use_bn)
+ self.scratch.refinenet4 = _make_fusion_block(features, use_bn)
+
+ self.scratch.output_conv = head
+
+
+ def forward(self, x):
+ if self.channels_last == True:
+ x.contiguous(memory_format=torch.channels_last)
+
+ layer_1, layer_2, layer_3, layer_4 = forward_vit(self.pretrained, x)
+
+ layer_1_rn = self.scratch.layer1_rn(layer_1)
+ layer_2_rn = self.scratch.layer2_rn(layer_2)
+ layer_3_rn = self.scratch.layer3_rn(layer_3)
+ layer_4_rn = self.scratch.layer4_rn(layer_4)
+
+ path_4 = self.scratch.refinenet4(layer_4_rn)
+ path_3 = self.scratch.refinenet3(path_4, layer_3_rn)
+ path_2 = self.scratch.refinenet2(path_3, layer_2_rn)
+ path_1 = self.scratch.refinenet1(path_2, layer_1_rn)
+
+ out = self.scratch.output_conv(path_1)
+
+ return out
+
+
+class DPTDepthModel(DPT):
+ def __init__(self, path=None, non_negative=True, **kwargs):
+ features = kwargs["features"] if "features" in kwargs else 256
+
+ head = nn.Sequential(
+ nn.Conv2d(features, features // 2, kernel_size=3, stride=1, padding=1),
+ Interpolate(scale_factor=2, mode="bilinear", align_corners=True),
+ nn.Conv2d(features // 2, 32, kernel_size=3, stride=1, padding=1),
+ nn.ReLU(True),
+ nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0),
+ nn.ReLU(True) if non_negative else nn.Identity(),
+ nn.Identity(),
+ )
+
+ super().__init__(head, **kwargs)
+
+ if path is not None:
+ self.load(path)
+
+ def forward(self, x):
+ return super().forward(x).squeeze(dim=1)
+
diff --git a/annotator/midas/midas/midas_net.py b/annotator/midas/midas/midas_net.py
new file mode 100644
index 0000000000000000000000000000000000000000..356e7538f5b9691babe061342fbf8f092360999f
--- /dev/null
+++ b/annotator/midas/midas/midas_net.py
@@ -0,0 +1,86 @@
+'''
+ * Copyright (c) 2023 Salesforce, Inc.
+ * All rights reserved.
+ * SPDX-License-Identifier: Apache License 2.0
+ * For full license text, see LICENSE.txt file in the repo root or http://www.apache.org/licenses/
+ * By Can Qin
+ * Modified from ControlNet repo: https://github.com/lllyasviel/ControlNet
+ * Copyright (c) 2023 Lvmin Zhang and Maneesh Agrawala
+'''
+
+"""MidashNet: Network for monocular depth estimation trained by mixing several datasets.
+This file contains code that is adapted from
+https://github.com/thomasjpfan/pytorch_refinenet/blob/master/pytorch_refinenet/refinenet/refinenet_4cascade.py
+"""
+import torch
+import torch.nn as nn
+
+from .base_model import BaseModel
+from .blocks import FeatureFusionBlock, Interpolate, _make_encoder
+
+
+class MidasNet(BaseModel):
+ """Network for monocular depth estimation.
+ """
+
+ def __init__(self, path=None, features=256, non_negative=True):
+ """Init.
+
+ Args:
+ path (str, optional): Path to saved model. Defaults to None.
+ features (int, optional): Number of features. Defaults to 256.
+            non_negative (bool, optional): If True, clamp the predicted depth to be non-negative. Defaults to True.
+ """
+ print("Loading weights: ", path)
+
+ super(MidasNet, self).__init__()
+
+ use_pretrained = False if path is None else True
+
+ self.pretrained, self.scratch = _make_encoder(backbone="resnext101_wsl", features=features, use_pretrained=use_pretrained)
+
+ self.scratch.refinenet4 = FeatureFusionBlock(features)
+ self.scratch.refinenet3 = FeatureFusionBlock(features)
+ self.scratch.refinenet2 = FeatureFusionBlock(features)
+ self.scratch.refinenet1 = FeatureFusionBlock(features)
+
+ self.scratch.output_conv = nn.Sequential(
+ nn.Conv2d(features, 128, kernel_size=3, stride=1, padding=1),
+ Interpolate(scale_factor=2, mode="bilinear"),
+ nn.Conv2d(128, 32, kernel_size=3, stride=1, padding=1),
+ nn.ReLU(True),
+ nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0),
+ nn.ReLU(True) if non_negative else nn.Identity(),
+ )
+
+ if path:
+ self.load(path)
+
+ def forward(self, x):
+ """Forward pass.
+
+ Args:
+ x (tensor): input data (image)
+
+ Returns:
+ tensor: depth
+ """
+
+ layer_1 = self.pretrained.layer1(x)
+ layer_2 = self.pretrained.layer2(layer_1)
+ layer_3 = self.pretrained.layer3(layer_2)
+ layer_4 = self.pretrained.layer4(layer_3)
+
+ layer_1_rn = self.scratch.layer1_rn(layer_1)
+ layer_2_rn = self.scratch.layer2_rn(layer_2)
+ layer_3_rn = self.scratch.layer3_rn(layer_3)
+ layer_4_rn = self.scratch.layer4_rn(layer_4)
+
+ path_4 = self.scratch.refinenet4(layer_4_rn)
+ path_3 = self.scratch.refinenet3(path_4, layer_3_rn)
+ path_2 = self.scratch.refinenet2(path_3, layer_2_rn)
+ path_1 = self.scratch.refinenet1(path_2, layer_1_rn)
+
+ out = self.scratch.output_conv(path_1)
+
+ return torch.squeeze(out, dim=1)
diff --git a/annotator/midas/midas/midas_net_custom.py b/annotator/midas/midas/midas_net_custom.py
new file mode 100644
index 0000000000000000000000000000000000000000..3cfdfbe38a2ff6e9ea2dc3c4702e0713a0757a82
--- /dev/null
+++ b/annotator/midas/midas/midas_net_custom.py
@@ -0,0 +1,138 @@
+'''
+ * Copyright (c) 2023 Salesforce, Inc.
+ * All rights reserved.
+ * SPDX-License-Identifier: Apache License 2.0
+ * For full license text, see LICENSE.txt file in the repo root or http://www.apache.org/licenses/
+ * By Can Qin
+ * Modified from ControlNet repo: https://github.com/lllyasviel/ControlNet
+ * Copyright (c) 2023 Lvmin Zhang and Maneesh Agrawala
+'''
+
+"""MidashNet: Network for monocular depth estimation trained by mixing several datasets.
+This file contains code that is adapted from
+https://github.com/thomasjpfan/pytorch_refinenet/blob/master/pytorch_refinenet/refinenet/refinenet_4cascade.py
+"""
+import torch
+import torch.nn as nn
+
+from .base_model import BaseModel
+from .blocks import FeatureFusionBlock, FeatureFusionBlock_custom, Interpolate, _make_encoder
+
+
+class MidasNet_small(BaseModel):
+ """Network for monocular depth estimation.
+ """
+
+ def __init__(self, path=None, features=64, backbone="efficientnet_lite3", non_negative=True, exportable=True, channels_last=False, align_corners=True,
+ blocks={'expand': True}):
+ """Init.
+
+ Args:
+ path (str, optional): Path to saved model. Defaults to None.
+            features (int, optional): Number of features. Defaults to 64.
+            backbone (str, optional): Backbone network for the encoder. Defaults to "efficientnet_lite3".
+ """
+ print("Loading weights: ", path)
+
+ super(MidasNet_small, self).__init__()
+
+ use_pretrained = False if path else True
+
+ self.channels_last = channels_last
+ self.blocks = blocks
+ self.backbone = backbone
+
+ self.groups = 1
+
+ features1=features
+ features2=features
+ features3=features
+ features4=features
+ self.expand = False
+ if "expand" in self.blocks and self.blocks['expand'] == True:
+ self.expand = True
+ features1=features
+ features2=features*2
+ features3=features*4
+ features4=features*8
+
+ self.pretrained, self.scratch = _make_encoder(self.backbone, features, use_pretrained, groups=self.groups, expand=self.expand, exportable=exportable)
+
+ self.scratch.activation = nn.ReLU(False)
+
+ self.scratch.refinenet4 = FeatureFusionBlock_custom(features4, self.scratch.activation, deconv=False, bn=False, expand=self.expand, align_corners=align_corners)
+ self.scratch.refinenet3 = FeatureFusionBlock_custom(features3, self.scratch.activation, deconv=False, bn=False, expand=self.expand, align_corners=align_corners)
+ self.scratch.refinenet2 = FeatureFusionBlock_custom(features2, self.scratch.activation, deconv=False, bn=False, expand=self.expand, align_corners=align_corners)
+ self.scratch.refinenet1 = FeatureFusionBlock_custom(features1, self.scratch.activation, deconv=False, bn=False, align_corners=align_corners)
+
+
+ self.scratch.output_conv = nn.Sequential(
+ nn.Conv2d(features, features//2, kernel_size=3, stride=1, padding=1, groups=self.groups),
+ Interpolate(scale_factor=2, mode="bilinear"),
+ nn.Conv2d(features//2, 32, kernel_size=3, stride=1, padding=1),
+ self.scratch.activation,
+ nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0),
+ nn.ReLU(True) if non_negative else nn.Identity(),
+ nn.Identity(),
+ )
+
+ if path:
+ self.load(path)
+
+
+ def forward(self, x):
+ """Forward pass.
+
+ Args:
+ x (tensor): input data (image)
+
+ Returns:
+ tensor: depth
+ """
+ if self.channels_last==True:
+ print("self.channels_last = ", self.channels_last)
+ x.contiguous(memory_format=torch.channels_last)
+
+
+ layer_1 = self.pretrained.layer1(x)
+ layer_2 = self.pretrained.layer2(layer_1)
+ layer_3 = self.pretrained.layer3(layer_2)
+ layer_4 = self.pretrained.layer4(layer_3)
+
+ layer_1_rn = self.scratch.layer1_rn(layer_1)
+ layer_2_rn = self.scratch.layer2_rn(layer_2)
+ layer_3_rn = self.scratch.layer3_rn(layer_3)
+ layer_4_rn = self.scratch.layer4_rn(layer_4)
+
+
+ path_4 = self.scratch.refinenet4(layer_4_rn)
+ path_3 = self.scratch.refinenet3(path_4, layer_3_rn)
+ path_2 = self.scratch.refinenet2(path_3, layer_2_rn)
+ path_1 = self.scratch.refinenet1(path_2, layer_1_rn)
+
+ out = self.scratch.output_conv(path_1)
+
+ return torch.squeeze(out, dim=1)
+
+
+
+def fuse_model(m):
+ prev_previous_type = nn.Identity()
+ prev_previous_name = ''
+ previous_type = nn.Identity()
+ previous_name = ''
+ for name, module in m.named_modules():
+ if prev_previous_type == nn.Conv2d and previous_type == nn.BatchNorm2d and type(module) == nn.ReLU:
+ # print("FUSED ", prev_previous_name, previous_name, name)
+ torch.quantization.fuse_modules(m, [prev_previous_name, previous_name, name], inplace=True)
+ elif prev_previous_type == nn.Conv2d and previous_type == nn.BatchNorm2d:
+ # print("FUSED ", prev_previous_name, previous_name)
+ torch.quantization.fuse_modules(m, [prev_previous_name, previous_name], inplace=True)
+ # elif previous_type == nn.Conv2d and type(module) == nn.ReLU:
+ # print("FUSED ", previous_name, name)
+ # torch.quantization.fuse_modules(m, [previous_name, name], inplace=True)
+
+ prev_previous_type = previous_type
+ prev_previous_name = previous_name
+ previous_type = type(module)
+ previous_name = name
\ No newline at end of file
diff --git a/annotator/midas/midas/transforms.py b/annotator/midas/midas/transforms.py
new file mode 100644
index 0000000000000000000000000000000000000000..94eb3e16ba8e74d46f88120c644ab0c5b9120bb1
--- /dev/null
+++ b/annotator/midas/midas/transforms.py
@@ -0,0 +1,244 @@
+'''
+ * Copyright (c) 2023 Salesforce, Inc.
+ * All rights reserved.
+ * SPDX-License-Identifier: Apache License 2.0
+ * For full license text, see LICENSE.txt file in the repo root or http://www.apache.org/licenses/
+ * By Can Qin
+ * Modified from ControlNet repo: https://github.com/lllyasviel/ControlNet
+ * Copyright (c) 2023 Lvmin Zhang and Maneesh Agrawala
+'''
+
+import numpy as np
+import cv2
+import math
+
+
+def apply_min_size(sample, size, image_interpolation_method=cv2.INTER_AREA):
+    """Resize the sample to ensure the given size. Keeps aspect ratio.
+
+ Args:
+ sample (dict): sample
+ size (tuple): image size
+
+ Returns:
+ tuple: new size
+ """
+ shape = list(sample["disparity"].shape)
+
+ if shape[0] >= size[0] and shape[1] >= size[1]:
+ return sample
+
+ scale = [0, 0]
+ scale[0] = size[0] / shape[0]
+ scale[1] = size[1] / shape[1]
+
+ scale = max(scale)
+
+ shape[0] = math.ceil(scale * shape[0])
+ shape[1] = math.ceil(scale * shape[1])
+
+ # resize
+ sample["image"] = cv2.resize(
+ sample["image"], tuple(shape[::-1]), interpolation=image_interpolation_method
+ )
+
+ sample["disparity"] = cv2.resize(
+ sample["disparity"], tuple(shape[::-1]), interpolation=cv2.INTER_NEAREST
+ )
+ sample["mask"] = cv2.resize(
+ sample["mask"].astype(np.float32),
+ tuple(shape[::-1]),
+ interpolation=cv2.INTER_NEAREST,
+ )
+ sample["mask"] = sample["mask"].astype(bool)
+
+ return tuple(shape)
+
+
+class Resize(object):
+ """Resize sample to given size (width, height).
+ """
+
+ def __init__(
+ self,
+ width,
+ height,
+ resize_target=True,
+ keep_aspect_ratio=False,
+ ensure_multiple_of=1,
+ resize_method="lower_bound",
+ image_interpolation_method=cv2.INTER_AREA,
+ ):
+ """Init.
+
+ Args:
+ width (int): desired output width
+ height (int): desired output height
+ resize_target (bool, optional):
+ True: Resize the full sample (image, mask, target).
+ False: Resize image only.
+ Defaults to True.
+ keep_aspect_ratio (bool, optional):
+ True: Keep the aspect ratio of the input sample.
+ Output sample might not have the given width and height, and
+ resize behaviour depends on the parameter 'resize_method'.
+ Defaults to False.
+ ensure_multiple_of (int, optional):
+ Output width and height is constrained to be multiple of this parameter.
+ Defaults to 1.
+ resize_method (str, optional):
+ "lower_bound": Output will be at least as large as the given size.
+                "upper_bound": Output will be at most as large as the given size. (Output size might be smaller than given size.)
+                "minimal": Scale as little as possible. (Output size might be smaller than given size.)
+ Defaults to "lower_bound".
+ """
+ self.__width = width
+ self.__height = height
+
+ self.__resize_target = resize_target
+ self.__keep_aspect_ratio = keep_aspect_ratio
+ self.__multiple_of = ensure_multiple_of
+ self.__resize_method = resize_method
+ self.__image_interpolation_method = image_interpolation_method
+
+ def constrain_to_multiple_of(self, x, min_val=0, max_val=None):
+ y = (np.round(x / self.__multiple_of) * self.__multiple_of).astype(int)
+
+ if max_val is not None and y > max_val:
+ y = (np.floor(x / self.__multiple_of) * self.__multiple_of).astype(int)
+
+ if y < min_val:
+ y = (np.ceil(x / self.__multiple_of) * self.__multiple_of).astype(int)
+
+ return y
+
+ def get_size(self, width, height):
+ # determine new height and width
+ scale_height = self.__height / height
+ scale_width = self.__width / width
+
+ if self.__keep_aspect_ratio:
+ if self.__resize_method == "lower_bound":
+ # scale such that output size is lower bound
+ if scale_width > scale_height:
+ # fit width
+ scale_height = scale_width
+ else:
+ # fit height
+ scale_width = scale_height
+ elif self.__resize_method == "upper_bound":
+ # scale such that output size is upper bound
+ if scale_width < scale_height:
+ # fit width
+ scale_height = scale_width
+ else:
+ # fit height
+ scale_width = scale_height
+ elif self.__resize_method == "minimal":
+                # scale as little as possible
+ if abs(1 - scale_width) < abs(1 - scale_height):
+ # fit width
+ scale_height = scale_width
+ else:
+ # fit height
+ scale_width = scale_height
+ else:
+ raise ValueError(
+ f"resize_method {self.__resize_method} not implemented"
+ )
+
+ if self.__resize_method == "lower_bound":
+ new_height = self.constrain_to_multiple_of(
+ scale_height * height, min_val=self.__height
+ )
+ new_width = self.constrain_to_multiple_of(
+ scale_width * width, min_val=self.__width
+ )
+ elif self.__resize_method == "upper_bound":
+ new_height = self.constrain_to_multiple_of(
+ scale_height * height, max_val=self.__height
+ )
+ new_width = self.constrain_to_multiple_of(
+ scale_width * width, max_val=self.__width
+ )
+ elif self.__resize_method == "minimal":
+ new_height = self.constrain_to_multiple_of(scale_height * height)
+ new_width = self.constrain_to_multiple_of(scale_width * width)
+ else:
+ raise ValueError(f"resize_method {self.__resize_method} not implemented")
+
+ return (new_width, new_height)
+
+ def __call__(self, sample):
+ width, height = self.get_size(
+ sample["image"].shape[1], sample["image"].shape[0]
+ )
+
+ # resize sample
+ sample["image"] = cv2.resize(
+ sample["image"],
+ (width, height),
+ interpolation=self.__image_interpolation_method,
+ )
+
+ if self.__resize_target:
+ if "disparity" in sample:
+ sample["disparity"] = cv2.resize(
+ sample["disparity"],
+ (width, height),
+ interpolation=cv2.INTER_NEAREST,
+ )
+
+ if "depth" in sample:
+ sample["depth"] = cv2.resize(
+ sample["depth"], (width, height), interpolation=cv2.INTER_NEAREST
+ )
+
+ sample["mask"] = cv2.resize(
+ sample["mask"].astype(np.float32),
+ (width, height),
+ interpolation=cv2.INTER_NEAREST,
+ )
+ sample["mask"] = sample["mask"].astype(bool)
+
+ return sample
+
+
+class NormalizeImage(object):
+ """Normlize image by given mean and std.
+ """
+
+ def __init__(self, mean, std):
+ self.__mean = mean
+ self.__std = std
+
+ def __call__(self, sample):
+ sample["image"] = (sample["image"] - self.__mean) / self.__std
+
+ return sample
+
+
+class PrepareForNet(object):
+ """Prepare sample for usage as network input.
+ """
+
+ def __init__(self):
+ pass
+
+ def __call__(self, sample):
+ image = np.transpose(sample["image"], (2, 0, 1))
+ sample["image"] = np.ascontiguousarray(image).astype(np.float32)
+
+ if "mask" in sample:
+ sample["mask"] = sample["mask"].astype(np.float32)
+ sample["mask"] = np.ascontiguousarray(sample["mask"])
+
+ if "disparity" in sample:
+ disparity = sample["disparity"].astype(np.float32)
+ sample["disparity"] = np.ascontiguousarray(disparity)
+
+ if "depth" in sample:
+ depth = sample["depth"].astype(np.float32)
+ sample["depth"] = np.ascontiguousarray(depth)
+
+ return sample
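+
+
+# Illustrative sketch (not part of the upstream transforms): these classes are
+# normally chained with torchvision's Compose before feeding an image to the
+# depth network.  The argument values below are assumptions for demonstration.
+#
+#   from torchvision.transforms import Compose
+#
+#   transform = Compose([
+#       Resize(384, 384, resize_target=False, keep_aspect_ratio=True,
+#              ensure_multiple_of=32, resize_method="minimal",
+#              image_interpolation_method=cv2.INTER_CUBIC),
+#       NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
+#       PrepareForNet(),
+#   ])
+#   net_input = transform({"image": img})["image"]  # img: HxWx3 float RGB in [0, 1]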
diff --git a/annotator/midas/midas/vit.py b/annotator/midas/midas/vit.py
new file mode 100644
index 0000000000000000000000000000000000000000..f861ea8bd64a46a9c647534fc7aa777691eaab83
--- /dev/null
+++ b/annotator/midas/midas/vit.py
@@ -0,0 +1,501 @@
+'''
+ * Copyright (c) 2023 Salesforce, Inc.
+ * All rights reserved.
+ * SPDX-License-Identifier: Apache License 2.0
+ * For full license text, see LICENSE.txt file in the repo root or http://www.apache.org/licenses/
+ * By Can Qin
+ * Modified from ControlNet repo: https://github.com/lllyasviel/ControlNet
+ * Copyright (c) 2023 Lvmin Zhang and Maneesh Agrawala
+'''
+
+import torch
+import torch.nn as nn
+import timm
+import types
+import math
+import torch.nn.functional as F
+
+
+class Slice(nn.Module):
+ def __init__(self, start_index=1):
+ super(Slice, self).__init__()
+ self.start_index = start_index
+
+ def forward(self, x):
+ return x[:, self.start_index :]
+
+
+class AddReadout(nn.Module):
+ def __init__(self, start_index=1):
+ super(AddReadout, self).__init__()
+ self.start_index = start_index
+
+ def forward(self, x):
+ if self.start_index == 2:
+ readout = (x[:, 0] + x[:, 1]) / 2
+ else:
+ readout = x[:, 0]
+ return x[:, self.start_index :] + readout.unsqueeze(1)
+
+
+class ProjectReadout(nn.Module):
+ def __init__(self, in_features, start_index=1):
+ super(ProjectReadout, self).__init__()
+ self.start_index = start_index
+
+ self.project = nn.Sequential(nn.Linear(2 * in_features, in_features), nn.GELU())
+
+ def forward(self, x):
+ readout = x[:, 0].unsqueeze(1).expand_as(x[:, self.start_index :])
+ features = torch.cat((x[:, self.start_index :], readout), -1)
+
+ return self.project(features)
+
+
+class Transpose(nn.Module):
+ def __init__(self, dim0, dim1):
+ super(Transpose, self).__init__()
+ self.dim0 = dim0
+ self.dim1 = dim1
+
+ def forward(self, x):
+ x = x.transpose(self.dim0, self.dim1)
+ return x
+
+
+def forward_vit(pretrained, x):
+ b, c, h, w = x.shape
+
+ glob = pretrained.model.forward_flex(x)
+
+ layer_1 = pretrained.activations["1"]
+ layer_2 = pretrained.activations["2"]
+ layer_3 = pretrained.activations["3"]
+ layer_4 = pretrained.activations["4"]
+
+ layer_1 = pretrained.act_postprocess1[0:2](layer_1)
+ layer_2 = pretrained.act_postprocess2[0:2](layer_2)
+ layer_3 = pretrained.act_postprocess3[0:2](layer_3)
+ layer_4 = pretrained.act_postprocess4[0:2](layer_4)
+
+ unflatten = nn.Sequential(
+ nn.Unflatten(
+ 2,
+ torch.Size(
+ [
+ h // pretrained.model.patch_size[1],
+ w // pretrained.model.patch_size[0],
+ ]
+ ),
+ )
+ )
+
+ if layer_1.ndim == 3:
+ layer_1 = unflatten(layer_1)
+ if layer_2.ndim == 3:
+ layer_2 = unflatten(layer_2)
+ if layer_3.ndim == 3:
+ layer_3 = unflatten(layer_3)
+ if layer_4.ndim == 3:
+ layer_4 = unflatten(layer_4)
+
+ layer_1 = pretrained.act_postprocess1[3 : len(pretrained.act_postprocess1)](layer_1)
+ layer_2 = pretrained.act_postprocess2[3 : len(pretrained.act_postprocess2)](layer_2)
+ layer_3 = pretrained.act_postprocess3[3 : len(pretrained.act_postprocess3)](layer_3)
+ layer_4 = pretrained.act_postprocess4[3 : len(pretrained.act_postprocess4)](layer_4)
+
+ return layer_1, layer_2, layer_3, layer_4
+
+
+def _resize_pos_embed(self, posemb, gs_h, gs_w):
+ posemb_tok, posemb_grid = (
+ posemb[:, : self.start_index],
+ posemb[0, self.start_index :],
+ )
+
+ gs_old = int(math.sqrt(len(posemb_grid)))
+
+ posemb_grid = posemb_grid.reshape(1, gs_old, gs_old, -1).permute(0, 3, 1, 2)
+ posemb_grid = F.interpolate(posemb_grid, size=(gs_h, gs_w), mode="bilinear")
+ posemb_grid = posemb_grid.permute(0, 2, 3, 1).reshape(1, gs_h * gs_w, -1)
+
+ posemb = torch.cat([posemb_tok, posemb_grid], dim=1)
+
+ return posemb
+
+
+def forward_flex(self, x):
+ b, c, h, w = x.shape
+
+ pos_embed = self._resize_pos_embed(
+ self.pos_embed, h // self.patch_size[1], w // self.patch_size[0]
+ )
+
+ B = x.shape[0]
+
+ if hasattr(self.patch_embed, "backbone"):
+ x = self.patch_embed.backbone(x)
+ if isinstance(x, (list, tuple)):
+ x = x[-1] # last feature if backbone outputs list/tuple of features
+
+ x = self.patch_embed.proj(x).flatten(2).transpose(1, 2)
+
+ if getattr(self, "dist_token", None) is not None:
+ cls_tokens = self.cls_token.expand(
+ B, -1, -1
+ ) # stole cls_tokens impl from Phil Wang, thanks
+ dist_token = self.dist_token.expand(B, -1, -1)
+ x = torch.cat((cls_tokens, dist_token, x), dim=1)
+ else:
+ cls_tokens = self.cls_token.expand(
+ B, -1, -1
+ ) # stole cls_tokens impl from Phil Wang, thanks
+ x = torch.cat((cls_tokens, x), dim=1)
+
+ x = x + pos_embed
+ x = self.pos_drop(x)
+
+ for blk in self.blocks:
+ x = blk(x)
+
+ x = self.norm(x)
+
+ return x
+
+
+activations = {}
+
+
+def get_activation(name):
+ def hook(model, input, output):
+ activations[name] = output
+
+ return hook
+
+
+def get_readout_oper(vit_features, features, use_readout, start_index=1):
+ if use_readout == "ignore":
+ readout_oper = [Slice(start_index)] * len(features)
+ elif use_readout == "add":
+ readout_oper = [AddReadout(start_index)] * len(features)
+ elif use_readout == "project":
+ readout_oper = [
+ ProjectReadout(vit_features, start_index) for out_feat in features
+ ]
+ else:
+ assert (
+ False
+ ), "wrong operation for readout token, use_readout can be 'ignore', 'add', or 'project'"
+
+ return readout_oper
+
+
+def _make_vit_b16_backbone(
+ model,
+ features=[96, 192, 384, 768],
+ size=[384, 384],
+ hooks=[2, 5, 8, 11],
+ vit_features=768,
+ use_readout="ignore",
+ start_index=1,
+):
+ pretrained = nn.Module()
+
+ pretrained.model = model
+ pretrained.model.blocks[hooks[0]].register_forward_hook(get_activation("1"))
+ pretrained.model.blocks[hooks[1]].register_forward_hook(get_activation("2"))
+ pretrained.model.blocks[hooks[2]].register_forward_hook(get_activation("3"))
+ pretrained.model.blocks[hooks[3]].register_forward_hook(get_activation("4"))
+
+ pretrained.activations = activations
+
+ readout_oper = get_readout_oper(vit_features, features, use_readout, start_index)
+
+ # 32, 48, 136, 384
+ pretrained.act_postprocess1 = nn.Sequential(
+ readout_oper[0],
+ Transpose(1, 2),
+ nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
+ nn.Conv2d(
+ in_channels=vit_features,
+ out_channels=features[0],
+ kernel_size=1,
+ stride=1,
+ padding=0,
+ ),
+ nn.ConvTranspose2d(
+ in_channels=features[0],
+ out_channels=features[0],
+ kernel_size=4,
+ stride=4,
+ padding=0,
+ bias=True,
+ dilation=1,
+ groups=1,
+ ),
+ )
+
+ pretrained.act_postprocess2 = nn.Sequential(
+ readout_oper[1],
+ Transpose(1, 2),
+ nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
+ nn.Conv2d(
+ in_channels=vit_features,
+ out_channels=features[1],
+ kernel_size=1,
+ stride=1,
+ padding=0,
+ ),
+ nn.ConvTranspose2d(
+ in_channels=features[1],
+ out_channels=features[1],
+ kernel_size=2,
+ stride=2,
+ padding=0,
+ bias=True,
+ dilation=1,
+ groups=1,
+ ),
+ )
+
+ pretrained.act_postprocess3 = nn.Sequential(
+ readout_oper[2],
+ Transpose(1, 2),
+ nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
+ nn.Conv2d(
+ in_channels=vit_features,
+ out_channels=features[2],
+ kernel_size=1,
+ stride=1,
+ padding=0,
+ ),
+ )
+
+ pretrained.act_postprocess4 = nn.Sequential(
+ readout_oper[3],
+ Transpose(1, 2),
+ nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
+ nn.Conv2d(
+ in_channels=vit_features,
+ out_channels=features[3],
+ kernel_size=1,
+ stride=1,
+ padding=0,
+ ),
+ nn.Conv2d(
+ in_channels=features[3],
+ out_channels=features[3],
+ kernel_size=3,
+ stride=2,
+ padding=1,
+ ),
+ )
+
+ pretrained.model.start_index = start_index
+ pretrained.model.patch_size = [16, 16]
+
+ # We inject this function into the VisionTransformer instances so that
+ # we can use it with interpolated position embeddings without modifying the library source.
+ pretrained.model.forward_flex = types.MethodType(forward_flex, pretrained.model)
+ pretrained.model._resize_pos_embed = types.MethodType(
+ _resize_pos_embed, pretrained.model
+ )
+
+ return pretrained
+
+
+def _make_pretrained_vitl16_384(pretrained, use_readout="ignore", hooks=None):
+ model = timm.create_model("vit_large_patch16_384", pretrained=pretrained)
+
+    hooks = [5, 11, 17, 23] if hooks is None else hooks
+ return _make_vit_b16_backbone(
+ model,
+ features=[256, 512, 1024, 1024],
+ hooks=hooks,
+ vit_features=1024,
+ use_readout=use_readout,
+ )
+
+
+def _make_pretrained_vitb16_384(pretrained, use_readout="ignore", hooks=None):
+ model = timm.create_model("vit_base_patch16_384", pretrained=pretrained)
+
+    hooks = [2, 5, 8, 11] if hooks is None else hooks
+ return _make_vit_b16_backbone(
+ model, features=[96, 192, 384, 768], hooks=hooks, use_readout=use_readout
+ )
+
+
+def _make_pretrained_deitb16_384(pretrained, use_readout="ignore", hooks=None):
+ model = timm.create_model("vit_deit_base_patch16_384", pretrained=pretrained)
+
+    hooks = [2, 5, 8, 11] if hooks is None else hooks
+ return _make_vit_b16_backbone(
+ model, features=[96, 192, 384, 768], hooks=hooks, use_readout=use_readout
+ )
+
+
+def _make_pretrained_deitb16_distil_384(pretrained, use_readout="ignore", hooks=None):
+ model = timm.create_model(
+ "vit_deit_base_distilled_patch16_384", pretrained=pretrained
+ )
+
+    hooks = [2, 5, 8, 11] if hooks is None else hooks
+ return _make_vit_b16_backbone(
+ model,
+ features=[96, 192, 384, 768],
+ hooks=hooks,
+ use_readout=use_readout,
+ start_index=2,
+ )
+
+
+def _make_vit_b_rn50_backbone(
+ model,
+ features=[256, 512, 768, 768],
+ size=[384, 384],
+ hooks=[0, 1, 8, 11],
+ vit_features=768,
+ use_vit_only=False,
+ use_readout="ignore",
+ start_index=1,
+):
+ pretrained = nn.Module()
+
+ pretrained.model = model
+
+    if use_vit_only:
+ pretrained.model.blocks[hooks[0]].register_forward_hook(get_activation("1"))
+ pretrained.model.blocks[hooks[1]].register_forward_hook(get_activation("2"))
+ else:
+ pretrained.model.patch_embed.backbone.stages[0].register_forward_hook(
+ get_activation("1")
+ )
+ pretrained.model.patch_embed.backbone.stages[1].register_forward_hook(
+ get_activation("2")
+ )
+
+ pretrained.model.blocks[hooks[2]].register_forward_hook(get_activation("3"))
+ pretrained.model.blocks[hooks[3]].register_forward_hook(get_activation("4"))
+
+ pretrained.activations = activations
+
+ readout_oper = get_readout_oper(vit_features, features, use_readout, start_index)
+
+    if use_vit_only:
+ pretrained.act_postprocess1 = nn.Sequential(
+ readout_oper[0],
+ Transpose(1, 2),
+ nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
+ nn.Conv2d(
+ in_channels=vit_features,
+ out_channels=features[0],
+ kernel_size=1,
+ stride=1,
+ padding=0,
+ ),
+ nn.ConvTranspose2d(
+ in_channels=features[0],
+ out_channels=features[0],
+ kernel_size=4,
+ stride=4,
+ padding=0,
+ bias=True,
+ dilation=1,
+ groups=1,
+ ),
+ )
+
+ pretrained.act_postprocess2 = nn.Sequential(
+ readout_oper[1],
+ Transpose(1, 2),
+ nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
+ nn.Conv2d(
+ in_channels=vit_features,
+ out_channels=features[1],
+ kernel_size=1,
+ stride=1,
+ padding=0,
+ ),
+ nn.ConvTranspose2d(
+ in_channels=features[1],
+ out_channels=features[1],
+ kernel_size=2,
+ stride=2,
+ padding=0,
+ bias=True,
+ dilation=1,
+ groups=1,
+ ),
+ )
+ else:
+ pretrained.act_postprocess1 = nn.Sequential(
+ nn.Identity(), nn.Identity(), nn.Identity()
+ )
+ pretrained.act_postprocess2 = nn.Sequential(
+ nn.Identity(), nn.Identity(), nn.Identity()
+ )
+
+ pretrained.act_postprocess3 = nn.Sequential(
+ readout_oper[2],
+ Transpose(1, 2),
+ nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
+ nn.Conv2d(
+ in_channels=vit_features,
+ out_channels=features[2],
+ kernel_size=1,
+ stride=1,
+ padding=0,
+ ),
+ )
+
+ pretrained.act_postprocess4 = nn.Sequential(
+ readout_oper[3],
+ Transpose(1, 2),
+ nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
+ nn.Conv2d(
+ in_channels=vit_features,
+ out_channels=features[3],
+ kernel_size=1,
+ stride=1,
+ padding=0,
+ ),
+ nn.Conv2d(
+ in_channels=features[3],
+ out_channels=features[3],
+ kernel_size=3,
+ stride=2,
+ padding=1,
+ ),
+ )
+
+ pretrained.model.start_index = start_index
+ pretrained.model.patch_size = [16, 16]
+
+ # We inject this function into the VisionTransformer instances so that
+ # we can use it with interpolated position embeddings without modifying the library source.
+ pretrained.model.forward_flex = types.MethodType(forward_flex, pretrained.model)
+
+ # We inject this function into the VisionTransformer instances so that
+ # we can use it with interpolated position embeddings without modifying the library source.
+ pretrained.model._resize_pos_embed = types.MethodType(
+ _resize_pos_embed, pretrained.model
+ )
+
+ return pretrained
+
+
+def _make_pretrained_vitb_rn50_384(
+ pretrained, use_readout="ignore", hooks=None, use_vit_only=False
+):
+ model = timm.create_model("vit_base_resnet50_384", pretrained=pretrained)
+
+    hooks = [0, 1, 8, 11] if hooks is None else hooks
+ return _make_vit_b_rn50_backbone(
+ model,
+ features=[256, 512, 768, 768],
+ size=[384, 384],
+ hooks=hooks,
+ use_vit_only=use_vit_only,
+ use_readout=use_readout,
+ )
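+
+
+# Illustrative sketch (assumption, not part of the upstream module): build a
+# hooked ViT backbone and run the multi-scale forward pass consumed by the
+# MiDaS/DPT decoder.  Downloading timm weights requires network access.
+#
+#   pretrained = _make_pretrained_vitb_rn50_384(True, use_readout="project")
+#   x = torch.randn(1, 3, 384, 384)
+#   layer_1, layer_2, layer_3, layer_4 = forward_vit(pretrained, x)
+#   # Four feature maps at decreasing spatial resolution, taken from the hooked
+#   # blocks and reshaped by the act_postprocess heads above.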
diff --git a/annotator/midas/utils.py b/annotator/midas/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..c74fe70bc4b3d77b6a6f6751756bc243721738a1
--- /dev/null
+++ b/annotator/midas/utils.py
@@ -0,0 +1,199 @@
+'''
+ * Copyright (c) 2023 Salesforce, Inc.
+ * All rights reserved.
+ * SPDX-License-Identifier: Apache License 2.0
+ * For full license text, see LICENSE.txt file in the repo root or http://www.apache.org/licenses/
+ * By Can Qin
+ * Modified from ControlNet repo: https://github.com/lllyasviel/ControlNet
+ * Copyright (c) 2023 Lvmin Zhang and Maneesh Agrawala
+'''
+
+"""Utils for monoDepth."""
+import sys
+import re
+import numpy as np
+import cv2
+import torch
+
+
+def read_pfm(path):
+ """Read pfm file.
+
+ Args:
+ path (str): path to file
+
+ Returns:
+ tuple: (data, scale)
+ """
+ with open(path, "rb") as file:
+
+ color = None
+ width = None
+ height = None
+ scale = None
+ endian = None
+
+ header = file.readline().rstrip()
+ if header.decode("ascii") == "PF":
+ color = True
+ elif header.decode("ascii") == "Pf":
+ color = False
+ else:
+ raise Exception("Not a PFM file: " + path)
+
+ dim_match = re.match(r"^(\d+)\s(\d+)\s$", file.readline().decode("ascii"))
+ if dim_match:
+ width, height = list(map(int, dim_match.groups()))
+ else:
+ raise Exception("Malformed PFM header.")
+
+ scale = float(file.readline().decode("ascii").rstrip())
+ if scale < 0:
+ # little-endian
+ endian = "<"
+ scale = -scale
+ else:
+ # big-endian
+ endian = ">"
+
+ data = np.fromfile(file, endian + "f")
+ shape = (height, width, 3) if color else (height, width)
+
+ data = np.reshape(data, shape)
+ data = np.flipud(data)
+
+ return data, scale
+
+
+def write_pfm(path, image, scale=1):
+ """Write pfm file.
+
+ Args:
+        path (str): path to file
+ image (array): data
+ scale (int, optional): Scale. Defaults to 1.
+ """
+
+ with open(path, "wb") as file:
+ color = None
+
+ if image.dtype.name != "float32":
+ raise Exception("Image dtype must be float32.")
+
+ image = np.flipud(image)
+
+ if len(image.shape) == 3 and image.shape[2] == 3: # color image
+ color = True
+ elif (
+ len(image.shape) == 2 or len(image.shape) == 3 and image.shape[2] == 1
+ ): # greyscale
+ color = False
+ else:
+ raise Exception("Image must have H x W x 3, H x W x 1 or H x W dimensions.")
+
+ file.write("PF\n" if color else "Pf\n".encode())
+ file.write("%d %d\n".encode() % (image.shape[1], image.shape[0]))
+
+ endian = image.dtype.byteorder
+
+ if endian == "<" or endian == "=" and sys.byteorder == "little":
+ scale = -scale
+
+ file.write("%f\n".encode() % scale)
+
+ image.tofile(file)
+
+
+def read_image(path):
+ """Read image and output RGB image (0-1).
+
+ Args:
+ path (str): path to file
+
+ Returns:
+ array: RGB image (0-1)
+ """
+ img = cv2.imread(path)
+
+ if img.ndim == 2:
+ img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
+
+ img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) / 255.0
+
+ return img
+
+
+def resize_image(img):
+ """Resize image and make it fit for network.
+
+ Args:
+ img (array): image
+
+ Returns:
+ tensor: data ready for network
+ """
+ height_orig = img.shape[0]
+ width_orig = img.shape[1]
+
+ if width_orig > height_orig:
+ scale = width_orig / 384
+ else:
+ scale = height_orig / 384
+
+ height = (np.ceil(height_orig / scale / 32) * 32).astype(int)
+ width = (np.ceil(width_orig / scale / 32) * 32).astype(int)
+
+ img_resized = cv2.resize(img, (width, height), interpolation=cv2.INTER_AREA)
+
+ img_resized = (
+ torch.from_numpy(np.transpose(img_resized, (2, 0, 1))).contiguous().float()
+ )
+ img_resized = img_resized.unsqueeze(0)
+
+ return img_resized
+
+
+def resize_depth(depth, width, height):
+ """Resize depth map and bring to CPU (numpy).
+
+ Args:
+ depth (tensor): depth
+ width (int): image width
+ height (int): image height
+
+ Returns:
+ array: processed depth
+ """
+ depth = torch.squeeze(depth[0, :, :, :]).to("cpu")
+
+ depth_resized = cv2.resize(
+ depth.numpy(), (width, height), interpolation=cv2.INTER_CUBIC
+ )
+
+ return depth_resized
+
+def write_depth(path, depth, bits=1):
+ """Write depth map to pfm and png file.
+
+ Args:
+ path (str): filepath without extension
+        depth (array): depth
+        bits (int, optional): bytes per pixel in the png output; 1 writes an
+            8-bit png, 2 a 16-bit png. Defaults to 1.
+    """
+ write_pfm(path + ".pfm", depth.astype(np.float32))
+
+ depth_min = depth.min()
+ depth_max = depth.max()
+
+ max_val = (2**(8*bits))-1
+
+ if depth_max - depth_min > np.finfo("float").eps:
+ out = max_val * (depth - depth_min) / (depth_max - depth_min)
+ else:
+        out = np.zeros(depth.shape, dtype=depth.dtype)
+
+ if bits == 1:
+ cv2.imwrite(path + ".png", out.astype("uint8"))
+ elif bits == 2:
+ cv2.imwrite(path + ".png", out.astype("uint16"))
+
+ return
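+
+
+# Illustrative sketch (assumption): how these helpers fit together for a single
+# image, given an already-loaded depth model `model` on `device`.
+#
+#   img = read_image("input.jpg")                      # HxWx3 RGB in [0, 1]
+#   sample = resize_image(img).to(device)              # 1x3xH'xW' tensor, long side 384
+#   with torch.no_grad():
+#       prediction = model(sample)                     # assumed shape 1xH'xW'
+#   depth = resize_depth(prediction.unsqueeze(1), img.shape[1], img.shape[0])
+#   write_depth("output", depth, bits=2)               # writes output.pfm and output.png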
diff --git a/annotator/mlsd/LICENSE b/annotator/mlsd/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..d855c6db44b4e873eedd750d34fa2eaf22e22363
--- /dev/null
+++ b/annotator/mlsd/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2021-present NAVER Corp.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
\ No newline at end of file
diff --git a/annotator/mlsd/__init__.py b/annotator/mlsd/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..4b44ad05af4b387cb8a1442f5b56c0c6ee3a2b1f
--- /dev/null
+++ b/annotator/mlsd/__init__.py
@@ -0,0 +1,53 @@
+'''
+ * Copyright (c) 2023 Salesforce, Inc.
+ * All rights reserved.
+ * SPDX-License-Identifier: Apache License 2.0
+ * For full license text, see LICENSE.txt file in the repo root or http://www.apache.org/licenses/
+ * By Can Qin
+ * Modified from ControlNet repo: https://github.com/lllyasviel/ControlNet
+ * Copyright (c) 2023 Lvmin Zhang and Maneesh Agrawala
+'''
+
+# MLSD Line Detection
+# From https://github.com/navervision/mlsd
+# Apache-2.0 license
+
+import cv2
+import numpy as np
+import torch
+import os
+
+from einops import rearrange
+from .models.mbv2_mlsd_tiny import MobileV2_MLSD_Tiny
+from .models.mbv2_mlsd_large import MobileV2_MLSD_Large
+from .utils import pred_lines
+
+from annotator.util import annotator_ckpts_path
+
+
+remote_model_path = "https://huggingface.co/lllyasviel/ControlNet/resolve/main/annotator/ckpts/mlsd_large_512_fp32.pth"
+
+
+class MLSDdetector:
+ def __init__(self):
+ model_path = os.path.join(annotator_ckpts_path, "mlsd_large_512_fp32.pth")
+ if not os.path.exists(model_path):
+ from basicsr.utils.download_util import load_file_from_url
+ load_file_from_url(remote_model_path, model_dir=annotator_ckpts_path)
+ model = MobileV2_MLSD_Large()
+ model.load_state_dict(torch.load(model_path), strict=True)
+ self.model = model.cuda().eval()
+
+ def __call__(self, input_image, thr_v, thr_d):
+ assert input_image.ndim == 3
+ img = input_image
+ img_output = np.zeros_like(img)
+ try:
+ with torch.no_grad():
+ lines = pred_lines(img, self.model, [img.shape[0], img.shape[1]], thr_v, thr_d)
+ for line in lines:
+ x_start, y_start, x_end, y_end = [int(val) for val in line]
+ cv2.line(img_output, (x_start, y_start), (x_end, y_end), [255, 255, 255], 1)
+        except Exception:
+            # If line prediction fails, return the empty (all-black) line map.
+            pass
+ return img_output[:, :, 0]
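+
+
+# Illustrative sketch (assumption): typical use of the detector in this repo.
+# `image` is an HxWx3 uint8 RGB array; thr_v and thr_d are the score and
+# distance thresholds forwarded to pred_lines.
+#
+#   apply_mlsd = MLSDdetector()                # needs a CUDA device
+#   line_map = apply_mlsd(image, thr_v=0.1, thr_d=0.1)
+#   # line_map is a single-channel uint8 map with detected segments drawn in white.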
diff --git a/annotator/mlsd/models/mbv2_mlsd_large.py b/annotator/mlsd/models/mbv2_mlsd_large.py
new file mode 100644
index 0000000000000000000000000000000000000000..7122d8ebd9279530b332a0729a9c2bd2aed70fb9
--- /dev/null
+++ b/annotator/mlsd/models/mbv2_mlsd_large.py
@@ -0,0 +1,302 @@
+'''
+ * Copyright (c) 2023 Salesforce, Inc.
+ * All rights reserved.
+ * SPDX-License-Identifier: Apache License 2.0
+ * For full license text, see LICENSE.txt file in the repo root or http://www.apache.org/licenses/
+ * By Can Qin
+ * Modified from ControlNet repo: https://github.com/lllyasviel/ControlNet
+ * Copyright (c) 2023 Lvmin Zhang and Maneesh Agrawala
+'''
+
+import os
+import sys
+import torch
+import torch.nn as nn
+import torch.utils.model_zoo as model_zoo
+from torch.nn import functional as F
+
+
+class BlockTypeA(nn.Module):
+ def __init__(self, in_c1, in_c2, out_c1, out_c2, upscale = True):
+ super(BlockTypeA, self).__init__()
+ self.conv1 = nn.Sequential(
+ nn.Conv2d(in_c2, out_c2, kernel_size=1),
+ nn.BatchNorm2d(out_c2),
+ nn.ReLU(inplace=True)
+ )
+ self.conv2 = nn.Sequential(
+ nn.Conv2d(in_c1, out_c1, kernel_size=1),
+ nn.BatchNorm2d(out_c1),
+ nn.ReLU(inplace=True)
+ )
+ self.upscale = upscale
+
+ def forward(self, a, b):
+ b = self.conv1(b)
+ a = self.conv2(a)
+ if self.upscale:
+ b = F.interpolate(b, scale_factor=2.0, mode='bilinear', align_corners=True)
+ return torch.cat((a, b), dim=1)
+
+
+class BlockTypeB(nn.Module):
+ def __init__(self, in_c, out_c):
+ super(BlockTypeB, self).__init__()
+ self.conv1 = nn.Sequential(
+ nn.Conv2d(in_c, in_c, kernel_size=3, padding=1),
+ nn.BatchNorm2d(in_c),
+ nn.ReLU()
+ )
+ self.conv2 = nn.Sequential(
+ nn.Conv2d(in_c, out_c, kernel_size=3, padding=1),
+ nn.BatchNorm2d(out_c),
+ nn.ReLU()
+ )
+
+ def forward(self, x):
+ x = self.conv1(x) + x
+ x = self.conv2(x)
+ return x
+
+class BlockTypeC(nn.Module):
+ def __init__(self, in_c, out_c):
+ super(BlockTypeC, self).__init__()
+ self.conv1 = nn.Sequential(
+ nn.Conv2d(in_c, in_c, kernel_size=3, padding=5, dilation=5),
+ nn.BatchNorm2d(in_c),
+ nn.ReLU()
+ )
+ self.conv2 = nn.Sequential(
+ nn.Conv2d(in_c, in_c, kernel_size=3, padding=1),
+ nn.BatchNorm2d(in_c),
+ nn.ReLU()
+ )
+ self.conv3 = nn.Conv2d(in_c, out_c, kernel_size=1)
+
+ def forward(self, x):
+ x = self.conv1(x)
+ x = self.conv2(x)
+ x = self.conv3(x)
+ return x
+
+def _make_divisible(v, divisor, min_value=None):
+ """
+ This function is taken from the original tf repo.
+ It ensures that all layers have a channel number that is divisible by 8
+ It can be seen here:
+ https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
+ :param v:
+ :param divisor:
+ :param min_value:
+ :return:
+ """
+ if min_value is None:
+ min_value = divisor
+ new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
+ # Make sure that round down does not go down by more than 10%.
+ if new_v < 0.9 * v:
+ new_v += divisor
+ return new_v
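+
+# Worked example (illustrative): with divisor=8, _make_divisible(32, 8) -> 32,
+# _make_divisible(37, 8) -> 40 (nearest multiple), and _make_divisible(10, 8)
+# -> 16, because rounding down to 8 would shrink the channel count by more
+# than 10%.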
+
+
+class ConvBNReLU(nn.Sequential):
+ def __init__(self, in_planes, out_planes, kernel_size=3, stride=1, groups=1):
+ self.channel_pad = out_planes - in_planes
+ self.stride = stride
+ #padding = (kernel_size - 1) // 2
+
+ # TFLite uses slightly different padding than PyTorch
+ if stride == 2:
+ padding = 0
+ else:
+ padding = (kernel_size - 1) // 2
+
+ super(ConvBNReLU, self).__init__(
+ nn.Conv2d(in_planes, out_planes, kernel_size, stride, padding, groups=groups, bias=False),
+ nn.BatchNorm2d(out_planes),
+ nn.ReLU6(inplace=True)
+ )
+ self.max_pool = nn.MaxPool2d(kernel_size=stride, stride=stride)
+
+
+ def forward(self, x):
+ # TFLite uses different padding
+ if self.stride == 2:
+ x = F.pad(x, (0, 1, 0, 1), "constant", 0)
+ #print(x.shape)
+
+ for module in self:
+ if not isinstance(module, nn.MaxPool2d):
+ x = module(x)
+ return x
+
+
+class InvertedResidual(nn.Module):
+ def __init__(self, inp, oup, stride, expand_ratio):
+ super(InvertedResidual, self).__init__()
+ self.stride = stride
+ assert stride in [1, 2]
+
+ hidden_dim = int(round(inp * expand_ratio))
+ self.use_res_connect = self.stride == 1 and inp == oup
+
+ layers = []
+ if expand_ratio != 1:
+ # pw
+ layers.append(ConvBNReLU(inp, hidden_dim, kernel_size=1))
+ layers.extend([
+ # dw
+ ConvBNReLU(hidden_dim, hidden_dim, stride=stride, groups=hidden_dim),
+ # pw-linear
+ nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
+ nn.BatchNorm2d(oup),
+ ])
+ self.conv = nn.Sequential(*layers)
+
+ def forward(self, x):
+ if self.use_res_connect:
+ return x + self.conv(x)
+ else:
+ return self.conv(x)
+
+
+class MobileNetV2(nn.Module):
+ def __init__(self, pretrained=True):
+ """
+ MobileNet V2 main class
+ Args:
+ num_classes (int): Number of classes
+ width_mult (float): Width multiplier - adjusts number of channels in each layer by this amount
+ inverted_residual_setting: Network structure
+ round_nearest (int): Round the number of channels in each layer to be a multiple of this number
+ Set to 1 to turn off rounding
+ block: Module specifying inverted residual building block for mobilenet
+ """
+ super(MobileNetV2, self).__init__()
+
+ block = InvertedResidual
+ input_channel = 32
+ last_channel = 1280
+ width_mult = 1.0
+ round_nearest = 8
+
+ inverted_residual_setting = [
+ # t, c, n, s
+ [1, 16, 1, 1],
+ [6, 24, 2, 2],
+ [6, 32, 3, 2],
+ [6, 64, 4, 2],
+ [6, 96, 3, 1],
+ #[6, 160, 3, 2],
+ #[6, 320, 1, 1],
+ ]
+
+ # only check the first element, assuming user knows t,c,n,s are required
+ if len(inverted_residual_setting) == 0 or len(inverted_residual_setting[0]) != 4:
+ raise ValueError("inverted_residual_setting should be non-empty "
+ "or a 4-element list, got {}".format(inverted_residual_setting))
+
+ # building first layer
+ input_channel = _make_divisible(input_channel * width_mult, round_nearest)
+ self.last_channel = _make_divisible(last_channel * max(1.0, width_mult), round_nearest)
+ features = [ConvBNReLU(4, input_channel, stride=2)]
+ # building inverted residual blocks
+ for t, c, n, s in inverted_residual_setting:
+ output_channel = _make_divisible(c * width_mult, round_nearest)
+ for i in range(n):
+ stride = s if i == 0 else 1
+ features.append(block(input_channel, output_channel, stride, expand_ratio=t))
+ input_channel = output_channel
+
+ self.features = nn.Sequential(*features)
+ self.fpn_selected = [1, 3, 6, 10, 13]
+ # weight initialization
+ for m in self.modules():
+ if isinstance(m, nn.Conv2d):
+ nn.init.kaiming_normal_(m.weight, mode='fan_out')
+ if m.bias is not None:
+ nn.init.zeros_(m.bias)
+ elif isinstance(m, nn.BatchNorm2d):
+ nn.init.ones_(m.weight)
+ nn.init.zeros_(m.bias)
+ elif isinstance(m, nn.Linear):
+ nn.init.normal_(m.weight, 0, 0.01)
+ nn.init.zeros_(m.bias)
+ if pretrained:
+ self._load_pretrained_model()
+
+ def _forward_impl(self, x):
+ # This exists since TorchScript doesn't support inheritance, so the superclass method
+ # (this one) needs to have a name other than `forward` that can be accessed in a subclass
+ fpn_features = []
+ for i, f in enumerate(self.features):
+ if i > self.fpn_selected[-1]:
+ break
+ x = f(x)
+ if i in self.fpn_selected:
+ fpn_features.append(x)
+
+ c1, c2, c3, c4, c5 = fpn_features
+ return c1, c2, c3, c4, c5
+
+
+ def forward(self, x):
+ return self._forward_impl(x)
+
+ def _load_pretrained_model(self):
+ pretrain_dict = model_zoo.load_url('https://download.pytorch.org/models/mobilenet_v2-b0353104.pth')
+ model_dict = {}
+ state_dict = self.state_dict()
+ for k, v in pretrain_dict.items():
+ if k in state_dict:
+ model_dict[k] = v
+ state_dict.update(model_dict)
+ self.load_state_dict(state_dict)
+
+
+class MobileV2_MLSD_Large(nn.Module):
+ def __init__(self):
+ super(MobileV2_MLSD_Large, self).__init__()
+
+ self.backbone = MobileNetV2(pretrained=False)
+ ## A, B
+ self.block15 = BlockTypeA(in_c1= 64, in_c2= 96,
+ out_c1= 64, out_c2=64,
+ upscale=False)
+ self.block16 = BlockTypeB(128, 64)
+
+ ## A, B
+ self.block17 = BlockTypeA(in_c1 = 32, in_c2 = 64,
+ out_c1= 64, out_c2= 64)
+ self.block18 = BlockTypeB(128, 64)
+
+ ## A, B
+ self.block19 = BlockTypeA(in_c1=24, in_c2=64,
+ out_c1=64, out_c2=64)
+ self.block20 = BlockTypeB(128, 64)
+
+ ## A, B, C
+ self.block21 = BlockTypeA(in_c1=16, in_c2=64,
+ out_c1=64, out_c2=64)
+ self.block22 = BlockTypeB(128, 64)
+
+ self.block23 = BlockTypeC(64, 16)
+
+ def forward(self, x):
+ c1, c2, c3, c4, c5 = self.backbone(x)
+
+ x = self.block15(c4, c5)
+ x = self.block16(x)
+
+ x = self.block17(c3, x)
+ x = self.block18(x)
+
+ x = self.block19(c2, x)
+ x = self.block20(x)
+
+ x = self.block21(c1, x)
+ x = self.block22(x)
+ x = self.block23(x)
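+        # Keep the last 9 of the 16 predicted channels: downstream decoding in
+        # annotator/mlsd/utils.py reads channel 0 of this slice as the line-center
+        # heat map and channels 1:5 as the endpoint displacement field.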
+ x = x[:, 7:, :, :]
+
+ return x
\ No newline at end of file
diff --git a/annotator/mlsd/models/mbv2_mlsd_tiny.py b/annotator/mlsd/models/mbv2_mlsd_tiny.py
new file mode 100644
index 0000000000000000000000000000000000000000..a7b90c3f97fe1475461294e820263353b0008007
--- /dev/null
+++ b/annotator/mlsd/models/mbv2_mlsd_tiny.py
@@ -0,0 +1,285 @@
+'''
+ * Copyright (c) 2023 Salesforce, Inc.
+ * All rights reserved.
+ * SPDX-License-Identifier: Apache License 2.0
+ * For full license text, see LICENSE.txt file in the repo root or http://www.apache.org/licenses/
+ * By Can Qin
+ * Modified from ControlNet repo: https://github.com/lllyasviel/ControlNet
+ * Copyright (c) 2023 Lvmin Zhang and Maneesh Agrawala
+'''
+
+import os
+import sys
+import torch
+import torch.nn as nn
+import torch.utils.model_zoo as model_zoo
+from torch.nn import functional as F
+
+
+class BlockTypeA(nn.Module):
+ def __init__(self, in_c1, in_c2, out_c1, out_c2, upscale = True):
+ super(BlockTypeA, self).__init__()
+ self.conv1 = nn.Sequential(
+ nn.Conv2d(in_c2, out_c2, kernel_size=1),
+ nn.BatchNorm2d(out_c2),
+ nn.ReLU(inplace=True)
+ )
+ self.conv2 = nn.Sequential(
+ nn.Conv2d(in_c1, out_c1, kernel_size=1),
+ nn.BatchNorm2d(out_c1),
+ nn.ReLU(inplace=True)
+ )
+ self.upscale = upscale
+
+ def forward(self, a, b):
+ b = self.conv1(b)
+ a = self.conv2(a)
+ b = F.interpolate(b, scale_factor=2.0, mode='bilinear', align_corners=True)
+ return torch.cat((a, b), dim=1)
+
+
+class BlockTypeB(nn.Module):
+ def __init__(self, in_c, out_c):
+ super(BlockTypeB, self).__init__()
+ self.conv1 = nn.Sequential(
+ nn.Conv2d(in_c, in_c, kernel_size=3, padding=1),
+ nn.BatchNorm2d(in_c),
+ nn.ReLU()
+ )
+ self.conv2 = nn.Sequential(
+ nn.Conv2d(in_c, out_c, kernel_size=3, padding=1),
+ nn.BatchNorm2d(out_c),
+ nn.ReLU()
+ )
+
+ def forward(self, x):
+ x = self.conv1(x) + x
+ x = self.conv2(x)
+ return x
+
+class BlockTypeC(nn.Module):
+ def __init__(self, in_c, out_c):
+ super(BlockTypeC, self).__init__()
+ self.conv1 = nn.Sequential(
+ nn.Conv2d(in_c, in_c, kernel_size=3, padding=5, dilation=5),
+ nn.BatchNorm2d(in_c),
+ nn.ReLU()
+ )
+ self.conv2 = nn.Sequential(
+ nn.Conv2d(in_c, in_c, kernel_size=3, padding=1),
+ nn.BatchNorm2d(in_c),
+ nn.ReLU()
+ )
+ self.conv3 = nn.Conv2d(in_c, out_c, kernel_size=1)
+
+ def forward(self, x):
+ x = self.conv1(x)
+ x = self.conv2(x)
+ x = self.conv3(x)
+ return x
+
+def _make_divisible(v, divisor, min_value=None):
+ """
+ This function is taken from the original tf repo.
+ It ensures that all layers have a channel number that is divisible by 8
+ It can be seen here:
+ https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
+ :param v:
+ :param divisor:
+ :param min_value:
+ :return:
+ """
+ if min_value is None:
+ min_value = divisor
+ new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
+ # Make sure that round down does not go down by more than 10%.
+ if new_v < 0.9 * v:
+ new_v += divisor
+ return new_v
+
+
+class ConvBNReLU(nn.Sequential):
+ def __init__(self, in_planes, out_planes, kernel_size=3, stride=1, groups=1):
+ self.channel_pad = out_planes - in_planes
+ self.stride = stride
+ #padding = (kernel_size - 1) // 2
+
+ # TFLite uses slightly different padding than PyTorch
+ if stride == 2:
+ padding = 0
+ else:
+ padding = (kernel_size - 1) // 2
+
+ super(ConvBNReLU, self).__init__(
+ nn.Conv2d(in_planes, out_planes, kernel_size, stride, padding, groups=groups, bias=False),
+ nn.BatchNorm2d(out_planes),
+ nn.ReLU6(inplace=True)
+ )
+ self.max_pool = nn.MaxPool2d(kernel_size=stride, stride=stride)
+
+
+ def forward(self, x):
+ # TFLite uses different padding
+ if self.stride == 2:
+ x = F.pad(x, (0, 1, 0, 1), "constant", 0)
+ #print(x.shape)
+
+ for module in self:
+ if not isinstance(module, nn.MaxPool2d):
+ x = module(x)
+ return x
+
+
+class InvertedResidual(nn.Module):
+ def __init__(self, inp, oup, stride, expand_ratio):
+ super(InvertedResidual, self).__init__()
+ self.stride = stride
+ assert stride in [1, 2]
+
+ hidden_dim = int(round(inp * expand_ratio))
+ self.use_res_connect = self.stride == 1 and inp == oup
+
+ layers = []
+ if expand_ratio != 1:
+ # pw
+ layers.append(ConvBNReLU(inp, hidden_dim, kernel_size=1))
+ layers.extend([
+ # dw
+ ConvBNReLU(hidden_dim, hidden_dim, stride=stride, groups=hidden_dim),
+ # pw-linear
+ nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
+ nn.BatchNorm2d(oup),
+ ])
+ self.conv = nn.Sequential(*layers)
+
+ def forward(self, x):
+ if self.use_res_connect:
+ return x + self.conv(x)
+ else:
+ return self.conv(x)
+
+
+class MobileNetV2(nn.Module):
+ def __init__(self, pretrained=True):
+ """
+ MobileNet V2 main class
+ Args:
+ num_classes (int): Number of classes
+ width_mult (float): Width multiplier - adjusts number of channels in each layer by this amount
+ inverted_residual_setting: Network structure
+ round_nearest (int): Round the number of channels in each layer to be a multiple of this number
+ Set to 1 to turn off rounding
+ block: Module specifying inverted residual building block for mobilenet
+ """
+ super(MobileNetV2, self).__init__()
+
+ block = InvertedResidual
+ input_channel = 32
+ last_channel = 1280
+ width_mult = 1.0
+ round_nearest = 8
+
+ inverted_residual_setting = [
+ # t, c, n, s
+ [1, 16, 1, 1],
+ [6, 24, 2, 2],
+ [6, 32, 3, 2],
+ [6, 64, 4, 2],
+ #[6, 96, 3, 1],
+ #[6, 160, 3, 2],
+ #[6, 320, 1, 1],
+ ]
+
+ # only check the first element, assuming user knows t,c,n,s are required
+ if len(inverted_residual_setting) == 0 or len(inverted_residual_setting[0]) != 4:
+ raise ValueError("inverted_residual_setting should be non-empty "
+ "or a 4-element list, got {}".format(inverted_residual_setting))
+
+ # building first layer
+ input_channel = _make_divisible(input_channel * width_mult, round_nearest)
+ self.last_channel = _make_divisible(last_channel * max(1.0, width_mult), round_nearest)
+ features = [ConvBNReLU(4, input_channel, stride=2)]
+ # building inverted residual blocks
+ for t, c, n, s in inverted_residual_setting:
+ output_channel = _make_divisible(c * width_mult, round_nearest)
+ for i in range(n):
+ stride = s if i == 0 else 1
+ features.append(block(input_channel, output_channel, stride, expand_ratio=t))
+ input_channel = output_channel
+ self.features = nn.Sequential(*features)
+
+ self.fpn_selected = [3, 6, 10]
+ # weight initialization
+ for m in self.modules():
+ if isinstance(m, nn.Conv2d):
+ nn.init.kaiming_normal_(m.weight, mode='fan_out')
+ if m.bias is not None:
+ nn.init.zeros_(m.bias)
+ elif isinstance(m, nn.BatchNorm2d):
+ nn.init.ones_(m.weight)
+ nn.init.zeros_(m.bias)
+ elif isinstance(m, nn.Linear):
+ nn.init.normal_(m.weight, 0, 0.01)
+ nn.init.zeros_(m.bias)
+
+ #if pretrained:
+ # self._load_pretrained_model()
+
+ def _forward_impl(self, x):
+ # This exists since TorchScript doesn't support inheritance, so the superclass method
+ # (this one) needs to have a name other than `forward` that can be accessed in a subclass
+ fpn_features = []
+ for i, f in enumerate(self.features):
+ if i > self.fpn_selected[-1]:
+ break
+ x = f(x)
+ if i in self.fpn_selected:
+ fpn_features.append(x)
+
+ c2, c3, c4 = fpn_features
+ return c2, c3, c4
+
+
+ def forward(self, x):
+ return self._forward_impl(x)
+
+ def _load_pretrained_model(self):
+ pretrain_dict = model_zoo.load_url('https://download.pytorch.org/models/mobilenet_v2-b0353104.pth')
+ model_dict = {}
+ state_dict = self.state_dict()
+ for k, v in pretrain_dict.items():
+ if k in state_dict:
+ model_dict[k] = v
+ state_dict.update(model_dict)
+ self.load_state_dict(state_dict)
+
+
+class MobileV2_MLSD_Tiny(nn.Module):
+ def __init__(self):
+ super(MobileV2_MLSD_Tiny, self).__init__()
+
+ self.backbone = MobileNetV2(pretrained=True)
+
+ self.block12 = BlockTypeA(in_c1= 32, in_c2= 64,
+ out_c1= 64, out_c2=64)
+ self.block13 = BlockTypeB(128, 64)
+
+ self.block14 = BlockTypeA(in_c1 = 24, in_c2 = 64,
+ out_c1= 32, out_c2= 32)
+ self.block15 = BlockTypeB(64, 64)
+
+ self.block16 = BlockTypeC(64, 16)
+
+ def forward(self, x):
+ c2, c3, c4 = self.backbone(x)
+
+ x = self.block12(c3, c4)
+ x = self.block13(x)
+ x = self.block14(c2, x)
+ x = self.block15(x)
+ x = self.block16(x)
+ x = x[:, 7:, :, :]
+ #print(x.shape)
+ x = F.interpolate(x, scale_factor=2.0, mode='bilinear', align_corners=True)
+
+ return x
\ No newline at end of file
diff --git a/annotator/mlsd/utils.py b/annotator/mlsd/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..e938aa5523270bdd52af88bc8029a0ca969cb111
--- /dev/null
+++ b/annotator/mlsd/utils.py
@@ -0,0 +1,590 @@
+'''
+ * Copyright (c) 2023 Salesforce, Inc.
+ * All rights reserved.
+ * SPDX-License-Identifier: Apache License 2.0
+ * For full license text, see LICENSE.txt file in the repo root or http://www.apache.org/licenses/
+ * By Can Qin
+ * Modified from ControlNet repo: https://github.com/lllyasviel/ControlNet
+ * Copyright (c) 2023 Lvmin Zhang and Maneesh Agrawala
+'''
+
+'''
+modified by lihaoweicv
+pytorch version
+'''
+
+'''
+M-LSD
+Copyright 2021-present NAVER Corp.
+Apache License v2.0
+'''
+
+import os
+import numpy as np
+import cv2
+import torch
+from torch.nn import functional as F
+
+
+def deccode_output_score_and_ptss(tpMap, topk_n = 200, ksize = 5):
+ '''
+ tpMap:
+ center: tpMap[1, 0, :, :]
+ displacement: tpMap[1, 1:5, :, :]
+ '''
+ b, c, h, w = tpMap.shape
+ assert b==1, 'only support bsize==1'
+ displacement = tpMap[:, 1:5, :, :][0]
+ center = tpMap[:, 0, :, :]
+ heat = torch.sigmoid(center)
+ hmax = F.max_pool2d( heat, (ksize, ksize), stride=1, padding=(ksize-1)//2)
+ keep = (hmax == heat).float()
+ heat = heat * keep
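+    # hmax equals heat only at local maxima of the ksize x ksize neighbourhood,
+    # so the multiplication above acts as a simple non-maximum suppression on
+    # the sigmoid-activated center map before the top-k selection below.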
+ heat = heat.reshape(-1, )
+
+ scores, indices = torch.topk(heat, topk_n, dim=-1, largest=True)
+ yy = torch.floor_divide(indices, w).unsqueeze(-1)
+ xx = torch.fmod(indices, w).unsqueeze(-1)
+ ptss = torch.cat((yy, xx),dim=-1)
+
+ ptss = ptss.detach().cpu().numpy()
+ scores = scores.detach().cpu().numpy()
+ displacement = displacement.detach().cpu().numpy()
+ displacement = displacement.transpose((1,2,0))
+ return ptss, scores, displacement
+
+
+def pred_lines(image, model,
+ input_shape=[512, 512],
+ score_thr=0.10,
+ dist_thr=20.0):
+ h, w, _ = image.shape
+ h_ratio, w_ratio = [h / input_shape[0], w / input_shape[1]]
+
+ resized_image = np.concatenate([cv2.resize(image, (input_shape[1], input_shape[0]), interpolation=cv2.INTER_AREA),
+ np.ones([input_shape[0], input_shape[1], 1])], axis=-1)
+
+ resized_image = resized_image.transpose((2,0,1))
+ batch_image = np.expand_dims(resized_image, axis=0).astype('float32')
+ batch_image = (batch_image / 127.5) - 1.0
+
+ batch_image = torch.from_numpy(batch_image).float().cuda()
+ outputs = model(batch_image)
+ pts, pts_score, vmap = deccode_output_score_and_ptss(outputs, 200, 3)
+ start = vmap[:, :, :2]
+ end = vmap[:, :, 2:]
+ dist_map = np.sqrt(np.sum((start - end) ** 2, axis=-1))
+
+ segments_list = []
+ for center, score in zip(pts, pts_score):
+ y, x = center
+ distance = dist_map[y, x]
+ if score > score_thr and distance > dist_thr:
+ disp_x_start, disp_y_start, disp_x_end, disp_y_end = vmap[y, x, :]
+ x_start = x + disp_x_start
+ y_start = y + disp_y_start
+ x_end = x + disp_x_end
+ y_end = y + disp_y_end
+ segments_list.append([x_start, y_start, x_end, y_end])
+
+    lines = 2 * np.array(segments_list)  # model output grid is half the 512 x 512 input, so scale coordinates back up (256 -> 512)
+ lines[:, 0] = lines[:, 0] * w_ratio
+ lines[:, 1] = lines[:, 1] * h_ratio
+ lines[:, 2] = lines[:, 2] * w_ratio
+ lines[:, 3] = lines[:, 3] * h_ratio
+
+ return lines
+
+
+def pred_squares(image,
+ model,
+ input_shape=[512, 512],
+ params={'score': 0.06,
+ 'outside_ratio': 0.28,
+ 'inside_ratio': 0.45,
+ 'w_overlap': 0.0,
+ 'w_degree': 1.95,
+ 'w_length': 0.0,
+ 'w_area': 1.86,
+ 'w_center': 0.14}):
+ '''
+ shape = [height, width]
+ '''
+ h, w, _ = image.shape
+ original_shape = [h, w]
+
+    # cv2.resize expects dsize as (width, height)
+    resized_image = np.concatenate([cv2.resize(image, (input_shape[1], input_shape[0]), interpolation=cv2.INTER_AREA),
+ np.ones([input_shape[0], input_shape[1], 1])], axis=-1)
+ resized_image = resized_image.transpose((2, 0, 1))
+ batch_image = np.expand_dims(resized_image, axis=0).astype('float32')
+ batch_image = (batch_image / 127.5) - 1.0
+
+ batch_image = torch.from_numpy(batch_image).float().cuda()
+ outputs = model(batch_image)
+
+ pts, pts_score, vmap = deccode_output_score_and_ptss(outputs, 200, 3)
+ start = vmap[:, :, :2] # (x, y)
+ end = vmap[:, :, 2:] # (x, y)
+ dist_map = np.sqrt(np.sum((start - end) ** 2, axis=-1))
+
+ junc_list = []
+ segments_list = []
+ for junc, score in zip(pts, pts_score):
+ y, x = junc
+ distance = dist_map[y, x]
+ if score > params['score'] and distance > 20.0:
+ junc_list.append([x, y])
+ disp_x_start, disp_y_start, disp_x_end, disp_y_end = vmap[y, x, :]
+ d_arrow = 1.0
+ x_start = x + d_arrow * disp_x_start
+ y_start = y + d_arrow * disp_y_start
+ x_end = x + d_arrow * disp_x_end
+ y_end = y + d_arrow * disp_y_end
+ segments_list.append([x_start, y_start, x_end, y_end])
+
+ segments = np.array(segments_list)
+
+ ####### post processing for squares
+ # 1. get unique lines
+ point = np.array([[0, 0]])
+ point = point[0]
+ start = segments[:, :2]
+ end = segments[:, 2:]
+ diff = start - end
+ a = diff[:, 1]
+ b = -diff[:, 0]
+ c = a * start[:, 0] + b * start[:, 1]
+
+ d = np.abs(a * point[0] + b * point[1] - c) / np.sqrt(a ** 2 + b ** 2 + 1e-10)
+ theta = np.arctan2(diff[:, 0], diff[:, 1]) * 180 / np.pi
+ theta[theta < 0.0] += 180
+ hough = np.concatenate([d[:, None], theta[:, None]], axis=-1)
+
+ d_quant = 1
+ theta_quant = 2
+ hough[:, 0] //= d_quant
+ hough[:, 1] //= theta_quant
+ _, indices, counts = np.unique(hough, axis=0, return_index=True, return_counts=True)
+
+ acc_map = np.zeros([512 // d_quant + 1, 360 // theta_quant + 1], dtype='float32')
+ idx_map = np.zeros([512 // d_quant + 1, 360 // theta_quant + 1], dtype='int32') - 1
+ yx_indices = hough[indices, :].astype('int32')
+ acc_map[yx_indices[:, 0], yx_indices[:, 1]] = counts
+ idx_map[yx_indices[:, 0], yx_indices[:, 1]] = indices
+
+ acc_map_np = acc_map
+ # acc_map = acc_map[None, :, :, None]
+ #
+ # ### fast suppression using tensorflow op
+ # acc_map = tf.constant(acc_map, dtype=tf.float32)
+ # max_acc_map = tf.keras.layers.MaxPool2D(pool_size=(5, 5), strides=1, padding='same')(acc_map)
+ # acc_map = acc_map * tf.cast(tf.math.equal(acc_map, max_acc_map), tf.float32)
+ # flatten_acc_map = tf.reshape(acc_map, [1, -1])
+ # topk_values, topk_indices = tf.math.top_k(flatten_acc_map, k=len(pts))
+ # _, h, w, _ = acc_map.shape
+ # y = tf.expand_dims(topk_indices // w, axis=-1)
+ # x = tf.expand_dims(topk_indices % w, axis=-1)
+ # yx = tf.concat([y, x], axis=-1)
+
+ ### fast suppression using pytorch op
+ acc_map = torch.from_numpy(acc_map_np).unsqueeze(0).unsqueeze(0)
+ _,_, h, w = acc_map.shape
+ max_acc_map = F.max_pool2d(acc_map,kernel_size=5, stride=1, padding=2)
+ acc_map = acc_map * ( (acc_map == max_acc_map).float() )
+ flatten_acc_map = acc_map.reshape([-1, ])
+
+ scores, indices = torch.topk(flatten_acc_map, len(pts), dim=-1, largest=True)
+ yy = torch.div(indices, w, rounding_mode='floor').unsqueeze(-1)
+ xx = torch.fmod(indices, w).unsqueeze(-1)
+ yx = torch.cat((yy, xx), dim=-1)
+
+ yx = yx.detach().cpu().numpy()
+
+ topk_values = scores.detach().cpu().numpy()
+ indices = idx_map[yx[:, 0], yx[:, 1]]
+ basis = 5 // 2
+
+ merged_segments = []
+ for yx_pt, max_indice, value in zip(yx, indices, topk_values):
+ y, x = yx_pt
+ if max_indice == -1 or value == 0:
+ continue
+ segment_list = []
+ for y_offset in range(-basis, basis + 1):
+ for x_offset in range(-basis, basis + 1):
+ indice = idx_map[y + y_offset, x + x_offset]
+ cnt = int(acc_map_np[y + y_offset, x + x_offset])
+ if indice != -1:
+ segment_list.append(segments[indice])
+ if cnt > 1:
+ check_cnt = 1
+ current_hough = hough[indice]
+ for new_indice, new_hough in enumerate(hough):
+ if (current_hough == new_hough).all() and indice != new_indice:
+ segment_list.append(segments[new_indice])
+ check_cnt += 1
+ if check_cnt == cnt:
+ break
+ group_segments = np.array(segment_list).reshape([-1, 2])
+ sorted_group_segments = np.sort(group_segments, axis=0)
+ x_min, y_min = sorted_group_segments[0, :]
+ x_max, y_max = sorted_group_segments[-1, :]
+
+ deg = theta[max_indice]
+ if deg >= 90:
+ merged_segments.append([x_min, y_max, x_max, y_min])
+ else:
+ merged_segments.append([x_min, y_min, x_max, y_max])
+
+ # 2. get intersections
+ new_segments = np.array(merged_segments) # (x1, y1, x2, y2)
+ start = new_segments[:, :2] # (x1, y1)
+ end = new_segments[:, 2:] # (x2, y2)
+ new_centers = (start + end) / 2.0
+ diff = start - end
+ dist_segments = np.sqrt(np.sum(diff ** 2, axis=-1))
+
+ # ax + by = c
+ a = diff[:, 1]
+ b = -diff[:, 0]
+ c = a * start[:, 0] + b * start[:, 1]
+ pre_det = a[:, None] * b[None, :]
+ det = pre_det - np.transpose(pre_det)
+
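+    # intersect every pair of lines a_i x + b_i y = c_i via Cramer's rule; the 1e-10 term
+    # keeps the division finite for (near-)parallel pairs where det is ~0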
+ pre_inter_y = a[:, None] * c[None, :]
+ inter_y = (pre_inter_y - np.transpose(pre_inter_y)) / (det + 1e-10)
+ pre_inter_x = c[:, None] * b[None, :]
+ inter_x = (pre_inter_x - np.transpose(pre_inter_x)) / (det + 1e-10)
+ inter_pts = np.concatenate([inter_x[:, :, None], inter_y[:, :, None]], axis=-1).astype('int32')
+
+ # 3. get corner information
+ # 3.1 get distance
+ '''
+ dist_segments:
+ | dist(0), dist(1), dist(2), ...|
+ dist_inter_to_segment1:
+ | dist(inter,0), dist(inter,0), dist(inter,0), ... |
+ | dist(inter,1), dist(inter,1), dist(inter,1), ... |
+ ...
+    dist_inter_to_segment2:
+ | dist(inter,0), dist(inter,1), dist(inter,2), ... |
+ | dist(inter,0), dist(inter,1), dist(inter,2), ... |
+ ...
+ '''
+
+ dist_inter_to_segment1_start = np.sqrt(
+ np.sum(((inter_pts - start[:, None, :]) ** 2), axis=-1, keepdims=True)) # [n_batch, n_batch, 1]
+ dist_inter_to_segment1_end = np.sqrt(
+ np.sum(((inter_pts - end[:, None, :]) ** 2), axis=-1, keepdims=True)) # [n_batch, n_batch, 1]
+ dist_inter_to_segment2_start = np.sqrt(
+ np.sum(((inter_pts - start[None, :, :]) ** 2), axis=-1, keepdims=True)) # [n_batch, n_batch, 1]
+ dist_inter_to_segment2_end = np.sqrt(
+ np.sum(((inter_pts - end[None, :, :]) ** 2), axis=-1, keepdims=True)) # [n_batch, n_batch, 1]
+
+ # sort ascending
+ dist_inter_to_segment1 = np.sort(
+ np.concatenate([dist_inter_to_segment1_start, dist_inter_to_segment1_end], axis=-1),
+ axis=-1) # [n_batch, n_batch, 2]
+ dist_inter_to_segment2 = np.sort(
+ np.concatenate([dist_inter_to_segment2_start, dist_inter_to_segment2_end], axis=-1),
+ axis=-1) # [n_batch, n_batch, 2]
+
+ # 3.2 get degree
+ inter_to_start = new_centers[:, None, :] - inter_pts
+ deg_inter_to_start = np.arctan2(inter_to_start[:, :, 1], inter_to_start[:, :, 0]) * 180 / np.pi
+ deg_inter_to_start[deg_inter_to_start < 0.0] += 360
+ inter_to_end = new_centers[None, :, :] - inter_pts
+ deg_inter_to_end = np.arctan2(inter_to_end[:, :, 1], inter_to_end[:, :, 0]) * 180 / np.pi
+ deg_inter_to_end[deg_inter_to_end < 0.0] += 360
+
+ '''
+ B -- G
+ | |
+ C -- R
+ B : blue / G: green / C: cyan / R: red
+
+ 0 -- 1
+ | |
+ 3 -- 2
+ '''
+ # rename variables
+ deg1_map, deg2_map = deg_inter_to_start, deg_inter_to_end
+ # sort deg ascending
+ deg_sort = np.sort(np.concatenate([deg1_map[:, :, None], deg2_map[:, :, None]], axis=-1), axis=-1)
+
+ deg_diff_map = np.abs(deg1_map - deg2_map)
+ # we only consider the smallest degree of intersect
+ deg_diff_map[deg_diff_map > 180] = 360 - deg_diff_map[deg_diff_map > 180]
+
+ # define available degree range
+ deg_range = [60, 120]
+
+ corner_dict = {corner_info: [] for corner_info in range(4)}
+ inter_points = []
+ for i in range(inter_pts.shape[0]):
+ for j in range(i + 1, inter_pts.shape[1]):
+ # i, j > line index, always i < j
+ x, y = inter_pts[i, j, :]
+ deg1, deg2 = deg_sort[i, j, :]
+ deg_diff = deg_diff_map[i, j]
+
+ check_degree = deg_diff > deg_range[0] and deg_diff < deg_range[1]
+
+ outside_ratio = params['outside_ratio'] # over ratio >>> drop it!
+ inside_ratio = params['inside_ratio'] # over ratio >>> drop it!
+ check_distance = ((dist_inter_to_segment1[i, j, 1] >= dist_segments[i] and \
+ dist_inter_to_segment1[i, j, 0] <= dist_segments[i] * outside_ratio) or \
+ (dist_inter_to_segment1[i, j, 1] <= dist_segments[i] and \
+ dist_inter_to_segment1[i, j, 0] <= dist_segments[i] * inside_ratio)) and \
+ ((dist_inter_to_segment2[i, j, 1] >= dist_segments[j] and \
+ dist_inter_to_segment2[i, j, 0] <= dist_segments[j] * outside_ratio) or \
+ (dist_inter_to_segment2[i, j, 1] <= dist_segments[j] and \
+ dist_inter_to_segment2[i, j, 0] <= dist_segments[j] * inside_ratio))
+
+ if check_degree and check_distance:
+ corner_info = None
+
+ if (deg1 >= 0 and deg1 <= 45 and deg2 >= 45 and deg2 <= 120) or \
+ (deg2 >= 315 and deg1 >= 45 and deg1 <= 120):
+ corner_info, color_info = 0, 'blue'
+ elif (deg1 >= 45 and deg1 <= 125 and deg2 >= 125 and deg2 <= 225):
+ corner_info, color_info = 1, 'green'
+ elif (deg1 >= 125 and deg1 <= 225 and deg2 >= 225 and deg2 <= 315):
+ corner_info, color_info = 2, 'black'
+ elif (deg1 >= 0 and deg1 <= 45 and deg2 >= 225 and deg2 <= 315) or \
+ (deg2 >= 315 and deg1 >= 225 and deg1 <= 315):
+ corner_info, color_info = 3, 'cyan'
+ else:
+ corner_info, color_info = 4, 'red' # we don't use it
+ continue
+
+ corner_dict[corner_info].append([x, y, i, j])
+ inter_points.append([x, y])
+
+ square_list = []
+ connect_list = []
+ segments_list = []
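+    # a square is a chain of four corners (one of each type 0 -> 1 -> 2 -> 3) in which every
+    # consecutive pair, including 3 -> 0, shares one of its two generating lines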
+ for corner0 in corner_dict[0]:
+ for corner1 in corner_dict[1]:
+ connect01 = False
+ for corner0_line in corner0[2:]:
+ if corner0_line in corner1[2:]:
+ connect01 = True
+ break
+ if connect01:
+ for corner2 in corner_dict[2]:
+ connect12 = False
+ for corner1_line in corner1[2:]:
+ if corner1_line in corner2[2:]:
+ connect12 = True
+ break
+ if connect12:
+ for corner3 in corner_dict[3]:
+ connect23 = False
+ for corner2_line in corner2[2:]:
+ if corner2_line in corner3[2:]:
+ connect23 = True
+ break
+ if connect23:
+ for corner3_line in corner3[2:]:
+ if corner3_line in corner0[2:]:
+ # SQUARE!!!
+ '''
+ 0 -- 1
+ | |
+ 3 -- 2
+ square_list:
+ order: 0 > 1 > 2 > 3
+ | x0, y0, x1, y1, x2, y2, x3, y3 |
+ | x0, y0, x1, y1, x2, y2, x3, y3 |
+ ...
+ connect_list:
+ order: 01 > 12 > 23 > 30
+ | line_idx01, line_idx12, line_idx23, line_idx30 |
+ | line_idx01, line_idx12, line_idx23, line_idx30 |
+ ...
+ segments_list:
+ order: 0 > 1 > 2 > 3
+ | line_idx0_i, line_idx0_j, line_idx1_i, line_idx1_j, line_idx2_i, line_idx2_j, line_idx3_i, line_idx3_j |
+ | line_idx0_i, line_idx0_j, line_idx1_i, line_idx1_j, line_idx2_i, line_idx2_j, line_idx3_i, line_idx3_j |
+ ...
+ '''
+ square_list.append(corner0[:2] + corner1[:2] + corner2[:2] + corner3[:2])
+ connect_list.append([corner0_line, corner1_line, corner2_line, corner3_line])
+ segments_list.append(corner0[2:] + corner1[2:] + corner2[2:] + corner3[2:])
+
+ def check_outside_inside(segments_info, connect_idx):
+ # return 'outside or inside', min distance, cover_param, peri_param
+ if connect_idx == segments_info[0]:
+ check_dist_mat = dist_inter_to_segment1
+ else:
+ check_dist_mat = dist_inter_to_segment2
+
+ i, j = segments_info
+ min_dist, max_dist = check_dist_mat[i, j, :]
+ connect_dist = dist_segments[connect_idx]
+ if max_dist > connect_dist:
+ return 'outside', min_dist, 0, 1
+ else:
+ return 'inside', min_dist, -1, -1
+
+ top_square = None
+
+ try:
+ map_size = input_shape[0] / 2
+ squares = np.array(square_list).reshape([-1, 4, 2])
+ score_array = []
+ connect_array = np.array(connect_list)
+ segments_array = np.array(segments_list).reshape([-1, 4, 2])
+
+ # get degree of corners:
+ squares_rollup = np.roll(squares, 1, axis=1)
+ squares_rolldown = np.roll(squares, -1, axis=1)
+ vec1 = squares_rollup - squares
+ normalized_vec1 = vec1 / (np.linalg.norm(vec1, axis=-1, keepdims=True) + 1e-10)
+ vec2 = squares_rolldown - squares
+ normalized_vec2 = vec2 / (np.linalg.norm(vec2, axis=-1, keepdims=True) + 1e-10)
+ inner_products = np.sum(normalized_vec1 * normalized_vec2, axis=-1) # [n_squares, 4]
+ squares_degree = np.arccos(inner_products) * 180 / np.pi # [n_squares, 4]
+
+ # get square score
+ overlap_scores = []
+ degree_scores = []
+ length_scores = []
+
+ for connects, segments, square, degree in zip(connect_array, segments_array, squares, squares_degree):
+ '''
+ 0 -- 1
+ | |
+ 3 -- 2
+
+ # segments: [4, 2]
+ # connects: [4]
+ '''
+
+ ###################################### OVERLAP SCORES
+ cover = 0
+ perimeter = 0
+ # check 0 > 1 > 2 > 3
+ square_length = []
+
+ for start_idx in range(4):
+ end_idx = (start_idx + 1) % 4
+
+ connect_idx = connects[start_idx] # segment idx of segment01
+ start_segments = segments[start_idx]
+ end_segments = segments[end_idx]
+
+ start_point = square[start_idx]
+ end_point = square[end_idx]
+
+ # check whether outside or inside
+                start_position, start_min, start_cover_param, start_peri_param = \
+                    check_outside_inside(start_segments, connect_idx)
+                end_position, end_min, end_cover_param, end_peri_param = \
+                    check_outside_inside(end_segments, connect_idx)
+
+ cover += dist_segments[connect_idx] + start_cover_param * start_min + end_cover_param * end_min
+ perimeter += dist_segments[connect_idx] + start_peri_param * start_min + end_peri_param * end_min
+
+ square_length.append(
+ dist_segments[connect_idx] + start_peri_param * start_min + end_peri_param * end_min)
+
+ overlap_scores.append(cover / perimeter)
+ ######################################
+ ###################################### DEGREE SCORES
+ '''
+ deg0 vs deg2
+ deg1 vs deg3
+ '''
+ deg0, deg1, deg2, deg3 = degree
+ deg_ratio1 = deg0 / deg2
+ if deg_ratio1 > 1.0:
+ deg_ratio1 = 1 / deg_ratio1
+ deg_ratio2 = deg1 / deg3
+ if deg_ratio2 > 1.0:
+ deg_ratio2 = 1 / deg_ratio2
+ degree_scores.append((deg_ratio1 + deg_ratio2) / 2)
+ ######################################
+ ###################################### LENGTH SCORES
+ '''
+ len0 vs len2
+ len1 vs len3
+ '''
+ len0, len1, len2, len3 = square_length
+ len_ratio1 = len0 / len2 if len2 > len0 else len2 / len0
+ len_ratio2 = len1 / len3 if len3 > len1 else len3 / len1
+ length_scores.append((len_ratio1 + len_ratio2) / 2)
+
+ ######################################
+
+ overlap_scores = np.array(overlap_scores)
+ overlap_scores /= np.max(overlap_scores)
+
+ degree_scores = np.array(degree_scores)
+ # degree_scores /= np.max(degree_scores)
+
+ length_scores = np.array(length_scores)
+
+ ###################################### AREA SCORES
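+        # polygon area of each candidate square via the shoelace formula, normalized by map_size**2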
+ area_scores = np.reshape(squares, [-1, 4, 2])
+ area_x = area_scores[:, :, 0]
+ area_y = area_scores[:, :, 1]
+ correction = area_x[:, -1] * area_y[:, 0] - area_y[:, -1] * area_x[:, 0]
+ area_scores = np.sum(area_x[:, :-1] * area_y[:, 1:], axis=-1) - np.sum(area_y[:, :-1] * area_x[:, 1:], axis=-1)
+ area_scores = 0.5 * np.abs(area_scores + correction)
+ area_scores /= (map_size * map_size) # np.max(area_scores)
+ ######################################
+
+ ###################################### CENTER SCORES
+ centers = np.array([[256 // 2, 256 // 2]], dtype='float32') # [1, 2]
+ # squares: [n, 4, 2]
+ square_centers = np.mean(squares, axis=1) # [n, 2]
+        center2center = np.sqrt(np.sum((centers - square_centers) ** 2, axis=-1))  # per-square distance to the image center
+ center_scores = center2center / (map_size / np.sqrt(2.0))
+
+ '''
+ score_w = [overlap, degree, area, center, length]
+ '''
+ score_w = [0.0, 1.0, 10.0, 0.5, 1.0]
+ score_array = params['w_overlap'] * overlap_scores \
+ + params['w_degree'] * degree_scores \
+ + params['w_area'] * area_scores \
+ - params['w_center'] * center_scores \
+ + params['w_length'] * length_scores
+
+ best_square = []
+
+ sorted_idx = np.argsort(score_array)[::-1]
+ score_array = score_array[sorted_idx]
+ squares = squares[sorted_idx]
+
+ except Exception as e:
+ pass
+
+ '''return list
+ merged_lines, squares, scores
+ '''
+
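+    # rescale everything from the detector's output grid back to original-image coordinates;
+    # the factor of 2 accounts for the output map being half the network input resolution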
+ try:
+ new_segments[:, 0] = new_segments[:, 0] * 2 / input_shape[1] * original_shape[1]
+ new_segments[:, 1] = new_segments[:, 1] * 2 / input_shape[0] * original_shape[0]
+ new_segments[:, 2] = new_segments[:, 2] * 2 / input_shape[1] * original_shape[1]
+ new_segments[:, 3] = new_segments[:, 3] * 2 / input_shape[0] * original_shape[0]
+    except Exception:
+ new_segments = []
+
+ try:
+ squares[:, :, 0] = squares[:, :, 0] * 2 / input_shape[1] * original_shape[1]
+ squares[:, :, 1] = squares[:, :, 1] * 2 / input_shape[0] * original_shape[0]
+    except Exception:
+ squares = []
+ score_array = []
+
+ try:
+ inter_points = np.array(inter_points)
+ inter_points[:, 0] = inter_points[:, 0] * 2 / input_shape[1] * original_shape[1]
+ inter_points[:, 1] = inter_points[:, 1] * 2 / input_shape[0] * original_shape[0]
+    except Exception:
+ inter_points = []
+
+ return new_segments, squares, score_array, inter_points
diff --git a/annotator/openpose/LICENSE b/annotator/openpose/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..6f60b76d35fa1012809985780964a5068adce4fd
--- /dev/null
+++ b/annotator/openpose/LICENSE
@@ -0,0 +1,108 @@
+OPENPOSE: MULTIPERSON KEYPOINT DETECTION
+SOFTWARE LICENSE AGREEMENT
+ACADEMIC OR NON-PROFIT ORGANIZATION NONCOMMERCIAL RESEARCH USE ONLY
+
+BY USING OR DOWNLOADING THE SOFTWARE, YOU ARE AGREEING TO THE TERMS OF THIS LICENSE AGREEMENT. IF YOU DO NOT AGREE WITH THESE TERMS, YOU MAY NOT USE OR DOWNLOAD THE SOFTWARE.
+
+This is a license agreement ("Agreement") between your academic institution or non-profit organization or self (called "Licensee" or "You" in this Agreement) and Carnegie Mellon University (called "Licensor" in this Agreement). All rights not specifically granted to you in this Agreement are reserved for Licensor.
+
+RESERVATION OF OWNERSHIP AND GRANT OF LICENSE:
+Licensor retains exclusive ownership of any copy of the Software (as defined below) licensed under this Agreement and hereby grants to Licensee a personal, non-exclusive,
+non-transferable license to use the Software for noncommercial research purposes, without the right to sublicense, pursuant to the terms and conditions of this Agreement. As used in this Agreement, the term "Software" means (i) the actual copy of all or any portion of code for program routines made accessible to Licensee by Licensor pursuant to this Agreement, inclusive of backups, updates, and/or merged copies permitted hereunder or subsequently supplied by Licensor, including all or any file structures, programming instructions, user interfaces and screen formats and sequences as well as any and all documentation and instructions related to it, and (ii) all or any derivatives and/or modifications created or made by You to any of the items specified in (i).
+
+CONFIDENTIALITY: Licensee acknowledges that the Software is proprietary to Licensor, and as such, Licensee agrees to receive all such materials in confidence and use the Software only in accordance with the terms of this Agreement. Licensee agrees to use reasonable effort to protect the Software from unauthorized use, reproduction, distribution, or publication.
+
+COPYRIGHT: The Software is owned by Licensor and is protected by United
+States copyright laws and applicable international treaties and/or conventions.
+
+PERMITTED USES: The Software may be used for your own noncommercial internal research purposes. You understand and agree that Licensor is not obligated to implement any suggestions and/or feedback you might provide regarding the Software, but to the extent Licensor does so, you are not entitled to any compensation related thereto.
+
+DERIVATIVES: You may create derivatives of or make modifications to the Software, however, You agree that all and any such derivatives and modifications will be owned by Licensor and become a part of the Software licensed to You under this Agreement. You may only use such derivatives and modifications for your own noncommercial internal research purposes, and you may not otherwise use, distribute or copy such derivatives and modifications in violation of this Agreement.
+
+BACKUPS: If Licensee is an organization, it may make that number of copies of the Software necessary for internal noncommercial use at a single site within its organization provided that all information appearing in or on the original labels, including the copyright and trademark notices are copied onto the labels of the copies.
+
+USES NOT PERMITTED: You may not distribute, copy or use the Software except as explicitly permitted herein. Licensee has not been granted any trademark license as part of this Agreement and may not use the name or mark “OpenPose", "Carnegie Mellon" or any renditions thereof without the prior written permission of Licensor.
+
+You may not sell, rent, lease, sublicense, lend, time-share or transfer, in whole or in part, or provide third parties access to prior or present versions (or any parts thereof) of the Software.
+
+ASSIGNMENT: You may not assign this Agreement or your rights hereunder without the prior written consent of Licensor. Any attempted assignment without such consent shall be null and void.
+
+TERM: The term of the license granted by this Agreement is from Licensee's acceptance of this Agreement by downloading the Software or by using the Software until terminated as provided below.
+
+The Agreement automatically terminates without notice if you fail to comply with any provision of this Agreement. Licensee may terminate this Agreement by ceasing using the Software. Upon any termination of this Agreement, Licensee will delete any and all copies of the Software. You agree that all provisions which operate to protect the proprietary rights of Licensor shall remain in force should breach occur and that the obligation of confidentiality described in this Agreement is binding in perpetuity and, as such, survives the term of the Agreement.
+
+FEE: Provided Licensee abides completely by the terms and conditions of this Agreement, there is no fee due to Licensor for Licensee's use of the Software in accordance with this Agreement.
+
+DISCLAIMER OF WARRANTIES: THE SOFTWARE IS PROVIDED "AS-IS" WITHOUT WARRANTY OF ANY KIND INCLUDING ANY WARRANTIES OF PERFORMANCE OR MERCHANTABILITY OR FITNESS FOR A PARTICULAR USE OR PURPOSE OR OF NON-INFRINGEMENT. LICENSEE BEARS ALL RISK RELATING TO QUALITY AND PERFORMANCE OF THE SOFTWARE AND RELATED MATERIALS.
+
+SUPPORT AND MAINTENANCE: No Software support or training by the Licensor is provided as part of this Agreement.
+
+EXCLUSIVE REMEDY AND LIMITATION OF LIABILITY: To the maximum extent permitted under applicable law, Licensor shall not be liable for direct, indirect, special, incidental, or consequential damages or lost profits related to Licensee's use of and/or inability to use the Software, even if Licensor is advised of the possibility of such damage.
+
+EXPORT REGULATION: Licensee agrees to comply with any and all applicable
+U.S. export control laws, regulations, and/or other laws related to embargoes and sanction programs administered by the Office of Foreign Assets Control.
+
+SEVERABILITY: If any provision(s) of this Agreement shall be held to be invalid, illegal, or unenforceable by a court or other tribunal of competent jurisdiction, the validity, legality and enforceability of the remaining provisions shall not in any way be affected or impaired thereby.
+
+NO IMPLIED WAIVERS: No failure or delay by Licensor in enforcing any right or remedy under this Agreement shall be construed as a waiver of any future or other exercise of such right or remedy by Licensor.
+
+GOVERNING LAW: This Agreement shall be construed and enforced in accordance with the laws of the Commonwealth of Pennsylvania without reference to conflict of laws principles. You consent to the personal jurisdiction of the courts of this County and waive their rights to venue outside of Allegheny County, Pennsylvania.
+
+ENTIRE AGREEMENT AND AMENDMENTS: This Agreement constitutes the sole and entire agreement between Licensee and Licensor as to the matter set forth herein and supersedes any previous agreements, understandings, and arrangements between the parties relating hereto.
+
+
+
+************************************************************************
+
+THIRD-PARTY SOFTWARE NOTICES AND INFORMATION
+
+This project incorporates material from the project(s) listed below (collectively, "Third Party Code"). This Third Party Code is licensed to you under their original license terms set forth below. We reserves all other rights not expressly granted, whether by implication, estoppel or otherwise.
+
+1. Caffe, version 1.0.0, (https://github.com/BVLC/caffe/)
+
+COPYRIGHT
+
+All contributions by the University of California:
+Copyright (c) 2014-2017 The Regents of the University of California (Regents)
+All rights reserved.
+
+All other contributions:
+Copyright (c) 2014-2017, the respective contributors
+All rights reserved.
+
+Caffe uses a shared copyright model: each contributor holds copyright over
+their contributions to Caffe. The project versioning records all such
+contribution and copyright details. If a contributor wants to further mark
+their specific copyright on a particular contribution, they should indicate
+their copyright solely in the commit message of the change when it is
+committed.
+
+LICENSE
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+CONTRIBUTION AGREEMENT
+
+By contributing to the BVLC/caffe repository through pull-request, comment,
+or otherwise, the contributor releases their content to the
+license and copyright terms herein.
+
+************END OF THIRD-PARTY SOFTWARE NOTICES AND INFORMATION**********
\ No newline at end of file
diff --git a/annotator/openpose/__init__.py b/annotator/openpose/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..2af34fe010fdf1f9ff84cf17aeb4b36fa6f0ed7c
--- /dev/null
+++ b/annotator/openpose/__init__.py
@@ -0,0 +1,59 @@
+'''
+ * Copyright (c) 2023 Salesforce, Inc.
+ * All rights reserved.
+ * SPDX-License-Identifier: Apache License 2.0
+ * For full license text, see LICENSE.txt file in the repo root or http://www.apache.org/licenses/
+ * By Can Qin
+ * Modified from ControlNet repo: https://github.com/lllyasviel/ControlNet
+ * Copyright (c) 2023 Lvmin Zhang and Maneesh Agrawala
+'''
+
+# Openpose
+# Original from CMU https://github.com/CMU-Perceptual-Computing-Lab/openpose
+# 2nd Edited by https://github.com/Hzzone/pytorch-openpose
+# 3rd Edited by ControlNet
+
+import os
+os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"
+
+import torch
+import numpy as np
+from . import util
+from .body import Body
+from .hand import Hand
+from annotator.util import annotator_ckpts_path
+
+
+body_model_path = "https://huggingface.co/lllyasviel/ControlNet/resolve/main/annotator/ckpts/body_pose_model.pth"
+hand_model_path = "https://huggingface.co/lllyasviel/ControlNet/resolve/main/annotator/ckpts/hand_pose_model.pth"
+
+
+class OpenposeDetector:
+ def __init__(self):
+ body_modelpath = os.path.join(annotator_ckpts_path, "body_pose_model.pth")
+ # hand_modelpath = os.path.join(annotator_ckpts_path, "hand_pose_model.pth")
+
+ if not os.path.exists(body_modelpath):
+ from basicsr.utils.download_util import load_file_from_url
+ load_file_from_url(body_model_path, model_dir=annotator_ckpts_path)
+ # load_file_from_url(hand_model_path, model_dir=annotator_ckpts_path)
+
+ self.body_estimation = Body(body_modelpath)
+ # self.hand_estimation = Hand(hand_modelpath)
+
+ def __call__(self, oriImg, hand=False):
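+        # reverse the channel order (the Caffe-trained body model expects OpenCV-style BGR input)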
+ oriImg = oriImg[:, :, ::-1].copy()
+ with torch.no_grad():
+ candidate, subset = self.body_estimation(oriImg)
+ canvas = np.zeros_like(oriImg)
+ canvas = util.draw_bodypose(canvas, candidate, subset)
+ # if hand:
+ # hands_list = util.handDetect(candidate, subset, oriImg)
+ # all_hand_peaks = []
+ # for x, y, w, is_left in hands_list:
+ # peaks = self.hand_estimation(oriImg[y:y+w, x:x+w, :])
+ # peaks[:, 0] = np.where(peaks[:, 0] == 0, peaks[:, 0], peaks[:, 0] + x)
+ # peaks[:, 1] = np.where(peaks[:, 1] == 0, peaks[:, 1], peaks[:, 1] + y)
+ # all_hand_peaks.append(peaks)
+ # canvas = util.draw_handpose(canvas, all_hand_peaks)
+ return canvas, dict(candidate=candidate.tolist(), subset=subset.tolist())
diff --git a/annotator/openpose/__pycache__/__init__.cpython-310.pyc b/annotator/openpose/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..21aca3d82d0146fc2f24b62e79bd2c5f620050cc
Binary files /dev/null and b/annotator/openpose/__pycache__/__init__.cpython-310.pyc differ
diff --git a/annotator/openpose/__pycache__/__init__.cpython-38.pyc b/annotator/openpose/__pycache__/__init__.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..01c59f8b887317d99dd97a403b47f982e31bfa15
Binary files /dev/null and b/annotator/openpose/__pycache__/__init__.cpython-38.pyc differ
diff --git a/annotator/openpose/__pycache__/body.cpython-310.pyc b/annotator/openpose/__pycache__/body.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..77b0bbf99e0e214fc0bb8939db15ed433e31c678
Binary files /dev/null and b/annotator/openpose/__pycache__/body.cpython-310.pyc differ
diff --git a/annotator/openpose/__pycache__/body.cpython-38.pyc b/annotator/openpose/__pycache__/body.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6e9d5dbd1789cf1505f31b5ef33ab675f7aa7b08
Binary files /dev/null and b/annotator/openpose/__pycache__/body.cpython-38.pyc differ
diff --git a/annotator/openpose/__pycache__/hand.cpython-310.pyc b/annotator/openpose/__pycache__/hand.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3e65c4ab1906bca188640b0a690ad197c8c2e701
Binary files /dev/null and b/annotator/openpose/__pycache__/hand.cpython-310.pyc differ
diff --git a/annotator/openpose/__pycache__/hand.cpython-38.pyc b/annotator/openpose/__pycache__/hand.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..22241d04782b5e7455d0ffe3d8bca8ceb27446e8
Binary files /dev/null and b/annotator/openpose/__pycache__/hand.cpython-38.pyc differ
diff --git a/annotator/openpose/__pycache__/model.cpython-310.pyc b/annotator/openpose/__pycache__/model.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ba0657f301c43ef7886dc15200cdfad3f79f4574
Binary files /dev/null and b/annotator/openpose/__pycache__/model.cpython-310.pyc differ
diff --git a/annotator/openpose/__pycache__/model.cpython-38.pyc b/annotator/openpose/__pycache__/model.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..29bf5f2b849f0911e2d75cc0477fff830cb1080b
Binary files /dev/null and b/annotator/openpose/__pycache__/model.cpython-38.pyc differ
diff --git a/annotator/openpose/__pycache__/util.cpython-310.pyc b/annotator/openpose/__pycache__/util.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..499cf1995365169547756cbbd618c0f65e29e4d3
Binary files /dev/null and b/annotator/openpose/__pycache__/util.cpython-310.pyc differ
diff --git a/annotator/openpose/__pycache__/util.cpython-38.pyc b/annotator/openpose/__pycache__/util.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fcc19a275ea381dfab4d211e7c081666896fb48f
Binary files /dev/null and b/annotator/openpose/__pycache__/util.cpython-38.pyc differ
diff --git a/annotator/openpose/body.py b/annotator/openpose/body.py
new file mode 100644
index 0000000000000000000000000000000000000000..c9205f00f90ad4faf62fc0e46095b7eb3338b608
--- /dev/null
+++ b/annotator/openpose/body.py
@@ -0,0 +1,229 @@
+'''
+ * Copyright (c) 2023 Salesforce, Inc.
+ * All rights reserved.
+ * SPDX-License-Identifier: Apache License 2.0
+ * For full license text, see LICENSE.txt file in the repo root or http://www.apache.org/licenses/
+ * By Can Qin
+ * Modified from ControlNet repo: https://github.com/lllyasviel/ControlNet
+ * Copyright (c) 2023 Lvmin Zhang and Maneesh Agrawala
+'''
+
+import cv2
+import numpy as np
+import math
+import time
+from scipy.ndimage.filters import gaussian_filter
+import matplotlib.pyplot as plt
+import matplotlib
+import torch
+from torchvision import transforms
+
+from . import util
+from .model import bodypose_model
+
+class Body(object):
+ def __init__(self, model_path):
+ self.model = bodypose_model()
+ if torch.cuda.is_available():
+ self.model = self.model.cuda()
+ print('cuda')
+ model_dict = util.transfer(self.model, torch.load(model_path))
+ self.model.load_state_dict(model_dict)
+ self.model.eval()
+
+ def __call__(self, oriImg):
+ # scale_search = [0.5, 1.0, 1.5, 2.0]
+ scale_search = [0.5]
+ boxsize = 368
+ stride = 8
+ padValue = 128
+ thre1 = 0.1
+ thre2 = 0.05
+ multiplier = [x * boxsize / oriImg.shape[0] for x in scale_search]
+ heatmap_avg = np.zeros((oriImg.shape[0], oriImg.shape[1], 19))
+ paf_avg = np.zeros((oriImg.shape[0], oriImg.shape[1], 38))
+
+ for m in range(len(multiplier)):
+ scale = multiplier[m]
+ imageToTest = cv2.resize(oriImg, (0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_CUBIC)
+ imageToTest_padded, pad = util.padRightDownCorner(imageToTest, stride, padValue)
+ im = np.transpose(np.float32(imageToTest_padded[:, :, :, np.newaxis]), (3, 2, 0, 1)) / 256 - 0.5
+ im = np.ascontiguousarray(im)
+
+ data = torch.from_numpy(im).float()
+ if torch.cuda.is_available():
+ data = data.cuda()
+ # data = data.permute([2, 0, 1]).unsqueeze(0).float()
+ with torch.no_grad():
+ Mconv7_stage6_L1, Mconv7_stage6_L2 = self.model(data)
+ Mconv7_stage6_L1 = Mconv7_stage6_L1.cpu().numpy()
+ Mconv7_stage6_L2 = Mconv7_stage6_L2.cpu().numpy()
+
+ # extract outputs, resize, and remove padding
+ # heatmap = np.transpose(np.squeeze(net.blobs[output_blobs.keys()[1]].data), (1, 2, 0)) # output 1 is heatmaps
+ heatmap = np.transpose(np.squeeze(Mconv7_stage6_L2), (1, 2, 0)) # output 1 is heatmaps
+ heatmap = cv2.resize(heatmap, (0, 0), fx=stride, fy=stride, interpolation=cv2.INTER_CUBIC)
+ heatmap = heatmap[:imageToTest_padded.shape[0] - pad[2], :imageToTest_padded.shape[1] - pad[3], :]
+ heatmap = cv2.resize(heatmap, (oriImg.shape[1], oriImg.shape[0]), interpolation=cv2.INTER_CUBIC)
+
+ # paf = np.transpose(np.squeeze(net.blobs[output_blobs.keys()[0]].data), (1, 2, 0)) # output 0 is PAFs
+ paf = np.transpose(np.squeeze(Mconv7_stage6_L1), (1, 2, 0)) # output 0 is PAFs
+ paf = cv2.resize(paf, (0, 0), fx=stride, fy=stride, interpolation=cv2.INTER_CUBIC)
+ paf = paf[:imageToTest_padded.shape[0] - pad[2], :imageToTest_padded.shape[1] - pad[3], :]
+ paf = cv2.resize(paf, (oriImg.shape[1], oriImg.shape[0]), interpolation=cv2.INTER_CUBIC)
+
+            # accumulate the per-scale predictions into an average over all search scales
+            heatmap_avg += heatmap / len(multiplier)
+            paf_avg += paf / len(multiplier)
+
+ all_peaks = []
+ peak_counter = 0
+
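+        # a pixel is a keypoint peak if its smoothed response is >= all four neighbours
+        # and above thre1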
+ for part in range(18):
+ map_ori = heatmap_avg[:, :, part]
+ one_heatmap = gaussian_filter(map_ori, sigma=3)
+
+ map_left = np.zeros(one_heatmap.shape)
+ map_left[1:, :] = one_heatmap[:-1, :]
+ map_right = np.zeros(one_heatmap.shape)
+ map_right[:-1, :] = one_heatmap[1:, :]
+ map_up = np.zeros(one_heatmap.shape)
+ map_up[:, 1:] = one_heatmap[:, :-1]
+ map_down = np.zeros(one_heatmap.shape)
+ map_down[:, :-1] = one_heatmap[:, 1:]
+
+ peaks_binary = np.logical_and.reduce(
+ (one_heatmap >= map_left, one_heatmap >= map_right, one_heatmap >= map_up, one_heatmap >= map_down, one_heatmap > thre1))
+ peaks = list(zip(np.nonzero(peaks_binary)[1], np.nonzero(peaks_binary)[0])) # note reverse
+ peaks_with_score = [x + (map_ori[x[1], x[0]],) for x in peaks]
+ peak_id = range(peak_counter, peak_counter + len(peaks))
+ peaks_with_score_and_id = [peaks_with_score[i] + (peak_id[i],) for i in range(len(peak_id))]
+
+ all_peaks.append(peaks_with_score_and_id)
+ peak_counter += len(peaks)
+
+ # find connection in the specified sequence, center 29 is in the position 15
+ limbSeq = [[2, 3], [2, 6], [3, 4], [4, 5], [6, 7], [7, 8], [2, 9], [9, 10], \
+ [10, 11], [2, 12], [12, 13], [13, 14], [2, 1], [1, 15], [15, 17], \
+ [1, 16], [16, 18], [3, 17], [6, 18]]
+        # PAF channel indices corresponding to each limb in limbSeq
+ mapIdx = [[31, 32], [39, 40], [33, 34], [35, 36], [41, 42], [43, 44], [19, 20], [21, 22], \
+ [23, 24], [25, 26], [27, 28], [29, 30], [47, 48], [49, 50], [53, 54], [51, 52], \
+ [55, 56], [37, 38], [45, 46]]
+
+ connection_all = []
+ special_k = []
+ mid_num = 10
+
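+        # score every candidate limb by sampling the part affinity field at mid_num points
+        # along the line between the two keypoints and taking the mean alignment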
+ for k in range(len(mapIdx)):
+ score_mid = paf_avg[:, :, [x - 19 for x in mapIdx[k]]]
+ candA = all_peaks[limbSeq[k][0] - 1]
+ candB = all_peaks[limbSeq[k][1] - 1]
+ nA = len(candA)
+ nB = len(candB)
+ indexA, indexB = limbSeq[k]
+ if (nA != 0 and nB != 0):
+ connection_candidate = []
+ for i in range(nA):
+ for j in range(nB):
+ vec = np.subtract(candB[j][:2], candA[i][:2])
+ norm = math.sqrt(vec[0] * vec[0] + vec[1] * vec[1])
+ norm = max(0.001, norm)
+ vec = np.divide(vec, norm)
+
+ startend = list(zip(np.linspace(candA[i][0], candB[j][0], num=mid_num), \
+ np.linspace(candA[i][1], candB[j][1], num=mid_num)))
+
+ vec_x = np.array([score_mid[int(round(startend[I][1])), int(round(startend[I][0])), 0] \
+ for I in range(len(startend))])
+ vec_y = np.array([score_mid[int(round(startend[I][1])), int(round(startend[I][0])), 1] \
+ for I in range(len(startend))])
+
+ score_midpts = np.multiply(vec_x, vec[0]) + np.multiply(vec_y, vec[1])
+ score_with_dist_prior = sum(score_midpts) / len(score_midpts) + min(
+ 0.5 * oriImg.shape[0] / norm - 1, 0)
+ criterion1 = len(np.nonzero(score_midpts > thre2)[0]) > 0.8 * len(score_midpts)
+ criterion2 = score_with_dist_prior > 0
+ if criterion1 and criterion2:
+ connection_candidate.append(
+ [i, j, score_with_dist_prior, score_with_dist_prior + candA[i][2] + candB[j][2]])
+
+ connection_candidate = sorted(connection_candidate, key=lambda x: x[2], reverse=True)
+ connection = np.zeros((0, 5))
+ for c in range(len(connection_candidate)):
+ i, j, s = connection_candidate[c][0:3]
+ if (i not in connection[:, 3] and j not in connection[:, 4]):
+ connection = np.vstack([connection, [candA[i][3], candB[j][3], s, i, j]])
+ if (len(connection) >= min(nA, nB)):
+ break
+
+ connection_all.append(connection)
+ else:
+ special_k.append(k)
+ connection_all.append([])
+
+ # last number in each row is the total parts number of that person
+ # the second last number in each row is the score of the overall configuration
+ subset = -1 * np.ones((0, 20))
+ candidate = np.array([item for sublist in all_peaks for item in sublist])
+
+ for k in range(len(mapIdx)):
+ if k not in special_k:
+ partAs = connection_all[k][:, 0]
+ partBs = connection_all[k][:, 1]
+ indexA, indexB = np.array(limbSeq[k]) - 1
+
+ for i in range(len(connection_all[k])): # = 1:size(temp,1)
+ found = 0
+ subset_idx = [-1, -1]
+ for j in range(len(subset)): # 1:size(subset,1):
+ if subset[j][indexA] == partAs[i] or subset[j][indexB] == partBs[i]:
+ subset_idx[found] = j
+ found += 1
+
+ if found == 1:
+ j = subset_idx[0]
+ if subset[j][indexB] != partBs[i]:
+ subset[j][indexB] = partBs[i]
+ subset[j][-1] += 1
+ subset[j][-2] += candidate[partBs[i].astype(int), 2] + connection_all[k][i][2]
+ elif found == 2: # if found 2 and disjoint, merge them
+ j1, j2 = subset_idx
+ membership = ((subset[j1] >= 0).astype(int) + (subset[j2] >= 0).astype(int))[:-2]
+ if len(np.nonzero(membership == 2)[0]) == 0: # merge
+ subset[j1][:-2] += (subset[j2][:-2] + 1)
+ subset[j1][-2:] += subset[j2][-2:]
+ subset[j1][-2] += connection_all[k][i][2]
+ subset = np.delete(subset, j2, 0)
+ else: # as like found == 1
+ subset[j1][indexB] = partBs[i]
+ subset[j1][-1] += 1
+ subset[j1][-2] += candidate[partBs[i].astype(int), 2] + connection_all[k][i][2]
+
+ # if find no partA in the subset, create a new subset
+ elif not found and k < 17:
+ row = -1 * np.ones(20)
+ row[indexA] = partAs[i]
+ row[indexB] = partBs[i]
+ row[-1] = 2
+ row[-2] = sum(candidate[connection_all[k][i, :2].astype(int), 2]) + connection_all[k][i][2]
+ subset = np.vstack([subset, row])
+        # delete rows of subset that have too few detected parts or a low average score
+ deleteIdx = []
+ for i in range(len(subset)):
+ if subset[i][-1] < 4 or subset[i][-2] / subset[i][-1] < 0.4:
+ deleteIdx.append(i)
+ subset = np.delete(subset, deleteIdx, axis=0)
+
+ # subset: n*20 array, 0-17 is the index in candidate, 18 is the total score, 19 is the total parts
+ # candidate: x, y, score, id
+ return candidate, subset
+
+if __name__ == "__main__":
+ body_estimation = Body('../model/body_pose_model.pth')
+
+ test_image = '../images/ski.jpg'
+ oriImg = cv2.imread(test_image) # B,G,R order
+ candidate, subset = body_estimation(oriImg)
+ canvas = util.draw_bodypose(oriImg, candidate, subset)
+ plt.imshow(canvas[:, :, [2, 1, 0]])
+ plt.show()
diff --git a/annotator/openpose/hand.py b/annotator/openpose/hand.py
new file mode 100644
index 0000000000000000000000000000000000000000..d05abca4f6c7e35a44d638ee8defdfee6fc5fc0f
--- /dev/null
+++ b/annotator/openpose/hand.py
@@ -0,0 +1,96 @@
+'''
+ * Copyright (c) 2023 Salesforce, Inc.
+ * All rights reserved.
+ * SPDX-License-Identifier: Apache License 2.0
+ * For full license text, see LICENSE.txt file in the repo root or http://www.apache.org/licenses/
+ * By Can Qin
+ * Modified from ControlNet repo: https://github.com/lllyasviel/ControlNet
+ * Copyright (c) 2023 Lvmin Zhang and Maneesh Agrawala
+'''
+
+import cv2
+import json
+import numpy as np
+import math
+import time
+from scipy.ndimage.filters import gaussian_filter
+import matplotlib.pyplot as plt
+import matplotlib
+import torch
+from skimage.measure import label
+
+from .model import handpose_model
+from . import util
+
+class Hand(object):
+ def __init__(self, model_path):
+ self.model = handpose_model()
+ if torch.cuda.is_available():
+ self.model = self.model.cuda()
+ print('cuda')
+ model_dict = util.transfer(self.model, torch.load(model_path))
+ self.model.load_state_dict(model_dict)
+ self.model.eval()
+
+ def __call__(self, oriImg):
+ scale_search = [0.5, 1.0, 1.5, 2.0]
+ # scale_search = [0.5]
+ boxsize = 368
+ stride = 8
+ padValue = 128
+ thre = 0.05
+ multiplier = [x * boxsize / oriImg.shape[0] for x in scale_search]
+ heatmap_avg = np.zeros((oriImg.shape[0], oriImg.shape[1], 22))
+ # paf_avg = np.zeros((oriImg.shape[0], oriImg.shape[1], 38))
+
+ for m in range(len(multiplier)):
+ scale = multiplier[m]
+ imageToTest = cv2.resize(oriImg, (0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_CUBIC)
+ imageToTest_padded, pad = util.padRightDownCorner(imageToTest, stride, padValue)
+ im = np.transpose(np.float32(imageToTest_padded[:, :, :, np.newaxis]), (3, 2, 0, 1)) / 256 - 0.5
+ im = np.ascontiguousarray(im)
+
+ data = torch.from_numpy(im).float()
+ if torch.cuda.is_available():
+ data = data.cuda()
+ # data = data.permute([2, 0, 1]).unsqueeze(0).float()
+ with torch.no_grad():
+ output = self.model(data).cpu().numpy()
+                # output = self.model(data).numpy()
+
+ # extract outputs, resize, and remove padding
+ heatmap = np.transpose(np.squeeze(output), (1, 2, 0)) # output 1 is heatmaps
+ heatmap = cv2.resize(heatmap, (0, 0), fx=stride, fy=stride, interpolation=cv2.INTER_CUBIC)
+ heatmap = heatmap[:imageToTest_padded.shape[0] - pad[2], :imageToTest_padded.shape[1] - pad[3], :]
+ heatmap = cv2.resize(heatmap, (oriImg.shape[1], oriImg.shape[0]), interpolation=cv2.INTER_CUBIC)
+
+ heatmap_avg += heatmap / len(multiplier)
+
+ all_peaks = []
+ for part in range(21):
+ map_ori = heatmap_avg[:, :, part]
+ one_heatmap = gaussian_filter(map_ori, sigma=3)
+ binary = np.ascontiguousarray(one_heatmap > thre, dtype=np.uint8)
+            # every response is below the threshold: no peak for this part
+ if np.sum(binary) == 0:
+ all_peaks.append([0, 0])
+ continue
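+            # keep the connected component with the largest total response and take the
+            # location of its maximum as the peak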
+ label_img, label_numbers = label(binary, return_num=True, connectivity=binary.ndim)
+ max_index = np.argmax([np.sum(map_ori[label_img == i]) for i in range(1, label_numbers + 1)]) + 1
+ label_img[label_img != max_index] = 0
+ map_ori[label_img == 0] = 0
+
+ y, x = util.npmax(map_ori)
+ all_peaks.append([x, y])
+ return np.array(all_peaks)
+
+if __name__ == "__main__":
+ hand_estimation = Hand('../model/hand_pose_model.pth')
+
+ # test_image = '../images/hand.jpg'
+ test_image = '../images/hand.jpg'
+ oriImg = cv2.imread(test_image) # B,G,R order
+ peaks = hand_estimation(oriImg)
+ canvas = util.draw_handpose(oriImg, peaks, True)
+ cv2.imshow('', canvas)
+ cv2.waitKey(0)
\ No newline at end of file
diff --git a/annotator/openpose/model.py b/annotator/openpose/model.py
new file mode 100644
index 0000000000000000000000000000000000000000..b7e4461bd619d694ddaa57c68fa275f7f47a0417
--- /dev/null
+++ b/annotator/openpose/model.py
@@ -0,0 +1,229 @@
+'''
+ * Copyright (c) 2023 Salesforce, Inc.
+ * All rights reserved.
+ * SPDX-License-Identifier: Apache License 2.0
+ * For full license text, see LICENSE.txt file in the repo root or http://www.apache.org/licenses/
+ * By Can Qin
+ * Modified from ControlNet repo: https://github.com/lllyasviel/ControlNet
+ * Copyright (c) 2023 Lvmin Zhang and Maneesh Agrawala
+'''
+
+import torch
+from collections import OrderedDict
+
+import torch
+import torch.nn as nn
+
+def make_layers(block, no_relu_layers):
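+    # conv entries are [in_channels, out_channels, kernel, stride, padding];
+    # pool entries ('pool*') are [kernel, stride, padding]; a ReLU follows every conv
+    # unless its name is listed in no_relu_layers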
+ layers = []
+ for layer_name, v in block.items():
+ if 'pool' in layer_name:
+ layer = nn.MaxPool2d(kernel_size=v[0], stride=v[1],
+ padding=v[2])
+ layers.append((layer_name, layer))
+ else:
+ conv2d = nn.Conv2d(in_channels=v[0], out_channels=v[1],
+ kernel_size=v[2], stride=v[3],
+ padding=v[4])
+ layers.append((layer_name, conv2d))
+ if layer_name not in no_relu_layers:
+ layers.append(('relu_'+layer_name, nn.ReLU(inplace=True)))
+
+ return nn.Sequential(OrderedDict(layers))
+
+class bodypose_model(nn.Module):
+ def __init__(self):
+ super(bodypose_model, self).__init__()
+
+ # these layers have no relu layer
+ no_relu_layers = ['conv5_5_CPM_L1', 'conv5_5_CPM_L2', 'Mconv7_stage2_L1',\
+ 'Mconv7_stage2_L2', 'Mconv7_stage3_L1', 'Mconv7_stage3_L2',\
+ 'Mconv7_stage4_L1', 'Mconv7_stage4_L2', 'Mconv7_stage5_L1',\
+                          'Mconv7_stage5_L2', 'Mconv7_stage6_L1', 'Mconv7_stage6_L2']
+ blocks = {}
+ block0 = OrderedDict([
+ ('conv1_1', [3, 64, 3, 1, 1]),
+ ('conv1_2', [64, 64, 3, 1, 1]),
+ ('pool1_stage1', [2, 2, 0]),
+ ('conv2_1', [64, 128, 3, 1, 1]),
+ ('conv2_2', [128, 128, 3, 1, 1]),
+ ('pool2_stage1', [2, 2, 0]),
+ ('conv3_1', [128, 256, 3, 1, 1]),
+ ('conv3_2', [256, 256, 3, 1, 1]),
+ ('conv3_3', [256, 256, 3, 1, 1]),
+ ('conv3_4', [256, 256, 3, 1, 1]),
+ ('pool3_stage1', [2, 2, 0]),
+ ('conv4_1', [256, 512, 3, 1, 1]),
+ ('conv4_2', [512, 512, 3, 1, 1]),
+ ('conv4_3_CPM', [512, 256, 3, 1, 1]),
+ ('conv4_4_CPM', [256, 128, 3, 1, 1])
+ ])
+
+
+ # Stage 1
+ block1_1 = OrderedDict([
+ ('conv5_1_CPM_L1', [128, 128, 3, 1, 1]),
+ ('conv5_2_CPM_L1', [128, 128, 3, 1, 1]),
+ ('conv5_3_CPM_L1', [128, 128, 3, 1, 1]),
+ ('conv5_4_CPM_L1', [128, 512, 1, 1, 0]),
+ ('conv5_5_CPM_L1', [512, 38, 1, 1, 0])
+ ])
+
+ block1_2 = OrderedDict([
+ ('conv5_1_CPM_L2', [128, 128, 3, 1, 1]),
+ ('conv5_2_CPM_L2', [128, 128, 3, 1, 1]),
+ ('conv5_3_CPM_L2', [128, 128, 3, 1, 1]),
+ ('conv5_4_CPM_L2', [128, 512, 1, 1, 0]),
+ ('conv5_5_CPM_L2', [512, 19, 1, 1, 0])
+ ])
+ blocks['block1_1'] = block1_1
+ blocks['block1_2'] = block1_2
+
+ self.model0 = make_layers(block0, no_relu_layers)
+
+ # Stages 2 - 6
+ for i in range(2, 7):
+ blocks['block%d_1' % i] = OrderedDict([
+ ('Mconv1_stage%d_L1' % i, [185, 128, 7, 1, 3]),
+ ('Mconv2_stage%d_L1' % i, [128, 128, 7, 1, 3]),
+ ('Mconv3_stage%d_L1' % i, [128, 128, 7, 1, 3]),
+ ('Mconv4_stage%d_L1' % i, [128, 128, 7, 1, 3]),
+ ('Mconv5_stage%d_L1' % i, [128, 128, 7, 1, 3]),
+ ('Mconv6_stage%d_L1' % i, [128, 128, 1, 1, 0]),
+ ('Mconv7_stage%d_L1' % i, [128, 38, 1, 1, 0])
+ ])
+
+ blocks['block%d_2' % i] = OrderedDict([
+ ('Mconv1_stage%d_L2' % i, [185, 128, 7, 1, 3]),
+ ('Mconv2_stage%d_L2' % i, [128, 128, 7, 1, 3]),
+ ('Mconv3_stage%d_L2' % i, [128, 128, 7, 1, 3]),
+ ('Mconv4_stage%d_L2' % i, [128, 128, 7, 1, 3]),
+ ('Mconv5_stage%d_L2' % i, [128, 128, 7, 1, 3]),
+ ('Mconv6_stage%d_L2' % i, [128, 128, 1, 1, 0]),
+ ('Mconv7_stage%d_L2' % i, [128, 19, 1, 1, 0])
+ ])
+
+ for k in blocks.keys():
+ blocks[k] = make_layers(blocks[k], no_relu_layers)
+
+ self.model1_1 = blocks['block1_1']
+ self.model2_1 = blocks['block2_1']
+ self.model3_1 = blocks['block3_1']
+ self.model4_1 = blocks['block4_1']
+ self.model5_1 = blocks['block5_1']
+ self.model6_1 = blocks['block6_1']
+
+ self.model1_2 = blocks['block1_2']
+ self.model2_2 = blocks['block2_2']
+ self.model3_2 = blocks['block3_2']
+ self.model4_2 = blocks['block4_2']
+ self.model5_2 = blocks['block5_2']
+ self.model6_2 = blocks['block6_2']
+
+
+ def forward(self, x):
+
+ out1 = self.model0(x)
+
+ out1_1 = self.model1_1(out1)
+ out1_2 = self.model1_2(out1)
+ out2 = torch.cat([out1_1, out1_2, out1], 1)
+
+ out2_1 = self.model2_1(out2)
+ out2_2 = self.model2_2(out2)
+ out3 = torch.cat([out2_1, out2_2, out1], 1)
+
+ out3_1 = self.model3_1(out3)
+ out3_2 = self.model3_2(out3)
+ out4 = torch.cat([out3_1, out3_2, out1], 1)
+
+ out4_1 = self.model4_1(out4)
+ out4_2 = self.model4_2(out4)
+ out5 = torch.cat([out4_1, out4_2, out1], 1)
+
+ out5_1 = self.model5_1(out5)
+ out5_2 = self.model5_2(out5)
+ out6 = torch.cat([out5_1, out5_2, out1], 1)
+
+ out6_1 = self.model6_1(out6)
+ out6_2 = self.model6_2(out6)
+
+ return out6_1, out6_2
+
+class handpose_model(nn.Module):
+ def __init__(self):
+ super(handpose_model, self).__init__()
+
+ # these layers have no relu layer
+ no_relu_layers = ['conv6_2_CPM', 'Mconv7_stage2', 'Mconv7_stage3',\
+ 'Mconv7_stage4', 'Mconv7_stage5', 'Mconv7_stage6']
+ # stage 1
+ block1_0 = OrderedDict([
+ ('conv1_1', [3, 64, 3, 1, 1]),
+ ('conv1_2', [64, 64, 3, 1, 1]),
+ ('pool1_stage1', [2, 2, 0]),
+ ('conv2_1', [64, 128, 3, 1, 1]),
+ ('conv2_2', [128, 128, 3, 1, 1]),
+ ('pool2_stage1', [2, 2, 0]),
+ ('conv3_1', [128, 256, 3, 1, 1]),
+ ('conv3_2', [256, 256, 3, 1, 1]),
+ ('conv3_3', [256, 256, 3, 1, 1]),
+ ('conv3_4', [256, 256, 3, 1, 1]),
+ ('pool3_stage1', [2, 2, 0]),
+ ('conv4_1', [256, 512, 3, 1, 1]),
+ ('conv4_2', [512, 512, 3, 1, 1]),
+ ('conv4_3', [512, 512, 3, 1, 1]),
+ ('conv4_4', [512, 512, 3, 1, 1]),
+ ('conv5_1', [512, 512, 3, 1, 1]),
+ ('conv5_2', [512, 512, 3, 1, 1]),
+ ('conv5_3_CPM', [512, 128, 3, 1, 1])
+ ])
+
+ block1_1 = OrderedDict([
+ ('conv6_1_CPM', [128, 512, 1, 1, 0]),
+ ('conv6_2_CPM', [512, 22, 1, 1, 0])
+ ])
+
+ blocks = {}
+ blocks['block1_0'] = block1_0
+ blocks['block1_1'] = block1_1
+
+ # stage 2-6
+ for i in range(2, 7):
+ blocks['block%d' % i] = OrderedDict([
+ ('Mconv1_stage%d' % i, [150, 128, 7, 1, 3]),
+ ('Mconv2_stage%d' % i, [128, 128, 7, 1, 3]),
+ ('Mconv3_stage%d' % i, [128, 128, 7, 1, 3]),
+ ('Mconv4_stage%d' % i, [128, 128, 7, 1, 3]),
+ ('Mconv5_stage%d' % i, [128, 128, 7, 1, 3]),
+ ('Mconv6_stage%d' % i, [128, 128, 1, 1, 0]),
+ ('Mconv7_stage%d' % i, [128, 22, 1, 1, 0])
+ ])
+
+ for k in blocks.keys():
+ blocks[k] = make_layers(blocks[k], no_relu_layers)
+
+ self.model1_0 = blocks['block1_0']
+ self.model1_1 = blocks['block1_1']
+ self.model2 = blocks['block2']
+ self.model3 = blocks['block3']
+ self.model4 = blocks['block4']
+ self.model5 = blocks['block5']
+ self.model6 = blocks['block6']
+
+ def forward(self, x):
+ out1_0 = self.model1_0(x)
+ out1_1 = self.model1_1(out1_0)
+ concat_stage2 = torch.cat([out1_1, out1_0], 1)
+ out_stage2 = self.model2(concat_stage2)
+ concat_stage3 = torch.cat([out_stage2, out1_0], 1)
+ out_stage3 = self.model3(concat_stage3)
+ concat_stage4 = torch.cat([out_stage3, out1_0], 1)
+ out_stage4 = self.model4(concat_stage4)
+ concat_stage5 = torch.cat([out_stage4, out1_0], 1)
+ out_stage5 = self.model5(concat_stage5)
+ concat_stage6 = torch.cat([out_stage5, out1_0], 1)
+ out_stage6 = self.model6(concat_stage6)
+ return out_stage6
+
+
diff --git a/annotator/openpose/util.py b/annotator/openpose/util.py
new file mode 100644
index 0000000000000000000000000000000000000000..0a144ff7f2f6f0a2d544786cbe37fbd00b5cbd7d
--- /dev/null
+++ b/annotator/openpose/util.py
@@ -0,0 +1,175 @@
+'''
+ * Copyright (c) 2023 Salesforce, Inc.
+ * All rights reserved.
+ * SPDX-License-Identifier: Apache License 2.0
+ * For full license text, see LICENSE.txt file in the repo root or http://www.apache.org/licenses/
+ * By Can Qin
+ * Modified from ControlNet repo: https://github.com/lllyasviel/ControlNet
+ * Copyright (c) 2023 Lvmin Zhang and Maneesh Agrawala
+'''
+
+
+import math
+import numpy as np
+import matplotlib
+import cv2
+
+
+def padRightDownCorner(img, stride, padValue):
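+    # pad the bottom and right edges with padValue so that height and width become
+    # multiples of stride; returns the padded image and the per-side padding [up, left, down, right]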
+ h = img.shape[0]
+ w = img.shape[1]
+
+ pad = 4 * [None]
+ pad[0] = 0 # up
+ pad[1] = 0 # left
+ pad[2] = 0 if (h % stride == 0) else stride - (h % stride) # down
+ pad[3] = 0 if (w % stride == 0) else stride - (w % stride) # right
+
+ img_padded = img
+ pad_up = np.tile(img_padded[0:1, :, :]*0 + padValue, (pad[0], 1, 1))
+ img_padded = np.concatenate((pad_up, img_padded), axis=0)
+ pad_left = np.tile(img_padded[:, 0:1, :]*0 + padValue, (1, pad[1], 1))
+ img_padded = np.concatenate((pad_left, img_padded), axis=1)
+ pad_down = np.tile(img_padded[-2:-1, :, :]*0 + padValue, (pad[2], 1, 1))
+ img_padded = np.concatenate((img_padded, pad_down), axis=0)
+ pad_right = np.tile(img_padded[:, -2:-1, :]*0 + padValue, (1, pad[3], 1))
+ img_padded = np.concatenate((img_padded, pad_right), axis=1)
+
+ return img_padded, pad
+
+# remap Caffe-converted weights so their keys match this PyTorch model's layer names
+def transfer(model, model_weights):
+ transfered_model_weights = {}
+ for weights_name in model.state_dict().keys():
+ transfered_model_weights[weights_name] = model_weights['.'.join(weights_name.split('.')[1:])]
+ return transfered_model_weights
+
+# draw the body keypoints and limbs
+def draw_bodypose(canvas, candidate, subset):
+ stickwidth = 4
+ limbSeq = [[2, 3], [2, 6], [3, 4], [4, 5], [6, 7], [7, 8], [2, 9], [9, 10], \
+ [10, 11], [2, 12], [12, 13], [13, 14], [2, 1], [1, 15], [15, 17], \
+ [1, 16], [16, 18], [3, 17], [6, 18]]
+
+ colors = [[255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0], [170, 255, 0], [85, 255, 0], [0, 255, 0], \
+ [0, 255, 85], [0, 255, 170], [0, 255, 255], [0, 170, 255], [0, 85, 255], [0, 0, 255], [85, 0, 255], \
+ [170, 0, 255], [255, 0, 255], [255, 0, 170], [255, 0, 85]]
+ for i in range(18):
+ for n in range(len(subset)):
+ index = int(subset[n][i])
+ if index == -1:
+ continue
+ x, y = candidate[index][0:2]
+ cv2.circle(canvas, (int(x), int(y)), 4, colors[i], thickness=-1)
+ for i in range(17):
+ for n in range(len(subset)):
+ index = subset[n][np.array(limbSeq[i]) - 1]
+ if -1 in index:
+ continue
+ cur_canvas = canvas.copy()
+ Y = candidate[index.astype(int), 0]
+ X = candidate[index.astype(int), 1]
+ mX = np.mean(X)
+ mY = np.mean(Y)
+ length = ((X[0] - X[1]) ** 2 + (Y[0] - Y[1]) ** 2) ** 0.5
+ angle = math.degrees(math.atan2(X[0] - X[1], Y[0] - Y[1]))
+ polygon = cv2.ellipse2Poly((int(mY), int(mX)), (int(length / 2), stickwidth), int(angle), 0, 360, 1)
+ cv2.fillConvexPoly(cur_canvas, polygon, colors[i])
+ canvas = cv2.addWeighted(canvas, 0.4, cur_canvas, 0.6, 0)
+ # plt.imsave("preview.jpg", canvas[:, :, [2, 1, 0]])
+ # plt.imshow(canvas[:, :, [2, 1, 0]])
+ return canvas
+
+
+# drawing hand keypoints with raw OpenCV primitives does not look great.
+def draw_handpose(canvas, all_hand_peaks, show_number=False):
+ edges = [[0, 1], [1, 2], [2, 3], [3, 4], [0, 5], [5, 6], [6, 7], [7, 8], [0, 9], [9, 10], \
+ [10, 11], [11, 12], [0, 13], [13, 14], [14, 15], [15, 16], [0, 17], [17, 18], [18, 19], [19, 20]]
+
+ for peaks in all_hand_peaks:
+ for ie, e in enumerate(edges):
+ if np.sum(np.all(peaks[e], axis=1)==0)==0:
+ x1, y1 = peaks[e[0]]
+ x2, y2 = peaks[e[1]]
+ cv2.line(canvas, (x1, y1), (x2, y2), matplotlib.colors.hsv_to_rgb([ie/float(len(edges)), 1.0, 1.0])*255, thickness=2)
+
+        for i, keypoint in enumerate(peaks):
+            x, y = keypoint
+ cv2.circle(canvas, (x, y), 4, (0, 0, 255), thickness=-1)
+ if show_number:
+ cv2.putText(canvas, str(i), (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (0, 0, 0), lineType=cv2.LINE_AA)
+ return canvas
+
+# detect hand according to body pose keypoints
+# please refer to https://github.com/CMU-Perceptual-Computing-Lab/openpose/blob/master/src/openpose/hand/handDetector.cpp
+def handDetect(candidate, subset, oriImg):
+ # right hand: wrist 4, elbow 3, shoulder 2
+ # left hand: wrist 7, elbow 6, shoulder 5
+ ratioWristElbow = 0.33
+ detect_result = []
+ image_height, image_width = oriImg.shape[0:2]
+ for person in subset.astype(int):
+ # if any of three not detected
+ has_left = np.sum(person[[5, 6, 7]] == -1) == 0
+ has_right = np.sum(person[[2, 3, 4]] == -1) == 0
+ if not (has_left or has_right):
+ continue
+ hands = []
+ #left hand
+ if has_left:
+ left_shoulder_index, left_elbow_index, left_wrist_index = person[[5, 6, 7]]
+ x1, y1 = candidate[left_shoulder_index][:2]
+ x2, y2 = candidate[left_elbow_index][:2]
+ x3, y3 = candidate[left_wrist_index][:2]
+ hands.append([x1, y1, x2, y2, x3, y3, True])
+ # right hand
+ if has_right:
+ right_shoulder_index, right_elbow_index, right_wrist_index = person[[2, 3, 4]]
+ x1, y1 = candidate[right_shoulder_index][:2]
+ x2, y2 = candidate[right_elbow_index][:2]
+ x3, y3 = candidate[right_wrist_index][:2]
+ hands.append([x1, y1, x2, y2, x3, y3, False])
+
+ for x1, y1, x2, y2, x3, y3, is_left in hands:
+ # pos_hand = pos_wrist + ratio * (pos_wrist - pos_elbox) = (1 + ratio) * pos_wrist - ratio * pos_elbox
+ # handRectangle.x = posePtr[wrist*3] + ratioWristElbow * (posePtr[wrist*3] - posePtr[elbow*3]);
+ # handRectangle.y = posePtr[wrist*3+1] + ratioWristElbow * (posePtr[wrist*3+1] - posePtr[elbow*3+1]);
+ # const auto distanceWristElbow = getDistance(poseKeypoints, person, wrist, elbow);
+ # const auto distanceElbowShoulder = getDistance(poseKeypoints, person, elbow, shoulder);
+ # handRectangle.width = 1.5f * fastMax(distanceWristElbow, 0.9f * distanceElbowShoulder);
+ x = x3 + ratioWristElbow * (x3 - x2)
+ y = y3 + ratioWristElbow * (y3 - y2)
+ distanceWristElbow = math.sqrt((x3 - x2) ** 2 + (y3 - y2) ** 2)
+ distanceElbowShoulder = math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)
+ width = 1.5 * max(distanceWristElbow, 0.9 * distanceElbowShoulder)
+ # x-y refers to the center --> offset to topLeft point
+ # handRectangle.x -= handRectangle.width / 2.f;
+ # handRectangle.y -= handRectangle.height / 2.f;
+ x -= width / 2
+ y -= width / 2 # width = height
+ # overflow the image
+ if x < 0: x = 0
+ if y < 0: y = 0
+ width1 = width
+ width2 = width
+ if x + width > image_width: width1 = image_width - x
+ if y + width > image_height: width2 = image_height - y
+ width = min(width1, width2)
+            # discard hand boxes smaller than 20 pixels
+ if width >= 20:
+ detect_result.append([int(x), int(y), int(width), is_left])
+
+ '''
+ return value: [[x, y, w, True if left hand else False]].
+    width = height since the network requires a square input.
+    x, y are the coordinates of the top-left corner.
+ '''
+ return detect_result
+
+# get max index of 2d array
+def npmax(array):
+ arrayindex = array.argmax(1)
+ arrayvalue = array.max(1)
+ i = arrayvalue.argmax()
+ j = arrayindex[i]
+ return i, j
diff --git a/annotator/outpainting/__init__.py b/annotator/outpainting/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..020cb2bca599171a731582646310bb97d9bb2524
--- /dev/null
+++ b/annotator/outpainting/__init__.py
@@ -0,0 +1,25 @@
+'''
+ * Copyright (c) 2023 Salesforce, Inc.
+ * All rights reserved.
+ * SPDX-License-Identifier: Apache License 2.0
+ * For full license text, see LICENSE.txt file in the repo root or http://www.apache.org/licenses/
+ * By Ning Yu
+'''
+
+import numpy as np
+
+class Outpainter:
+ def __call__(self, img, height_top_extended, height_down_extended, width_left_extended, width_right_extended):
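+ # Each *_extended argument is a percentage of the original size: e.g. height_top_extended=25
+ # adds a black (zero-filled) strip 25% of the image height above the input, which is pasted
+ # into the center of the enlarged canvas below.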
+ height, width, channel = img.shape
+
+ height_top_new = int(float(height) / 100.0 * float(height_top_extended))
+ height_down_new = int(float(height) / 100.0 * float(height_down_extended))
+ width_left_new = int(float(width) / 100.0 * float(width_left_extended))
+ width_right_new = int(float(width) / 100.0 * float(width_right_extended))
+
+ new_height = height + height_top_new + height_down_new
+ new_width = width + width_left_new + width_right_new
+ img_new = np.zeros([new_height, new_width, channel])
+ img_new[height_top_new: (height + height_top_new), width_left_new: (width + width_left_new), : ] = img
+ img_new = img_new.astype('ubyte')
+ return img_new
diff --git a/annotator/outpainting/__pycache__/__init__.cpython-310.pyc b/annotator/outpainting/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c1058d36ff1c675b433ffa5af3990ecf713dbaac
Binary files /dev/null and b/annotator/outpainting/__pycache__/__init__.cpython-310.pyc differ
diff --git a/annotator/outpainting/__pycache__/__init__.cpython-38.pyc b/annotator/outpainting/__pycache__/__init__.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..76734f981baf3cbb697fe6a5ba851401d922c4e0
Binary files /dev/null and b/annotator/outpainting/__pycache__/__init__.cpython-38.pyc differ
diff --git a/annotator/uniformer/LICENSE b/annotator/uniformer/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..c38dc639e6e238fbf59608f80b3a6ff1928ac429
--- /dev/null
+++ b/annotator/uniformer/LICENSE
@@ -0,0 +1,203 @@
+Copyright 2022 SenseTime X-Lab. All rights reserved.
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2022 SenseTime X-Lab.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
\ No newline at end of file
diff --git a/annotator/uniformer/__init__.py b/annotator/uniformer/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..50cb77b2d02a7e00fc17733af25f9d7643d02d98
--- /dev/null
+++ b/annotator/uniformer/__init__.py
@@ -0,0 +1,29 @@
+# Uniformer
+# From https://github.com/Sense-X/UniFormer
+# Apache-2.0 license
+
+import os
+
+from annotator.uniformer.mmseg.apis import init_segmentor, inference_segmentor, show_result_pyplot
+from annotator.uniformer.mmseg.core.evaluation import get_palette
+from annotator.util import annotator_ckpts_path
+
+checkpoint_file = "https://huggingface.co/lllyasviel/ControlNet/resolve/main/annotator/ckpts/upernet_global_small.pth"
+
+
+class UniformerDetector:
+ def __init__(self):
+ modelpath = os.path.join(annotator_ckpts_path, "upernet_global_small.pth")
+ if not os.path.exists(modelpath):
+ from basicsr.utils.download_util import load_file_from_url
+ load_file_from_url(checkpoint_file, model_dir=annotator_ckpts_path)
+ config_file = os.path.join(os.path.dirname(annotator_ckpts_path), "uniformer", "exp", "upernet_global_small", "config.py")
+ self.model = init_segmentor(config_file, modelpath).cuda()
+
+ def __call__(self, img):
+ result = inference_segmentor(self.model, img)
+
+ res_img = show_result_pyplot(self.model, img, result, get_palette('ade'), opacity=1)
+ return res_img
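+
+# Usage sketch (a minimal example; assumes a CUDA device and an HxWx3 uint8 image `img`):
+#   detector = UniformerDetector()  # downloads upernet_global_small.pth on first use
+#   seg_color = detector(img)       # segmentation rendered as an ADE20K color map (opacity=1)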
diff --git a/annotator/uniformer/__init__det.py b/annotator/uniformer/__init__det.py
new file mode 100644
index 0000000000000000000000000000000000000000..baf7789d9db5b74dcca8b007a14b9a034e073549
--- /dev/null
+++ b/annotator/uniformer/__init__det.py
@@ -0,0 +1,28 @@
+# Uniformer
+# From https://github.com/Sense-X/UniFormer
+# Apache-2.0 license
+
+import os
+
+# from annotator.uniformer.mmseg.apis import init_segmentor, inference_segmentor, show_result_pyplot
+# from annotator.uniformer.mmseg.core.evaluation import get_palette
+# from annotator.util.py import annotator_ckpts_path
+
+from annotator.uniformer.mmdet.apis import init_detector, inference_detector, show_result_pyplot
+from annotator.uniformer.mmdet.core.evaluation import get_palette
+from annotator.util import annotator_ckpts_path
+
+# checkpoint_file = "https://huggingface.co/lllyasviel/ControlNet/resolve/main/annotator/ckpts/upernet_global_small.pth"
+
+
+class UniformerDetector:
+ def __init__(self):
+ modelpath = os.path.join(annotator_ckpts_path, "cascade_mask_rcnn_3x_ms_hybrid_base.pth")
+
+ config_file = os.path.join(os.path.dirname(annotator_ckpts_path), "uniformer", "exp", "cascade_mask_rcnn_3x_ms_hybrid_base", "config.py")
+ self.model = init_detector(config_file, modelpath).cuda()
+
+ def __call__(self, img):
+ result = inference_detector(self.model, img)
+ res_img = show_result_pyplot(self.model, img, result, get_palette('coco'), opacity=1)
+ return res_img
diff --git a/annotator/uniformer/__init__seg.py b/annotator/uniformer/__init__seg.py
new file mode 100644
index 0000000000000000000000000000000000000000..3364d40997447a4ec15ca7a525a4d0e92ab211bd
--- /dev/null
+++ b/annotator/uniformer/__init__seg.py
@@ -0,0 +1,27 @@
+# Uniformer
+# From https://github.com/Sense-X/UniFormer
+# Apache-2.0 license
+
+import os
+
+from annotator.uniformer.mmseg.apis import init_segmentor, inference_segmentor, show_result_pyplot
+from annotator.uniformer.mmseg.core.evaluation import get_palette
+from annotator.util import annotator_ckpts_path
+
+
+checkpoint_file = "https://huggingface.co/lllyasviel/ControlNet/resolve/main/annotator/ckpts/upernet_global_small.pth"
+
+
+class UniformerDetector:
+ def __init__(self):
+ modelpath = os.path.join(annotator_ckpts_path, "upernet_global_small.pth")
+ if not os.path.exists(modelpath):
+ from basicsr.utils.download_util import load_file_from_url
+ load_file_from_url(checkpoint_file, model_dir=annotator_ckpts_path)
+ config_file = os.path.join(os.path.dirname(annotator_ckpts_path), "uniformer", "exp", "upernet_global_small", "config.py")
+ self.model = init_segmentor(config_file, modelpath).cuda()
+
+ def __call__(self, img):
+ result = inference_segmentor(self.model, img)
+ res_img = show_result_pyplot(self.model, img, result, get_palette('ade'), opacity=1)
+ return res_img
diff --git a/annotator/uniformer/__pycache__/__init__.cpython-310.pyc b/annotator/uniformer/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b0a810ae953cda5616947421c2ca347cc5a22aed
Binary files /dev/null and b/annotator/uniformer/__pycache__/__init__.cpython-310.pyc differ
diff --git a/annotator/uniformer/__pycache__/__init__.cpython-38.pyc b/annotator/uniformer/__pycache__/__init__.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ff2894e43de683e76581a10b0a88588c9fd2206f
Binary files /dev/null and b/annotator/uniformer/__pycache__/__init__.cpython-38.pyc differ
diff --git a/annotator/uniformer/configs/_base_/datasets/ade20k.py b/annotator/uniformer/configs/_base_/datasets/ade20k.py
new file mode 100644
index 0000000000000000000000000000000000000000..efc8b4bb20c981f3db6df7eb52b3dc0744c94cc0
--- /dev/null
+++ b/annotator/uniformer/configs/_base_/datasets/ade20k.py
@@ -0,0 +1,54 @@
+# dataset settings
+dataset_type = 'ADE20KDataset'
+data_root = 'data/ade/ADEChallengeData2016'
+img_norm_cfg = dict(
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+crop_size = (512, 512)
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='LoadAnnotations', reduce_zero_label=True),
+ dict(type='Resize', img_scale=(2048, 512), ratio_range=(0.5, 2.0)),
+ dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
+ dict(type='RandomFlip', prob=0.5),
+ dict(type='PhotoMetricDistortion'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
+ dict(type='DefaultFormatBundle'),
+ dict(type='Collect', keys=['img', 'gt_semantic_seg']),
+]
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='MultiScaleFlipAug',
+ img_scale=(2048, 512),
+ # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
+ flip=False,
+ transforms=[
+ dict(type='Resize', keep_ratio=True),
+ dict(type='RandomFlip'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img']),
+ ])
+]
+data = dict(
+ samples_per_gpu=4,
+ workers_per_gpu=4,
+ train=dict(
+ type=dataset_type,
+ data_root=data_root,
+ img_dir='images/training',
+ ann_dir='annotations/training',
+ pipeline=train_pipeline),
+ val=dict(
+ type=dataset_type,
+ data_root=data_root,
+ img_dir='images/validation',
+ ann_dir='annotations/validation',
+ pipeline=test_pipeline),
+ test=dict(
+ type=dataset_type,
+ data_root=data_root,
+ img_dir='images/validation',
+ ann_dir='annotations/validation',
+ pipeline=test_pipeline))
diff --git a/annotator/uniformer/configs/_base_/datasets/chase_db1.py b/annotator/uniformer/configs/_base_/datasets/chase_db1.py
new file mode 100644
index 0000000000000000000000000000000000000000..298594ea925f87f22b37094a2ec50e370aec96a0
--- /dev/null
+++ b/annotator/uniformer/configs/_base_/datasets/chase_db1.py
@@ -0,0 +1,59 @@
+# dataset settings
+dataset_type = 'ChaseDB1Dataset'
+data_root = 'data/CHASE_DB1'
+img_norm_cfg = dict(
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+img_scale = (960, 999)
+crop_size = (128, 128)
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='LoadAnnotations'),
+ dict(type='Resize', img_scale=img_scale, ratio_range=(0.5, 2.0)),
+ dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
+ dict(type='RandomFlip', prob=0.5),
+ dict(type='PhotoMetricDistortion'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
+ dict(type='DefaultFormatBundle'),
+ dict(type='Collect', keys=['img', 'gt_semantic_seg'])
+]
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='MultiScaleFlipAug',
+ img_scale=img_scale,
+ # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0],
+ flip=False,
+ transforms=[
+ dict(type='Resize', keep_ratio=True),
+ dict(type='RandomFlip'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+ ])
+]
+
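+# RepeatDataset (times=40000) below simply cycles the small CHASE_DB1 training split so an
+# iteration-based runner never exhausts it; no data is duplicated on disk.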
+data = dict(
+ samples_per_gpu=4,
+ workers_per_gpu=4,
+ train=dict(
+ type='RepeatDataset',
+ times=40000,
+ dataset=dict(
+ type=dataset_type,
+ data_root=data_root,
+ img_dir='images/training',
+ ann_dir='annotations/training',
+ pipeline=train_pipeline)),
+ val=dict(
+ type=dataset_type,
+ data_root=data_root,
+ img_dir='images/validation',
+ ann_dir='annotations/validation',
+ pipeline=test_pipeline),
+ test=dict(
+ type=dataset_type,
+ data_root=data_root,
+ img_dir='images/validation',
+ ann_dir='annotations/validation',
+ pipeline=test_pipeline))
diff --git a/annotator/uniformer/configs/_base_/datasets/cityscapes.py b/annotator/uniformer/configs/_base_/datasets/cityscapes.py
new file mode 100644
index 0000000000000000000000000000000000000000..f21867c63e1835f6fceb61f066e802fd8fd2a735
--- /dev/null
+++ b/annotator/uniformer/configs/_base_/datasets/cityscapes.py
@@ -0,0 +1,54 @@
+# dataset settings
+dataset_type = 'CityscapesDataset'
+data_root = 'data/cityscapes/'
+img_norm_cfg = dict(
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+crop_size = (512, 1024)
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='LoadAnnotations'),
+ dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)),
+ dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
+ dict(type='RandomFlip', prob=0.5),
+ dict(type='PhotoMetricDistortion'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
+ dict(type='DefaultFormatBundle'),
+ dict(type='Collect', keys=['img', 'gt_semantic_seg']),
+]
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='MultiScaleFlipAug',
+ img_scale=(2048, 1024),
+ # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
+ flip=False,
+ transforms=[
+ dict(type='Resize', keep_ratio=True),
+ dict(type='RandomFlip'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img']),
+ ])
+]
+data = dict(
+ samples_per_gpu=2,
+ workers_per_gpu=2,
+ train=dict(
+ type=dataset_type,
+ data_root=data_root,
+ img_dir='leftImg8bit/train',
+ ann_dir='gtFine/train',
+ pipeline=train_pipeline),
+ val=dict(
+ type=dataset_type,
+ data_root=data_root,
+ img_dir='leftImg8bit/val',
+ ann_dir='gtFine/val',
+ pipeline=test_pipeline),
+ test=dict(
+ type=dataset_type,
+ data_root=data_root,
+ img_dir='leftImg8bit/val',
+ ann_dir='gtFine/val',
+ pipeline=test_pipeline))
diff --git a/annotator/uniformer/configs/_base_/datasets/cityscapes_769x769.py b/annotator/uniformer/configs/_base_/datasets/cityscapes_769x769.py
new file mode 100644
index 0000000000000000000000000000000000000000..336c7b254fe392b4703039fec86a83acdbd2e1a5
--- /dev/null
+++ b/annotator/uniformer/configs/_base_/datasets/cityscapes_769x769.py
@@ -0,0 +1,35 @@
+_base_ = './cityscapes.py'
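+# Everything not redefined here is inherited from ./cityscapes.py via the mmcv `_base_`
+# mechanism; this file only switches the pipelines to 769x769 crops.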
+img_norm_cfg = dict(
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+crop_size = (769, 769)
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='LoadAnnotations'),
+ dict(type='Resize', img_scale=(2049, 1025), ratio_range=(0.5, 2.0)),
+ dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
+ dict(type='RandomFlip', prob=0.5),
+ dict(type='PhotoMetricDistortion'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
+ dict(type='DefaultFormatBundle'),
+ dict(type='Collect', keys=['img', 'gt_semantic_seg']),
+]
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='MultiScaleFlipAug',
+ img_scale=(2049, 1025),
+ # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
+ flip=False,
+ transforms=[
+ dict(type='Resize', keep_ratio=True),
+ dict(type='RandomFlip'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img']),
+ ])
+]
+data = dict(
+ train=dict(pipeline=train_pipeline),
+ val=dict(pipeline=test_pipeline),
+ test=dict(pipeline=test_pipeline))
diff --git a/annotator/uniformer/configs/_base_/datasets/drive.py b/annotator/uniformer/configs/_base_/datasets/drive.py
new file mode 100644
index 0000000000000000000000000000000000000000..06e8ff606e0d2a4514ec8b7d2c6c436a32efcbf4
--- /dev/null
+++ b/annotator/uniformer/configs/_base_/datasets/drive.py
@@ -0,0 +1,59 @@
+# dataset settings
+dataset_type = 'DRIVEDataset'
+data_root = 'data/DRIVE'
+img_norm_cfg = dict(
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+img_scale = (584, 565)
+crop_size = (64, 64)
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='LoadAnnotations'),
+ dict(type='Resize', img_scale=img_scale, ratio_range=(0.5, 2.0)),
+ dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
+ dict(type='RandomFlip', prob=0.5),
+ dict(type='PhotoMetricDistortion'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
+ dict(type='DefaultFormatBundle'),
+ dict(type='Collect', keys=['img', 'gt_semantic_seg'])
+]
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='MultiScaleFlipAug',
+ img_scale=img_scale,
+ # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0],
+ flip=False,
+ transforms=[
+ dict(type='Resize', keep_ratio=True),
+ dict(type='RandomFlip'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+ ])
+]
+
+data = dict(
+ samples_per_gpu=4,
+ workers_per_gpu=4,
+ train=dict(
+ type='RepeatDataset',
+ times=40000,
+ dataset=dict(
+ type=dataset_type,
+ data_root=data_root,
+ img_dir='images/training',
+ ann_dir='annotations/training',
+ pipeline=train_pipeline)),
+ val=dict(
+ type=dataset_type,
+ data_root=data_root,
+ img_dir='images/validation',
+ ann_dir='annotations/validation',
+ pipeline=test_pipeline),
+ test=dict(
+ type=dataset_type,
+ data_root=data_root,
+ img_dir='images/validation',
+ ann_dir='annotations/validation',
+ pipeline=test_pipeline))
diff --git a/annotator/uniformer/configs/_base_/datasets/hrf.py b/annotator/uniformer/configs/_base_/datasets/hrf.py
new file mode 100644
index 0000000000000000000000000000000000000000..242d790eb1b83e75cf6b7eaa7a35c674099311ad
--- /dev/null
+++ b/annotator/uniformer/configs/_base_/datasets/hrf.py
@@ -0,0 +1,59 @@
+# dataset settings
+dataset_type = 'HRFDataset'
+data_root = 'data/HRF'
+img_norm_cfg = dict(
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+img_scale = (2336, 3504)
+crop_size = (256, 256)
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='LoadAnnotations'),
+ dict(type='Resize', img_scale=img_scale, ratio_range=(0.5, 2.0)),
+ dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
+ dict(type='RandomFlip', prob=0.5),
+ dict(type='PhotoMetricDistortion'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
+ dict(type='DefaultFormatBundle'),
+ dict(type='Collect', keys=['img', 'gt_semantic_seg'])
+]
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='MultiScaleFlipAug',
+ img_scale=img_scale,
+ # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0],
+ flip=False,
+ transforms=[
+ dict(type='Resize', keep_ratio=True),
+ dict(type='RandomFlip'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+ ])
+]
+
+data = dict(
+ samples_per_gpu=4,
+ workers_per_gpu=4,
+ train=dict(
+ type='RepeatDataset',
+ times=40000,
+ dataset=dict(
+ type=dataset_type,
+ data_root=data_root,
+ img_dir='images/training',
+ ann_dir='annotations/training',
+ pipeline=train_pipeline)),
+ val=dict(
+ type=dataset_type,
+ data_root=data_root,
+ img_dir='images/validation',
+ ann_dir='annotations/validation',
+ pipeline=test_pipeline),
+ test=dict(
+ type=dataset_type,
+ data_root=data_root,
+ img_dir='images/validation',
+ ann_dir='annotations/validation',
+ pipeline=test_pipeline))
diff --git a/annotator/uniformer/configs/_base_/datasets/pascal_context.py b/annotator/uniformer/configs/_base_/datasets/pascal_context.py
new file mode 100644
index 0000000000000000000000000000000000000000..ff65bad1b86d7e3a5980bb5b9fc55798dc8df5f4
--- /dev/null
+++ b/annotator/uniformer/configs/_base_/datasets/pascal_context.py
@@ -0,0 +1,60 @@
+# dataset settings
+dataset_type = 'PascalContextDataset'
+data_root = 'data/VOCdevkit/VOC2010/'
+img_norm_cfg = dict(
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+
+img_scale = (520, 520)
+crop_size = (480, 480)
+
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='LoadAnnotations'),
+ dict(type='Resize', img_scale=img_scale, ratio_range=(0.5, 2.0)),
+ dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
+ dict(type='RandomFlip', prob=0.5),
+ dict(type='PhotoMetricDistortion'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
+ dict(type='DefaultFormatBundle'),
+ dict(type='Collect', keys=['img', 'gt_semantic_seg']),
+]
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='MultiScaleFlipAug',
+ img_scale=img_scale,
+ # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
+ flip=False,
+ transforms=[
+ dict(type='Resize', keep_ratio=True),
+ dict(type='RandomFlip'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img']),
+ ])
+]
+data = dict(
+ samples_per_gpu=4,
+ workers_per_gpu=4,
+ train=dict(
+ type=dataset_type,
+ data_root=data_root,
+ img_dir='JPEGImages',
+ ann_dir='SegmentationClassContext',
+ split='ImageSets/SegmentationContext/train.txt',
+ pipeline=train_pipeline),
+ val=dict(
+ type=dataset_type,
+ data_root=data_root,
+ img_dir='JPEGImages',
+ ann_dir='SegmentationClassContext',
+ split='ImageSets/SegmentationContext/val.txt',
+ pipeline=test_pipeline),
+ test=dict(
+ type=dataset_type,
+ data_root=data_root,
+ img_dir='JPEGImages',
+ ann_dir='SegmentationClassContext',
+ split='ImageSets/SegmentationContext/val.txt',
+ pipeline=test_pipeline))
diff --git a/annotator/uniformer/configs/_base_/datasets/pascal_context_59.py b/annotator/uniformer/configs/_base_/datasets/pascal_context_59.py
new file mode 100644
index 0000000000000000000000000000000000000000..37585abab89834b95cd5bdd993b994fca1db65f6
--- /dev/null
+++ b/annotator/uniformer/configs/_base_/datasets/pascal_context_59.py
@@ -0,0 +1,60 @@
+# dataset settings
+dataset_type = 'PascalContextDataset59'
+data_root = 'data/VOCdevkit/VOC2010/'
+img_norm_cfg = dict(
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+
+img_scale = (520, 520)
+crop_size = (480, 480)
+
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='LoadAnnotations', reduce_zero_label=True),
+ dict(type='Resize', img_scale=img_scale, ratio_range=(0.5, 2.0)),
+ dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
+ dict(type='RandomFlip', prob=0.5),
+ dict(type='PhotoMetricDistortion'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
+ dict(type='DefaultFormatBundle'),
+ dict(type='Collect', keys=['img', 'gt_semantic_seg']),
+]
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='MultiScaleFlipAug',
+ img_scale=img_scale,
+ # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
+ flip=False,
+ transforms=[
+ dict(type='Resize', keep_ratio=True),
+ dict(type='RandomFlip'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img']),
+ ])
+]
+data = dict(
+ samples_per_gpu=4,
+ workers_per_gpu=4,
+ train=dict(
+ type=dataset_type,
+ data_root=data_root,
+ img_dir='JPEGImages',
+ ann_dir='SegmentationClassContext',
+ split='ImageSets/SegmentationContext/train.txt',
+ pipeline=train_pipeline),
+ val=dict(
+ type=dataset_type,
+ data_root=data_root,
+ img_dir='JPEGImages',
+ ann_dir='SegmentationClassContext',
+ split='ImageSets/SegmentationContext/val.txt',
+ pipeline=test_pipeline),
+ test=dict(
+ type=dataset_type,
+ data_root=data_root,
+ img_dir='JPEGImages',
+ ann_dir='SegmentationClassContext',
+ split='ImageSets/SegmentationContext/val.txt',
+ pipeline=test_pipeline))
diff --git a/annotator/uniformer/configs/_base_/datasets/pascal_voc12.py b/annotator/uniformer/configs/_base_/datasets/pascal_voc12.py
new file mode 100644
index 0000000000000000000000000000000000000000..ba1d42d0c5781f56dc177d860d856bb34adce555
--- /dev/null
+++ b/annotator/uniformer/configs/_base_/datasets/pascal_voc12.py
@@ -0,0 +1,57 @@
+# dataset settings
+dataset_type = 'PascalVOCDataset'
+data_root = 'data/VOCdevkit/VOC2012'
+img_norm_cfg = dict(
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+crop_size = (512, 512)
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='LoadAnnotations'),
+ dict(type='Resize', img_scale=(2048, 512), ratio_range=(0.5, 2.0)),
+ dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
+ dict(type='RandomFlip', prob=0.5),
+ dict(type='PhotoMetricDistortion'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
+ dict(type='DefaultFormatBundle'),
+ dict(type='Collect', keys=['img', 'gt_semantic_seg']),
+]
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='MultiScaleFlipAug',
+ img_scale=(2048, 512),
+ # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
+ flip=False,
+ transforms=[
+ dict(type='Resize', keep_ratio=True),
+ dict(type='RandomFlip'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img']),
+ ])
+]
+data = dict(
+ samples_per_gpu=4,
+ workers_per_gpu=4,
+ train=dict(
+ type=dataset_type,
+ data_root=data_root,
+ img_dir='JPEGImages',
+ ann_dir='SegmentationClass',
+ split='ImageSets/Segmentation/train.txt',
+ pipeline=train_pipeline),
+ val=dict(
+ type=dataset_type,
+ data_root=data_root,
+ img_dir='JPEGImages',
+ ann_dir='SegmentationClass',
+ split='ImageSets/Segmentation/val.txt',
+ pipeline=test_pipeline),
+ test=dict(
+ type=dataset_type,
+ data_root=data_root,
+ img_dir='JPEGImages',
+ ann_dir='SegmentationClass',
+ split='ImageSets/Segmentation/val.txt',
+ pipeline=test_pipeline))
diff --git a/annotator/uniformer/configs/_base_/datasets/pascal_voc12_aug.py b/annotator/uniformer/configs/_base_/datasets/pascal_voc12_aug.py
new file mode 100644
index 0000000000000000000000000000000000000000..3f23b6717d53ad29f02dd15046802a2631a5076b
--- /dev/null
+++ b/annotator/uniformer/configs/_base_/datasets/pascal_voc12_aug.py
@@ -0,0 +1,9 @@
+_base_ = './pascal_voc12.py'
+# dataset settings
+data = dict(
+ train=dict(
+ ann_dir=['SegmentationClass', 'SegmentationClassAug'],
+ split=[
+ 'ImageSets/Segmentation/train.txt',
+ 'ImageSets/Segmentation/aug.txt'
+ ]))
diff --git a/annotator/uniformer/configs/_base_/datasets/stare.py b/annotator/uniformer/configs/_base_/datasets/stare.py
new file mode 100644
index 0000000000000000000000000000000000000000..3f71b25488cc11a6b4d582ac52b5a24e1ad1cf8e
--- /dev/null
+++ b/annotator/uniformer/configs/_base_/datasets/stare.py
@@ -0,0 +1,59 @@
+# dataset settings
+dataset_type = 'STAREDataset'
+data_root = 'data/STARE'
+img_norm_cfg = dict(
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+img_scale = (605, 700)
+crop_size = (128, 128)
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='LoadAnnotations'),
+ dict(type='Resize', img_scale=img_scale, ratio_range=(0.5, 2.0)),
+ dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
+ dict(type='RandomFlip', prob=0.5),
+ dict(type='PhotoMetricDistortion'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
+ dict(type='DefaultFormatBundle'),
+ dict(type='Collect', keys=['img', 'gt_semantic_seg'])
+]
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='MultiScaleFlipAug',
+ img_scale=img_scale,
+ # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0],
+ flip=False,
+ transforms=[
+ dict(type='Resize', keep_ratio=True),
+ dict(type='RandomFlip'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img'])
+ ])
+]
+
+data = dict(
+ samples_per_gpu=4,
+ workers_per_gpu=4,
+ train=dict(
+ type='RepeatDataset',
+ times=40000,
+ dataset=dict(
+ type=dataset_type,
+ data_root=data_root,
+ img_dir='images/training',
+ ann_dir='annotations/training',
+ pipeline=train_pipeline)),
+ val=dict(
+ type=dataset_type,
+ data_root=data_root,
+ img_dir='images/validation',
+ ann_dir='annotations/validation',
+ pipeline=test_pipeline),
+ test=dict(
+ type=dataset_type,
+ data_root=data_root,
+ img_dir='images/validation',
+ ann_dir='annotations/validation',
+ pipeline=test_pipeline))
diff --git a/annotator/uniformer/configs/_base_/default_runtime.py b/annotator/uniformer/configs/_base_/default_runtime.py
new file mode 100644
index 0000000000000000000000000000000000000000..b564cc4e7e7d9a67dacaaddecb100e4d8f5c005b
--- /dev/null
+++ b/annotator/uniformer/configs/_base_/default_runtime.py
@@ -0,0 +1,14 @@
+# yapf:disable
+log_config = dict(
+ interval=50,
+ hooks=[
+ dict(type='TextLoggerHook', by_epoch=False),
+ # dict(type='TensorboardLoggerHook')
+ ])
+# yapf:enable
+dist_params = dict(backend='nccl')
+log_level = 'INFO'
+load_from = None
+resume_from = None
+workflow = [('train', 1)]
+cudnn_benchmark = True
diff --git a/annotator/uniformer/configs/_base_/models/ann_r50-d8.py b/annotator/uniformer/configs/_base_/models/ann_r50-d8.py
new file mode 100644
index 0000000000000000000000000000000000000000..a2cb653827e44e6015b3b83bc578003e614a6aa1
--- /dev/null
+++ b/annotator/uniformer/configs/_base_/models/ann_r50-d8.py
@@ -0,0 +1,46 @@
+# model settings
+norm_cfg = dict(type='SyncBN', requires_grad=True)
+model = dict(
+ type='EncoderDecoder',
+ pretrained='open-mmlab://resnet50_v1c',
+ backbone=dict(
+ type='ResNetV1c',
+ depth=50,
+ num_stages=4,
+ out_indices=(0, 1, 2, 3),
+ dilations=(1, 1, 2, 4),
+ strides=(1, 2, 1, 1),
+ norm_cfg=norm_cfg,
+ norm_eval=False,
+ style='pytorch',
+ contract_dilation=True),
+ decode_head=dict(
+ type='ANNHead',
+ in_channels=[1024, 2048],
+ in_index=[2, 3],
+ channels=512,
+ project_channels=256,
+ query_scales=(1, ),
+ key_pool_scales=(1, 3, 6, 8),
+ dropout_ratio=0.1,
+ num_classes=19,
+ norm_cfg=norm_cfg,
+ align_corners=False,
+ loss_decode=dict(
+ type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
+ auxiliary_head=dict(
+ type='FCNHead',
+ in_channels=1024,
+ in_index=2,
+ channels=256,
+ num_convs=1,
+ concat_input=False,
+ dropout_ratio=0.1,
+ num_classes=19,
+ norm_cfg=norm_cfg,
+ align_corners=False,
+ loss_decode=dict(
+ type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
+ # model training and testing settings
+ train_cfg=dict(),
+ test_cfg=dict(mode='whole'))
diff --git a/annotator/uniformer/configs/_base_/models/apcnet_r50-d8.py b/annotator/uniformer/configs/_base_/models/apcnet_r50-d8.py
new file mode 100644
index 0000000000000000000000000000000000000000..c8f5316cbcf3896ba9de7ca2c801eba512f01d5e
--- /dev/null
+++ b/annotator/uniformer/configs/_base_/models/apcnet_r50-d8.py
@@ -0,0 +1,44 @@
+# model settings
+norm_cfg = dict(type='SyncBN', requires_grad=True)
+model = dict(
+ type='EncoderDecoder',
+ pretrained='open-mmlab://resnet50_v1c',
+ backbone=dict(
+ type='ResNetV1c',
+ depth=50,
+ num_stages=4,
+ out_indices=(0, 1, 2, 3),
+ dilations=(1, 1, 2, 4),
+ strides=(1, 2, 1, 1),
+ norm_cfg=norm_cfg,
+ norm_eval=False,
+ style='pytorch',
+ contract_dilation=True),
+ decode_head=dict(
+ type='APCHead',
+ in_channels=2048,
+ in_index=3,
+ channels=512,
+ pool_scales=(1, 2, 3, 6),
+ dropout_ratio=0.1,
+ num_classes=19,
+ norm_cfg=dict(type='SyncBN', requires_grad=True),
+ align_corners=False,
+ loss_decode=dict(
+ type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
+ auxiliary_head=dict(
+ type='FCNHead',
+ in_channels=1024,
+ in_index=2,
+ channels=256,
+ num_convs=1,
+ concat_input=False,
+ dropout_ratio=0.1,
+ num_classes=19,
+ norm_cfg=norm_cfg,
+ align_corners=False,
+ loss_decode=dict(
+ type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
+ # model training and testing settings
+ train_cfg=dict(),
+ test_cfg=dict(mode='whole'))
diff --git a/annotator/uniformer/configs/_base_/models/ccnet_r50-d8.py b/annotator/uniformer/configs/_base_/models/ccnet_r50-d8.py
new file mode 100644
index 0000000000000000000000000000000000000000..794148f576b9e215c3c6963e73dffe98204b7717
--- /dev/null
+++ b/annotator/uniformer/configs/_base_/models/ccnet_r50-d8.py
@@ -0,0 +1,44 @@
+# model settings
+norm_cfg = dict(type='SyncBN', requires_grad=True)
+model = dict(
+ type='EncoderDecoder',
+ pretrained='open-mmlab://resnet50_v1c',
+ backbone=dict(
+ type='ResNetV1c',
+ depth=50,
+ num_stages=4,
+ out_indices=(0, 1, 2, 3),
+ dilations=(1, 1, 2, 4),
+ strides=(1, 2, 1, 1),
+ norm_cfg=norm_cfg,
+ norm_eval=False,
+ style='pytorch',
+ contract_dilation=True),
+ decode_head=dict(
+ type='CCHead',
+ in_channels=2048,
+ in_index=3,
+ channels=512,
+ recurrence=2,
+ dropout_ratio=0.1,
+ num_classes=19,
+ norm_cfg=norm_cfg,
+ align_corners=False,
+ loss_decode=dict(
+ type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
+ auxiliary_head=dict(
+ type='FCNHead',
+ in_channels=1024,
+ in_index=2,
+ channels=256,
+ num_convs=1,
+ concat_input=False,
+ dropout_ratio=0.1,
+ num_classes=19,
+ norm_cfg=norm_cfg,
+ align_corners=False,
+ loss_decode=dict(
+ type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
+ # model training and testing settings
+ train_cfg=dict(),
+ test_cfg=dict(mode='whole'))
diff --git a/annotator/uniformer/configs/_base_/models/cgnet.py b/annotator/uniformer/configs/_base_/models/cgnet.py
new file mode 100644
index 0000000000000000000000000000000000000000..eff8d9458c877c5db894957e0b1b4597e40da6ab
--- /dev/null
+++ b/annotator/uniformer/configs/_base_/models/cgnet.py
@@ -0,0 +1,35 @@
+# model settings
+norm_cfg = dict(type='SyncBN', eps=1e-03, requires_grad=True)
+model = dict(
+ type='EncoderDecoder',
+ backbone=dict(
+ type='CGNet',
+ norm_cfg=norm_cfg,
+ in_channels=3,
+ num_channels=(32, 64, 128),
+ num_blocks=(3, 21),
+ dilations=(2, 4),
+ reductions=(8, 16)),
+ decode_head=dict(
+ type='FCNHead',
+ in_channels=256,
+ in_index=2,
+ channels=256,
+ num_convs=0,
+ concat_input=False,
+ dropout_ratio=0,
+ num_classes=19,
+ norm_cfg=norm_cfg,
+ loss_decode=dict(
+ type='CrossEntropyLoss',
+ use_sigmoid=False,
+ loss_weight=1.0,
+ class_weight=[
+ 2.5959933, 6.7415504, 3.5354059, 9.8663225, 9.690899, 9.369352,
+ 10.289121, 9.953208, 4.3097677, 9.490387, 7.674431, 9.396905,
+ 10.347791, 6.3927646, 10.226669, 10.241062, 10.280587,
+ 10.396974, 10.055647
+ ])),
+ # model training and testing settings
+ train_cfg=dict(sampler=None),
+ test_cfg=dict(mode='whole'))
diff --git a/annotator/uniformer/configs/_base_/models/danet_r50-d8.py b/annotator/uniformer/configs/_base_/models/danet_r50-d8.py
new file mode 100644
index 0000000000000000000000000000000000000000..2c934939fac48525f22ad86f489a041dd7db7d09
--- /dev/null
+++ b/annotator/uniformer/configs/_base_/models/danet_r50-d8.py
@@ -0,0 +1,44 @@
+# model settings
+norm_cfg = dict(type='SyncBN', requires_grad=True)
+model = dict(
+ type='EncoderDecoder',
+ pretrained='open-mmlab://resnet50_v1c',
+ backbone=dict(
+ type='ResNetV1c',
+ depth=50,
+ num_stages=4,
+ out_indices=(0, 1, 2, 3),
+ dilations=(1, 1, 2, 4),
+ strides=(1, 2, 1, 1),
+ norm_cfg=norm_cfg,
+ norm_eval=False,
+ style='pytorch',
+ contract_dilation=True),
+ decode_head=dict(
+ type='DAHead',
+ in_channels=2048,
+ in_index=3,
+ channels=512,
+ pam_channels=64,
+ dropout_ratio=0.1,
+ num_classes=19,
+ norm_cfg=norm_cfg,
+ align_corners=False,
+ loss_decode=dict(
+ type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
+ auxiliary_head=dict(
+ type='FCNHead',
+ in_channels=1024,
+ in_index=2,
+ channels=256,
+ num_convs=1,
+ concat_input=False,
+ dropout_ratio=0.1,
+ num_classes=19,
+ norm_cfg=norm_cfg,
+ align_corners=False,
+ loss_decode=dict(
+ type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
+ # model training and testing settings
+ train_cfg=dict(),
+ test_cfg=dict(mode='whole'))
diff --git a/annotator/uniformer/configs/_base_/models/deeplabv3_r50-d8.py b/annotator/uniformer/configs/_base_/models/deeplabv3_r50-d8.py
new file mode 100644
index 0000000000000000000000000000000000000000..d7a43bee01422ad4795dd27874e0cd4bb6cbfecf
--- /dev/null
+++ b/annotator/uniformer/configs/_base_/models/deeplabv3_r50-d8.py
@@ -0,0 +1,44 @@
+# model settings
+norm_cfg = dict(type='SyncBN', requires_grad=True)
+model = dict(
+ type='EncoderDecoder',
+ pretrained='open-mmlab://resnet50_v1c',
+ backbone=dict(
+ type='ResNetV1c',
+ depth=50,
+ num_stages=4,
+ out_indices=(0, 1, 2, 3),
+ dilations=(1, 1, 2, 4),
+ strides=(1, 2, 1, 1),
+ norm_cfg=norm_cfg,
+ norm_eval=False,
+ style='pytorch',
+ contract_dilation=True),
+ decode_head=dict(
+ type='ASPPHead',
+ in_channels=2048,
+ in_index=3,
+ channels=512,
+ dilations=(1, 12, 24, 36),
+ dropout_ratio=0.1,
+ num_classes=19,
+ norm_cfg=norm_cfg,
+ align_corners=False,
+ loss_decode=dict(
+ type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
+ auxiliary_head=dict(
+ type='FCNHead',
+ in_channels=1024,
+ in_index=2,
+ channels=256,
+ num_convs=1,
+ concat_input=False,
+ dropout_ratio=0.1,
+ num_classes=19,
+ norm_cfg=norm_cfg,
+ align_corners=False,
+ loss_decode=dict(
+ type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
+ # model training and testing settings
+ train_cfg=dict(),
+ test_cfg=dict(mode='whole'))
diff --git a/annotator/uniformer/configs/_base_/models/deeplabv3_unet_s5-d16.py b/annotator/uniformer/configs/_base_/models/deeplabv3_unet_s5-d16.py
new file mode 100644
index 0000000000000000000000000000000000000000..0cd262999d8b2cb8e14a5c32190ae73f479d8e81
--- /dev/null
+++ b/annotator/uniformer/configs/_base_/models/deeplabv3_unet_s5-d16.py
@@ -0,0 +1,50 @@
+# model settings
+norm_cfg = dict(type='SyncBN', requires_grad=True)
+model = dict(
+ type='EncoderDecoder',
+ pretrained=None,
+ backbone=dict(
+ type='UNet',
+ in_channels=3,
+ base_channels=64,
+ num_stages=5,
+ strides=(1, 1, 1, 1, 1),
+ enc_num_convs=(2, 2, 2, 2, 2),
+ dec_num_convs=(2, 2, 2, 2),
+ downsamples=(True, True, True, True),
+ enc_dilations=(1, 1, 1, 1, 1),
+ dec_dilations=(1, 1, 1, 1),
+ with_cp=False,
+ conv_cfg=None,
+ norm_cfg=norm_cfg,
+ act_cfg=dict(type='ReLU'),
+ upsample_cfg=dict(type='InterpConv'),
+ norm_eval=False),
+ decode_head=dict(
+ type='ASPPHead',
+ in_channels=64,
+ in_index=4,
+ channels=16,
+ dilations=(1, 12, 24, 36),
+ dropout_ratio=0.1,
+ num_classes=2,
+ norm_cfg=norm_cfg,
+ align_corners=False,
+ loss_decode=dict(
+ type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
+ auxiliary_head=dict(
+ type='FCNHead',
+ in_channels=128,
+ in_index=3,
+ channels=64,
+ num_convs=1,
+ concat_input=False,
+ dropout_ratio=0.1,
+ num_classes=2,
+ norm_cfg=norm_cfg,
+ align_corners=False,
+ loss_decode=dict(
+ type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
+ # model training and testing settings
+ train_cfg=dict(),
+ test_cfg=dict(mode='slide', crop_size=256, stride=170))
diff --git a/annotator/uniformer/configs/_base_/models/deeplabv3plus_r50-d8.py b/annotator/uniformer/configs/_base_/models/deeplabv3plus_r50-d8.py
new file mode 100644
index 0000000000000000000000000000000000000000..050e39e091d816df9028d23aa3ecf9db74e441e1
--- /dev/null
+++ b/annotator/uniformer/configs/_base_/models/deeplabv3plus_r50-d8.py
@@ -0,0 +1,46 @@
+# model settings
+norm_cfg = dict(type='SyncBN', requires_grad=True)
+model = dict(
+ type='EncoderDecoder',
+ pretrained='open-mmlab://resnet50_v1c',
+ backbone=dict(
+ type='ResNetV1c',
+ depth=50,
+ num_stages=4,
+ out_indices=(0, 1, 2, 3),
+ dilations=(1, 1, 2, 4),
+ strides=(1, 2, 1, 1),
+ norm_cfg=norm_cfg,
+ norm_eval=False,
+ style='pytorch',
+ contract_dilation=True),
+ decode_head=dict(
+ type='DepthwiseSeparableASPPHead',
+ in_channels=2048,
+ in_index=3,
+ channels=512,
+ dilations=(1, 12, 24, 36),
+ c1_in_channels=256,
+ c1_channels=48,
+ dropout_ratio=0.1,
+ num_classes=19,
+ norm_cfg=norm_cfg,
+ align_corners=False,
+ loss_decode=dict(
+ type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
+ auxiliary_head=dict(
+ type='FCNHead',
+ in_channels=1024,
+ in_index=2,
+ channels=256,
+ num_convs=1,
+ concat_input=False,
+ dropout_ratio=0.1,
+ num_classes=19,
+ norm_cfg=norm_cfg,
+ align_corners=False,
+ loss_decode=dict(
+ type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
+ # model training and testing settings
+ train_cfg=dict(),
+ test_cfg=dict(mode='whole'))
diff --git a/annotator/uniformer/configs/_base_/models/dmnet_r50-d8.py b/annotator/uniformer/configs/_base_/models/dmnet_r50-d8.py
new file mode 100644
index 0000000000000000000000000000000000000000..d22ba52640bebd805b3b8d07025e276dfb023759
--- /dev/null
+++ b/annotator/uniformer/configs/_base_/models/dmnet_r50-d8.py
@@ -0,0 +1,44 @@
+# model settings
+norm_cfg = dict(type='SyncBN', requires_grad=True)
+model = dict(
+ type='EncoderDecoder',
+ pretrained='open-mmlab://resnet50_v1c',
+ backbone=dict(
+ type='ResNetV1c',
+ depth=50,
+ num_stages=4,
+ out_indices=(0, 1, 2, 3),
+ dilations=(1, 1, 2, 4),
+ strides=(1, 2, 1, 1),
+ norm_cfg=norm_cfg,
+ norm_eval=False,
+ style='pytorch',
+ contract_dilation=True),
+ decode_head=dict(
+ type='DMHead',
+ in_channels=2048,
+ in_index=3,
+ channels=512,
+ filter_sizes=(1, 3, 5, 7),
+ dropout_ratio=0.1,
+ num_classes=19,
+ norm_cfg=dict(type='SyncBN', requires_grad=True),
+ align_corners=False,
+ loss_decode=dict(
+ type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
+ auxiliary_head=dict(
+ type='FCNHead',
+ in_channels=1024,
+ in_index=2,
+ channels=256,
+ num_convs=1,
+ concat_input=False,
+ dropout_ratio=0.1,
+ num_classes=19,
+ norm_cfg=norm_cfg,
+ align_corners=False,
+ loss_decode=dict(
+ type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
+ # model training and testing settings
+ train_cfg=dict(),
+ test_cfg=dict(mode='whole'))
diff --git a/annotator/uniformer/configs/_base_/models/dnl_r50-d8.py b/annotator/uniformer/configs/_base_/models/dnl_r50-d8.py
new file mode 100644
index 0000000000000000000000000000000000000000..edb4c174c51e34c103737ba39bfc48bf831e561d
--- /dev/null
+++ b/annotator/uniformer/configs/_base_/models/dnl_r50-d8.py
@@ -0,0 +1,46 @@
+# model settings
+norm_cfg = dict(type='SyncBN', requires_grad=True)
+model = dict(
+ type='EncoderDecoder',
+ pretrained='open-mmlab://resnet50_v1c',
+ backbone=dict(
+ type='ResNetV1c',
+ depth=50,
+ num_stages=4,
+ out_indices=(0, 1, 2, 3),
+ dilations=(1, 1, 2, 4),
+ strides=(1, 2, 1, 1),
+ norm_cfg=norm_cfg,
+ norm_eval=False,
+ style='pytorch',
+ contract_dilation=True),
+ decode_head=dict(
+ type='DNLHead',
+ in_channels=2048,
+ in_index=3,
+ channels=512,
+ dropout_ratio=0.1,
+ reduction=2,
+ use_scale=True,
+ mode='embedded_gaussian',
+ num_classes=19,
+ norm_cfg=norm_cfg,
+ align_corners=False,
+ loss_decode=dict(
+ type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
+ auxiliary_head=dict(
+ type='FCNHead',
+ in_channels=1024,
+ in_index=2,
+ channels=256,
+ num_convs=1,
+ concat_input=False,
+ dropout_ratio=0.1,
+ num_classes=19,
+ norm_cfg=norm_cfg,
+ align_corners=False,
+ loss_decode=dict(
+ type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
+ # model training and testing settings
+ train_cfg=dict(),
+ test_cfg=dict(mode='whole'))
diff --git a/annotator/uniformer/configs/_base_/models/emanet_r50-d8.py b/annotator/uniformer/configs/_base_/models/emanet_r50-d8.py
new file mode 100644
index 0000000000000000000000000000000000000000..26adcd430926de0862204a71d345f2543167f27b
--- /dev/null
+++ b/annotator/uniformer/configs/_base_/models/emanet_r50-d8.py
@@ -0,0 +1,47 @@
+# model settings
+norm_cfg = dict(type='SyncBN', requires_grad=True)
+model = dict(
+ type='EncoderDecoder',
+ pretrained='open-mmlab://resnet50_v1c',
+ backbone=dict(
+ type='ResNetV1c',
+ depth=50,
+ num_stages=4,
+ out_indices=(0, 1, 2, 3),
+ dilations=(1, 1, 2, 4),
+ strides=(1, 2, 1, 1),
+ norm_cfg=norm_cfg,
+ norm_eval=False,
+ style='pytorch',
+ contract_dilation=True),
+ decode_head=dict(
+ type='EMAHead',
+ in_channels=2048,
+ in_index=3,
+ channels=256,
+ ema_channels=512,
+ num_bases=64,
+ num_stages=3,
+ momentum=0.1,
+ dropout_ratio=0.1,
+ num_classes=19,
+ norm_cfg=norm_cfg,
+ align_corners=False,
+ loss_decode=dict(
+ type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
+ auxiliary_head=dict(
+ type='FCNHead',
+ in_channels=1024,
+ in_index=2,
+ channels=256,
+ num_convs=1,
+ concat_input=False,
+ dropout_ratio=0.1,
+ num_classes=19,
+ norm_cfg=norm_cfg,
+ align_corners=False,
+ loss_decode=dict(
+ type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
+ # model training and testing settings
+ train_cfg=dict(),
+ test_cfg=dict(mode='whole'))
diff --git a/annotator/uniformer/configs/_base_/models/encnet_r50-d8.py b/annotator/uniformer/configs/_base_/models/encnet_r50-d8.py
new file mode 100644
index 0000000000000000000000000000000000000000..be777123a886503172a95fe0719e956a147bbd68
--- /dev/null
+++ b/annotator/uniformer/configs/_base_/models/encnet_r50-d8.py
@@ -0,0 +1,48 @@
+# model settings
+norm_cfg = dict(type='SyncBN', requires_grad=True)
+model = dict(
+ type='EncoderDecoder',
+ pretrained='open-mmlab://resnet50_v1c',
+ backbone=dict(
+ type='ResNetV1c',
+ depth=50,
+ num_stages=4,
+ out_indices=(0, 1, 2, 3),
+ dilations=(1, 1, 2, 4),
+ strides=(1, 2, 1, 1),
+ norm_cfg=norm_cfg,
+ norm_eval=False,
+ style='pytorch',
+ contract_dilation=True),
+ decode_head=dict(
+ type='EncHead',
+ in_channels=[512, 1024, 2048],
+ in_index=(1, 2, 3),
+ channels=512,
+ num_codes=32,
+ use_se_loss=True,
+ add_lateral=False,
+ dropout_ratio=0.1,
+ num_classes=19,
+ norm_cfg=norm_cfg,
+ align_corners=False,
+ loss_decode=dict(
+ type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
+ loss_se_decode=dict(
+ type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.2)),
+ auxiliary_head=dict(
+ type='FCNHead',
+ in_channels=1024,
+ in_index=2,
+ channels=256,
+ num_convs=1,
+ concat_input=False,
+ dropout_ratio=0.1,
+ num_classes=19,
+ norm_cfg=norm_cfg,
+ align_corners=False,
+ loss_decode=dict(
+ type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
+ # model training and testing settings
+ train_cfg=dict(),
+ test_cfg=dict(mode='whole'))
diff --git a/annotator/uniformer/configs/_base_/models/fast_scnn.py b/annotator/uniformer/configs/_base_/models/fast_scnn.py
new file mode 100644
index 0000000000000000000000000000000000000000..32fdeb659355a5ce5ef2cc7c2f30742703811cdf
--- /dev/null
+++ b/annotator/uniformer/configs/_base_/models/fast_scnn.py
@@ -0,0 +1,57 @@
+# model settings
+norm_cfg = dict(type='SyncBN', requires_grad=True, momentum=0.01)
+model = dict(
+ type='EncoderDecoder',
+ backbone=dict(
+ type='FastSCNN',
+ downsample_dw_channels=(32, 48),
+ global_in_channels=64,
+ global_block_channels=(64, 96, 128),
+ global_block_strides=(2, 2, 1),
+ global_out_channels=128,
+ higher_in_channels=64,
+ lower_in_channels=128,
+ fusion_out_channels=128,
+ out_indices=(0, 1, 2),
+ norm_cfg=norm_cfg,
+ align_corners=False),
+ decode_head=dict(
+ type='DepthwiseSeparableFCNHead',
+ in_channels=128,
+ channels=128,
+ concat_input=False,
+ num_classes=19,
+ in_index=-1,
+ norm_cfg=norm_cfg,
+ align_corners=False,
+ loss_decode=dict(
+ type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.4)),
+ auxiliary_head=[
+ dict(
+ type='FCNHead',
+ in_channels=128,
+ channels=32,
+ num_convs=1,
+ num_classes=19,
+ in_index=-2,
+ norm_cfg=norm_cfg,
+ concat_input=False,
+ align_corners=False,
+ loss_decode=dict(
+ type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.4)),
+ dict(
+ type='FCNHead',
+ in_channels=64,
+ channels=32,
+ num_convs=1,
+ num_classes=19,
+ in_index=-3,
+ norm_cfg=norm_cfg,
+ concat_input=False,
+ align_corners=False,
+ loss_decode=dict(
+ type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.4)),
+ ],
+ # model training and testing settings
+ train_cfg=dict(),
+ test_cfg=dict(mode='whole'))
diff --git a/annotator/uniformer/configs/_base_/models/fcn_hr18.py b/annotator/uniformer/configs/_base_/models/fcn_hr18.py
new file mode 100644
index 0000000000000000000000000000000000000000..c3e299bc89ada56ca14bbffcbdb08a586b8ed9e9
--- /dev/null
+++ b/annotator/uniformer/configs/_base_/models/fcn_hr18.py
@@ -0,0 +1,52 @@
+# model settings
+norm_cfg = dict(type='SyncBN', requires_grad=True)
+model = dict(
+ type='EncoderDecoder',
+ pretrained='open-mmlab://msra/hrnetv2_w18',
+ backbone=dict(
+ type='HRNet',
+ norm_cfg=norm_cfg,
+ norm_eval=False,
+ extra=dict(
+ stage1=dict(
+ num_modules=1,
+ num_branches=1,
+ block='BOTTLENECK',
+ num_blocks=(4, ),
+ num_channels=(64, )),
+ stage2=dict(
+ num_modules=1,
+ num_branches=2,
+ block='BASIC',
+ num_blocks=(4, 4),
+ num_channels=(18, 36)),
+ stage3=dict(
+ num_modules=4,
+ num_branches=3,
+ block='BASIC',
+ num_blocks=(4, 4, 4),
+ num_channels=(18, 36, 72)),
+ stage4=dict(
+ num_modules=3,
+ num_branches=4,
+ block='BASIC',
+ num_blocks=(4, 4, 4, 4),
+ num_channels=(18, 36, 72, 144)))),
+ decode_head=dict(
+ type='FCNHead',
+ in_channels=[18, 36, 72, 144],
+ in_index=(0, 1, 2, 3),
+ channels=sum([18, 36, 72, 144]),
+ input_transform='resize_concat',
+ kernel_size=1,
+ num_convs=1,
+ concat_input=False,
+ dropout_ratio=-1,
+ num_classes=19,
+ norm_cfg=norm_cfg,
+ align_corners=False,
+ loss_decode=dict(
+ type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
+ # model training and testing settings
+ train_cfg=dict(),
+ test_cfg=dict(mode='whole'))
diff --git a/annotator/uniformer/configs/_base_/models/fcn_r50-d8.py b/annotator/uniformer/configs/_base_/models/fcn_r50-d8.py
new file mode 100644
index 0000000000000000000000000000000000000000..5e98f6cc918b6146fc6d613c6918e825ef1355c3
--- /dev/null
+++ b/annotator/uniformer/configs/_base_/models/fcn_r50-d8.py
@@ -0,0 +1,45 @@
+# model settings
+norm_cfg = dict(type='SyncBN', requires_grad=True)
+model = dict(
+ type='EncoderDecoder',
+ pretrained='open-mmlab://resnet50_v1c',
+ backbone=dict(
+ type='ResNetV1c',
+ depth=50,
+ num_stages=4,
+ out_indices=(0, 1, 2, 3),
+ dilations=(1, 1, 2, 4),
+ strides=(1, 2, 1, 1),
+ norm_cfg=norm_cfg,
+ norm_eval=False,
+ style='pytorch',
+ contract_dilation=True),
+ decode_head=dict(
+ type='FCNHead',
+ in_channels=2048,
+ in_index=3,
+ channels=512,
+ num_convs=2,
+ concat_input=True,
+ dropout_ratio=0.1,
+ num_classes=19,
+ norm_cfg=norm_cfg,
+ align_corners=False,
+ loss_decode=dict(
+ type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
+ auxiliary_head=dict(
+ type='FCNHead',
+ in_channels=1024,
+ in_index=2,
+ channels=256,
+ num_convs=1,
+ concat_input=False,
+ dropout_ratio=0.1,
+ num_classes=19,
+ norm_cfg=norm_cfg,
+ align_corners=False,
+ loss_decode=dict(
+ type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
+ # model training and testing settings
+ train_cfg=dict(),
+ test_cfg=dict(mode='whole'))
diff --git a/annotator/uniformer/configs/_base_/models/fcn_unet_s5-d16.py b/annotator/uniformer/configs/_base_/models/fcn_unet_s5-d16.py
new file mode 100644
index 0000000000000000000000000000000000000000..a33e7972877f902d0e7d18401ca675e3e4e60a18
--- /dev/null
+++ b/annotator/uniformer/configs/_base_/models/fcn_unet_s5-d16.py
@@ -0,0 +1,51 @@
+# model settings
+norm_cfg = dict(type='SyncBN', requires_grad=True)
+model = dict(
+ type='EncoderDecoder',
+ pretrained=None,
+ backbone=dict(
+ type='UNet',
+ in_channels=3,
+ base_channels=64,
+ num_stages=5,
+ strides=(1, 1, 1, 1, 1),
+ enc_num_convs=(2, 2, 2, 2, 2),
+ dec_num_convs=(2, 2, 2, 2),
+ downsamples=(True, True, True, True),
+ enc_dilations=(1, 1, 1, 1, 1),
+ dec_dilations=(1, 1, 1, 1),
+ with_cp=False,
+ conv_cfg=None,
+ norm_cfg=norm_cfg,
+ act_cfg=dict(type='ReLU'),
+ upsample_cfg=dict(type='InterpConv'),
+ norm_eval=False),
+ decode_head=dict(
+ type='FCNHead',
+ in_channels=64,
+ in_index=4,
+ channels=64,
+ num_convs=1,
+ concat_input=False,
+ dropout_ratio=0.1,
+ num_classes=2,
+ norm_cfg=norm_cfg,
+ align_corners=False,
+ loss_decode=dict(
+ type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
+ auxiliary_head=dict(
+ type='FCNHead',
+ in_channels=128,
+ in_index=3,
+ channels=64,
+ num_convs=1,
+ concat_input=False,
+ dropout_ratio=0.1,
+ num_classes=2,
+ norm_cfg=norm_cfg,
+ align_corners=False,
+ loss_decode=dict(
+ type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
+ # model training and testing settings
+ train_cfg=dict(),
+ test_cfg=dict(mode='slide', crop_size=256, stride=170))
diff --git a/annotator/uniformer/configs/_base_/models/fpn_r50.py b/annotator/uniformer/configs/_base_/models/fpn_r50.py
new file mode 100644
index 0000000000000000000000000000000000000000..86ab327db92e44c14822d65f1c9277cb007f17c1
--- /dev/null
+++ b/annotator/uniformer/configs/_base_/models/fpn_r50.py
@@ -0,0 +1,36 @@
+# model settings
+norm_cfg = dict(type='SyncBN', requires_grad=True)
+model = dict(
+ type='EncoderDecoder',
+ pretrained='open-mmlab://resnet50_v1c',
+ backbone=dict(
+ type='ResNetV1c',
+ depth=50,
+ num_stages=4,
+ out_indices=(0, 1, 2, 3),
+ dilations=(1, 1, 1, 1),
+ strides=(1, 2, 2, 2),
+ norm_cfg=norm_cfg,
+ norm_eval=False,
+ style='pytorch',
+ contract_dilation=True),
+ neck=dict(
+ type='FPN',
+ in_channels=[256, 512, 1024, 2048],
+ out_channels=256,
+ num_outs=4),
+ decode_head=dict(
+ type='FPNHead',
+ in_channels=[256, 256, 256, 256],
+ in_index=[0, 1, 2, 3],
+ feature_strides=[4, 8, 16, 32],
+ channels=128,
+ dropout_ratio=0.1,
+ num_classes=19,
+ norm_cfg=norm_cfg,
+ align_corners=False,
+ loss_decode=dict(
+ type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
+ # model training and testing settings
+ train_cfg=dict(),
+ test_cfg=dict(mode='whole'))
diff --git a/annotator/uniformer/configs/_base_/models/fpn_uniformer.py b/annotator/uniformer/configs/_base_/models/fpn_uniformer.py
new file mode 100644
index 0000000000000000000000000000000000000000..8aae98c5991055bfcc08e82ccdc09f8b1d9f8a8d
--- /dev/null
+++ b/annotator/uniformer/configs/_base_/models/fpn_uniformer.py
@@ -0,0 +1,35 @@
+# model settings
+norm_cfg = dict(type='SyncBN', requires_grad=True)
+model = dict(
+ type='EncoderDecoder',
+ backbone=dict(
+ type='UniFormer',
+ embed_dim=[64, 128, 320, 512],
+ layers=[3, 4, 8, 3],
+ head_dim=64,
+ mlp_ratio=4.,
+ qkv_bias=True,
+ drop_rate=0.,
+ attn_drop_rate=0.,
+ drop_path_rate=0.1),
+ neck=dict(
+ type='FPN',
+ in_channels=[64, 128, 320, 512],
+ out_channels=256,
+ num_outs=4),
+ decode_head=dict(
+ type='FPNHead',
+ in_channels=[256, 256, 256, 256],
+ in_index=[0, 1, 2, 3],
+ feature_strides=[4, 8, 16, 32],
+ channels=128,
+ dropout_ratio=0.1,
+ num_classes=150,
+ norm_cfg=norm_cfg,
+ align_corners=False,
+ loss_decode=dict(
+ type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
+ # model training and testing settings
+ train_cfg=dict(),
+ test_cfg=dict(mode='whole')
+)
diff --git a/annotator/uniformer/configs/_base_/models/gcnet_r50-d8.py b/annotator/uniformer/configs/_base_/models/gcnet_r50-d8.py
new file mode 100644
index 0000000000000000000000000000000000000000..3d2ad69f5c22adfe79d5fdabf920217628987166
--- /dev/null
+++ b/annotator/uniformer/configs/_base_/models/gcnet_r50-d8.py
@@ -0,0 +1,46 @@
+# model settings
+norm_cfg = dict(type='SyncBN', requires_grad=True)
+model = dict(
+ type='EncoderDecoder',
+ pretrained='open-mmlab://resnet50_v1c',
+ backbone=dict(
+ type='ResNetV1c',
+ depth=50,
+ num_stages=4,
+ out_indices=(0, 1, 2, 3),
+ dilations=(1, 1, 2, 4),
+ strides=(1, 2, 1, 1),
+ norm_cfg=norm_cfg,
+ norm_eval=False,
+ style='pytorch',
+ contract_dilation=True),
+ decode_head=dict(
+ type='GCHead',
+ in_channels=2048,
+ in_index=3,
+ channels=512,
+ ratio=1 / 4.,
+ pooling_type='att',
+ fusion_types=('channel_add', ),
+ dropout_ratio=0.1,
+ num_classes=19,
+ norm_cfg=norm_cfg,
+ align_corners=False,
+ loss_decode=dict(
+ type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
+ auxiliary_head=dict(
+ type='FCNHead',
+ in_channels=1024,
+ in_index=2,
+ channels=256,
+ num_convs=1,
+ concat_input=False,
+ dropout_ratio=0.1,
+ num_classes=19,
+ norm_cfg=norm_cfg,
+ align_corners=False,
+ loss_decode=dict(
+ type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
+ # model training and testing settings
+ train_cfg=dict(),
+ test_cfg=dict(mode='whole'))
diff --git a/annotator/uniformer/configs/_base_/models/lraspp_m-v3-d8.py b/annotator/uniformer/configs/_base_/models/lraspp_m-v3-d8.py
new file mode 100644
index 0000000000000000000000000000000000000000..93258242a90695cc94a7c6bd41562d6a75988771
--- /dev/null
+++ b/annotator/uniformer/configs/_base_/models/lraspp_m-v3-d8.py
@@ -0,0 +1,25 @@
+# model settings
+norm_cfg = dict(type='SyncBN', eps=0.001, requires_grad=True)
+model = dict(
+ type='EncoderDecoder',
+ backbone=dict(
+ type='MobileNetV3',
+ arch='large',
+ out_indices=(1, 3, 16),
+ norm_cfg=norm_cfg),
+ decode_head=dict(
+ type='LRASPPHead',
+ in_channels=(16, 24, 960),
+ in_index=(0, 1, 2),
+ channels=128,
+ input_transform='multiple_select',
+ dropout_ratio=0.1,
+ num_classes=19,
+ norm_cfg=norm_cfg,
+ act_cfg=dict(type='ReLU'),
+ align_corners=False,
+ loss_decode=dict(
+ type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
+ # model training and testing settings
+ train_cfg=dict(),
+ test_cfg=dict(mode='whole'))
diff --git a/annotator/uniformer/configs/_base_/models/nonlocal_r50-d8.py b/annotator/uniformer/configs/_base_/models/nonlocal_r50-d8.py
new file mode 100644
index 0000000000000000000000000000000000000000..5674a39854cafd1f2e363bac99c58ccae62f24da
--- /dev/null
+++ b/annotator/uniformer/configs/_base_/models/nonlocal_r50-d8.py
@@ -0,0 +1,46 @@
+# model settings
+norm_cfg = dict(type='SyncBN', requires_grad=True)
+model = dict(
+ type='EncoderDecoder',
+ pretrained='open-mmlab://resnet50_v1c',
+ backbone=dict(
+ type='ResNetV1c',
+ depth=50,
+ num_stages=4,
+ out_indices=(0, 1, 2, 3),
+ dilations=(1, 1, 2, 4),
+ strides=(1, 2, 1, 1),
+ norm_cfg=norm_cfg,
+ norm_eval=False,
+ style='pytorch',
+ contract_dilation=True),
+ decode_head=dict(
+ type='NLHead',
+ in_channels=2048,
+ in_index=3,
+ channels=512,
+ dropout_ratio=0.1,
+ reduction=2,
+ use_scale=True,
+ mode='embedded_gaussian',
+ num_classes=19,
+ norm_cfg=norm_cfg,
+ align_corners=False,
+ loss_decode=dict(
+ type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
+ auxiliary_head=dict(
+ type='FCNHead',
+ in_channels=1024,
+ in_index=2,
+ channels=256,
+ num_convs=1,
+ concat_input=False,
+ dropout_ratio=0.1,
+ num_classes=19,
+ norm_cfg=norm_cfg,
+ align_corners=False,
+ loss_decode=dict(
+ type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
+ # model training and testing settings
+ train_cfg=dict(),
+ test_cfg=dict(mode='whole'))
diff --git a/annotator/uniformer/configs/_base_/models/ocrnet_hr18.py b/annotator/uniformer/configs/_base_/models/ocrnet_hr18.py
new file mode 100644
index 0000000000000000000000000000000000000000..c60f62a7cdf3f5c5096a7a7e725e8268fddcb057
--- /dev/null
+++ b/annotator/uniformer/configs/_base_/models/ocrnet_hr18.py
@@ -0,0 +1,68 @@
+# model settings
+norm_cfg = dict(type='SyncBN', requires_grad=True)
+model = dict(
+ type='CascadeEncoderDecoder',
+ num_stages=2,
+ pretrained='open-mmlab://msra/hrnetv2_w18',
+ backbone=dict(
+ type='HRNet',
+ norm_cfg=norm_cfg,
+ norm_eval=False,
+ extra=dict(
+ stage1=dict(
+ num_modules=1,
+ num_branches=1,
+ block='BOTTLENECK',
+ num_blocks=(4, ),
+ num_channels=(64, )),
+ stage2=dict(
+ num_modules=1,
+ num_branches=2,
+ block='BASIC',
+ num_blocks=(4, 4),
+ num_channels=(18, 36)),
+ stage3=dict(
+ num_modules=4,
+ num_branches=3,
+ block='BASIC',
+ num_blocks=(4, 4, 4),
+ num_channels=(18, 36, 72)),
+ stage4=dict(
+ num_modules=3,
+ num_branches=4,
+ block='BASIC',
+ num_blocks=(4, 4, 4, 4),
+ num_channels=(18, 36, 72, 144)))),
+ decode_head=[
+ dict(
+ type='FCNHead',
+ in_channels=[18, 36, 72, 144],
+ channels=sum([18, 36, 72, 144]),
+ in_index=(0, 1, 2, 3),
+ input_transform='resize_concat',
+ kernel_size=1,
+ num_convs=1,
+ concat_input=False,
+ dropout_ratio=-1,
+ num_classes=19,
+ norm_cfg=norm_cfg,
+ align_corners=False,
+ loss_decode=dict(
+ type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
+ dict(
+ type='OCRHead',
+ in_channels=[18, 36, 72, 144],
+ in_index=(0, 1, 2, 3),
+ input_transform='resize_concat',
+ channels=512,
+ ocr_channels=256,
+ dropout_ratio=-1,
+ num_classes=19,
+ norm_cfg=norm_cfg,
+ align_corners=False,
+ loss_decode=dict(
+ type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
+ ],
+ # model training and testing settings
+ train_cfg=dict(),
+ test_cfg=dict(mode='whole'))
diff --git a/annotator/uniformer/configs/_base_/models/ocrnet_r50-d8.py b/annotator/uniformer/configs/_base_/models/ocrnet_r50-d8.py
new file mode 100644
index 0000000000000000000000000000000000000000..615aa3ff703942b6c22b2d6e9642504dd3e41ebd
--- /dev/null
+++ b/annotator/uniformer/configs/_base_/models/ocrnet_r50-d8.py
@@ -0,0 +1,47 @@
+# model settings
+norm_cfg = dict(type='SyncBN', requires_grad=True)
+model = dict(
+ type='CascadeEncoderDecoder',
+ num_stages=2,
+ pretrained='open-mmlab://resnet50_v1c',
+ backbone=dict(
+ type='ResNetV1c',
+ depth=50,
+ num_stages=4,
+ out_indices=(0, 1, 2, 3),
+ dilations=(1, 1, 2, 4),
+ strides=(1, 2, 1, 1),
+ norm_cfg=norm_cfg,
+ norm_eval=False,
+ style='pytorch',
+ contract_dilation=True),
+ decode_head=[
+ dict(
+ type='FCNHead',
+ in_channels=1024,
+ in_index=2,
+ channels=256,
+ num_convs=1,
+ concat_input=False,
+ dropout_ratio=0.1,
+ num_classes=19,
+ norm_cfg=norm_cfg,
+ align_corners=False,
+ loss_decode=dict(
+ type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
+ dict(
+ type='OCRHead',
+ in_channels=2048,
+ in_index=3,
+ channels=512,
+ ocr_channels=256,
+ dropout_ratio=0.1,
+ num_classes=19,
+ norm_cfg=norm_cfg,
+ align_corners=False,
+ loss_decode=dict(
+ type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0))
+ ],
+ # model training and testing settings
+ train_cfg=dict(),
+ test_cfg=dict(mode='whole'))
diff --git a/annotator/uniformer/configs/_base_/models/pointrend_r50.py b/annotator/uniformer/configs/_base_/models/pointrend_r50.py
new file mode 100644
index 0000000000000000000000000000000000000000..9d323dbf9466d41e0800aa57ef84045f3d874bdf
--- /dev/null
+++ b/annotator/uniformer/configs/_base_/models/pointrend_r50.py
@@ -0,0 +1,56 @@
+# model settings
+norm_cfg = dict(type='SyncBN', requires_grad=True)
+model = dict(
+ type='CascadeEncoderDecoder',
+ num_stages=2,
+ pretrained='open-mmlab://resnet50_v1c',
+ backbone=dict(
+ type='ResNetV1c',
+ depth=50,
+ num_stages=4,
+ out_indices=(0, 1, 2, 3),
+ dilations=(1, 1, 1, 1),
+ strides=(1, 2, 2, 2),
+ norm_cfg=norm_cfg,
+ norm_eval=False,
+ style='pytorch',
+ contract_dilation=True),
+ neck=dict(
+ type='FPN',
+ in_channels=[256, 512, 1024, 2048],
+ out_channels=256,
+ num_outs=4),
+ decode_head=[
+ dict(
+ type='FPNHead',
+ in_channels=[256, 256, 256, 256],
+ in_index=[0, 1, 2, 3],
+ feature_strides=[4, 8, 16, 32],
+ channels=128,
+ dropout_ratio=-1,
+ num_classes=19,
+ norm_cfg=norm_cfg,
+ align_corners=False,
+ loss_decode=dict(
+ type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
+ dict(
+ type='PointHead',
+ in_channels=[256],
+ in_index=[0],
+ channels=256,
+ num_fcs=3,
+ coarse_pred_each_layer=True,
+ dropout_ratio=-1,
+ num_classes=19,
+ align_corners=False,
+ loss_decode=dict(
+ type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0))
+ ],
+ # model training and testing settings
+ train_cfg=dict(
+ num_points=2048, oversample_ratio=3, importance_sample_ratio=0.75),
+ test_cfg=dict(
+ mode='whole',
+ subdivision_steps=2,
+ subdivision_num_points=8196,
+ scale_factor=2))
diff --git a/annotator/uniformer/configs/_base_/models/psanet_r50-d8.py b/annotator/uniformer/configs/_base_/models/psanet_r50-d8.py
new file mode 100644
index 0000000000000000000000000000000000000000..689513fa9d2a40f14bf0ae4ae61f38f0dcc1b3da
--- /dev/null
+++ b/annotator/uniformer/configs/_base_/models/psanet_r50-d8.py
@@ -0,0 +1,49 @@
+# model settings
+norm_cfg = dict(type='SyncBN', requires_grad=True)
+model = dict(
+ type='EncoderDecoder',
+ pretrained='open-mmlab://resnet50_v1c',
+ backbone=dict(
+ type='ResNetV1c',
+ depth=50,
+ num_stages=4,
+ out_indices=(0, 1, 2, 3),
+ dilations=(1, 1, 2, 4),
+ strides=(1, 2, 1, 1),
+ norm_cfg=norm_cfg,
+ norm_eval=False,
+ style='pytorch',
+ contract_dilation=True),
+ decode_head=dict(
+ type='PSAHead',
+ in_channels=2048,
+ in_index=3,
+ channels=512,
+ mask_size=(97, 97),
+ psa_type='bi-direction',
+ compact=False,
+ shrink_factor=2,
+ normalization_factor=1.0,
+ psa_softmax=True,
+ dropout_ratio=0.1,
+ num_classes=19,
+ norm_cfg=norm_cfg,
+ align_corners=False,
+ loss_decode=dict(
+ type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
+ auxiliary_head=dict(
+ type='FCNHead',
+ in_channels=1024,
+ in_index=2,
+ channels=256,
+ num_convs=1,
+ concat_input=False,
+ dropout_ratio=0.1,
+ num_classes=19,
+ norm_cfg=norm_cfg,
+ align_corners=False,
+ loss_decode=dict(
+ type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
+ # model training and testing settings
+ train_cfg=dict(),
+ test_cfg=dict(mode='whole'))
diff --git a/annotator/uniformer/configs/_base_/models/pspnet_r50-d8.py b/annotator/uniformer/configs/_base_/models/pspnet_r50-d8.py
new file mode 100644
index 0000000000000000000000000000000000000000..f451e08ad2eb0732dcb806b1851eb978d4acf136
--- /dev/null
+++ b/annotator/uniformer/configs/_base_/models/pspnet_r50-d8.py
@@ -0,0 +1,44 @@
+# model settings
+norm_cfg = dict(type='SyncBN', requires_grad=True)
+model = dict(
+ type='EncoderDecoder',
+ pretrained='open-mmlab://resnet50_v1c',
+ backbone=dict(
+ type='ResNetV1c',
+ depth=50,
+ num_stages=4,
+ out_indices=(0, 1, 2, 3),
+ dilations=(1, 1, 2, 4),
+ strides=(1, 2, 1, 1),
+ norm_cfg=norm_cfg,
+ norm_eval=False,
+ style='pytorch',
+ contract_dilation=True),
+ decode_head=dict(
+ type='PSPHead',
+ in_channels=2048,
+ in_index=3,
+ channels=512,
+ pool_scales=(1, 2, 3, 6),
+ dropout_ratio=0.1,
+ num_classes=19,
+ norm_cfg=norm_cfg,
+ align_corners=False,
+ loss_decode=dict(
+ type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
+ auxiliary_head=dict(
+ type='FCNHead',
+ in_channels=1024,
+ in_index=2,
+ channels=256,
+ num_convs=1,
+ concat_input=False,
+ dropout_ratio=0.1,
+ num_classes=19,
+ norm_cfg=norm_cfg,
+ align_corners=False,
+ loss_decode=dict(
+ type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
+ # model training and testing settings
+ train_cfg=dict(),
+ test_cfg=dict(mode='whole'))
diff --git a/annotator/uniformer/configs/_base_/models/pspnet_unet_s5-d16.py b/annotator/uniformer/configs/_base_/models/pspnet_unet_s5-d16.py
new file mode 100644
index 0000000000000000000000000000000000000000..fcff9ec4f41fad158344ecd77313dc14564f3682
--- /dev/null
+++ b/annotator/uniformer/configs/_base_/models/pspnet_unet_s5-d16.py
@@ -0,0 +1,50 @@
+# model settings
+norm_cfg = dict(type='SyncBN', requires_grad=True)
+model = dict(
+ type='EncoderDecoder',
+ pretrained=None,
+ backbone=dict(
+ type='UNet',
+ in_channels=3,
+ base_channels=64,
+ num_stages=5,
+ strides=(1, 1, 1, 1, 1),
+ enc_num_convs=(2, 2, 2, 2, 2),
+ dec_num_convs=(2, 2, 2, 2),
+ downsamples=(True, True, True, True),
+ enc_dilations=(1, 1, 1, 1, 1),
+ dec_dilations=(1, 1, 1, 1),
+ with_cp=False,
+ conv_cfg=None,
+ norm_cfg=norm_cfg,
+ act_cfg=dict(type='ReLU'),
+ upsample_cfg=dict(type='InterpConv'),
+ norm_eval=False),
+ decode_head=dict(
+ type='PSPHead',
+ in_channels=64,
+ in_index=4,
+ channels=16,
+ pool_scales=(1, 2, 3, 6),
+ dropout_ratio=0.1,
+ num_classes=2,
+ norm_cfg=norm_cfg,
+ align_corners=False,
+ loss_decode=dict(
+ type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
+ auxiliary_head=dict(
+ type='FCNHead',
+ in_channels=128,
+ in_index=3,
+ channels=64,
+ num_convs=1,
+ concat_input=False,
+ dropout_ratio=0.1,
+ num_classes=2,
+ norm_cfg=norm_cfg,
+ align_corners=False,
+ loss_decode=dict(
+ type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
+ # model training and testing settings
+ train_cfg=dict(),
+ test_cfg=dict(mode='slide', crop_size=256, stride=170))
diff --git a/annotator/uniformer/configs/_base_/models/upernet_r50.py b/annotator/uniformer/configs/_base_/models/upernet_r50.py
new file mode 100644
index 0000000000000000000000000000000000000000..10974962fdd7136031fd06de1700f497d355ceaa
--- /dev/null
+++ b/annotator/uniformer/configs/_base_/models/upernet_r50.py
@@ -0,0 +1,44 @@
+# model settings
+norm_cfg = dict(type='SyncBN', requires_grad=True)
+model = dict(
+ type='EncoderDecoder',
+ pretrained='open-mmlab://resnet50_v1c',
+ backbone=dict(
+ type='ResNetV1c',
+ depth=50,
+ num_stages=4,
+ out_indices=(0, 1, 2, 3),
+ dilations=(1, 1, 1, 1),
+ strides=(1, 2, 2, 2),
+ norm_cfg=norm_cfg,
+ norm_eval=False,
+ style='pytorch',
+ contract_dilation=True),
+ decode_head=dict(
+ type='UPerHead',
+ in_channels=[256, 512, 1024, 2048],
+ in_index=[0, 1, 2, 3],
+ pool_scales=(1, 2, 3, 6),
+ channels=512,
+ dropout_ratio=0.1,
+ num_classes=19,
+ norm_cfg=norm_cfg,
+ align_corners=False,
+ loss_decode=dict(
+ type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
+ auxiliary_head=dict(
+ type='FCNHead',
+ in_channels=1024,
+ in_index=2,
+ channels=256,
+ num_convs=1,
+ concat_input=False,
+ dropout_ratio=0.1,
+ num_classes=19,
+ norm_cfg=norm_cfg,
+ align_corners=False,
+ loss_decode=dict(
+ type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
+ # model training and testing settings
+ train_cfg=dict(),
+ test_cfg=dict(mode='whole'))
diff --git a/annotator/uniformer/configs/_base_/models/upernet_uniformer.py b/annotator/uniformer/configs/_base_/models/upernet_uniformer.py
new file mode 100644
index 0000000000000000000000000000000000000000..41aa4db809dc6e2c508e98051f61807d07477903
--- /dev/null
+++ b/annotator/uniformer/configs/_base_/models/upernet_uniformer.py
@@ -0,0 +1,43 @@
+# model settings
+norm_cfg = dict(type='BN', requires_grad=True)
+model = dict(
+ type='EncoderDecoder',
+ pretrained=None,
+ backbone=dict(
+ type='UniFormer',
+ embed_dim=[64, 128, 320, 512],
+ layers=[3, 4, 8, 3],
+ head_dim=64,
+ mlp_ratio=4.,
+ qkv_bias=True,
+ drop_rate=0.,
+ attn_drop_rate=0.,
+ drop_path_rate=0.1),
+ decode_head=dict(
+ type='UPerHead',
+ in_channels=[64, 128, 320, 512],
+ in_index=[0, 1, 2, 3],
+ pool_scales=(1, 2, 3, 6),
+ channels=512,
+ dropout_ratio=0.1,
+ num_classes=19,
+ norm_cfg=norm_cfg,
+ align_corners=False,
+ loss_decode=dict(
+ type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
+ auxiliary_head=dict(
+ type='FCNHead',
+ in_channels=320,
+ in_index=2,
+ channels=256,
+ num_convs=1,
+ concat_input=False,
+ dropout_ratio=0.1,
+ num_classes=19,
+ norm_cfg=norm_cfg,
+ align_corners=False,
+ loss_decode=dict(
+ type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
+ # model training and testing settings
+ train_cfg=dict(),
+ test_cfg=dict(mode='whole'))
\ No newline at end of file
diff --git a/annotator/uniformer/configs/_base_/schedules/schedule_160k.py b/annotator/uniformer/configs/_base_/schedules/schedule_160k.py
new file mode 100644
index 0000000000000000000000000000000000000000..52603890b10f25faf8eec9f9e5a4468fae09b811
--- /dev/null
+++ b/annotator/uniformer/configs/_base_/schedules/schedule_160k.py
@@ -0,0 +1,9 @@
+# optimizer
+optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005)
+optimizer_config = dict()
+# learning policy
+lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False)
+# runtime settings
+runner = dict(type='IterBasedRunner', max_iters=160000)
+checkpoint_config = dict(by_epoch=False, interval=16000)
+evaluation = dict(interval=16000, metric='mIoU')
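The `poly` policy in `lr_config` decays the learning rate from `lr` down to `min_lr` over `max_iters` with exponent `power`. A minimal sketch of that rule, assuming mmcv's standard poly formula (the helper below is illustrative only, not part of any config):

    # Illustrative sketch of the poly LR decay selected by lr_config above;
    # assumes mmcv's formula: coeff = (1 - iter / max_iters) ** power.
    def poly_lr(base_lr, cur_iter, max_iters=160000, power=0.9, min_lr=1e-4):
        coeff = (1.0 - cur_iter / max_iters) ** power
        return (base_lr - min_lr) * coeff + min_lr

    poly_lr(0.01, 0)        # 0.01 at the first iteration
    poly_lr(0.01, 80000)    # ~0.0054 half-way through
    poly_lr(0.01, 160000)   # 1e-4 (= min_lr) at the end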
diff --git a/annotator/uniformer/configs/_base_/schedules/schedule_20k.py b/annotator/uniformer/configs/_base_/schedules/schedule_20k.py
new file mode 100644
index 0000000000000000000000000000000000000000..bf780a1b6f6521833c6a5859675147824efa599d
--- /dev/null
+++ b/annotator/uniformer/configs/_base_/schedules/schedule_20k.py
@@ -0,0 +1,9 @@
+# optimizer
+optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005)
+optimizer_config = dict()
+# learning policy
+lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False)
+# runtime settings
+runner = dict(type='IterBasedRunner', max_iters=20000)
+checkpoint_config = dict(by_epoch=False, interval=2000)
+evaluation = dict(interval=2000, metric='mIoU')
diff --git a/annotator/uniformer/configs/_base_/schedules/schedule_40k.py b/annotator/uniformer/configs/_base_/schedules/schedule_40k.py
new file mode 100644
index 0000000000000000000000000000000000000000..cdbf841abcb26eed87bf76ab816aff4bae0630ee
--- /dev/null
+++ b/annotator/uniformer/configs/_base_/schedules/schedule_40k.py
@@ -0,0 +1,9 @@
+# optimizer
+optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005)
+optimizer_config = dict()
+# learning policy
+lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False)
+# runtime settings
+runner = dict(type='IterBasedRunner', max_iters=40000)
+checkpoint_config = dict(by_epoch=False, interval=4000)
+evaluation = dict(interval=4000, metric='mIoU')
diff --git a/annotator/uniformer/configs/_base_/schedules/schedule_80k.py b/annotator/uniformer/configs/_base_/schedules/schedule_80k.py
new file mode 100644
index 0000000000000000000000000000000000000000..c190cee6bdc7922b688ea75dc8f152fa15c24617
--- /dev/null
+++ b/annotator/uniformer/configs/_base_/schedules/schedule_80k.py
@@ -0,0 +1,9 @@
+# optimizer
+optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005)
+optimizer_config = dict()
+# learning policy
+lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False)
+# runtime settings
+runner = dict(type='IterBasedRunner', max_iters=80000)
+checkpoint_config = dict(by_epoch=False, interval=8000)
+evaluation = dict(interval=8000, metric='mIoU')
diff --git a/annotator/uniformer/exp/cascade_mask_rcnn_3x_ms_hybrid_base/config.py b/annotator/uniformer/exp/cascade_mask_rcnn_3x_ms_hybrid_base/config.py
new file mode 100644
index 0000000000000000000000000000000000000000..55f586d96db66a52054ac504f9a69080197560c9
--- /dev/null
+++ b/annotator/uniformer/exp/cascade_mask_rcnn_3x_ms_hybrid_base/config.py
@@ -0,0 +1,142 @@
+_base_ = [
+ '../../configs/_base_/models/cascade_mask_rcnn_uniformer_fpn.py',
+ '../../configs/_base_/datasets/coco_instance.py',
+ '../../configs/_base_/schedules/schedule_1x.py',
+ '../../configs/_base_/default_runtime.py'
+]
+
+model = dict(
+ backbone=dict(
+ embed_dim=[64, 128, 320, 512],
+ layers=[5, 8, 20, 7],
+ head_dim=64,
+ drop_path_rate=0.4,
+ use_checkpoint=True,
+ checkpoint_num=[0, 0, 20, 0],
+ windows=False,
+ hybrid=True,
+ window_size=14
+ ),
+ neck=dict(in_channels=[64, 128, 320, 512]),
+ roi_head=dict(
+ bbox_head=[
+ dict(
+ type='ConvFCBBoxHead',
+ num_shared_convs=4,
+ num_shared_fcs=1,
+ in_channels=256,
+ conv_out_channels=256,
+ fc_out_channels=1024,
+ roi_feat_size=7,
+ num_classes=80,
+ bbox_coder=dict(
+ type='DeltaXYWHBBoxCoder',
+ target_means=[0., 0., 0., 0.],
+ target_stds=[0.1, 0.1, 0.2, 0.2]),
+ reg_class_agnostic=False,
+ reg_decoded_bbox=True,
+ norm_cfg=dict(type='SyncBN', requires_grad=True),
+ loss_cls=dict(
+ type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
+ loss_bbox=dict(type='GIoULoss', loss_weight=10.0)),
+ dict(
+ type='ConvFCBBoxHead',
+ num_shared_convs=4,
+ num_shared_fcs=1,
+ in_channels=256,
+ conv_out_channels=256,
+ fc_out_channels=1024,
+ roi_feat_size=7,
+ num_classes=80,
+ bbox_coder=dict(
+ type='DeltaXYWHBBoxCoder',
+ target_means=[0., 0., 0., 0.],
+ target_stds=[0.05, 0.05, 0.1, 0.1]),
+ reg_class_agnostic=False,
+ reg_decoded_bbox=True,
+ norm_cfg=dict(type='SyncBN', requires_grad=True),
+ loss_cls=dict(
+ type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
+ loss_bbox=dict(type='GIoULoss', loss_weight=10.0)),
+ dict(
+ type='ConvFCBBoxHead',
+ num_shared_convs=4,
+ num_shared_fcs=1,
+ in_channels=256,
+ conv_out_channels=256,
+ fc_out_channels=1024,
+ roi_feat_size=7,
+ num_classes=80,
+ bbox_coder=dict(
+ type='DeltaXYWHBBoxCoder',
+ target_means=[0., 0., 0., 0.],
+ target_stds=[0.033, 0.033, 0.067, 0.067]),
+ reg_class_agnostic=False,
+ reg_decoded_bbox=True,
+ norm_cfg=dict(type='SyncBN', requires_grad=True),
+ loss_cls=dict(
+ type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
+ loss_bbox=dict(type='GIoULoss', loss_weight=10.0))
+ ]))
+
+img_norm_cfg = dict(
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+
+# augmentation strategy originates from DETR / Sparse RCNN
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
+ dict(type='RandomFlip', flip_ratio=0.5),
+ dict(type='AutoAugment',
+ policies=[
+ [
+ dict(type='Resize',
+ img_scale=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
+ (608, 1333), (640, 1333), (672, 1333), (704, 1333),
+ (736, 1333), (768, 1333), (800, 1333)],
+ multiscale_mode='value',
+ keep_ratio=True)
+ ],
+ [
+ dict(type='Resize',
+ img_scale=[(400, 1333), (500, 1333), (600, 1333)],
+ multiscale_mode='value',
+ keep_ratio=True),
+ dict(type='RandomCrop',
+ crop_type='absolute_range',
+ crop_size=(384, 600),
+ allow_negative_crop=True),
+ dict(type='Resize',
+ img_scale=[(480, 1333), (512, 1333), (544, 1333),
+ (576, 1333), (608, 1333), (640, 1333),
+ (672, 1333), (704, 1333), (736, 1333),
+ (768, 1333), (800, 1333)],
+ multiscale_mode='value',
+ override=True,
+ keep_ratio=True)
+ ]
+ ]),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='Pad', size_divisor=32),
+ dict(type='DefaultFormatBundle'),
+ dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
+]
+data = dict(train=dict(pipeline=train_pipeline))
+
+optimizer = dict(_delete_=True, type='AdamW', lr=0.0001, betas=(0.9, 0.999), weight_decay=0.05,
+ paramwise_cfg=dict(custom_keys={'absolute_pos_embed': dict(decay_mult=0.),
+ 'relative_position_bias_table': dict(decay_mult=0.),
+ 'norm': dict(decay_mult=0.)}))
+lr_config = dict(step=[27, 33])
+runner = dict(type='EpochBasedRunnerAmp', max_epochs=36)
+
+# do not use mmdet version fp16
+fp16 = None
+optimizer_config = dict(
+ type="DistOptimizerHook",
+ update_interval=1,
+ grad_clip=None,
+ coalesce=True,
+ bucket_size_mb=-1,
+ use_fp16=True,
+)
diff --git a/annotator/uniformer/exp/cascade_mask_rcnn_3x_ms_hybrid_base/run.sh b/annotator/uniformer/exp/cascade_mask_rcnn_3x_ms_hybrid_base/run.sh
new file mode 100644
index 0000000000000000000000000000000000000000..453f0a0a27d04f08558ec1b03312f7815ca991da
--- /dev/null
+++ b/annotator/uniformer/exp/cascade_mask_rcnn_3x_ms_hybrid_base/run.sh
@@ -0,0 +1,10 @@
+#!/usr/bin/env bash
+
+work_path=$(dirname $0)
+PYTHONPATH="$(dirname $0)/../../":$PYTHONPATH \
+python -m torch.distributed.launch --nproc_per_node=8 \
+ tools/train.py ${work_path}/config.py \
+ --launcher pytorch \
+ --cfg-options model.backbone.pretrained_path='your_model_path/uniformer_base_in1k.pth' \
+ --work-dir ${work_path}/ckpt \
+ 2>&1 | tee -a ${work_path}/log.txt
diff --git a/annotator/uniformer/exp/upernet_global_small/config.py b/annotator/uniformer/exp/upernet_global_small/config.py
new file mode 100644
index 0000000000000000000000000000000000000000..01db96bf9b0be531aa0eaf62fee51543712f8670
--- /dev/null
+++ b/annotator/uniformer/exp/upernet_global_small/config.py
@@ -0,0 +1,38 @@
+_base_ = [
+ '../../configs/_base_/models/upernet_uniformer.py',
+ '../../configs/_base_/datasets/ade20k.py',
+ '../../configs/_base_/default_runtime.py',
+ '../../configs/_base_/schedules/schedule_160k.py'
+]
+model = dict(
+ backbone=dict(
+ type='UniFormer',
+ embed_dim=[64, 128, 320, 512],
+ layers=[3, 4, 8, 3],
+ head_dim=64,
+ drop_path_rate=0.25,
+ windows=False,
+ hybrid=False
+ ),
+ decode_head=dict(
+ in_channels=[64, 128, 320, 512],
+ num_classes=150
+ ),
+ auxiliary_head=dict(
+ in_channels=320,
+ num_classes=150
+ ))
+
+# AdamW optimizer, no weight decay for position embedding & layer norm in backbone
+optimizer = dict(_delete_=True, type='AdamW', lr=0.00006, betas=(0.9, 0.999), weight_decay=0.01,
+ paramwise_cfg=dict(custom_keys={'absolute_pos_embed': dict(decay_mult=0.),
+ 'relative_position_bias_table': dict(decay_mult=0.),
+ 'norm': dict(decay_mult=0.)}))
+
+lr_config = dict(_delete_=True, policy='poly',
+ warmup='linear',
+ warmup_iters=1500,
+ warmup_ratio=1e-6,
+ power=1.0, min_lr=0.0, by_epoch=False)
+
+data=dict(samples_per_gpu=2)
\ No newline at end of file
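This experiment config only overrides what differs from its `_base_` files; `_delete_=True` asks mmcv's config merge to replace the base `optimizer`/`lr_config` outright instead of merging fields into them. A small sketch of inspecting the merged result, assuming the vendored mmcv is importable and all referenced `_base_` files are present:

    from annotator.uniformer.mmcv import Config

    # Config.fromfile resolves the _base_ chain and merges the dicts,
    # honoring _delete_=True for optimizer and lr_config.
    cfg = Config.fromfile('annotator/uniformer/exp/upernet_global_small/config.py')
    print(cfg.model.backbone.type)   # 'UniFormer'
    print(cfg.optimizer.type)        # 'AdamW' (base SGD replaced, not merged)
    print(cfg.data.samples_per_gpu)  # 2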
diff --git a/annotator/uniformer/exp/upernet_global_small/run.sh b/annotator/uniformer/exp/upernet_global_small/run.sh
new file mode 100644
index 0000000000000000000000000000000000000000..9fb22edfa7a32624ea08a63fe7d720c40db3b696
--- /dev/null
+++ b/annotator/uniformer/exp/upernet_global_small/run.sh
@@ -0,0 +1,10 @@
+#!/usr/bin/env bash
+
+work_path=$(dirname $0)
+PYTHONPATH="$(dirname $0)/../../":$PYTHONPATH \
+python -m torch.distributed.launch --nproc_per_node=8 \
+ tools/train.py ${work_path}/config.py \
+ --launcher pytorch \
+ --options model.backbone.pretrained_path='your_model_path/uniformer_small_in1k.pth' \
+ --work-dir ${work_path}/ckpt \
+ 2>&1 | tee -a ${work_path}/log.txt
diff --git a/annotator/uniformer/exp/upernet_global_small/test.sh b/annotator/uniformer/exp/upernet_global_small/test.sh
new file mode 100644
index 0000000000000000000000000000000000000000..d9a85e7a0d3b7c96b060f473d41254b37a382fcb
--- /dev/null
+++ b/annotator/uniformer/exp/upernet_global_small/test.sh
@@ -0,0 +1,10 @@
+#!/usr/bin/env bash
+
+work_path=$(dirname $0)
+PYTHONPATH="$(dirname $0)/../../":$PYTHONPATH \
+python -m torch.distributed.launch --nproc_per_node=8 \
+ tools/test.py ${work_path}/test_config_h32.py \
+ ${work_path}/ckpt/latest.pth \
+ --launcher pytorch \
+ --eval mIoU \
+ 2>&1 | tee -a ${work_path}/log.txt
diff --git a/annotator/uniformer/exp/upernet_global_small/test_config_g.py b/annotator/uniformer/exp/upernet_global_small/test_config_g.py
new file mode 100644
index 0000000000000000000000000000000000000000..e43737a98a3b174a9f2fe059c06d511144686459
--- /dev/null
+++ b/annotator/uniformer/exp/upernet_global_small/test_config_g.py
@@ -0,0 +1,38 @@
+_base_ = [
+ '../../configs/_base_/models/upernet_uniformer.py',
+ '../../configs/_base_/datasets/ade20k.py',
+ '../../configs/_base_/default_runtime.py',
+ '../../configs/_base_/schedules/schedule_160k.py'
+]
+model = dict(
+ backbone=dict(
+ type='UniFormer',
+ embed_dim=[64, 128, 320, 512],
+ layers=[3, 4, 8, 3],
+ head_dim=64,
+ drop_path_rate=0.25,
+ windows=False,
+ hybrid=False,
+ ),
+ decode_head=dict(
+ in_channels=[64, 128, 320, 512],
+ num_classes=150
+ ),
+ auxiliary_head=dict(
+ in_channels=320,
+ num_classes=150
+ ))
+
+# AdamW optimizer, no weight decay for position embedding & layer norm in backbone
+optimizer = dict(_delete_=True, type='AdamW', lr=0.00006, betas=(0.9, 0.999), weight_decay=0.01,
+ paramwise_cfg=dict(custom_keys={'absolute_pos_embed': dict(decay_mult=0.),
+ 'relative_position_bias_table': dict(decay_mult=0.),
+ 'norm': dict(decay_mult=0.)}))
+
+lr_config = dict(_delete_=True, policy='poly',
+ warmup='linear',
+ warmup_iters=1500,
+ warmup_ratio=1e-6,
+ power=1.0, min_lr=0.0, by_epoch=False)
+
+data=dict(samples_per_gpu=2)
\ No newline at end of file
diff --git a/annotator/uniformer/exp/upernet_global_small/test_config_h32.py b/annotator/uniformer/exp/upernet_global_small/test_config_h32.py
new file mode 100644
index 0000000000000000000000000000000000000000..a31e3874f76f9f7b089ac8834d85df2441af9b0e
--- /dev/null
+++ b/annotator/uniformer/exp/upernet_global_small/test_config_h32.py
@@ -0,0 +1,39 @@
+_base_ = [
+ '../../configs/_base_/models/upernet_uniformer.py',
+ '../../configs/_base_/datasets/ade20k.py',
+ '../../configs/_base_/default_runtime.py',
+ '../../configs/_base_/schedules/schedule_160k.py'
+]
+model = dict(
+ backbone=dict(
+ type='UniFormer',
+ embed_dim=[64, 128, 320, 512],
+ layers=[3, 4, 8, 3],
+ head_dim=64,
+ drop_path_rate=0.25,
+ windows=False,
+ hybrid=True,
+ window_size=32
+ ),
+ decode_head=dict(
+ in_channels=[64, 128, 320, 512],
+ num_classes=150
+ ),
+ auxiliary_head=dict(
+ in_channels=320,
+ num_classes=150
+ ))
+
+# AdamW optimizer, no weight decay for position embedding & layer norm in backbone
+optimizer = dict(_delete_=True, type='AdamW', lr=0.00006, betas=(0.9, 0.999), weight_decay=0.01,
+ paramwise_cfg=dict(custom_keys={'absolute_pos_embed': dict(decay_mult=0.),
+ 'relative_position_bias_table': dict(decay_mult=0.),
+ 'norm': dict(decay_mult=0.)}))
+
+lr_config = dict(_delete_=True, policy='poly',
+ warmup='linear',
+ warmup_iters=1500,
+ warmup_ratio=1e-6,
+ power=1.0, min_lr=0.0, by_epoch=False)
+
+data=dict(samples_per_gpu=2)
\ No newline at end of file
diff --git a/annotator/uniformer/exp/upernet_global_small/test_config_w32.py b/annotator/uniformer/exp/upernet_global_small/test_config_w32.py
new file mode 100644
index 0000000000000000000000000000000000000000..3d9e06f029e46c14cb9ddb39319cabe86fef9b44
--- /dev/null
+++ b/annotator/uniformer/exp/upernet_global_small/test_config_w32.py
@@ -0,0 +1,39 @@
+_base_ = [
+ '../../configs/_base_/models/upernet_uniformer.py',
+ '../../configs/_base_/datasets/ade20k.py',
+ '../../configs/_base_/default_runtime.py',
+ '../../configs/_base_/schedules/schedule_160k.py'
+]
+model = dict(
+ backbone=dict(
+ type='UniFormer',
+ embed_dim=[64, 128, 320, 512],
+ layers=[3, 4, 8, 3],
+ head_dim=64,
+ drop_path_rate=0.25,
+ windows=True,
+ hybrid=False,
+ window_size=32
+ ),
+ decode_head=dict(
+ in_channels=[64, 128, 320, 512],
+ num_classes=150
+ ),
+ auxiliary_head=dict(
+ in_channels=320,
+ num_classes=150
+ ))
+
+# AdamW optimizer, no weight decay for position embedding & layer norm in backbone
+optimizer = dict(_delete_=True, type='AdamW', lr=0.00006, betas=(0.9, 0.999), weight_decay=0.01,
+ paramwise_cfg=dict(custom_keys={'absolute_pos_embed': dict(decay_mult=0.),
+ 'relative_position_bias_table': dict(decay_mult=0.),
+ 'norm': dict(decay_mult=0.)}))
+
+lr_config = dict(_delete_=True, policy='poly',
+ warmup='linear',
+ warmup_iters=1500,
+ warmup_ratio=1e-6,
+ power=1.0, min_lr=0.0, by_epoch=False)
+
+data=dict(samples_per_gpu=2)
\ No newline at end of file
diff --git a/annotator/uniformer/mmcv/__init__.py b/annotator/uniformer/mmcv/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..210a2989138380559f23045b568d0fbbeb918c03
--- /dev/null
+++ b/annotator/uniformer/mmcv/__init__.py
@@ -0,0 +1,15 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+# flake8: noqa
+from .arraymisc import *
+from .fileio import *
+from .image import *
+from .utils import *
+from .version import *
+from .video import *
+from .visualization import *
+
+# The following modules are not imported to this level, so mmcv may be used
+# without PyTorch.
+# - runner
+# - parallel
+# - op
diff --git a/annotator/uniformer/mmcv/__pycache__/__init__.cpython-310.pyc b/annotator/uniformer/mmcv/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..213872f52d2ef5f18d1fba7736c20cc5bf0c48fd
Binary files /dev/null and b/annotator/uniformer/mmcv/__pycache__/__init__.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/__pycache__/__init__.cpython-38.pyc b/annotator/uniformer/mmcv/__pycache__/__init__.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1a5e6f85d6a29f7f3eb598cdad46629e029233de
Binary files /dev/null and b/annotator/uniformer/mmcv/__pycache__/__init__.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/__pycache__/version.cpython-310.pyc b/annotator/uniformer/mmcv/__pycache__/version.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..626ff42994266ca73ef49e9975c04aea16348afc
Binary files /dev/null and b/annotator/uniformer/mmcv/__pycache__/version.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/__pycache__/version.cpython-38.pyc b/annotator/uniformer/mmcv/__pycache__/version.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a048ddf5182153d44295602269963096254ada48
Binary files /dev/null and b/annotator/uniformer/mmcv/__pycache__/version.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/arraymisc/__init__.py b/annotator/uniformer/mmcv/arraymisc/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..4b4700d6139ae3d604ff6e542468cce4200c020c
--- /dev/null
+++ b/annotator/uniformer/mmcv/arraymisc/__init__.py
@@ -0,0 +1,4 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from .quantization import dequantize, quantize
+
+__all__ = ['quantize', 'dequantize']
diff --git a/annotator/uniformer/mmcv/arraymisc/__pycache__/__init__.cpython-310.pyc b/annotator/uniformer/mmcv/arraymisc/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9c5924e0e06e28ca070aa88a0cd2d2c5c78fd612
Binary files /dev/null and b/annotator/uniformer/mmcv/arraymisc/__pycache__/__init__.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/arraymisc/__pycache__/__init__.cpython-38.pyc b/annotator/uniformer/mmcv/arraymisc/__pycache__/__init__.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c0081e5af85e552236551d235d19a7d4d221ff35
Binary files /dev/null and b/annotator/uniformer/mmcv/arraymisc/__pycache__/__init__.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/arraymisc/__pycache__/quantization.cpython-310.pyc b/annotator/uniformer/mmcv/arraymisc/__pycache__/quantization.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..32e1cfb51b7fc7e7d9928f8d14bf999eedb782c3
Binary files /dev/null and b/annotator/uniformer/mmcv/arraymisc/__pycache__/quantization.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/arraymisc/__pycache__/quantization.cpython-38.pyc b/annotator/uniformer/mmcv/arraymisc/__pycache__/quantization.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e6b58b4e8d1ad53e5a501d029a2512c07084adc9
Binary files /dev/null and b/annotator/uniformer/mmcv/arraymisc/__pycache__/quantization.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/arraymisc/quantization.py b/annotator/uniformer/mmcv/arraymisc/quantization.py
new file mode 100644
index 0000000000000000000000000000000000000000..8e47a3545780cf071a1ef8195efb0b7b662c8186
--- /dev/null
+++ b/annotator/uniformer/mmcv/arraymisc/quantization.py
@@ -0,0 +1,55 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import numpy as np
+
+
+def quantize(arr, min_val, max_val, levels, dtype=np.int64):
+ """Quantize an array of (-inf, inf) to [0, levels-1].
+
+ Args:
+ arr (ndarray): Input array.
+ min_val (scalar): Minimum value to be clipped.
+ max_val (scalar): Maximum value to be clipped.
+ levels (int): Quantization levels.
+ dtype (np.type): The type of the quantized array.
+
+ Returns:
+ ndarray: Quantized array.
+ """
+ if not (isinstance(levels, int) and levels > 1):
+ raise ValueError(
+ f'levels must be a positive integer, but got {levels}')
+ if min_val >= max_val:
+ raise ValueError(
+ f'min_val ({min_val}) must be smaller than max_val ({max_val})')
+
+ arr = np.clip(arr, min_val, max_val) - min_val
+ quantized_arr = np.minimum(
+ np.floor(levels * arr / (max_val - min_val)).astype(dtype), levels - 1)
+
+ return quantized_arr
+
+
+def dequantize(arr, min_val, max_val, levels, dtype=np.float64):
+ """Dequantize an array.
+
+ Args:
+ arr (ndarray): Input array.
+ min_val (scalar): Minimum value to be clipped.
+ max_val (scalar): Maximum value to be clipped.
+ levels (int): Quantization levels.
+ dtype (np.type): The type of the dequantized array.
+
+ Returns:
+ ndarray: Dequantized array.
+ """
+ if not (isinstance(levels, int) and levels > 1):
+ raise ValueError(
+ f'levels must be a positive integer, but got {levels}')
+ if min_val >= max_val:
+ raise ValueError(
+ f'min_val ({min_val}) must be smaller than max_val ({max_val})')
+
+ dequantized_arr = (arr + 0.5).astype(dtype) * (max_val -
+ min_val) / levels + min_val
+
+ return dequantized_arr
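`quantize` maps values clipped to `[min_val, max_val]` onto integer bins `0..levels-1`, and `dequantize` maps bin indices back to bin centers, so a round trip is off by at most half a bin width. A minimal usage sketch, assuming this vendored package is on the import path:

    import numpy as np
    from annotator.uniformer.mmcv.arraymisc import dequantize, quantize

    flow = np.array([-0.9, -0.05, 0.0, 0.42, 0.97])
    q = quantize(flow, min_val=-1.0, max_val=1.0, levels=16)    # ints in [0, 15]
    rec = dequantize(q, min_val=-1.0, max_val=1.0, levels=16)   # bin centers

    # Error is bounded by half the bin width: (max_val - min_val) / levels / 2 = 0.0625
    assert np.all(np.abs(rec - flow) <= 0.0625 + 1e-9)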
diff --git a/annotator/uniformer/mmcv/cnn/__init__.py b/annotator/uniformer/mmcv/cnn/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..7246c897430f0cc7ce12719ad8608824fc734446
--- /dev/null
+++ b/annotator/uniformer/mmcv/cnn/__init__.py
@@ -0,0 +1,41 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from .alexnet import AlexNet
+# yapf: disable
+from .bricks import (ACTIVATION_LAYERS, CONV_LAYERS, NORM_LAYERS,
+ PADDING_LAYERS, PLUGIN_LAYERS, UPSAMPLE_LAYERS,
+ ContextBlock, Conv2d, Conv3d, ConvAWS2d, ConvModule,
+ ConvTranspose2d, ConvTranspose3d, ConvWS2d,
+ DepthwiseSeparableConvModule, GeneralizedAttention,
+ HSigmoid, HSwish, Linear, MaxPool2d, MaxPool3d,
+ NonLocal1d, NonLocal2d, NonLocal3d, Scale, Swish,
+ build_activation_layer, build_conv_layer,
+ build_norm_layer, build_padding_layer, build_plugin_layer,
+ build_upsample_layer, conv_ws_2d, is_norm)
+from .builder import MODELS, build_model_from_cfg
+# yapf: enable
+from .resnet import ResNet, make_res_layer
+from .utils import (INITIALIZERS, Caffe2XavierInit, ConstantInit, KaimingInit,
+ NormalInit, PretrainedInit, TruncNormalInit, UniformInit,
+ XavierInit, bias_init_with_prob, caffe2_xavier_init,
+ constant_init, fuse_conv_bn, get_model_complexity_info,
+ initialize, kaiming_init, normal_init, trunc_normal_init,
+ uniform_init, xavier_init)
+from .vgg import VGG, make_vgg_layer
+
+__all__ = [
+ 'AlexNet', 'VGG', 'make_vgg_layer', 'ResNet', 'make_res_layer',
+ 'constant_init', 'xavier_init', 'normal_init', 'trunc_normal_init',
+ 'uniform_init', 'kaiming_init', 'caffe2_xavier_init',
+ 'bias_init_with_prob', 'ConvModule', 'build_activation_layer',
+ 'build_conv_layer', 'build_norm_layer', 'build_padding_layer',
+ 'build_upsample_layer', 'build_plugin_layer', 'is_norm', 'NonLocal1d',
+ 'NonLocal2d', 'NonLocal3d', 'ContextBlock', 'HSigmoid', 'Swish', 'HSwish',
+ 'GeneralizedAttention', 'ACTIVATION_LAYERS', 'CONV_LAYERS', 'NORM_LAYERS',
+ 'PADDING_LAYERS', 'UPSAMPLE_LAYERS', 'PLUGIN_LAYERS', 'Scale',
+ 'get_model_complexity_info', 'conv_ws_2d', 'ConvAWS2d', 'ConvWS2d',
+ 'fuse_conv_bn', 'DepthwiseSeparableConvModule', 'Linear', 'Conv2d',
+ 'ConvTranspose2d', 'MaxPool2d', 'ConvTranspose3d', 'MaxPool3d', 'Conv3d',
+ 'initialize', 'INITIALIZERS', 'ConstantInit', 'XavierInit', 'NormalInit',
+ 'TruncNormalInit', 'UniformInit', 'KaimingInit', 'PretrainedInit',
+ 'Caffe2XavierInit', 'MODELS', 'build_model_from_cfg'
+]
diff --git a/annotator/uniformer/mmcv/cnn/__pycache__/__init__.cpython-310.pyc b/annotator/uniformer/mmcv/cnn/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b25d4fb75207105e2e8b1a2e49635658baa8ac10
Binary files /dev/null and b/annotator/uniformer/mmcv/cnn/__pycache__/__init__.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/cnn/__pycache__/__init__.cpython-38.pyc b/annotator/uniformer/mmcv/cnn/__pycache__/__init__.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fedb706d6bda7069868d84fa3a91fdddede9d03a
Binary files /dev/null and b/annotator/uniformer/mmcv/cnn/__pycache__/__init__.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/cnn/__pycache__/alexnet.cpython-310.pyc b/annotator/uniformer/mmcv/cnn/__pycache__/alexnet.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a29516d6e386d14a756b41101f2cdec8700a0bd9
Binary files /dev/null and b/annotator/uniformer/mmcv/cnn/__pycache__/alexnet.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/cnn/__pycache__/alexnet.cpython-38.pyc b/annotator/uniformer/mmcv/cnn/__pycache__/alexnet.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..60a03b6d8f8c4c1b293b945b0457a4700767b203
Binary files /dev/null and b/annotator/uniformer/mmcv/cnn/__pycache__/alexnet.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/cnn/__pycache__/builder.cpython-310.pyc b/annotator/uniformer/mmcv/cnn/__pycache__/builder.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c0af78f59d282b6b728326078371b87bfcf4d823
Binary files /dev/null and b/annotator/uniformer/mmcv/cnn/__pycache__/builder.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/cnn/__pycache__/builder.cpython-38.pyc b/annotator/uniformer/mmcv/cnn/__pycache__/builder.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9cd895137956e61b0dbc8439d715289c3ae441a4
Binary files /dev/null and b/annotator/uniformer/mmcv/cnn/__pycache__/builder.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/cnn/__pycache__/resnet.cpython-310.pyc b/annotator/uniformer/mmcv/cnn/__pycache__/resnet.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..620f1981d5d89b741d5f1e0499904db872cbb532
Binary files /dev/null and b/annotator/uniformer/mmcv/cnn/__pycache__/resnet.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/cnn/__pycache__/resnet.cpython-38.pyc b/annotator/uniformer/mmcv/cnn/__pycache__/resnet.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..dc4ae748b18481e319facc2861dbfa5fe15a6bfc
Binary files /dev/null and b/annotator/uniformer/mmcv/cnn/__pycache__/resnet.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/cnn/__pycache__/vgg.cpython-310.pyc b/annotator/uniformer/mmcv/cnn/__pycache__/vgg.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0c243a214d990531bd0ad4e8760c021a8912180b
Binary files /dev/null and b/annotator/uniformer/mmcv/cnn/__pycache__/vgg.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/cnn/__pycache__/vgg.cpython-38.pyc b/annotator/uniformer/mmcv/cnn/__pycache__/vgg.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c5ac9874eb7a9ab068c51084e4ed2a378d345628
Binary files /dev/null and b/annotator/uniformer/mmcv/cnn/__pycache__/vgg.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/cnn/alexnet.py b/annotator/uniformer/mmcv/cnn/alexnet.py
new file mode 100644
index 0000000000000000000000000000000000000000..89e36b8c7851f895d9ae7f07149f0e707456aab0
--- /dev/null
+++ b/annotator/uniformer/mmcv/cnn/alexnet.py
@@ -0,0 +1,61 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import logging
+
+import torch.nn as nn
+
+
+class AlexNet(nn.Module):
+ """AlexNet backbone.
+
+ Args:
+ num_classes (int): number of classes for classification.
+ """
+
+ def __init__(self, num_classes=-1):
+ super(AlexNet, self).__init__()
+ self.num_classes = num_classes
+ self.features = nn.Sequential(
+ nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2),
+ nn.ReLU(inplace=True),
+ nn.MaxPool2d(kernel_size=3, stride=2),
+ nn.Conv2d(64, 192, kernel_size=5, padding=2),
+ nn.ReLU(inplace=True),
+ nn.MaxPool2d(kernel_size=3, stride=2),
+ nn.Conv2d(192, 384, kernel_size=3, padding=1),
+ nn.ReLU(inplace=True),
+ nn.Conv2d(384, 256, kernel_size=3, padding=1),
+ nn.ReLU(inplace=True),
+ nn.Conv2d(256, 256, kernel_size=3, padding=1),
+ nn.ReLU(inplace=True),
+ nn.MaxPool2d(kernel_size=3, stride=2),
+ )
+ if self.num_classes > 0:
+ self.classifier = nn.Sequential(
+ nn.Dropout(),
+ nn.Linear(256 * 6 * 6, 4096),
+ nn.ReLU(inplace=True),
+ nn.Dropout(),
+ nn.Linear(4096, 4096),
+ nn.ReLU(inplace=True),
+ nn.Linear(4096, num_classes),
+ )
+
+ def init_weights(self, pretrained=None):
+ if isinstance(pretrained, str):
+ logger = logging.getLogger()
+ from ..runner import load_checkpoint
+ load_checkpoint(self, pretrained, strict=False, logger=logger)
+ elif pretrained is None:
+ # use default initializer
+ pass
+ else:
+ raise TypeError('pretrained must be a str or None')
+
+ def forward(self, x):
+
+ x = self.features(x)
+ if self.num_classes > 0:
+ x = x.view(x.size(0), 256 * 6 * 6)
+ x = self.classifier(x)
+
+ return x
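The classifier branch is only built when `num_classes > 0`, and its first `Linear` expects the 256*6*6 feature map that a 224x224 input produces. A quick shape check, assuming PyTorch is installed and the vendored package is importable:

    import torch
    from annotator.uniformer.mmcv.cnn import AlexNet

    model = AlexNet(num_classes=10)   # num_classes <= 0 skips the classifier
    model.init_weights()              # pretrained=None keeps the default init
    model.eval()
    with torch.no_grad():
        logits = model(torch.randn(1, 3, 224, 224))
    print(logits.shape)               # torch.Size([1, 10])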
diff --git a/annotator/uniformer/mmcv/cnn/bricks/__init__.py b/annotator/uniformer/mmcv/cnn/bricks/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..0f33124ed23fc6f27119a37bcb5ab004d3572be0
--- /dev/null
+++ b/annotator/uniformer/mmcv/cnn/bricks/__init__.py
@@ -0,0 +1,35 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from .activation import build_activation_layer
+from .context_block import ContextBlock
+from .conv import build_conv_layer
+from .conv2d_adaptive_padding import Conv2dAdaptivePadding
+from .conv_module import ConvModule
+from .conv_ws import ConvAWS2d, ConvWS2d, conv_ws_2d
+from .depthwise_separable_conv_module import DepthwiseSeparableConvModule
+from .drop import Dropout, DropPath
+from .generalized_attention import GeneralizedAttention
+from .hsigmoid import HSigmoid
+from .hswish import HSwish
+from .non_local import NonLocal1d, NonLocal2d, NonLocal3d
+from .norm import build_norm_layer, is_norm
+from .padding import build_padding_layer
+from .plugin import build_plugin_layer
+from .registry import (ACTIVATION_LAYERS, CONV_LAYERS, NORM_LAYERS,
+ PADDING_LAYERS, PLUGIN_LAYERS, UPSAMPLE_LAYERS)
+from .scale import Scale
+from .swish import Swish
+from .upsample import build_upsample_layer
+from .wrappers import (Conv2d, Conv3d, ConvTranspose2d, ConvTranspose3d,
+ Linear, MaxPool2d, MaxPool3d)
+
+__all__ = [
+ 'ConvModule', 'build_activation_layer', 'build_conv_layer',
+ 'build_norm_layer', 'build_padding_layer', 'build_upsample_layer',
+ 'build_plugin_layer', 'is_norm', 'HSigmoid', 'HSwish', 'NonLocal1d',
+ 'NonLocal2d', 'NonLocal3d', 'ContextBlock', 'GeneralizedAttention',
+ 'ACTIVATION_LAYERS', 'CONV_LAYERS', 'NORM_LAYERS', 'PADDING_LAYERS',
+ 'UPSAMPLE_LAYERS', 'PLUGIN_LAYERS', 'Scale', 'ConvAWS2d', 'ConvWS2d',
+ 'conv_ws_2d', 'DepthwiseSeparableConvModule', 'Swish', 'Linear',
+ 'Conv2dAdaptivePadding', 'Conv2d', 'ConvTranspose2d', 'MaxPool2d',
+ 'ConvTranspose3d', 'MaxPool3d', 'Conv3d', 'Dropout', 'DropPath'
+]
diff --git a/annotator/uniformer/mmcv/cnn/bricks/__pycache__/__init__.cpython-310.pyc b/annotator/uniformer/mmcv/cnn/bricks/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..67580c99539f40adba74508288d4e08f5ec34a38
Binary files /dev/null and b/annotator/uniformer/mmcv/cnn/bricks/__pycache__/__init__.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/cnn/bricks/__pycache__/__init__.cpython-38.pyc b/annotator/uniformer/mmcv/cnn/bricks/__pycache__/__init__.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9eeb4c837a85665d2cb73e954393d83187510c58
Binary files /dev/null and b/annotator/uniformer/mmcv/cnn/bricks/__pycache__/__init__.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/cnn/bricks/__pycache__/activation.cpython-310.pyc b/annotator/uniformer/mmcv/cnn/bricks/__pycache__/activation.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1fcb6b318d034afabcac46864daf6518c83fec4e
Binary files /dev/null and b/annotator/uniformer/mmcv/cnn/bricks/__pycache__/activation.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/cnn/bricks/__pycache__/activation.cpython-38.pyc b/annotator/uniformer/mmcv/cnn/bricks/__pycache__/activation.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b48edca46fa991f9620828c4d2f1b22c6d508370
Binary files /dev/null and b/annotator/uniformer/mmcv/cnn/bricks/__pycache__/activation.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/cnn/bricks/__pycache__/context_block.cpython-310.pyc b/annotator/uniformer/mmcv/cnn/bricks/__pycache__/context_block.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7a9d13b5c9803aa79e3d95a94b7bbd6f4c913fcb
Binary files /dev/null and b/annotator/uniformer/mmcv/cnn/bricks/__pycache__/context_block.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/cnn/bricks/__pycache__/context_block.cpython-38.pyc b/annotator/uniformer/mmcv/cnn/bricks/__pycache__/context_block.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0c7bfd77c4e3e68fe2d6664d4902b58c2494b181
Binary files /dev/null and b/annotator/uniformer/mmcv/cnn/bricks/__pycache__/context_block.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/cnn/bricks/__pycache__/conv.cpython-310.pyc b/annotator/uniformer/mmcv/cnn/bricks/__pycache__/conv.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ffe8787b7b5f7820dab82fd0d6d79ceb3ec97082
Binary files /dev/null and b/annotator/uniformer/mmcv/cnn/bricks/__pycache__/conv.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/cnn/bricks/__pycache__/conv.cpython-38.pyc b/annotator/uniformer/mmcv/cnn/bricks/__pycache__/conv.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..67aee74e9710addb203880e5b3d41ad298857fa2
Binary files /dev/null and b/annotator/uniformer/mmcv/cnn/bricks/__pycache__/conv.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/cnn/bricks/__pycache__/conv2d_adaptive_padding.cpython-310.pyc b/annotator/uniformer/mmcv/cnn/bricks/__pycache__/conv2d_adaptive_padding.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..75b18d8dde2b30857590d505ec6bdc4a00358b53
Binary files /dev/null and b/annotator/uniformer/mmcv/cnn/bricks/__pycache__/conv2d_adaptive_padding.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/cnn/bricks/__pycache__/conv2d_adaptive_padding.cpython-38.pyc b/annotator/uniformer/mmcv/cnn/bricks/__pycache__/conv2d_adaptive_padding.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..37e9cef86e81c4ace0b924ed5b23453ddd6e5a05
Binary files /dev/null and b/annotator/uniformer/mmcv/cnn/bricks/__pycache__/conv2d_adaptive_padding.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/cnn/bricks/__pycache__/conv_module.cpython-310.pyc b/annotator/uniformer/mmcv/cnn/bricks/__pycache__/conv_module.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ea29fa552429f00cfa39f7f7e740073b73a3cd85
Binary files /dev/null and b/annotator/uniformer/mmcv/cnn/bricks/__pycache__/conv_module.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/cnn/bricks/__pycache__/conv_module.cpython-38.pyc b/annotator/uniformer/mmcv/cnn/bricks/__pycache__/conv_module.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2098669e15afc38b8cbca7ea6870f671ec7b4e2a
Binary files /dev/null and b/annotator/uniformer/mmcv/cnn/bricks/__pycache__/conv_module.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/cnn/bricks/__pycache__/conv_ws.cpython-310.pyc b/annotator/uniformer/mmcv/cnn/bricks/__pycache__/conv_ws.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..347df21d9c6f44a4067c43bb2d07c6e3c9a55061
Binary files /dev/null and b/annotator/uniformer/mmcv/cnn/bricks/__pycache__/conv_ws.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/cnn/bricks/__pycache__/conv_ws.cpython-38.pyc b/annotator/uniformer/mmcv/cnn/bricks/__pycache__/conv_ws.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b461deda00fd7fd0160c66df8805b34eaa1277d8
Binary files /dev/null and b/annotator/uniformer/mmcv/cnn/bricks/__pycache__/conv_ws.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/cnn/bricks/__pycache__/depthwise_separable_conv_module.cpython-310.pyc b/annotator/uniformer/mmcv/cnn/bricks/__pycache__/depthwise_separable_conv_module.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..be9c0af2d62e1bd2280abaa0c9621e6e7324f6f4
Binary files /dev/null and b/annotator/uniformer/mmcv/cnn/bricks/__pycache__/depthwise_separable_conv_module.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/cnn/bricks/__pycache__/depthwise_separable_conv_module.cpython-38.pyc b/annotator/uniformer/mmcv/cnn/bricks/__pycache__/depthwise_separable_conv_module.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bca2821dc1d8ffe321d4a5de7bd112096cf4711a
Binary files /dev/null and b/annotator/uniformer/mmcv/cnn/bricks/__pycache__/depthwise_separable_conv_module.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/cnn/bricks/__pycache__/drop.cpython-310.pyc b/annotator/uniformer/mmcv/cnn/bricks/__pycache__/drop.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..013fee1ad16978c0a32cd48b5f0042ff23ce1c42
Binary files /dev/null and b/annotator/uniformer/mmcv/cnn/bricks/__pycache__/drop.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/cnn/bricks/__pycache__/drop.cpython-38.pyc b/annotator/uniformer/mmcv/cnn/bricks/__pycache__/drop.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d4895222d51916dcd65a18446b55aa0671ef486a
Binary files /dev/null and b/annotator/uniformer/mmcv/cnn/bricks/__pycache__/drop.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/cnn/bricks/__pycache__/generalized_attention.cpython-310.pyc b/annotator/uniformer/mmcv/cnn/bricks/__pycache__/generalized_attention.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6545fe3aeb1b6f6884fbe04d823c90484f7de657
Binary files /dev/null and b/annotator/uniformer/mmcv/cnn/bricks/__pycache__/generalized_attention.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/cnn/bricks/__pycache__/generalized_attention.cpython-38.pyc b/annotator/uniformer/mmcv/cnn/bricks/__pycache__/generalized_attention.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7c35f48948a8c9421428450beaaa320aeb698db9
Binary files /dev/null and b/annotator/uniformer/mmcv/cnn/bricks/__pycache__/generalized_attention.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/cnn/bricks/__pycache__/hsigmoid.cpython-310.pyc b/annotator/uniformer/mmcv/cnn/bricks/__pycache__/hsigmoid.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..81710f441de4f3af1003c507d53df6fe05ae9d65
Binary files /dev/null and b/annotator/uniformer/mmcv/cnn/bricks/__pycache__/hsigmoid.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/cnn/bricks/__pycache__/hsigmoid.cpython-38.pyc b/annotator/uniformer/mmcv/cnn/bricks/__pycache__/hsigmoid.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d9e0845e0a1245473ddfc320bf0d04fe52d8c792
Binary files /dev/null and b/annotator/uniformer/mmcv/cnn/bricks/__pycache__/hsigmoid.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/cnn/bricks/__pycache__/hswish.cpython-310.pyc b/annotator/uniformer/mmcv/cnn/bricks/__pycache__/hswish.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6ae0671340459636173da23721fef87c5995c1cb
Binary files /dev/null and b/annotator/uniformer/mmcv/cnn/bricks/__pycache__/hswish.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/cnn/bricks/__pycache__/hswish.cpython-38.pyc b/annotator/uniformer/mmcv/cnn/bricks/__pycache__/hswish.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9bcbe7758bc433dbdf744c138ddb0bfbc6aa3a48
Binary files /dev/null and b/annotator/uniformer/mmcv/cnn/bricks/__pycache__/hswish.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/cnn/bricks/__pycache__/non_local.cpython-310.pyc b/annotator/uniformer/mmcv/cnn/bricks/__pycache__/non_local.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..29fb184157587e70b1dea0e1cc3aa74f6694c60c
Binary files /dev/null and b/annotator/uniformer/mmcv/cnn/bricks/__pycache__/non_local.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/cnn/bricks/__pycache__/non_local.cpython-38.pyc b/annotator/uniformer/mmcv/cnn/bricks/__pycache__/non_local.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ffc00db9f2c728ecfc5823f2ff6d67db8b32ab1f
Binary files /dev/null and b/annotator/uniformer/mmcv/cnn/bricks/__pycache__/non_local.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/cnn/bricks/__pycache__/norm.cpython-310.pyc b/annotator/uniformer/mmcv/cnn/bricks/__pycache__/norm.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..abb2e40654e23da25cb1d1e2495af23d5a6b54e4
Binary files /dev/null and b/annotator/uniformer/mmcv/cnn/bricks/__pycache__/norm.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/cnn/bricks/__pycache__/norm.cpython-38.pyc b/annotator/uniformer/mmcv/cnn/bricks/__pycache__/norm.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ed6cedcb460c508f8a7f2b1db7b2061bc6eac960
Binary files /dev/null and b/annotator/uniformer/mmcv/cnn/bricks/__pycache__/norm.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/cnn/bricks/__pycache__/padding.cpython-310.pyc b/annotator/uniformer/mmcv/cnn/bricks/__pycache__/padding.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d3d4db3a57192ff11e2b2fc12aa345655b25422f
Binary files /dev/null and b/annotator/uniformer/mmcv/cnn/bricks/__pycache__/padding.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/cnn/bricks/__pycache__/padding.cpython-38.pyc b/annotator/uniformer/mmcv/cnn/bricks/__pycache__/padding.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5e6e5349399edd6ad7a9a5ba53e3f218106a44d8
Binary files /dev/null and b/annotator/uniformer/mmcv/cnn/bricks/__pycache__/padding.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/cnn/bricks/__pycache__/plugin.cpython-310.pyc b/annotator/uniformer/mmcv/cnn/bricks/__pycache__/plugin.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bd616ffe26f9bb442a1fbe9aa321b5d1ed567c9e
Binary files /dev/null and b/annotator/uniformer/mmcv/cnn/bricks/__pycache__/plugin.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/cnn/bricks/__pycache__/plugin.cpython-38.pyc b/annotator/uniformer/mmcv/cnn/bricks/__pycache__/plugin.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9a556daef05ebca6cae511251f2106d9fec0fc60
Binary files /dev/null and b/annotator/uniformer/mmcv/cnn/bricks/__pycache__/plugin.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/cnn/bricks/__pycache__/registry.cpython-310.pyc b/annotator/uniformer/mmcv/cnn/bricks/__pycache__/registry.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a54bae63bb545ddbfb84473fe6f3331915ddde7d
Binary files /dev/null and b/annotator/uniformer/mmcv/cnn/bricks/__pycache__/registry.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/cnn/bricks/__pycache__/registry.cpython-38.pyc b/annotator/uniformer/mmcv/cnn/bricks/__pycache__/registry.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c38f792b5d4daac119a9346fc7c36a32515f35cd
Binary files /dev/null and b/annotator/uniformer/mmcv/cnn/bricks/__pycache__/registry.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/cnn/bricks/__pycache__/scale.cpython-310.pyc b/annotator/uniformer/mmcv/cnn/bricks/__pycache__/scale.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c8056bed28c520e7a6911d8359f2a51e7555ff1f
Binary files /dev/null and b/annotator/uniformer/mmcv/cnn/bricks/__pycache__/scale.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/cnn/bricks/__pycache__/scale.cpython-38.pyc b/annotator/uniformer/mmcv/cnn/bricks/__pycache__/scale.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0828ba805453bfd1281348c45328d51960eb9036
Binary files /dev/null and b/annotator/uniformer/mmcv/cnn/bricks/__pycache__/scale.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/cnn/bricks/__pycache__/swish.cpython-310.pyc b/annotator/uniformer/mmcv/cnn/bricks/__pycache__/swish.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..be94068869b9073c07ffd8dec965109841dd1068
Binary files /dev/null and b/annotator/uniformer/mmcv/cnn/bricks/__pycache__/swish.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/cnn/bricks/__pycache__/swish.cpython-38.pyc b/annotator/uniformer/mmcv/cnn/bricks/__pycache__/swish.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..83899b83521b7ce1f2f794892e3824d62739bbe8
Binary files /dev/null and b/annotator/uniformer/mmcv/cnn/bricks/__pycache__/swish.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/cnn/bricks/__pycache__/upsample.cpython-310.pyc b/annotator/uniformer/mmcv/cnn/bricks/__pycache__/upsample.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f6a494104217a2a850f8ab82f79266868f90ecc7
Binary files /dev/null and b/annotator/uniformer/mmcv/cnn/bricks/__pycache__/upsample.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/cnn/bricks/__pycache__/upsample.cpython-38.pyc b/annotator/uniformer/mmcv/cnn/bricks/__pycache__/upsample.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..661107d59e3a340601dc41743ddd2145a4d838a5
Binary files /dev/null and b/annotator/uniformer/mmcv/cnn/bricks/__pycache__/upsample.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/cnn/bricks/__pycache__/wrappers.cpython-310.pyc b/annotator/uniformer/mmcv/cnn/bricks/__pycache__/wrappers.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..db658fe303f52f05594140c0b9dfc346ebac5f74
Binary files /dev/null and b/annotator/uniformer/mmcv/cnn/bricks/__pycache__/wrappers.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/cnn/bricks/__pycache__/wrappers.cpython-38.pyc b/annotator/uniformer/mmcv/cnn/bricks/__pycache__/wrappers.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1c1b447ac97cef452d3fad35101d1b293b5759fb
Binary files /dev/null and b/annotator/uniformer/mmcv/cnn/bricks/__pycache__/wrappers.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/cnn/bricks/activation.py b/annotator/uniformer/mmcv/cnn/bricks/activation.py
new file mode 100644
index 0000000000000000000000000000000000000000..cab2712287d5ef7be2f079dcb54a94b96394eab5
--- /dev/null
+++ b/annotator/uniformer/mmcv/cnn/bricks/activation.py
@@ -0,0 +1,92 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+from annotator.uniformer.mmcv.utils import TORCH_VERSION, build_from_cfg, digit_version
+from .registry import ACTIVATION_LAYERS
+
+for module in [
+ nn.ReLU, nn.LeakyReLU, nn.PReLU, nn.RReLU, nn.ReLU6, nn.ELU,
+ nn.Sigmoid, nn.Tanh
+]:
+ ACTIVATION_LAYERS.register_module(module=module)
+
+
+@ACTIVATION_LAYERS.register_module(name='Clip')
+@ACTIVATION_LAYERS.register_module()
+class Clamp(nn.Module):
+ """Clamp activation layer.
+
+    This activation function clamps the feature map values to the range
+    :math:`[min, max]`. More details can be found in ``torch.clamp()``.
+
+    Args:
+        min (Number, optional): Lower bound of the range to clamp to.
+            Defaults to -1.
+        max (Number, optional): Upper bound of the range to clamp to.
+            Defaults to 1.
+ """
+
+ def __init__(self, min=-1., max=1.):
+ super(Clamp, self).__init__()
+ self.min = min
+ self.max = max
+
+ def forward(self, x):
+ """Forward function.
+
+ Args:
+ x (torch.Tensor): The input tensor.
+
+ Returns:
+ torch.Tensor: Clamped tensor.
+ """
+ return torch.clamp(x, min=self.min, max=self.max)
+
+
+class GELU(nn.Module):
+ r"""Applies the Gaussian Error Linear Units function:
+
+ .. math::
+ \text{GELU}(x) = x * \Phi(x)
+ where :math:`\Phi(x)` is the Cumulative Distribution Function for
+ Gaussian Distribution.
+
+ Shape:
+ - Input: :math:`(N, *)` where `*` means, any number of additional
+ dimensions
+ - Output: :math:`(N, *)`, same shape as the input
+
+ .. image:: scripts/activation_images/GELU.png
+
+ Examples::
+
+ >>> m = nn.GELU()
+ >>> input = torch.randn(2)
+ >>> output = m(input)
+ """
+
+ def forward(self, input):
+ return F.gelu(input)
+
+
+if (TORCH_VERSION == 'parrots'
+ or digit_version(TORCH_VERSION) < digit_version('1.4')):
+ ACTIVATION_LAYERS.register_module(module=GELU)
+else:
+ ACTIVATION_LAYERS.register_module(module=nn.GELU)
+
+
+def build_activation_layer(cfg):
+ """Build activation layer.
+
+ Args:
+ cfg (dict): The activation layer config, which should contain:
+ - type (str): Layer type.
+ - layer args: Args needed to instantiate an activation layer.
+
+ Returns:
+ nn.Module: Created activation layer.
+ """
+ return build_from_cfg(cfg, ACTIVATION_LAYERS)
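+
+
+# Minimal usage sketch (illustrative): activation layers are created from
+# config dicts through the ACTIVATION_LAYERS registry populated above. The
+# parameter values here are arbitrary examples.
+if __name__ == '__main__':
+    relu = build_activation_layer(dict(type='ReLU', inplace=True))
+    clamp = build_activation_layer(dict(type='Clamp', min=0., max=6.))
+    x = torch.randn(2, 3)
+    assert relu(x).min() >= 0
+    assert clamp(x).max() <= 6.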
diff --git a/annotator/uniformer/mmcv/cnn/bricks/context_block.py b/annotator/uniformer/mmcv/cnn/bricks/context_block.py
new file mode 100644
index 0000000000000000000000000000000000000000..d60fdb904c749ce3b251510dff3cc63cea70d42e
--- /dev/null
+++ b/annotator/uniformer/mmcv/cnn/bricks/context_block.py
@@ -0,0 +1,125 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import torch
+from torch import nn
+
+from ..utils import constant_init, kaiming_init
+from .registry import PLUGIN_LAYERS
+
+
+def last_zero_init(m):
+ if isinstance(m, nn.Sequential):
+ constant_init(m[-1], val=0)
+ else:
+ constant_init(m, val=0)
+
+
+@PLUGIN_LAYERS.register_module()
+class ContextBlock(nn.Module):
+ """ContextBlock module in GCNet.
+
+ See 'GCNet: Non-local Networks Meet Squeeze-Excitation Networks and Beyond'
+ (https://arxiv.org/abs/1904.11492) for details.
+
+ Args:
+ in_channels (int): Channels of the input feature map.
+        ratio (float): Ratio of channels of the transform bottleneck.
+        pooling_type (str): Pooling method for context modeling.
+            Options are 'att' and 'avg', which stand for attention pooling
+            and average pooling respectively. Default: 'att'.
+        fusion_types (Sequence[str]): Fusion method for feature fusion.
+            Options are 'channel_add' and 'channel_mul', which stand for
+            channel-wise addition and multiplication respectively.
+            Default: ('channel_add', ).
+ """
+
+ _abbr_ = 'context_block'
+
+ def __init__(self,
+ in_channels,
+ ratio,
+ pooling_type='att',
+ fusion_types=('channel_add', )):
+ super(ContextBlock, self).__init__()
+ assert pooling_type in ['avg', 'att']
+ assert isinstance(fusion_types, (list, tuple))
+ valid_fusion_types = ['channel_add', 'channel_mul']
+ assert all([f in valid_fusion_types for f in fusion_types])
+ assert len(fusion_types) > 0, 'at least one fusion should be used'
+ self.in_channels = in_channels
+ self.ratio = ratio
+ self.planes = int(in_channels * ratio)
+ self.pooling_type = pooling_type
+ self.fusion_types = fusion_types
+ if pooling_type == 'att':
+ self.conv_mask = nn.Conv2d(in_channels, 1, kernel_size=1)
+ self.softmax = nn.Softmax(dim=2)
+ else:
+ self.avg_pool = nn.AdaptiveAvgPool2d(1)
+ if 'channel_add' in fusion_types:
+ self.channel_add_conv = nn.Sequential(
+ nn.Conv2d(self.in_channels, self.planes, kernel_size=1),
+ nn.LayerNorm([self.planes, 1, 1]),
+ nn.ReLU(inplace=True), # yapf: disable
+ nn.Conv2d(self.planes, self.in_channels, kernel_size=1))
+ else:
+ self.channel_add_conv = None
+ if 'channel_mul' in fusion_types:
+ self.channel_mul_conv = nn.Sequential(
+ nn.Conv2d(self.in_channels, self.planes, kernel_size=1),
+ nn.LayerNorm([self.planes, 1, 1]),
+ nn.ReLU(inplace=True), # yapf: disable
+ nn.Conv2d(self.planes, self.in_channels, kernel_size=1))
+ else:
+ self.channel_mul_conv = None
+ self.reset_parameters()
+
+ def reset_parameters(self):
+ if self.pooling_type == 'att':
+ kaiming_init(self.conv_mask, mode='fan_in')
+ self.conv_mask.inited = True
+
+ if self.channel_add_conv is not None:
+ last_zero_init(self.channel_add_conv)
+ if self.channel_mul_conv is not None:
+ last_zero_init(self.channel_mul_conv)
+
+ def spatial_pool(self, x):
+ batch, channel, height, width = x.size()
+ if self.pooling_type == 'att':
+ input_x = x
+ # [N, C, H * W]
+ input_x = input_x.view(batch, channel, height * width)
+ # [N, 1, C, H * W]
+ input_x = input_x.unsqueeze(1)
+ # [N, 1, H, W]
+ context_mask = self.conv_mask(x)
+ # [N, 1, H * W]
+ context_mask = context_mask.view(batch, 1, height * width)
+ # [N, 1, H * W]
+ context_mask = self.softmax(context_mask)
+ # [N, 1, H * W, 1]
+ context_mask = context_mask.unsqueeze(-1)
+ # [N, 1, C, 1]
+ context = torch.matmul(input_x, context_mask)
+ # [N, C, 1, 1]
+ context = context.view(batch, channel, 1, 1)
+ else:
+ # [N, C, 1, 1]
+ context = self.avg_pool(x)
+
+ return context
+
+ def forward(self, x):
+ # [N, C, 1, 1]
+ context = self.spatial_pool(x)
+
+ out = x
+ if self.channel_mul_conv is not None:
+ # [N, C, 1, 1]
+ channel_mul_term = torch.sigmoid(self.channel_mul_conv(context))
+ out = out * channel_mul_term
+ if self.channel_add_conv is not None:
+ # [N, C, 1, 1]
+ channel_add_term = self.channel_add_conv(context)
+ out = out + channel_add_term
+
+ return out
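+
+
+# Minimal usage sketch (illustrative): ContextBlock acts as a residual
+# plugin, so the output shape always matches the input shape. The channel
+# count and ratio below are arbitrary examples.
+if __name__ == '__main__':
+    block = ContextBlock(in_channels=16, ratio=0.25)
+    x = torch.rand(2, 16, 8, 8)
+    assert block(x).shape == x.shape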
diff --git a/annotator/uniformer/mmcv/cnn/bricks/conv.py b/annotator/uniformer/mmcv/cnn/bricks/conv.py
new file mode 100644
index 0000000000000000000000000000000000000000..cf54491997a48ac3e7fadc4183ab7bf3e831024c
--- /dev/null
+++ b/annotator/uniformer/mmcv/cnn/bricks/conv.py
@@ -0,0 +1,44 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from torch import nn
+
+from .registry import CONV_LAYERS
+
+CONV_LAYERS.register_module('Conv1d', module=nn.Conv1d)
+CONV_LAYERS.register_module('Conv2d', module=nn.Conv2d)
+CONV_LAYERS.register_module('Conv3d', module=nn.Conv3d)
+CONV_LAYERS.register_module('Conv', module=nn.Conv2d)
+
+
+def build_conv_layer(cfg, *args, **kwargs):
+ """Build convolution layer.
+
+ Args:
+ cfg (None or dict): The conv layer config, which should contain:
+ - type (str): Layer type.
+            - layer args: Args needed to instantiate a conv layer.
+ args (argument list): Arguments passed to the `__init__`
+ method of the corresponding conv layer.
+ kwargs (keyword arguments): Keyword arguments passed to the `__init__`
+ method of the corresponding conv layer.
+
+ Returns:
+ nn.Module: Created conv layer.
+ """
+ if cfg is None:
+ cfg_ = dict(type='Conv2d')
+ else:
+ if not isinstance(cfg, dict):
+ raise TypeError('cfg must be a dict')
+ if 'type' not in cfg:
+ raise KeyError('the cfg dict must contain the key "type"')
+ cfg_ = cfg.copy()
+
+ layer_type = cfg_.pop('type')
+ if layer_type not in CONV_LAYERS:
+        raise KeyError(f'Unrecognized conv type {layer_type}')
+ else:
+ conv_layer = CONV_LAYERS.get(layer_type)
+
+ layer = conv_layer(*args, **kwargs, **cfg_)
+
+ return layer
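+
+
+# Minimal usage sketch (illustrative): cfg=None falls back to a plain
+# nn.Conv2d, while a dict selects a registered conv type; the remaining
+# positional/keyword args are forwarded to the layer's __init__.
+if __name__ == '__main__':
+    default_conv = build_conv_layer(None, 3, 16, kernel_size=3, padding=1)
+    conv1d = build_conv_layer(dict(type='Conv1d'), 3, 16, kernel_size=3)
+    assert isinstance(default_conv, nn.Conv2d)
+    assert isinstance(conv1d, nn.Conv1d)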
diff --git a/annotator/uniformer/mmcv/cnn/bricks/conv2d_adaptive_padding.py b/annotator/uniformer/mmcv/cnn/bricks/conv2d_adaptive_padding.py
new file mode 100644
index 0000000000000000000000000000000000000000..b45e758ac6cf8dfb0382d072fe09125bc7e9b888
--- /dev/null
+++ b/annotator/uniformer/mmcv/cnn/bricks/conv2d_adaptive_padding.py
@@ -0,0 +1,62 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import math
+
+from torch import nn
+from torch.nn import functional as F
+
+from .registry import CONV_LAYERS
+
+
+@CONV_LAYERS.register_module()
+class Conv2dAdaptivePadding(nn.Conv2d):
+ """Implementation of 2D convolution in tensorflow with `padding` as "same",
+ which applies padding to input (if needed) so that input image gets fully
+ covered by filter and stride you specified. For stride 1, this will ensure
+ that output image size is same as input. For stride of 2, output dimensions
+ will be half, for example.
+
+ Args:
+ in_channels (int): Number of channels in the input image
+ out_channels (int): Number of channels produced by the convolution
+ kernel_size (int or tuple): Size of the convolving kernel
+ stride (int or tuple, optional): Stride of the convolution. Default: 1
+ padding (int or tuple, optional): Zero-padding added to both sides of
+ the input. Default: 0
+ dilation (int or tuple, optional): Spacing between kernel elements.
+ Default: 1
+ groups (int, optional): Number of blocked connections from input
+ channels to output channels. Default: 1
+ bias (bool, optional): If ``True``, adds a learnable bias to the
+ output. Default: ``True``
+ """
+
+ def __init__(self,
+ in_channels,
+ out_channels,
+ kernel_size,
+ stride=1,
+ padding=0,
+ dilation=1,
+ groups=1,
+ bias=True):
+ super().__init__(in_channels, out_channels, kernel_size, stride, 0,
+ dilation, groups, bias)
+
+ def forward(self, x):
+ img_h, img_w = x.size()[-2:]
+ kernel_h, kernel_w = self.weight.size()[-2:]
+ stride_h, stride_w = self.stride
+ output_h = math.ceil(img_h / stride_h)
+ output_w = math.ceil(img_w / stride_w)
+ pad_h = (
+ max((output_h - 1) * self.stride[0] +
+ (kernel_h - 1) * self.dilation[0] + 1 - img_h, 0))
+ pad_w = (
+ max((output_w - 1) * self.stride[1] +
+ (kernel_w - 1) * self.dilation[1] + 1 - img_w, 0))
+ if pad_h > 0 or pad_w > 0:
+ x = F.pad(x, [
+ pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2
+ ])
+ return F.conv2d(x, self.weight, self.bias, self.stride, self.padding,
+ self.dilation, self.groups)
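+
+
+# Minimal usage sketch (illustrative): with "same"-style padding the output
+# spatial size is ceil(input_size / stride), independent of the kernel size.
+if __name__ == '__main__':
+    import torch
+    conv = Conv2dAdaptivePadding(3, 8, kernel_size=3, stride=2)
+    out = conv(torch.rand(1, 3, 15, 15))
+    assert out.shape[-2:] == (8, 8)  # ceil(15 / 2) == 8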
diff --git a/annotator/uniformer/mmcv/cnn/bricks/conv_module.py b/annotator/uniformer/mmcv/cnn/bricks/conv_module.py
new file mode 100644
index 0000000000000000000000000000000000000000..e60e7e62245071c77b652093fddebff3948d7c3e
--- /dev/null
+++ b/annotator/uniformer/mmcv/cnn/bricks/conv_module.py
@@ -0,0 +1,206 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import warnings
+
+import torch.nn as nn
+
+from annotator.uniformer.mmcv.utils import _BatchNorm, _InstanceNorm
+from ..utils import constant_init, kaiming_init
+from .activation import build_activation_layer
+from .conv import build_conv_layer
+from .norm import build_norm_layer
+from .padding import build_padding_layer
+from .registry import PLUGIN_LAYERS
+
+
+@PLUGIN_LAYERS.register_module()
+class ConvModule(nn.Module):
+ """A conv block that bundles conv/norm/activation layers.
+
+ This block simplifies the usage of convolution layers, which are commonly
+ used with a norm layer (e.g., BatchNorm) and activation layer (e.g., ReLU).
+ It is based upon three build methods: `build_conv_layer()`,
+ `build_norm_layer()` and `build_activation_layer()`.
+
+ Besides, we add some additional features in this module.
+ 1. Automatically set `bias` of the conv layer.
+ 2. Spectral norm is supported.
+    3. More padding modes are supported. Before PyTorch 1.5, nn.Conv2d only
+       supported zero and circular padding, so we add a "reflect" padding mode.
+
+ Args:
+ in_channels (int): Number of channels in the input feature map.
+ Same as that in ``nn._ConvNd``.
+ out_channels (int): Number of channels produced by the convolution.
+ Same as that in ``nn._ConvNd``.
+ kernel_size (int | tuple[int]): Size of the convolving kernel.
+ Same as that in ``nn._ConvNd``.
+ stride (int | tuple[int]): Stride of the convolution.
+ Same as that in ``nn._ConvNd``.
+ padding (int | tuple[int]): Zero-padding added to both sides of
+ the input. Same as that in ``nn._ConvNd``.
+ dilation (int | tuple[int]): Spacing between kernel elements.
+ Same as that in ``nn._ConvNd``.
+ groups (int): Number of blocked connections from input channels to
+ output channels. Same as that in ``nn._ConvNd``.
+ bias (bool | str): If specified as `auto`, it will be decided by the
+ norm_cfg. Bias will be set as True if `norm_cfg` is None, otherwise
+ False. Default: "auto".
+ conv_cfg (dict): Config dict for convolution layer. Default: None,
+ which means using conv2d.
+ norm_cfg (dict): Config dict for normalization layer. Default: None.
+ act_cfg (dict): Config dict for activation layer.
+ Default: dict(type='ReLU').
+ inplace (bool): Whether to use inplace mode for activation.
+ Default: True.
+        with_spectral_norm (bool): Whether to use spectral norm in the conv
+            module. Default: False.
+        padding_mode (str): If the `padding_mode` is not supported by the
+            current `Conv2d` in PyTorch, we will use our own padding layer
+            instead. Currently, we support ['zeros', 'circular'] with the
+            official implementation and ['reflect'] with our own
+            implementation. Default: 'zeros'.
+ order (tuple[str]): The order of conv/norm/activation layers. It is a
+ sequence of "conv", "norm" and "act". Common examples are
+ ("conv", "norm", "act") and ("act", "conv", "norm").
+ Default: ('conv', 'norm', 'act').
+ """
+
+ _abbr_ = 'conv_block'
+
+ def __init__(self,
+ in_channels,
+ out_channels,
+ kernel_size,
+ stride=1,
+ padding=0,
+ dilation=1,
+ groups=1,
+ bias='auto',
+ conv_cfg=None,
+ norm_cfg=None,
+ act_cfg=dict(type='ReLU'),
+ inplace=True,
+ with_spectral_norm=False,
+ padding_mode='zeros',
+ order=('conv', 'norm', 'act')):
+ super(ConvModule, self).__init__()
+ assert conv_cfg is None or isinstance(conv_cfg, dict)
+ assert norm_cfg is None or isinstance(norm_cfg, dict)
+ assert act_cfg is None or isinstance(act_cfg, dict)
+ official_padding_mode = ['zeros', 'circular']
+ self.conv_cfg = conv_cfg
+ self.norm_cfg = norm_cfg
+ self.act_cfg = act_cfg
+ self.inplace = inplace
+ self.with_spectral_norm = with_spectral_norm
+ self.with_explicit_padding = padding_mode not in official_padding_mode
+ self.order = order
+ assert isinstance(self.order, tuple) and len(self.order) == 3
+ assert set(order) == set(['conv', 'norm', 'act'])
+
+ self.with_norm = norm_cfg is not None
+ self.with_activation = act_cfg is not None
+ # if the conv layer is before a norm layer, bias is unnecessary.
+ if bias == 'auto':
+ bias = not self.with_norm
+ self.with_bias = bias
+
+ if self.with_explicit_padding:
+ pad_cfg = dict(type=padding_mode)
+ self.padding_layer = build_padding_layer(pad_cfg, padding)
+
+ # reset padding to 0 for conv module
+ conv_padding = 0 if self.with_explicit_padding else padding
+ # build convolution layer
+ self.conv = build_conv_layer(
+ conv_cfg,
+ in_channels,
+ out_channels,
+ kernel_size,
+ stride=stride,
+ padding=conv_padding,
+ dilation=dilation,
+ groups=groups,
+ bias=bias)
+ # export the attributes of self.conv to a higher level for convenience
+ self.in_channels = self.conv.in_channels
+ self.out_channels = self.conv.out_channels
+ self.kernel_size = self.conv.kernel_size
+ self.stride = self.conv.stride
+ self.padding = padding
+ self.dilation = self.conv.dilation
+ self.transposed = self.conv.transposed
+ self.output_padding = self.conv.output_padding
+ self.groups = self.conv.groups
+
+ if self.with_spectral_norm:
+ self.conv = nn.utils.spectral_norm(self.conv)
+
+ # build normalization layers
+ if self.with_norm:
+ # norm layer is after conv layer
+ if order.index('norm') > order.index('conv'):
+ norm_channels = out_channels
+ else:
+ norm_channels = in_channels
+ self.norm_name, norm = build_norm_layer(norm_cfg, norm_channels)
+ self.add_module(self.norm_name, norm)
+ if self.with_bias:
+ if isinstance(norm, (_BatchNorm, _InstanceNorm)):
+ warnings.warn(
+ 'Unnecessary conv bias before batch/instance norm')
+ else:
+ self.norm_name = None
+
+ # build activation layer
+ if self.with_activation:
+ act_cfg_ = act_cfg.copy()
+ # nn.Tanh has no 'inplace' argument
+ if act_cfg_['type'] not in [
+ 'Tanh', 'PReLU', 'Sigmoid', 'HSigmoid', 'Swish'
+ ]:
+ act_cfg_.setdefault('inplace', inplace)
+ self.activate = build_activation_layer(act_cfg_)
+
+ # Use msra init by default
+ self.init_weights()
+
+ @property
+ def norm(self):
+ if self.norm_name:
+ return getattr(self, self.norm_name)
+ else:
+ return None
+
+ def init_weights(self):
+ # 1. It is mainly for customized conv layers with their own
+ # initialization manners by calling their own ``init_weights()``,
+ # and we do not want ConvModule to override the initialization.
+ # 2. For customized conv layers without their own initialization
+ # manners (that is, they don't have their own ``init_weights()``)
+ # and PyTorch's conv layers, they will be initialized by
+ # this method with default ``kaiming_init``.
+ # Note: For PyTorch's conv layers, they will be overwritten by our
+ # initialization implementation using default ``kaiming_init``.
+ if not hasattr(self.conv, 'init_weights'):
+ if self.with_activation and self.act_cfg['type'] == 'LeakyReLU':
+ nonlinearity = 'leaky_relu'
+ a = self.act_cfg.get('negative_slope', 0.01)
+ else:
+ nonlinearity = 'relu'
+ a = 0
+ kaiming_init(self.conv, a=a, nonlinearity=nonlinearity)
+ if self.with_norm:
+ constant_init(self.norm, 1, bias=0)
+
+ def forward(self, x, activate=True, norm=True):
+ for layer in self.order:
+ if layer == 'conv':
+ if self.with_explicit_padding:
+ x = self.padding_layer(x)
+ x = self.conv(x)
+ elif layer == 'norm' and norm and self.with_norm:
+ x = self.norm(x)
+ elif layer == 'act' and activate and self.with_activation:
+ x = self.activate(x)
+ return x
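+
+
+# Minimal usage sketch (illustrative): with a norm_cfg the conv bias is
+# disabled automatically (bias='auto'), and the norm/activation layers are
+# built from their config dicts. Assumes 'BN' is registered in NORM_LAYERS,
+# as in upstream mmcv.
+if __name__ == '__main__':
+    import torch
+    block = ConvModule(
+        3, 16, 3, padding=1, norm_cfg=dict(type='BN'), act_cfg=dict(type='ReLU'))
+    assert block.conv.bias is None  # bias dropped because BN follows the conv
+    assert block(torch.rand(2, 3, 8, 8)).shape == (2, 16, 8, 8)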
diff --git a/annotator/uniformer/mmcv/cnn/bricks/conv_ws.py b/annotator/uniformer/mmcv/cnn/bricks/conv_ws.py
new file mode 100644
index 0000000000000000000000000000000000000000..a3941e27874993418b3b5708d5a7485f175ff9c8
--- /dev/null
+++ b/annotator/uniformer/mmcv/cnn/bricks/conv_ws.py
@@ -0,0 +1,148 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+from .registry import CONV_LAYERS
+
+
+def conv_ws_2d(input,
+ weight,
+ bias=None,
+ stride=1,
+ padding=0,
+ dilation=1,
+ groups=1,
+ eps=1e-5):
+ c_in = weight.size(0)
+ weight_flat = weight.view(c_in, -1)
+ mean = weight_flat.mean(dim=1, keepdim=True).view(c_in, 1, 1, 1)
+ std = weight_flat.std(dim=1, keepdim=True).view(c_in, 1, 1, 1)
+ weight = (weight - mean) / (std + eps)
+ return F.conv2d(input, weight, bias, stride, padding, dilation, groups)
+
+
+@CONV_LAYERS.register_module('ConvWS')
+class ConvWS2d(nn.Conv2d):
+
+ def __init__(self,
+ in_channels,
+ out_channels,
+ kernel_size,
+ stride=1,
+ padding=0,
+ dilation=1,
+ groups=1,
+ bias=True,
+ eps=1e-5):
+ super(ConvWS2d, self).__init__(
+ in_channels,
+ out_channels,
+ kernel_size,
+ stride=stride,
+ padding=padding,
+ dilation=dilation,
+ groups=groups,
+ bias=bias)
+ self.eps = eps
+
+ def forward(self, x):
+ return conv_ws_2d(x, self.weight, self.bias, self.stride, self.padding,
+ self.dilation, self.groups, self.eps)
+
+
+@CONV_LAYERS.register_module(name='ConvAWS')
+class ConvAWS2d(nn.Conv2d):
+ """AWS (Adaptive Weight Standardization)
+
+ This is a variant of Weight Standardization
+ (https://arxiv.org/pdf/1903.10520.pdf)
+ It is used in DetectoRS to avoid NaN
+ (https://arxiv.org/pdf/2006.02334.pdf)
+
+ Args:
+ in_channels (int): Number of channels in the input image
+ out_channels (int): Number of channels produced by the convolution
+ kernel_size (int or tuple): Size of the conv kernel
+ stride (int or tuple, optional): Stride of the convolution. Default: 1
+ padding (int or tuple, optional): Zero-padding added to both sides of
+ the input. Default: 0
+ dilation (int or tuple, optional): Spacing between kernel elements.
+ Default: 1
+ groups (int, optional): Number of blocked connections from input
+ channels to output channels. Default: 1
+ bias (bool, optional): If set True, adds a learnable bias to the
+ output. Default: True
+ """
+
+ def __init__(self,
+ in_channels,
+ out_channels,
+ kernel_size,
+ stride=1,
+ padding=0,
+ dilation=1,
+ groups=1,
+ bias=True):
+ super().__init__(
+ in_channels,
+ out_channels,
+ kernel_size,
+ stride=stride,
+ padding=padding,
+ dilation=dilation,
+ groups=groups,
+ bias=bias)
+ self.register_buffer('weight_gamma',
+ torch.ones(self.out_channels, 1, 1, 1))
+ self.register_buffer('weight_beta',
+ torch.zeros(self.out_channels, 1, 1, 1))
+
+ def _get_weight(self, weight):
+ weight_flat = weight.view(weight.size(0), -1)
+ mean = weight_flat.mean(dim=1).view(-1, 1, 1, 1)
+ std = torch.sqrt(weight_flat.var(dim=1) + 1e-5).view(-1, 1, 1, 1)
+ weight = (weight - mean) / std
+ weight = self.weight_gamma * weight + self.weight_beta
+ return weight
+
+ def forward(self, x):
+ weight = self._get_weight(self.weight)
+ return F.conv2d(x, weight, self.bias, self.stride, self.padding,
+ self.dilation, self.groups)
+
+ def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
+ missing_keys, unexpected_keys, error_msgs):
+ """Override default load function.
+
+ AWS overrides the function _load_from_state_dict to recover
+ weight_gamma and weight_beta if they are missing. If weight_gamma and
+ weight_beta are found in the checkpoint, this function will return
+ after super()._load_from_state_dict. Otherwise, it will compute the
+ mean and std of the pretrained weights and store them in weight_beta
+ and weight_gamma.
+ """
+
+ self.weight_gamma.data.fill_(-1)
+ local_missing_keys = []
+ super()._load_from_state_dict(state_dict, prefix, local_metadata,
+ strict, local_missing_keys,
+ unexpected_keys, error_msgs)
+ if self.weight_gamma.data.mean() > 0:
+ for k in local_missing_keys:
+ missing_keys.append(k)
+ return
+ weight = self.weight.data
+ weight_flat = weight.view(weight.size(0), -1)
+ mean = weight_flat.mean(dim=1).view(-1, 1, 1, 1)
+ std = torch.sqrt(weight_flat.var(dim=1) + 1e-5).view(-1, 1, 1, 1)
+ self.weight_beta.data.copy_(mean)
+ self.weight_gamma.data.copy_(std)
+ missing_gamma_beta = [
+ k for k in local_missing_keys
+ if k.endswith('weight_gamma') or k.endswith('weight_beta')
+ ]
+ for k in missing_gamma_beta:
+ local_missing_keys.remove(k)
+ for k in local_missing_keys:
+ missing_keys.append(k)
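+
+
+# Minimal usage sketch (illustrative): ConvWS2d standardizes each filter of
+# the weight on the fly before the convolution, so it is used exactly like a
+# regular nn.Conv2d and preserves the usual output shape.
+if __name__ == '__main__':
+    conv = ConvWS2d(3, 8, kernel_size=3, padding=1)
+    out = conv(torch.rand(1, 3, 16, 16))
+    assert out.shape == (1, 8, 16, 16)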
diff --git a/annotator/uniformer/mmcv/cnn/bricks/depthwise_separable_conv_module.py b/annotator/uniformer/mmcv/cnn/bricks/depthwise_separable_conv_module.py
new file mode 100644
index 0000000000000000000000000000000000000000..722d5d8d71f75486e2db3008907c4eadfca41d63
--- /dev/null
+++ b/annotator/uniformer/mmcv/cnn/bricks/depthwise_separable_conv_module.py
@@ -0,0 +1,96 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import torch.nn as nn
+
+from .conv_module import ConvModule
+
+
+class DepthwiseSeparableConvModule(nn.Module):
+ """Depthwise separable convolution module.
+
+ See https://arxiv.org/pdf/1704.04861.pdf for details.
+
+    This module is a drop-in replacement for ConvModule, with the single conv
+    block replaced by two conv blocks: a depthwise one and a pointwise one.
+    The depthwise conv block contains depthwise-conv/norm/activation layers.
+    The pointwise
+ conv block contains pointwise-conv/norm/activation layers. It should be
+ noted that there will be norm/activation layer in the depthwise conv block
+ if `norm_cfg` and `act_cfg` are specified.
+
+ Args:
+ in_channels (int): Number of channels in the input feature map.
+ Same as that in ``nn._ConvNd``.
+ out_channels (int): Number of channels produced by the convolution.
+ Same as that in ``nn._ConvNd``.
+ kernel_size (int | tuple[int]): Size of the convolving kernel.
+ Same as that in ``nn._ConvNd``.
+ stride (int | tuple[int]): Stride of the convolution.
+ Same as that in ``nn._ConvNd``. Default: 1.
+ padding (int | tuple[int]): Zero-padding added to both sides of
+ the input. Same as that in ``nn._ConvNd``. Default: 0.
+ dilation (int | tuple[int]): Spacing between kernel elements.
+ Same as that in ``nn._ConvNd``. Default: 1.
+ norm_cfg (dict): Default norm config for both depthwise ConvModule and
+ pointwise ConvModule. Default: None.
+ act_cfg (dict): Default activation config for both depthwise ConvModule
+ and pointwise ConvModule. Default: dict(type='ReLU').
+ dw_norm_cfg (dict): Norm config of depthwise ConvModule. If it is
+ 'default', it will be the same as `norm_cfg`. Default: 'default'.
+ dw_act_cfg (dict): Activation config of depthwise ConvModule. If it is
+ 'default', it will be the same as `act_cfg`. Default: 'default'.
+ pw_norm_cfg (dict): Norm config of pointwise ConvModule. If it is
+ 'default', it will be the same as `norm_cfg`. Default: 'default'.
+ pw_act_cfg (dict): Activation config of pointwise ConvModule. If it is
+ 'default', it will be the same as `act_cfg`. Default: 'default'.
+ kwargs (optional): Other shared arguments for depthwise and pointwise
+ ConvModule. See ConvModule for ref.
+ """
+
+ def __init__(self,
+ in_channels,
+ out_channels,
+ kernel_size,
+ stride=1,
+ padding=0,
+ dilation=1,
+ norm_cfg=None,
+ act_cfg=dict(type='ReLU'),
+ dw_norm_cfg='default',
+ dw_act_cfg='default',
+ pw_norm_cfg='default',
+ pw_act_cfg='default',
+ **kwargs):
+ super(DepthwiseSeparableConvModule, self).__init__()
+ assert 'groups' not in kwargs, 'groups should not be specified'
+
+ # if norm/activation config of depthwise/pointwise ConvModule is not
+ # specified, use default config.
+ dw_norm_cfg = dw_norm_cfg if dw_norm_cfg != 'default' else norm_cfg
+ dw_act_cfg = dw_act_cfg if dw_act_cfg != 'default' else act_cfg
+ pw_norm_cfg = pw_norm_cfg if pw_norm_cfg != 'default' else norm_cfg
+ pw_act_cfg = pw_act_cfg if pw_act_cfg != 'default' else act_cfg
+
+ # depthwise convolution
+ self.depthwise_conv = ConvModule(
+ in_channels,
+ in_channels,
+ kernel_size,
+ stride=stride,
+ padding=padding,
+ dilation=dilation,
+ groups=in_channels,
+ norm_cfg=dw_norm_cfg,
+ act_cfg=dw_act_cfg,
+ **kwargs)
+
+ self.pointwise_conv = ConvModule(
+ in_channels,
+ out_channels,
+ 1,
+ norm_cfg=pw_norm_cfg,
+ act_cfg=pw_act_cfg,
+ **kwargs)
+
+ def forward(self, x):
+ x = self.depthwise_conv(x)
+ x = self.pointwise_conv(x)
+ return x
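+
+
+# Minimal usage sketch (illustrative): the factorized block keeps the output
+# shape of a dense 3x3 convolution while using far fewer parameters.
+if __name__ == '__main__':
+    import torch
+    dsconv = DepthwiseSeparableConvModule(32, 64, 3, padding=1)
+    dense = nn.Conv2d(32, 64, 3, padding=1)
+    n_ds = sum(p.numel() for p in dsconv.parameters())
+    n_dense = sum(p.numel() for p in dense.parameters())
+    assert n_ds < n_dense
+    assert dsconv(torch.rand(1, 32, 8, 8)).shape == (1, 64, 8, 8)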
diff --git a/annotator/uniformer/mmcv/cnn/bricks/drop.py b/annotator/uniformer/mmcv/cnn/bricks/drop.py
new file mode 100644
index 0000000000000000000000000000000000000000..b7b4fccd457a0d51fb10c789df3c8537fe7b67c1
--- /dev/null
+++ b/annotator/uniformer/mmcv/cnn/bricks/drop.py
@@ -0,0 +1,65 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import torch
+import torch.nn as nn
+
+from annotator.uniformer.mmcv import build_from_cfg
+from .registry import DROPOUT_LAYERS
+
+
+def drop_path(x, drop_prob=0., training=False):
+ """Drop paths (Stochastic Depth) per sample (when applied in main path of
+ residual blocks).
+
+ We follow the implementation
+ https://github.com/rwightman/pytorch-image-models/blob/a2727c1bf78ba0d7b5727f5f95e37fb7f8866b1f/timm/models/layers/drop.py # noqa: E501
+ """
+ if drop_prob == 0. or not training:
+ return x
+ keep_prob = 1 - drop_prob
+ # handle tensors with different dimensions, not just 4D tensors.
+ shape = (x.shape[0], ) + (1, ) * (x.ndim - 1)
+ random_tensor = keep_prob + torch.rand(
+ shape, dtype=x.dtype, device=x.device)
+ output = x.div(keep_prob) * random_tensor.floor()
+ return output
+
+
+@DROPOUT_LAYERS.register_module()
+class DropPath(nn.Module):
+ """Drop paths (Stochastic Depth) per sample (when applied in main path of
+ residual blocks).
+
+ We follow the implementation
+ https://github.com/rwightman/pytorch-image-models/blob/a2727c1bf78ba0d7b5727f5f95e37fb7f8866b1f/timm/models/layers/drop.py # noqa: E501
+
+ Args:
+ drop_prob (float): Probability of the path to be zeroed. Default: 0.1
+ """
+
+ def __init__(self, drop_prob=0.1):
+ super(DropPath, self).__init__()
+ self.drop_prob = drop_prob
+
+ def forward(self, x):
+ return drop_path(x, self.drop_prob, self.training)
+
+
+@DROPOUT_LAYERS.register_module()
+class Dropout(nn.Dropout):
+ """A wrapper for ``torch.nn.Dropout``, We rename the ``p`` of
+ ``torch.nn.Dropout`` to ``drop_prob`` so as to be consistent with
+ ``DropPath``
+
+ Args:
+ drop_prob (float): Probability of the elements to be
+ zeroed. Default: 0.5.
+ inplace (bool): Do the operation inplace or not. Default: False.
+ """
+
+ def __init__(self, drop_prob=0.5, inplace=False):
+ super().__init__(p=drop_prob, inplace=inplace)
+
+
+def build_dropout(cfg, default_args=None):
+ """Builder for drop out layers."""
+ return build_from_cfg(cfg, DROPOUT_LAYERS, default_args)
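+
+
+# Minimal usage sketch (illustrative): DropPath only takes effect in training
+# mode; in eval mode it is the identity mapping.
+if __name__ == '__main__':
+    layer = build_dropout(dict(type='DropPath', drop_prob=0.2))
+    x = torch.rand(4, 8)
+    layer.eval()
+    assert torch.equal(layer(x), x)  # identity at inference time
+    layer.train()  # stochastic depth becomes active again in training mode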
diff --git a/annotator/uniformer/mmcv/cnn/bricks/generalized_attention.py b/annotator/uniformer/mmcv/cnn/bricks/generalized_attention.py
new file mode 100644
index 0000000000000000000000000000000000000000..988d9adf2f289ef223bd1c680a5ae1d3387f0269
--- /dev/null
+++ b/annotator/uniformer/mmcv/cnn/bricks/generalized_attention.py
@@ -0,0 +1,412 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import math
+
+import numpy as np
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+from ..utils import kaiming_init
+from .registry import PLUGIN_LAYERS
+
+
+@PLUGIN_LAYERS.register_module()
+class GeneralizedAttention(nn.Module):
+ """GeneralizedAttention module.
+
+ See 'An Empirical Study of Spatial Attention Mechanisms in Deep Networks'
+    (https://arxiv.org/abs/1904.05873) for details.
+
+ Args:
+ in_channels (int): Channels of the input feature map.
+ spatial_range (int): The spatial range. -1 indicates no spatial range
+ constraint. Default: -1.
+ num_heads (int): The head number of empirical_attention module.
+ Default: 9.
+ position_embedding_dim (int): The position embedding dimension.
+ Default: -1.
+ position_magnitude (int): A multiplier acting on coord difference.
+ Default: 1.
+ kv_stride (int): The feature stride acting on key/value feature map.
+ Default: 2.
+ q_stride (int): The feature stride acting on query feature map.
+ Default: 1.
+ attention_type (str): A binary indicator string for indicating which
+ items in generalized empirical_attention module are used.
+ Default: '1111'.
+
+ - '1000' indicates 'query and key content' (appr - appr) item,
+ - '0100' indicates 'query content and relative position'
+ (appr - position) item,
+ - '0010' indicates 'key content only' (bias - appr) item,
+ - '0001' indicates 'relative position only' (bias - position) item.
+ """
+
+ _abbr_ = 'gen_attention_block'
+
+ def __init__(self,
+ in_channels,
+ spatial_range=-1,
+ num_heads=9,
+ position_embedding_dim=-1,
+ position_magnitude=1,
+ kv_stride=2,
+ q_stride=1,
+ attention_type='1111'):
+
+ super(GeneralizedAttention, self).__init__()
+
+ # hard range means local range for non-local operation
+ self.position_embedding_dim = (
+ position_embedding_dim
+ if position_embedding_dim > 0 else in_channels)
+
+ self.position_magnitude = position_magnitude
+ self.num_heads = num_heads
+ self.in_channels = in_channels
+ self.spatial_range = spatial_range
+ self.kv_stride = kv_stride
+ self.q_stride = q_stride
+ self.attention_type = [bool(int(_)) for _ in attention_type]
+ self.qk_embed_dim = in_channels // num_heads
+ out_c = self.qk_embed_dim * num_heads
+
+ if self.attention_type[0] or self.attention_type[1]:
+ self.query_conv = nn.Conv2d(
+ in_channels=in_channels,
+ out_channels=out_c,
+ kernel_size=1,
+ bias=False)
+ self.query_conv.kaiming_init = True
+
+ if self.attention_type[0] or self.attention_type[2]:
+ self.key_conv = nn.Conv2d(
+ in_channels=in_channels,
+ out_channels=out_c,
+ kernel_size=1,
+ bias=False)
+ self.key_conv.kaiming_init = True
+
+ self.v_dim = in_channels // num_heads
+ self.value_conv = nn.Conv2d(
+ in_channels=in_channels,
+ out_channels=self.v_dim * num_heads,
+ kernel_size=1,
+ bias=False)
+ self.value_conv.kaiming_init = True
+
+ if self.attention_type[1] or self.attention_type[3]:
+ self.appr_geom_fc_x = nn.Linear(
+ self.position_embedding_dim // 2, out_c, bias=False)
+ self.appr_geom_fc_x.kaiming_init = True
+
+ self.appr_geom_fc_y = nn.Linear(
+ self.position_embedding_dim // 2, out_c, bias=False)
+ self.appr_geom_fc_y.kaiming_init = True
+
+ if self.attention_type[2]:
+ stdv = 1.0 / math.sqrt(self.qk_embed_dim * 2)
+ appr_bias_value = -2 * stdv * torch.rand(out_c) + stdv
+ self.appr_bias = nn.Parameter(appr_bias_value)
+
+ if self.attention_type[3]:
+ stdv = 1.0 / math.sqrt(self.qk_embed_dim * 2)
+ geom_bias_value = -2 * stdv * torch.rand(out_c) + stdv
+ self.geom_bias = nn.Parameter(geom_bias_value)
+
+ self.proj_conv = nn.Conv2d(
+ in_channels=self.v_dim * num_heads,
+ out_channels=in_channels,
+ kernel_size=1,
+ bias=True)
+ self.proj_conv.kaiming_init = True
+ self.gamma = nn.Parameter(torch.zeros(1))
+
+ if self.spatial_range >= 0:
+ # only works when non local is after 3*3 conv
+ if in_channels == 256:
+ max_len = 84
+ elif in_channels == 512:
+ max_len = 42
+
+ max_len_kv = int((max_len - 1.0) / self.kv_stride + 1)
+ local_constraint_map = np.ones(
+                (max_len, max_len, max_len_kv, max_len_kv), dtype=int)
+ for iy in range(max_len):
+ for ix in range(max_len):
+ local_constraint_map[
+ iy, ix,
+ max((iy - self.spatial_range) //
+ self.kv_stride, 0):min((iy + self.spatial_range +
+ 1) // self.kv_stride +
+ 1, max_len),
+ max((ix - self.spatial_range) //
+ self.kv_stride, 0):min((ix + self.spatial_range +
+ 1) // self.kv_stride +
+ 1, max_len)] = 0
+
+ self.local_constraint_map = nn.Parameter(
+ torch.from_numpy(local_constraint_map).byte(),
+ requires_grad=False)
+
+ if self.q_stride > 1:
+ self.q_downsample = nn.AvgPool2d(
+ kernel_size=1, stride=self.q_stride)
+ else:
+ self.q_downsample = None
+
+ if self.kv_stride > 1:
+ self.kv_downsample = nn.AvgPool2d(
+ kernel_size=1, stride=self.kv_stride)
+ else:
+ self.kv_downsample = None
+
+ self.init_weights()
+
+ def get_position_embedding(self,
+ h,
+ w,
+ h_kv,
+ w_kv,
+ q_stride,
+ kv_stride,
+ device,
+ dtype,
+ feat_dim,
+ wave_length=1000):
+ # the default type of Tensor is float32, leading to type mismatch
+ # in fp16 mode. Cast it to support fp16 mode.
+ h_idxs = torch.linspace(0, h - 1, h).to(device=device, dtype=dtype)
+ h_idxs = h_idxs.view((h, 1)) * q_stride
+
+ w_idxs = torch.linspace(0, w - 1, w).to(device=device, dtype=dtype)
+ w_idxs = w_idxs.view((w, 1)) * q_stride
+
+ h_kv_idxs = torch.linspace(0, h_kv - 1, h_kv).to(
+ device=device, dtype=dtype)
+ h_kv_idxs = h_kv_idxs.view((h_kv, 1)) * kv_stride
+
+ w_kv_idxs = torch.linspace(0, w_kv - 1, w_kv).to(
+ device=device, dtype=dtype)
+ w_kv_idxs = w_kv_idxs.view((w_kv, 1)) * kv_stride
+
+ # (h, h_kv, 1)
+ h_diff = h_idxs.unsqueeze(1) - h_kv_idxs.unsqueeze(0)
+ h_diff *= self.position_magnitude
+
+ # (w, w_kv, 1)
+ w_diff = w_idxs.unsqueeze(1) - w_kv_idxs.unsqueeze(0)
+ w_diff *= self.position_magnitude
+
+ feat_range = torch.arange(0, feat_dim / 4).to(
+ device=device, dtype=dtype)
+
+ dim_mat = torch.Tensor([wave_length]).to(device=device, dtype=dtype)
+ dim_mat = dim_mat**((4. / feat_dim) * feat_range)
+ dim_mat = dim_mat.view((1, 1, -1))
+
+ embedding_x = torch.cat(
+ ((w_diff / dim_mat).sin(), (w_diff / dim_mat).cos()), dim=2)
+
+ embedding_y = torch.cat(
+ ((h_diff / dim_mat).sin(), (h_diff / dim_mat).cos()), dim=2)
+
+ return embedding_x, embedding_y
+
+ def forward(self, x_input):
+ num_heads = self.num_heads
+
+ # use empirical_attention
+ if self.q_downsample is not None:
+ x_q = self.q_downsample(x_input)
+ else:
+ x_q = x_input
+ n, _, h, w = x_q.shape
+
+ if self.kv_downsample is not None:
+ x_kv = self.kv_downsample(x_input)
+ else:
+ x_kv = x_input
+ _, _, h_kv, w_kv = x_kv.shape
+
+ if self.attention_type[0] or self.attention_type[1]:
+ proj_query = self.query_conv(x_q).view(
+ (n, num_heads, self.qk_embed_dim, h * w))
+ proj_query = proj_query.permute(0, 1, 3, 2)
+
+ if self.attention_type[0] or self.attention_type[2]:
+ proj_key = self.key_conv(x_kv).view(
+ (n, num_heads, self.qk_embed_dim, h_kv * w_kv))
+
+ if self.attention_type[1] or self.attention_type[3]:
+ position_embed_x, position_embed_y = self.get_position_embedding(
+ h, w, h_kv, w_kv, self.q_stride, self.kv_stride,
+ x_input.device, x_input.dtype, self.position_embedding_dim)
+ # (n, num_heads, w, w_kv, dim)
+ position_feat_x = self.appr_geom_fc_x(position_embed_x).\
+ view(1, w, w_kv, num_heads, self.qk_embed_dim).\
+ permute(0, 3, 1, 2, 4).\
+ repeat(n, 1, 1, 1, 1)
+
+ # (n, num_heads, h, h_kv, dim)
+ position_feat_y = self.appr_geom_fc_y(position_embed_y).\
+ view(1, h, h_kv, num_heads, self.qk_embed_dim).\
+ permute(0, 3, 1, 2, 4).\
+ repeat(n, 1, 1, 1, 1)
+
+ position_feat_x /= math.sqrt(2)
+ position_feat_y /= math.sqrt(2)
+
+ # accelerate for saliency only
+ if (np.sum(self.attention_type) == 1) and self.attention_type[2]:
+ appr_bias = self.appr_bias.\
+ view(1, num_heads, 1, self.qk_embed_dim).\
+ repeat(n, 1, 1, 1)
+
+ energy = torch.matmul(appr_bias, proj_key).\
+ view(n, num_heads, 1, h_kv * w_kv)
+
+ h = 1
+ w = 1
+ else:
+ # (n, num_heads, h*w, h_kv*w_kv), query before key, 540mb for
+ if not self.attention_type[0]:
+ energy = torch.zeros(
+ n,
+ num_heads,
+ h,
+ w,
+ h_kv,
+ w_kv,
+ dtype=x_input.dtype,
+ device=x_input.device)
+
+ # attention_type[0]: appr - appr
+ # attention_type[1]: appr - position
+ # attention_type[2]: bias - appr
+ # attention_type[3]: bias - position
+ if self.attention_type[0] or self.attention_type[2]:
+ if self.attention_type[0] and self.attention_type[2]:
+ appr_bias = self.appr_bias.\
+ view(1, num_heads, 1, self.qk_embed_dim)
+ energy = torch.matmul(proj_query + appr_bias, proj_key).\
+ view(n, num_heads, h, w, h_kv, w_kv)
+
+ elif self.attention_type[0]:
+ energy = torch.matmul(proj_query, proj_key).\
+ view(n, num_heads, h, w, h_kv, w_kv)
+
+ elif self.attention_type[2]:
+ appr_bias = self.appr_bias.\
+ view(1, num_heads, 1, self.qk_embed_dim).\
+ repeat(n, 1, 1, 1)
+
+ energy += torch.matmul(appr_bias, proj_key).\
+ view(n, num_heads, 1, 1, h_kv, w_kv)
+
+ if self.attention_type[1] or self.attention_type[3]:
+ if self.attention_type[1] and self.attention_type[3]:
+ geom_bias = self.geom_bias.\
+ view(1, num_heads, 1, self.qk_embed_dim)
+
+ proj_query_reshape = (proj_query + geom_bias).\
+ view(n, num_heads, h, w, self.qk_embed_dim)
+
+ energy_x = torch.matmul(
+ proj_query_reshape.permute(0, 1, 3, 2, 4),
+ position_feat_x.permute(0, 1, 2, 4, 3))
+ energy_x = energy_x.\
+ permute(0, 1, 3, 2, 4).unsqueeze(4)
+
+ energy_y = torch.matmul(
+ proj_query_reshape,
+ position_feat_y.permute(0, 1, 2, 4, 3))
+ energy_y = energy_y.unsqueeze(5)
+
+ energy += energy_x + energy_y
+
+ elif self.attention_type[1]:
+ proj_query_reshape = proj_query.\
+ view(n, num_heads, h, w, self.qk_embed_dim)
+ proj_query_reshape = proj_query_reshape.\
+ permute(0, 1, 3, 2, 4)
+ position_feat_x_reshape = position_feat_x.\
+ permute(0, 1, 2, 4, 3)
+ position_feat_y_reshape = position_feat_y.\
+ permute(0, 1, 2, 4, 3)
+
+ energy_x = torch.matmul(proj_query_reshape,
+ position_feat_x_reshape)
+ energy_x = energy_x.permute(0, 1, 3, 2, 4).unsqueeze(4)
+
+ energy_y = torch.matmul(proj_query_reshape,
+ position_feat_y_reshape)
+ energy_y = energy_y.unsqueeze(5)
+
+ energy += energy_x + energy_y
+
+ elif self.attention_type[3]:
+ geom_bias = self.geom_bias.\
+ view(1, num_heads, self.qk_embed_dim, 1).\
+ repeat(n, 1, 1, 1)
+
+ position_feat_x_reshape = position_feat_x.\
+ view(n, num_heads, w*w_kv, self.qk_embed_dim)
+
+ position_feat_y_reshape = position_feat_y.\
+ view(n, num_heads, h * h_kv, self.qk_embed_dim)
+
+ energy_x = torch.matmul(position_feat_x_reshape, geom_bias)
+ energy_x = energy_x.view(n, num_heads, 1, w, 1, w_kv)
+
+ energy_y = torch.matmul(position_feat_y_reshape, geom_bias)
+ energy_y = energy_y.view(n, num_heads, h, 1, h_kv, 1)
+
+ energy += energy_x + energy_y
+
+ energy = energy.view(n, num_heads, h * w, h_kv * w_kv)
+
+ if self.spatial_range >= 0:
+ cur_local_constraint_map = \
+ self.local_constraint_map[:h, :w, :h_kv, :w_kv].\
+ contiguous().\
+ view(1, 1, h*w, h_kv*w_kv)
+
+ energy = energy.masked_fill_(cur_local_constraint_map,
+ float('-inf'))
+
+ attention = F.softmax(energy, 3)
+
+ proj_value = self.value_conv(x_kv)
+ proj_value_reshape = proj_value.\
+ view((n, num_heads, self.v_dim, h_kv * w_kv)).\
+ permute(0, 1, 3, 2)
+
+ out = torch.matmul(attention, proj_value_reshape).\
+ permute(0, 1, 3, 2).\
+ contiguous().\
+ view(n, self.v_dim * self.num_heads, h, w)
+
+ out = self.proj_conv(out)
+
+ # output is downsampled, upsample back to input size
+ if self.q_downsample is not None:
+ out = F.interpolate(
+ out,
+ size=x_input.shape[2:],
+ mode='bilinear',
+ align_corners=False)
+
+ out = self.gamma * out + x_input
+ return out
+
+ def init_weights(self):
+ for m in self.modules():
+ if hasattr(m, 'kaiming_init') and m.kaiming_init:
+ kaiming_init(
+ m,
+ mode='fan_in',
+ nonlinearity='leaky_relu',
+ bias=0,
+ distribution='uniform',
+ a=1)
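+
+
+# Minimal usage sketch (illustrative): the module is a residual attention
+# block, so its output shape matches its input shape. With the default
+# spatial_range=-1 no local constraint map is allocated. The channel and
+# head counts below are arbitrary examples.
+if __name__ == '__main__':
+    attn = GeneralizedAttention(in_channels=64, num_heads=8)
+    x = torch.rand(1, 64, 16, 16)
+    assert attn(x).shape == x.shape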
diff --git a/annotator/uniformer/mmcv/cnn/bricks/hsigmoid.py b/annotator/uniformer/mmcv/cnn/bricks/hsigmoid.py
new file mode 100644
index 0000000000000000000000000000000000000000..30b1a3d6580cf0360710426fbea1f05acdf07b4b
--- /dev/null
+++ b/annotator/uniformer/mmcv/cnn/bricks/hsigmoid.py
@@ -0,0 +1,34 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import torch.nn as nn
+
+from .registry import ACTIVATION_LAYERS
+
+
+@ACTIVATION_LAYERS.register_module()
+class HSigmoid(nn.Module):
+ """Hard Sigmoid Module. Apply the hard sigmoid function:
+ Hsigmoid(x) = min(max((x + bias) / divisor, min_value), max_value)
+ Default: Hsigmoid(x) = min(max((x + 1) / 2, 0), 1)
+
+ Args:
+ bias (float): Bias of the input feature map. Default: 1.0.
+ divisor (float): Divisor of the input feature map. Default: 2.0.
+ min_value (float): Lower bound value. Default: 0.0.
+ max_value (float): Upper bound value. Default: 1.0.
+
+ Returns:
+ Tensor: The output tensor.
+ """
+
+ def __init__(self, bias=1.0, divisor=2.0, min_value=0.0, max_value=1.0):
+ super(HSigmoid, self).__init__()
+ self.bias = bias
+ self.divisor = divisor
+ assert self.divisor != 0
+ self.min_value = min_value
+ self.max_value = max_value
+
+ def forward(self, x):
+ x = (x + self.bias) / self.divisor
+
+ return x.clamp_(self.min_value, self.max_value)
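+
+
+# Minimal usage sketch (illustrative): with the default bias/divisor, the
+# hard sigmoid maps -1 -> 0, 0 -> 0.5 and 1 -> 1.
+if __name__ == '__main__':
+    import torch
+    act = HSigmoid()
+    out = act(torch.tensor([-3., -1., 0., 1., 3.]))
+    assert torch.allclose(out, torch.tensor([0., 0., 0.5, 1., 1.]))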
diff --git a/annotator/uniformer/mmcv/cnn/bricks/hswish.py b/annotator/uniformer/mmcv/cnn/bricks/hswish.py
new file mode 100644
index 0000000000000000000000000000000000000000..7e0c090ff037c99ee6c5c84c4592e87beae02208
--- /dev/null
+++ b/annotator/uniformer/mmcv/cnn/bricks/hswish.py
@@ -0,0 +1,29 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import torch.nn as nn
+
+from .registry import ACTIVATION_LAYERS
+
+
+@ACTIVATION_LAYERS.register_module()
+class HSwish(nn.Module):
+ """Hard Swish Module.
+
+ This module applies the hard swish function:
+
+ .. math::
+ Hswish(x) = x * ReLU6(x + 3) / 6
+
+ Args:
+ inplace (bool): can optionally do the operation in-place.
+ Default: False.
+
+ Returns:
+ Tensor: The output tensor.
+ """
+
+ def __init__(self, inplace=False):
+ super(HSwish, self).__init__()
+ self.act = nn.ReLU6(inplace)
+
+ def forward(self, x):
+ return x * self.act(x + 3) / 6
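The formula above matches PyTorch's built-in hard swish; a small equivalence check (assuming a PyTorch version that ships nn.Hardswish, i.e. >= 1.6):

import torch
import torch.nn as nn
import torch.nn.functional as F

x = torch.linspace(-5, 5, steps=11)
manual = x * F.relu6(x + 3) / 6          # the HSwish formula above
builtin = nn.Hardswish()(x)              # PyTorch's own implementation
print(torch.allclose(manual, builtin))   # True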
diff --git a/annotator/uniformer/mmcv/cnn/bricks/non_local.py b/annotator/uniformer/mmcv/cnn/bricks/non_local.py
new file mode 100644
index 0000000000000000000000000000000000000000..92d00155ef275c1201ea66bba30470a1785cc5d7
--- /dev/null
+++ b/annotator/uniformer/mmcv/cnn/bricks/non_local.py
@@ -0,0 +1,306 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from abc import ABCMeta
+
+import torch
+import torch.nn as nn
+
+from ..utils import constant_init, normal_init
+from .conv_module import ConvModule
+from .registry import PLUGIN_LAYERS
+
+
+class _NonLocalNd(nn.Module, metaclass=ABCMeta):
+ """Basic Non-local module.
+
+ This module is proposed in
+ "Non-local Neural Networks"
+ Paper reference: https://arxiv.org/abs/1711.07971
+ Code reference: https://github.com/AlexHex7/Non-local_pytorch
+
+ Args:
+ in_channels (int): Channels of the input feature map.
+ reduction (int): Channel reduction ratio. Default: 2.
+ use_scale (bool): Whether to scale pairwise_weight by
+ `1/sqrt(inter_channels)` when the mode is `embedded_gaussian`.
+ Default: True.
+ conv_cfg (None | dict): The config dict for convolution layers.
+ If not specified, it will use `nn.Conv2d` for convolution layers.
+ Default: None.
+ norm_cfg (None | dict): The config dict for normalization layers.
+ Default: None. (This parameter is only applicable to conv_out.)
+ mode (str): Options are `gaussian`, `concatenation`,
+ `embedded_gaussian` and `dot_product`. Default: embedded_gaussian.
+ """
+
+ def __init__(self,
+ in_channels,
+ reduction=2,
+ use_scale=True,
+ conv_cfg=None,
+ norm_cfg=None,
+ mode='embedded_gaussian',
+ **kwargs):
+ super(_NonLocalNd, self).__init__()
+ self.in_channels = in_channels
+ self.reduction = reduction
+ self.use_scale = use_scale
+ self.inter_channels = max(in_channels // reduction, 1)
+ self.mode = mode
+
+ if mode not in [
+ 'gaussian', 'embedded_gaussian', 'dot_product', 'concatenation'
+ ]:
+ raise ValueError("Mode should be in 'gaussian', 'concatenation', "
+ f"'embedded_gaussian' or 'dot_product', but got "
+ f'{mode} instead.')
+
+ # g, theta, phi are defaulted as `nn.ConvNd`.
+ # Here we use ConvModule for potential usage.
+ self.g = ConvModule(
+ self.in_channels,
+ self.inter_channels,
+ kernel_size=1,
+ conv_cfg=conv_cfg,
+ act_cfg=None)
+ self.conv_out = ConvModule(
+ self.inter_channels,
+ self.in_channels,
+ kernel_size=1,
+ conv_cfg=conv_cfg,
+ norm_cfg=norm_cfg,
+ act_cfg=None)
+
+ if self.mode != 'gaussian':
+ self.theta = ConvModule(
+ self.in_channels,
+ self.inter_channels,
+ kernel_size=1,
+ conv_cfg=conv_cfg,
+ act_cfg=None)
+ self.phi = ConvModule(
+ self.in_channels,
+ self.inter_channels,
+ kernel_size=1,
+ conv_cfg=conv_cfg,
+ act_cfg=None)
+
+ if self.mode == 'concatenation':
+ self.concat_project = ConvModule(
+ self.inter_channels * 2,
+ 1,
+ kernel_size=1,
+ stride=1,
+ padding=0,
+ bias=False,
+ act_cfg=dict(type='ReLU'))
+
+ self.init_weights(**kwargs)
+
+ def init_weights(self, std=0.01, zeros_init=True):
+ if self.mode != 'gaussian':
+ for m in [self.g, self.theta, self.phi]:
+ normal_init(m.conv, std=std)
+ else:
+ normal_init(self.g.conv, std=std)
+ if zeros_init:
+ if self.conv_out.norm_cfg is None:
+ constant_init(self.conv_out.conv, 0)
+ else:
+ constant_init(self.conv_out.norm, 0)
+ else:
+ if self.conv_out.norm_cfg is None:
+ normal_init(self.conv_out.conv, std=std)
+ else:
+ normal_init(self.conv_out.norm, std=std)
+
+ def gaussian(self, theta_x, phi_x):
+ # NonLocal1d pairwise_weight: [N, H, H]
+ # NonLocal2d pairwise_weight: [N, HxW, HxW]
+ # NonLocal3d pairwise_weight: [N, TxHxW, TxHxW]
+ pairwise_weight = torch.matmul(theta_x, phi_x)
+ pairwise_weight = pairwise_weight.softmax(dim=-1)
+ return pairwise_weight
+
+ def embedded_gaussian(self, theta_x, phi_x):
+ # NonLocal1d pairwise_weight: [N, H, H]
+ # NonLocal2d pairwise_weight: [N, HxW, HxW]
+ # NonLocal3d pairwise_weight: [N, TxHxW, TxHxW]
+ pairwise_weight = torch.matmul(theta_x, phi_x)
+ if self.use_scale:
+ # theta_x.shape[-1] is `self.inter_channels`
+ pairwise_weight /= theta_x.shape[-1]**0.5
+ pairwise_weight = pairwise_weight.softmax(dim=-1)
+ return pairwise_weight
+
+ def dot_product(self, theta_x, phi_x):
+ # NonLocal1d pairwise_weight: [N, H, H]
+ # NonLocal2d pairwise_weight: [N, HxW, HxW]
+ # NonLocal3d pairwise_weight: [N, TxHxW, TxHxW]
+ pairwise_weight = torch.matmul(theta_x, phi_x)
+ pairwise_weight /= pairwise_weight.shape[-1]
+ return pairwise_weight
+
+ def concatenation(self, theta_x, phi_x):
+ # NonLocal1d pairwise_weight: [N, H, H]
+ # NonLocal2d pairwise_weight: [N, HxW, HxW]
+ # NonLocal3d pairwise_weight: [N, TxHxW, TxHxW]
+ h = theta_x.size(2)
+ w = phi_x.size(3)
+ theta_x = theta_x.repeat(1, 1, 1, w)
+ phi_x = phi_x.repeat(1, 1, h, 1)
+
+ concat_feature = torch.cat([theta_x, phi_x], dim=1)
+ pairwise_weight = self.concat_project(concat_feature)
+ n, _, h, w = pairwise_weight.size()
+ pairwise_weight = pairwise_weight.view(n, h, w)
+ pairwise_weight /= pairwise_weight.shape[-1]
+
+ return pairwise_weight
+
+ def forward(self, x):
+ # Assume `reduction = 1`, then `inter_channels = C`
+ # or `inter_channels = C` when `mode="gaussian"`
+
+ # NonLocal1d x: [N, C, H]
+ # NonLocal2d x: [N, C, H, W]
+ # NonLocal3d x: [N, C, T, H, W]
+ n = x.size(0)
+
+ # NonLocal1d g_x: [N, H, C]
+ # NonLocal2d g_x: [N, HxW, C]
+ # NonLocal3d g_x: [N, TxHxW, C]
+ g_x = self.g(x).view(n, self.inter_channels, -1)
+ g_x = g_x.permute(0, 2, 1)
+
+ # NonLocal1d theta_x: [N, H, C], phi_x: [N, C, H]
+ # NonLocal2d theta_x: [N, HxW, C], phi_x: [N, C, HxW]
+ # NonLocal3d theta_x: [N, TxHxW, C], phi_x: [N, C, TxHxW]
+ if self.mode == 'gaussian':
+ theta_x = x.view(n, self.in_channels, -1)
+ theta_x = theta_x.permute(0, 2, 1)
+ if self.sub_sample:
+ phi_x = self.phi(x).view(n, self.in_channels, -1)
+ else:
+ phi_x = x.view(n, self.in_channels, -1)
+ elif self.mode == 'concatenation':
+ theta_x = self.theta(x).view(n, self.inter_channels, -1, 1)
+ phi_x = self.phi(x).view(n, self.inter_channels, 1, -1)
+ else:
+ theta_x = self.theta(x).view(n, self.inter_channels, -1)
+ theta_x = theta_x.permute(0, 2, 1)
+ phi_x = self.phi(x).view(n, self.inter_channels, -1)
+
+ pairwise_func = getattr(self, self.mode)
+ # NonLocal1d pairwise_weight: [N, H, H]
+ # NonLocal2d pairwise_weight: [N, HxW, HxW]
+ # NonLocal3d pairwise_weight: [N, TxHxW, TxHxW]
+ pairwise_weight = pairwise_func(theta_x, phi_x)
+
+ # NonLocal1d y: [N, H, C]
+ # NonLocal2d y: [N, HxW, C]
+ # NonLocal3d y: [N, TxHxW, C]
+ y = torch.matmul(pairwise_weight, g_x)
+ # NonLocal1d y: [N, C, H]
+ # NonLocal2d y: [N, C, H, W]
+ # NonLocal3d y: [N, C, T, H, W]
+ y = y.permute(0, 2, 1).contiguous().reshape(n, self.inter_channels,
+ *x.size()[2:])
+
+ output = x + self.conv_out(y)
+
+ return output
+
+
+class NonLocal1d(_NonLocalNd):
+ """1D Non-local module.
+
+ Args:
+ in_channels (int): Same as `NonLocalND`.
+ sub_sample (bool): Whether to apply max pooling after pairwise
+ function (Note that the `sub_sample` is applied on spatial only).
+ Default: False.
+ conv_cfg (None | dict): Same as `NonLocalND`.
+ Default: dict(type='Conv1d').
+ """
+
+ def __init__(self,
+ in_channels,
+ sub_sample=False,
+ conv_cfg=dict(type='Conv1d'),
+ **kwargs):
+ super(NonLocal1d, self).__init__(
+ in_channels, conv_cfg=conv_cfg, **kwargs)
+
+ self.sub_sample = sub_sample
+
+ if sub_sample:
+ max_pool_layer = nn.MaxPool1d(kernel_size=2)
+ self.g = nn.Sequential(self.g, max_pool_layer)
+ if self.mode != 'gaussian':
+ self.phi = nn.Sequential(self.phi, max_pool_layer)
+ else:
+ self.phi = max_pool_layer
+
+
+@PLUGIN_LAYERS.register_module()
+class NonLocal2d(_NonLocalNd):
+ """2D Non-local module.
+
+ Args:
+ in_channels (int): Same as `NonLocalND`.
+ sub_sample (bool): Whether to apply max pooling after pairwise
+ function (Note that the `sub_sample` is applied on spatial only).
+ Default: False.
+ conv_cfg (None | dict): Same as `NonLocalND`.
+ Default: dict(type='Conv2d').
+ """
+
+ _abbr_ = 'nonlocal_block'
+
+ def __init__(self,
+ in_channels,
+ sub_sample=False,
+ conv_cfg=dict(type='Conv2d'),
+ **kwargs):
+ super(NonLocal2d, self).__init__(
+ in_channels, conv_cfg=conv_cfg, **kwargs)
+
+ self.sub_sample = sub_sample
+
+ if sub_sample:
+ max_pool_layer = nn.MaxPool2d(kernel_size=(2, 2))
+ self.g = nn.Sequential(self.g, max_pool_layer)
+ if self.mode != 'gaussian':
+ self.phi = nn.Sequential(self.phi, max_pool_layer)
+ else:
+ self.phi = max_pool_layer
+
+
+class NonLocal3d(_NonLocalNd):
+ """3D Non-local module.
+
+ Args:
+ in_channels (int): Same as `NonLocalND`.
+ sub_sample (bool): Whether to apply max pooling after pairwise
+ function (Note that the `sub_sample` is applied on spatial only).
+ Default: False.
+ conv_cfg (None | dict): Same as `NonLocalND`.
+ Default: dict(type='Conv3d').
+ """
+
+ def __init__(self,
+ in_channels,
+ sub_sample=False,
+ conv_cfg=dict(type='Conv3d'),
+ **kwargs):
+ super(NonLocal3d, self).__init__(
+ in_channels, conv_cfg=conv_cfg, **kwargs)
+ self.sub_sample = sub_sample
+
+ if sub_sample:
+ max_pool_layer = nn.MaxPool3d(kernel_size=(1, 2, 2))
+ self.g = nn.Sequential(self.g, max_pool_layer)
+ if self.mode != 'gaussian':
+ self.phi = nn.Sequential(self.phi, max_pool_layer)
+ else:
+ self.phi = max_pool_layer
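The heart of the `embedded_gaussian` mode is an (optionally scaled) softmax over theta·phi followed by a weighted sum over g; a minimal standalone sketch with made-up tensor sizes:

# Minimal sketch of the embedded_gaussian pairwise weight for a 2D input,
# written with plain torch tensors (the sizes below are illustrative only).
import torch

n, c_inter, h, w = 2, 8, 16, 16
theta_x = torch.randn(n, h * w, c_inter)        # [N, HxW, C']
phi_x = torch.randn(n, c_inter, h * w)          # [N, C', HxW]
g_x = torch.randn(n, h * w, c_inter)            # [N, HxW, C']

pairwise = torch.matmul(theta_x, phi_x)         # [N, HxW, HxW]
pairwise = pairwise / theta_x.shape[-1] ** 0.5  # use_scale=True
pairwise = pairwise.softmax(dim=-1)

y = torch.matmul(pairwise, g_x)                 # [N, HxW, C']
y = y.permute(0, 2, 1).reshape(n, c_inter, h, w)
print(y.shape)  # torch.Size([2, 8, 16, 16])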
diff --git a/annotator/uniformer/mmcv/cnn/bricks/norm.py b/annotator/uniformer/mmcv/cnn/bricks/norm.py
new file mode 100644
index 0000000000000000000000000000000000000000..408f4b42731b19a3beeef68b6a5e610d0bbc18b3
--- /dev/null
+++ b/annotator/uniformer/mmcv/cnn/bricks/norm.py
@@ -0,0 +1,144 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import inspect
+
+import torch.nn as nn
+
+from annotator.uniformer.mmcv.utils import is_tuple_of
+from annotator.uniformer.mmcv.utils.parrots_wrapper import SyncBatchNorm, _BatchNorm, _InstanceNorm
+from .registry import NORM_LAYERS
+
+NORM_LAYERS.register_module('BN', module=nn.BatchNorm2d)
+NORM_LAYERS.register_module('BN1d', module=nn.BatchNorm1d)
+NORM_LAYERS.register_module('BN2d', module=nn.BatchNorm2d)
+NORM_LAYERS.register_module('BN3d', module=nn.BatchNorm3d)
+NORM_LAYERS.register_module('SyncBN', module=SyncBatchNorm)
+NORM_LAYERS.register_module('GN', module=nn.GroupNorm)
+NORM_LAYERS.register_module('LN', module=nn.LayerNorm)
+NORM_LAYERS.register_module('IN', module=nn.InstanceNorm2d)
+NORM_LAYERS.register_module('IN1d', module=nn.InstanceNorm1d)
+NORM_LAYERS.register_module('IN2d', module=nn.InstanceNorm2d)
+NORM_LAYERS.register_module('IN3d', module=nn.InstanceNorm3d)
+
+
+def infer_abbr(class_type):
+ """Infer abbreviation from the class name.
+
+ When we build a norm layer with `build_norm_layer()`, we want to preserve
+    the norm type in variable names, e.g., self.bn1, self.gn. This method will
+ infer the abbreviation to map class types to abbreviations.
+
+ Rule 1: If the class has the property "_abbr_", return the property.
+ Rule 2: If the parent class is _BatchNorm, GroupNorm, LayerNorm or
+ InstanceNorm, the abbreviation of this layer will be "bn", "gn", "ln" and
+ "in" respectively.
+ Rule 3: If the class name contains "batch", "group", "layer" or "instance",
+ the abbreviation of this layer will be "bn", "gn", "ln" and "in"
+ respectively.
+    Rule 4: Otherwise, the abbreviation falls back to "norm_layer".
+
+ Args:
+ class_type (type): The norm layer type.
+
+ Returns:
+ str: The inferred abbreviation.
+ """
+ if not inspect.isclass(class_type):
+ raise TypeError(
+ f'class_type must be a type, but got {type(class_type)}')
+ if hasattr(class_type, '_abbr_'):
+ return class_type._abbr_
+ if issubclass(class_type, _InstanceNorm): # IN is a subclass of BN
+ return 'in'
+ elif issubclass(class_type, _BatchNorm):
+ return 'bn'
+ elif issubclass(class_type, nn.GroupNorm):
+ return 'gn'
+ elif issubclass(class_type, nn.LayerNorm):
+ return 'ln'
+ else:
+ class_name = class_type.__name__.lower()
+ if 'batch' in class_name:
+ return 'bn'
+ elif 'group' in class_name:
+ return 'gn'
+ elif 'layer' in class_name:
+ return 'ln'
+ elif 'instance' in class_name:
+ return 'in'
+ else:
+ return 'norm_layer'
+
+
+def build_norm_layer(cfg, num_features, postfix=''):
+ """Build normalization layer.
+
+ Args:
+ cfg (dict): The norm layer config, which should contain:
+
+ - type (str): Layer type.
+ - layer args: Args needed to instantiate a norm layer.
+            - requires_grad (bool, optional): Whether to stop gradient updates.
+ num_features (int): Number of input channels.
+ postfix (int | str): The postfix to be appended into norm abbreviation
+ to create named layer.
+
+ Returns:
+ (str, nn.Module): The first element is the layer name consisting of
+ abbreviation and postfix, e.g., bn1, gn. The second element is the
+ created norm layer.
+ """
+ if not isinstance(cfg, dict):
+ raise TypeError('cfg must be a dict')
+ if 'type' not in cfg:
+ raise KeyError('the cfg dict must contain the key "type"')
+ cfg_ = cfg.copy()
+
+ layer_type = cfg_.pop('type')
+ if layer_type not in NORM_LAYERS:
+ raise KeyError(f'Unrecognized norm type {layer_type}')
+
+ norm_layer = NORM_LAYERS.get(layer_type)
+ abbr = infer_abbr(norm_layer)
+
+ assert isinstance(postfix, (int, str))
+ name = abbr + str(postfix)
+
+ requires_grad = cfg_.pop('requires_grad', True)
+ cfg_.setdefault('eps', 1e-5)
+ if layer_type != 'GN':
+ layer = norm_layer(num_features, **cfg_)
+ if layer_type == 'SyncBN' and hasattr(layer, '_specify_ddp_gpu_num'):
+ layer._specify_ddp_gpu_num(1)
+ else:
+ assert 'num_groups' in cfg_
+ layer = norm_layer(num_channels=num_features, **cfg_)
+
+ for param in layer.parameters():
+ param.requires_grad = requires_grad
+
+ return name, layer
+
+
+def is_norm(layer, exclude=None):
+ """Check if a layer is a normalization layer.
+
+ Args:
+ layer (nn.Module): The layer to be checked.
+ exclude (type | tuple[type]): Types to be excluded.
+
+ Returns:
+ bool: Whether the layer is a norm layer.
+ """
+ if exclude is not None:
+ if not isinstance(exclude, tuple):
+ exclude = (exclude, )
+ if not is_tuple_of(exclude, type):
+ raise TypeError(
+ f'"exclude" must be either None or type or a tuple of types, '
+ f'but got {type(exclude)}: {exclude}')
+
+ if exclude and isinstance(layer, exclude):
+ return False
+
+ all_norm_bases = (_BatchNorm, _InstanceNorm, nn.GroupNorm, nn.LayerNorm)
+ return isinstance(layer, all_norm_bases)
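A hedged usage sketch of `build_norm_layer`, which returns a `(name, layer)` pair (the import path mirrors how other files in this repo import it; note that `GN` expects `num_groups` in the cfg and receives `num_features` as `num_channels`):

import torch.nn as nn
from annotator.uniformer.mmcv.cnn import build_norm_layer

# BatchNorm: abbreviation 'bn' plus the postfix gives the layer name.
name, bn = build_norm_layer(dict(type='BN'), num_features=64, postfix=1)
print(name, isinstance(bn, nn.BatchNorm2d))   # bn1 True

# GroupNorm: num_groups comes from the cfg, num_features becomes num_channels.
name, gn = build_norm_layer(dict(type='GN', num_groups=8), num_features=64)
print(name, isinstance(gn, nn.GroupNorm))     # gn True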
diff --git a/annotator/uniformer/mmcv/cnn/bricks/padding.py b/annotator/uniformer/mmcv/cnn/bricks/padding.py
new file mode 100644
index 0000000000000000000000000000000000000000..e4ac6b28a1789bd551c613a7d3e7b622433ac7ec
--- /dev/null
+++ b/annotator/uniformer/mmcv/cnn/bricks/padding.py
@@ -0,0 +1,36 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import torch.nn as nn
+
+from .registry import PADDING_LAYERS
+
+PADDING_LAYERS.register_module('zero', module=nn.ZeroPad2d)
+PADDING_LAYERS.register_module('reflect', module=nn.ReflectionPad2d)
+PADDING_LAYERS.register_module('replicate', module=nn.ReplicationPad2d)
+
+
+def build_padding_layer(cfg, *args, **kwargs):
+ """Build padding layer.
+
+ Args:
+ cfg (None or dict): The padding layer config, which should contain:
+ - type (str): Layer type.
+ - layer args: Args needed to instantiate a padding layer.
+
+ Returns:
+ nn.Module: Created padding layer.
+ """
+ if not isinstance(cfg, dict):
+ raise TypeError('cfg must be a dict')
+ if 'type' not in cfg:
+ raise KeyError('the cfg dict must contain the key "type"')
+
+ cfg_ = cfg.copy()
+ padding_type = cfg_.pop('type')
+ if padding_type not in PADDING_LAYERS:
+ raise KeyError(f'Unrecognized padding type {padding_type}.')
+ else:
+ padding_layer = PADDING_LAYERS.get(padding_type)
+
+ layer = padding_layer(*args, **kwargs, **cfg_)
+
+ return layer
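A hedged sketch of cfg-driven padding construction; the module path below is the file added above (whether a shorter re-export path exists is not assumed):

import torch
from annotator.uniformer.mmcv.cnn.bricks.padding import build_padding_layer

pad = build_padding_layer(dict(type='reflect'), 2)   # -> nn.ReflectionPad2d(2)
x = torch.randn(1, 3, 8, 8)
print(pad(x).shape)  # torch.Size([1, 3, 12, 12])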
diff --git a/annotator/uniformer/mmcv/cnn/bricks/plugin.py b/annotator/uniformer/mmcv/cnn/bricks/plugin.py
new file mode 100644
index 0000000000000000000000000000000000000000..07c010d4053174dd41107aa654ea67e82b46a25c
--- /dev/null
+++ b/annotator/uniformer/mmcv/cnn/bricks/plugin.py
@@ -0,0 +1,88 @@
+import inspect
+import platform
+
+from .registry import PLUGIN_LAYERS
+
+if platform.system() == 'Windows':
+ import regex as re
+else:
+ import re
+
+
+def infer_abbr(class_type):
+ """Infer abbreviation from the class name.
+
+ This method will infer the abbreviation to map class types to
+ abbreviations.
+
+ Rule 1: If the class has the property "abbr", return the property.
+ Rule 2: Otherwise, the abbreviation falls back to snake case of class
+ name, e.g. the abbreviation of ``FancyBlock`` will be ``fancy_block``.
+
+ Args:
+ class_type (type): The norm layer type.
+
+ Returns:
+ str: The inferred abbreviation.
+ """
+
+ def camel2snack(word):
+        """Convert a camel-case word into snake case.
+
+        Modified from the `inflection` library.
+
+ Example::
+
+ >>> camel2snack("FancyBlock")
+ 'fancy_block'
+ """
+
+ word = re.sub(r'([A-Z]+)([A-Z][a-z])', r'\1_\2', word)
+ word = re.sub(r'([a-z\d])([A-Z])', r'\1_\2', word)
+ word = word.replace('-', '_')
+ return word.lower()
+
+ if not inspect.isclass(class_type):
+ raise TypeError(
+ f'class_type must be a type, but got {type(class_type)}')
+ if hasattr(class_type, '_abbr_'):
+ return class_type._abbr_
+ else:
+ return camel2snack(class_type.__name__)
+
+
+def build_plugin_layer(cfg, postfix='', **kwargs):
+ """Build plugin layer.
+
+ Args:
+ cfg (None or dict): cfg should contain:
+ type (str): identify plugin layer type.
+ layer args: args needed to instantiate a plugin layer.
+ postfix (int, str): appended into norm abbreviation to
+ create named layer. Default: ''.
+
+ Returns:
+ tuple[str, nn.Module]:
+ name (str): abbreviation + postfix
+ layer (nn.Module): created plugin layer
+ """
+ if not isinstance(cfg, dict):
+ raise TypeError('cfg must be a dict')
+ if 'type' not in cfg:
+ raise KeyError('the cfg dict must contain the key "type"')
+ cfg_ = cfg.copy()
+
+ layer_type = cfg_.pop('type')
+ if layer_type not in PLUGIN_LAYERS:
+ raise KeyError(f'Unrecognized plugin type {layer_type}')
+
+ plugin_layer = PLUGIN_LAYERS.get(layer_type)
+ abbr = infer_abbr(plugin_layer)
+
+ assert isinstance(postfix, (int, str))
+ name = abbr + str(postfix)
+
+ layer = plugin_layer(**kwargs, **cfg_)
+
+ return name, layer
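The snake-case fallback of `infer_abbr` boils down to two regex substitutions; a standalone restatement (so the sketch runs without importing the package):

import re

def camel_to_snake(word):
    # Split runs of capitals from a following capitalized word, then split
    # lower/digit-to-upper boundaries, and lowercase the result.
    word = re.sub(r'([A-Z]+)([A-Z][a-z])', r'\1_\2', word)
    word = re.sub(r'([a-z\d])([A-Z])', r'\1_\2', word)
    return word.replace('-', '_').lower()

print(camel_to_snake('FancyBlock'))   # fancy_block
print(camel_to_snake('NonLocal2d'))   # non_local2d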
diff --git a/annotator/uniformer/mmcv/cnn/bricks/registry.py b/annotator/uniformer/mmcv/cnn/bricks/registry.py
new file mode 100644
index 0000000000000000000000000000000000000000..39eabc58db4b5954478a2ac1ab91cea5e45ab055
--- /dev/null
+++ b/annotator/uniformer/mmcv/cnn/bricks/registry.py
@@ -0,0 +1,16 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from annotator.uniformer.mmcv.utils import Registry
+
+CONV_LAYERS = Registry('conv layer')
+NORM_LAYERS = Registry('norm layer')
+ACTIVATION_LAYERS = Registry('activation layer')
+PADDING_LAYERS = Registry('padding layer')
+UPSAMPLE_LAYERS = Registry('upsample layer')
+PLUGIN_LAYERS = Registry('plugin layer')
+
+DROPOUT_LAYERS = Registry('drop out layers')
+POSITIONAL_ENCODING = Registry('position encoding')
+ATTENTION = Registry('attention')
+FEEDFORWARD_NETWORK = Registry('feed-forward Network')
+TRANSFORMER_LAYER = Registry('transformerLayer')
+TRANSFORMER_LAYER_SEQUENCE = Registry('transformer-layers sequence')
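These are ordinary mmcv `Registry` objects, so custom bricks can be registered and then built from config dicts with `build_from_cfg` (the same helper imported by `transformer.py` later in this diff). The `ScaledTanh` layer below is a hypothetical example used only for illustration:

import torch.nn as nn
from annotator.uniformer.mmcv.utils import build_from_cfg
from annotator.uniformer.mmcv.cnn.bricks.registry import ACTIVATION_LAYERS

@ACTIVATION_LAYERS.register_module()
class ScaledTanh(nn.Module):          # hypothetical example layer
    def __init__(self, scale=2.0):
        super().__init__()
        self.scale = scale

    def forward(self, x):
        return self.scale * x.tanh()

act = build_from_cfg(dict(type='ScaledTanh', scale=3.0), ACTIVATION_LAYERS)
print(type(act).__name__, act.scale)  # ScaledTanh 3.0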
diff --git a/annotator/uniformer/mmcv/cnn/bricks/scale.py b/annotator/uniformer/mmcv/cnn/bricks/scale.py
new file mode 100644
index 0000000000000000000000000000000000000000..c905fffcc8bf998d18d94f927591963c428025e2
--- /dev/null
+++ b/annotator/uniformer/mmcv/cnn/bricks/scale.py
@@ -0,0 +1,21 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import torch
+import torch.nn as nn
+
+
+class Scale(nn.Module):
+ """A learnable scale parameter.
+
+ This layer scales the input by a learnable factor. It multiplies a
+ learnable scale parameter of shape (1,) with input of any shape.
+
+ Args:
+ scale (float): Initial value of scale factor. Default: 1.0
+ """
+
+ def __init__(self, scale=1.0):
+ super(Scale, self).__init__()
+ self.scale = nn.Parameter(torch.tensor(scale, dtype=torch.float))
+
+ def forward(self, x):
+ return x * self.scale
diff --git a/annotator/uniformer/mmcv/cnn/bricks/swish.py b/annotator/uniformer/mmcv/cnn/bricks/swish.py
new file mode 100644
index 0000000000000000000000000000000000000000..e2ca8ed7b749413f011ae54aac0cab27e6f0b51f
--- /dev/null
+++ b/annotator/uniformer/mmcv/cnn/bricks/swish.py
@@ -0,0 +1,25 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import torch
+import torch.nn as nn
+
+from .registry import ACTIVATION_LAYERS
+
+
+@ACTIVATION_LAYERS.register_module()
+class Swish(nn.Module):
+ """Swish Module.
+
+ This module applies the swish function:
+
+ .. math::
+ Swish(x) = x * Sigmoid(x)
+
+ Returns:
+ Tensor: The output tensor.
+ """
+
+ def __init__(self):
+ super(Swish, self).__init__()
+
+ def forward(self, x):
+ return x * torch.sigmoid(x)
diff --git a/annotator/uniformer/mmcv/cnn/bricks/transformer.py b/annotator/uniformer/mmcv/cnn/bricks/transformer.py
new file mode 100644
index 0000000000000000000000000000000000000000..e61ae0dd941a7be00b3e41a3de833ec50470a45f
--- /dev/null
+++ b/annotator/uniformer/mmcv/cnn/bricks/transformer.py
@@ -0,0 +1,595 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import copy
+import warnings
+
+import torch
+import torch.nn as nn
+
+from annotator.uniformer.mmcv import ConfigDict, deprecated_api_warning
+from annotator.uniformer.mmcv.cnn import Linear, build_activation_layer, build_norm_layer
+from annotator.uniformer.mmcv.runner.base_module import BaseModule, ModuleList, Sequential
+from annotator.uniformer.mmcv.utils import build_from_cfg
+from .drop import build_dropout
+from .registry import (ATTENTION, FEEDFORWARD_NETWORK, POSITIONAL_ENCODING,
+ TRANSFORMER_LAYER, TRANSFORMER_LAYER_SEQUENCE)
+
+# Avoid BC-breaking of importing MultiScaleDeformableAttention from this file
+try:
+ from annotator.uniformer.mmcv.ops.multi_scale_deform_attn import MultiScaleDeformableAttention # noqa F401
+ warnings.warn(
+ ImportWarning(
+ '``MultiScaleDeformableAttention`` has been moved to '
+ '``mmcv.ops.multi_scale_deform_attn``, please change original path ' # noqa E501
+ '``from annotator.uniformer.mmcv.cnn.bricks.transformer import MultiScaleDeformableAttention`` ' # noqa E501
+ 'to ``from annotator.uniformer.mmcv.ops.multi_scale_deform_attn import MultiScaleDeformableAttention`` ' # noqa E501
+ ))
+
+except ImportError:
+    warnings.warn('Failed to import ``MultiScaleDeformableAttention`` from '
+                  '``mmcv.ops.multi_scale_deform_attn``; '
+                  'you should install ``mmcv-full`` if you need this module.')
+
+
+def build_positional_encoding(cfg, default_args=None):
+ """Builder for Position Encoding."""
+ return build_from_cfg(cfg, POSITIONAL_ENCODING, default_args)
+
+
+def build_attention(cfg, default_args=None):
+ """Builder for attention."""
+ return build_from_cfg(cfg, ATTENTION, default_args)
+
+
+def build_feedforward_network(cfg, default_args=None):
+ """Builder for feed-forward network (FFN)."""
+ return build_from_cfg(cfg, FEEDFORWARD_NETWORK, default_args)
+
+
+def build_transformer_layer(cfg, default_args=None):
+ """Builder for transformer layer."""
+ return build_from_cfg(cfg, TRANSFORMER_LAYER, default_args)
+
+
+def build_transformer_layer_sequence(cfg, default_args=None):
+ """Builder for transformer encoder and transformer decoder."""
+ return build_from_cfg(cfg, TRANSFORMER_LAYER_SEQUENCE, default_args)
+
+
+@ATTENTION.register_module()
+class MultiheadAttention(BaseModule):
+ """A wrapper for ``torch.nn.MultiheadAttention``.
+
+ This module implements MultiheadAttention with identity connection,
+ and positional encoding is also passed as input.
+
+ Args:
+ embed_dims (int): The embedding dimension.
+ num_heads (int): Parallel attention heads.
+ attn_drop (float): A Dropout layer on attn_output_weights.
+ Default: 0.0.
+ proj_drop (float): A Dropout layer after `nn.MultiheadAttention`.
+ Default: 0.0.
+ dropout_layer (obj:`ConfigDict`): The dropout_layer used
+ when adding the shortcut.
+ init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization.
+ Default: None.
+        batch_first (bool): When it is True, key, query and value tensors are
+            of shape (batch, n, embed_dim); otherwise (n, batch, embed_dim).
+            Default: False.
+ """
+
+ def __init__(self,
+ embed_dims,
+ num_heads,
+ attn_drop=0.,
+ proj_drop=0.,
+ dropout_layer=dict(type='Dropout', drop_prob=0.),
+ init_cfg=None,
+ batch_first=False,
+ **kwargs):
+ super(MultiheadAttention, self).__init__(init_cfg)
+ if 'dropout' in kwargs:
+            warnings.warn('The argument `dropout` in MultiheadAttention '
+                          'has been deprecated; now you can separately '
+                          'set `attn_drop` (float), `proj_drop` (float), '
+                          'and `dropout_layer` (dict).')
+ attn_drop = kwargs['dropout']
+ dropout_layer['drop_prob'] = kwargs.pop('dropout')
+
+ self.embed_dims = embed_dims
+ self.num_heads = num_heads
+ self.batch_first = batch_first
+
+ self.attn = nn.MultiheadAttention(embed_dims, num_heads, attn_drop,
+ **kwargs)
+
+ self.proj_drop = nn.Dropout(proj_drop)
+ self.dropout_layer = build_dropout(
+ dropout_layer) if dropout_layer else nn.Identity()
+
+ @deprecated_api_warning({'residual': 'identity'},
+ cls_name='MultiheadAttention')
+ def forward(self,
+ query,
+ key=None,
+ value=None,
+ identity=None,
+ query_pos=None,
+ key_pos=None,
+ attn_mask=None,
+ key_padding_mask=None,
+ **kwargs):
+ """Forward function for `MultiheadAttention`.
+
+ **kwargs allow passing a more general data flow when combining
+ with other operations in `transformerlayer`.
+
+ Args:
+ query (Tensor): The input query with shape [num_queries, bs,
+ embed_dims] if self.batch_first is False, else
+                [bs, num_queries, embed_dims].
+ key (Tensor): The key tensor with shape [num_keys, bs,
+ embed_dims] if self.batch_first is False, else
+ [bs, num_keys, embed_dims] .
+ If None, the ``query`` will be used. Defaults to None.
+ value (Tensor): The value tensor with same shape as `key`.
+ Same in `nn.MultiheadAttention.forward`. Defaults to None.
+ If None, the `key` will be used.
+ identity (Tensor): This tensor, with the same shape as x,
+ will be used for the identity link.
+ If None, `x` will be used. Defaults to None.
+ query_pos (Tensor): The positional encoding for query, with
+ the same shape as `x`. If not None, it will
+ be added to `x` before forward function. Defaults to None.
+ key_pos (Tensor): The positional encoding for `key`, with the
+ same shape as `key`. Defaults to None. If not None, it will
+ be added to `key` before forward function. If None, and
+ `query_pos` has the same shape as `key`, then `query_pos`
+ will be used for `key_pos`. Defaults to None.
+ attn_mask (Tensor): ByteTensor mask with shape [num_queries,
+ num_keys]. Same in `nn.MultiheadAttention.forward`.
+ Defaults to None.
+ key_padding_mask (Tensor): ByteTensor with shape [bs, num_keys].
+ Defaults to None.
+
+ Returns:
+ Tensor: forwarded results with shape
+ [num_queries, bs, embed_dims]
+ if self.batch_first is False, else
+                [bs, num_queries, embed_dims].
+ """
+
+ if key is None:
+ key = query
+ if value is None:
+ value = key
+ if identity is None:
+ identity = query
+ if key_pos is None:
+ if query_pos is not None:
+ # use query_pos if key_pos is not available
+ if query_pos.shape == key.shape:
+ key_pos = query_pos
+ else:
+                    warnings.warn(f'position encoding of key is '
+                                  f'missing in {self.__class__.__name__}.')
+ if query_pos is not None:
+ query = query + query_pos
+ if key_pos is not None:
+ key = key + key_pos
+
+ # Because the dataflow('key', 'query', 'value') of
+ # ``torch.nn.MultiheadAttention`` is (num_query, batch,
+ # embed_dims), We should adjust the shape of dataflow from
+ # batch_first (batch, num_query, embed_dims) to num_query_first
+ # (num_query ,batch, embed_dims), and recover ``attn_output``
+ # from num_query_first to batch_first.
+ if self.batch_first:
+ query = query.transpose(0, 1)
+ key = key.transpose(0, 1)
+ value = value.transpose(0, 1)
+
+ out = self.attn(
+ query=query,
+ key=key,
+ value=value,
+ attn_mask=attn_mask,
+ key_padding_mask=key_padding_mask)[0]
+
+ if self.batch_first:
+ out = out.transpose(0, 1)
+
+ return identity + self.dropout_layer(self.proj_drop(out))
+
+
+@FEEDFORWARD_NETWORK.register_module()
+class FFN(BaseModule):
+ """Implements feed-forward networks (FFNs) with identity connection.
+
+ Args:
+        embed_dims (int): The feature dimension. Same as
+            `MultiheadAttention`. Default: 256.
+        feedforward_channels (int): The hidden dimension of FFNs.
+            Default: 1024.
+ num_fcs (int, optional): The number of fully-connected layers in
+ FFNs. Default: 2.
+ act_cfg (dict, optional): The activation config for FFNs.
+ Default: dict(type='ReLU')
+ ffn_drop (float, optional): Probability of an element to be
+ zeroed in FFN. Default 0.0.
+ add_identity (bool, optional): Whether to add the
+ identity connection. Default: `True`.
+ dropout_layer (obj:`ConfigDict`): The dropout_layer used
+ when adding the shortcut.
+ init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization.
+ Default: None.
+ """
+
+ @deprecated_api_warning(
+ {
+ 'dropout': 'ffn_drop',
+ 'add_residual': 'add_identity'
+ },
+ cls_name='FFN')
+ def __init__(self,
+ embed_dims=256,
+ feedforward_channels=1024,
+ num_fcs=2,
+ act_cfg=dict(type='ReLU', inplace=True),
+ ffn_drop=0.,
+ dropout_layer=None,
+ add_identity=True,
+ init_cfg=None,
+ **kwargs):
+ super(FFN, self).__init__(init_cfg)
+        assert num_fcs >= 2, 'num_fcs should be no less ' \
+            f'than 2, but got {num_fcs}.'
+ self.embed_dims = embed_dims
+ self.feedforward_channels = feedforward_channels
+ self.num_fcs = num_fcs
+ self.act_cfg = act_cfg
+ self.activate = build_activation_layer(act_cfg)
+
+ layers = []
+ in_channels = embed_dims
+ for _ in range(num_fcs - 1):
+ layers.append(
+ Sequential(
+ Linear(in_channels, feedforward_channels), self.activate,
+ nn.Dropout(ffn_drop)))
+ in_channels = feedforward_channels
+ layers.append(Linear(feedforward_channels, embed_dims))
+ layers.append(nn.Dropout(ffn_drop))
+ self.layers = Sequential(*layers)
+ self.dropout_layer = build_dropout(
+ dropout_layer) if dropout_layer else torch.nn.Identity()
+ self.add_identity = add_identity
+
+ @deprecated_api_warning({'residual': 'identity'}, cls_name='FFN')
+ def forward(self, x, identity=None):
+ """Forward function for `FFN`.
+
+        The function adds `x` to the output tensor if `identity` is None.
+ """
+ out = self.layers(x)
+ if not self.add_identity:
+ return self.dropout_layer(out)
+ if identity is None:
+ identity = x
+ return identity + self.dropout_layer(out)
+
+
+@TRANSFORMER_LAYER.register_module()
+class BaseTransformerLayer(BaseModule):
+ """Base `TransformerLayer` for vision transformer.
+
+    It can be built from `mmcv.ConfigDict` and supports more flexible
+    customization, for example, using any number of `FFN` or `LN` layers
+    and different kinds of `attention` by specifying a list of `ConfigDict`
+    named `attn_cfgs`. It also supports `prenorm` when `norm` is specified
+    as the first element of `operation_order`. More details about `prenorm`
+    can be found in `On Layer Normalization in the Transformer
+    Architecture`.
+
+ Args:
+        attn_cfgs (list[`mmcv.ConfigDict`] | obj:`mmcv.ConfigDict` | None):
+            Configs for `self_attention` or `cross_attention` modules.
+            The order of the configs in the list should be consistent with
+            the corresponding attentions in operation_order.
+            If it is a dict, all of the attention modules in operation_order
+            will be built with this config. Default: None.
+        ffn_cfgs (list[`mmcv.ConfigDict`] | obj:`mmcv.ConfigDict` | None):
+            Configs for FFN. The order of the configs in the list should be
+            consistent with the corresponding ffn in operation_order.
+            If it is a dict, all of the ffn modules in operation_order
+            will be built with this config.
+        operation_order (tuple[str]): The execution order of operations
+            in the transformer, e.g. ('self_attn', 'norm', 'ffn', 'norm').
+            `prenorm` is supported when the first element is `norm`.
+            Default: None.
+ norm_cfg (dict): Config dict for normalization layer.
+ Default: dict(type='LN').
+ init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization.
+ Default: None.
+        batch_first (bool): Whether key, query and value tensors have shape
+            (batch, n, embed_dim)
+            rather than (n, batch, embed_dim). Default: False.
+ """
+
+ def __init__(self,
+ attn_cfgs=None,
+ ffn_cfgs=dict(
+ type='FFN',
+ embed_dims=256,
+ feedforward_channels=1024,
+ num_fcs=2,
+ ffn_drop=0.,
+ act_cfg=dict(type='ReLU', inplace=True),
+ ),
+ operation_order=None,
+ norm_cfg=dict(type='LN'),
+ init_cfg=None,
+ batch_first=False,
+ **kwargs):
+
+ deprecated_args = dict(
+ feedforward_channels='feedforward_channels',
+ ffn_dropout='ffn_drop',
+ ffn_num_fcs='num_fcs')
+ for ori_name, new_name in deprecated_args.items():
+ if ori_name in kwargs:
+ warnings.warn(
+                    f'The argument `{ori_name}` in BaseTransformerLayer '
+                    f'has been deprecated; now you should set `{new_name}` '
+                    f'and other FFN-related arguments '
+                    f'in a dict named `ffn_cfgs`.')
+ ffn_cfgs[new_name] = kwargs[ori_name]
+
+ super(BaseTransformerLayer, self).__init__(init_cfg)
+
+ self.batch_first = batch_first
+
+        assert set(operation_order) & set(
+            ['self_attn', 'norm', 'ffn', 'cross_attn']) == \
+            set(operation_order), f'The operation_order of' \
+            f' {self.__class__.__name__} should only ' \
+            f'contain operation types in ' \
+            f"{['self_attn', 'norm', 'ffn', 'cross_attn']}"
+
+ num_attn = operation_order.count('self_attn') + operation_order.count(
+ 'cross_attn')
+ if isinstance(attn_cfgs, dict):
+ attn_cfgs = [copy.deepcopy(attn_cfgs) for _ in range(num_attn)]
+ else:
+            assert num_attn == len(attn_cfgs), f'The length ' \
+                f'of attn_cfgs {len(attn_cfgs)} is ' \
+                f'not consistent with the number of attentions ' \
+                f'{num_attn} in operation_order {operation_order}.'
+
+ self.num_attn = num_attn
+ self.operation_order = operation_order
+ self.norm_cfg = norm_cfg
+ self.pre_norm = operation_order[0] == 'norm'
+ self.attentions = ModuleList()
+
+ index = 0
+ for operation_name in operation_order:
+ if operation_name in ['self_attn', 'cross_attn']:
+ if 'batch_first' in attn_cfgs[index]:
+ assert self.batch_first == attn_cfgs[index]['batch_first']
+ else:
+ attn_cfgs[index]['batch_first'] = self.batch_first
+ attention = build_attention(attn_cfgs[index])
+ # Some custom attentions used as `self_attn`
+ # or `cross_attn` can have different behavior.
+ attention.operation_name = operation_name
+ self.attentions.append(attention)
+ index += 1
+
+ self.embed_dims = self.attentions[0].embed_dims
+
+ self.ffns = ModuleList()
+ num_ffns = operation_order.count('ffn')
+ if isinstance(ffn_cfgs, dict):
+ ffn_cfgs = ConfigDict(ffn_cfgs)
+ if isinstance(ffn_cfgs, dict):
+ ffn_cfgs = [copy.deepcopy(ffn_cfgs) for _ in range(num_ffns)]
+ assert len(ffn_cfgs) == num_ffns
+ for ffn_index in range(num_ffns):
+ if 'embed_dims' not in ffn_cfgs[ffn_index]:
+                ffn_cfgs[ffn_index]['embed_dims'] = self.embed_dims
+ else:
+ assert ffn_cfgs[ffn_index]['embed_dims'] == self.embed_dims
+ self.ffns.append(
+ build_feedforward_network(ffn_cfgs[ffn_index],
+ dict(type='FFN')))
+
+ self.norms = ModuleList()
+ num_norms = operation_order.count('norm')
+ for _ in range(num_norms):
+ self.norms.append(build_norm_layer(norm_cfg, self.embed_dims)[1])
+
+ def forward(self,
+ query,
+ key=None,
+ value=None,
+ query_pos=None,
+ key_pos=None,
+ attn_masks=None,
+ query_key_padding_mask=None,
+ key_padding_mask=None,
+ **kwargs):
+        """Forward function for `BaseTransformerLayer`.
+
+ **kwargs contains some specific arguments of attentions.
+
+ Args:
+ query (Tensor): The input query with shape
+ [num_queries, bs, embed_dims] if
+ self.batch_first is False, else
+                [bs, num_queries, embed_dims].
+ key (Tensor): The key tensor with shape [num_keys, bs,
+ embed_dims] if self.batch_first is False, else
+ [bs, num_keys, embed_dims] .
+ value (Tensor): The value tensor with same shape as `key`.
+ query_pos (Tensor): The positional encoding for `query`.
+ Default: None.
+ key_pos (Tensor): The positional encoding for `key`.
+ Default: None.
+ attn_masks (List[Tensor] | None): 2D Tensor used in
+ calculation of corresponding attention. The length of
+ it should equal to the number of `attention` in
+ `operation_order`. Default: None.
+ query_key_padding_mask (Tensor): ByteTensor for `query`, with
+ shape [bs, num_queries]. Only used in `self_attn` layer.
+ Defaults to None.
+            key_padding_mask (Tensor): ByteTensor for `key`, with
+                shape [bs, num_keys]. Defaults to None.
+
+ Returns:
+ Tensor: forwarded results with shape [num_queries, bs, embed_dims].
+ """
+
+ norm_index = 0
+ attn_index = 0
+ ffn_index = 0
+ identity = query
+ if attn_masks is None:
+ attn_masks = [None for _ in range(self.num_attn)]
+ elif isinstance(attn_masks, torch.Tensor):
+ attn_masks = [
+ copy.deepcopy(attn_masks) for _ in range(self.num_attn)
+ ]
+            warnings.warn(f'Use the same attn_mask in all attentions in '
+ f'{self.__class__.__name__} ')
+ else:
+ assert len(attn_masks) == self.num_attn, f'The length of ' \
+ f'attn_masks {len(attn_masks)} must be equal ' \
+ f'to the number of attention in ' \
+ f'operation_order {self.num_attn}'
+
+ for layer in self.operation_order:
+ if layer == 'self_attn':
+ temp_key = temp_value = query
+ query = self.attentions[attn_index](
+ query,
+ temp_key,
+ temp_value,
+ identity if self.pre_norm else None,
+ query_pos=query_pos,
+ key_pos=query_pos,
+ attn_mask=attn_masks[attn_index],
+ key_padding_mask=query_key_padding_mask,
+ **kwargs)
+ attn_index += 1
+ identity = query
+
+ elif layer == 'norm':
+ query = self.norms[norm_index](query)
+ norm_index += 1
+
+ elif layer == 'cross_attn':
+ query = self.attentions[attn_index](
+ query,
+ key,
+ value,
+ identity if self.pre_norm else None,
+ query_pos=query_pos,
+ key_pos=key_pos,
+ attn_mask=attn_masks[attn_index],
+ key_padding_mask=key_padding_mask,
+ **kwargs)
+ attn_index += 1
+ identity = query
+
+ elif layer == 'ffn':
+ query = self.ffns[ffn_index](
+ query, identity if self.pre_norm else None)
+ ffn_index += 1
+
+ return query
+
+
+@TRANSFORMER_LAYER_SEQUENCE.register_module()
+class TransformerLayerSequence(BaseModule):
+ """Base class for TransformerEncoder and TransformerDecoder in vision
+ transformer.
+
+ As base-class of Encoder and Decoder in vision transformer.
+ Support customization such as specifying different kind
+ of `transformer_layer` in `transformer_coder`.
+
+ Args:
+        transformerlayers (list[obj:`mmcv.ConfigDict`] |
+            obj:`mmcv.ConfigDict`): Config of the transformer layers
+            in TransformerCoder. If it is obj:`mmcv.ConfigDict`,
+            it will be repeated `num_layers` times to build a
+            list[`mmcv.ConfigDict`]. Default: None.
+ num_layers (int): The number of `TransformerLayer`. Default: None.
+ init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization.
+ Default: None.
+ """
+
+ def __init__(self, transformerlayers=None, num_layers=None, init_cfg=None):
+ super(TransformerLayerSequence, self).__init__(init_cfg)
+ if isinstance(transformerlayers, dict):
+ transformerlayers = [
+ copy.deepcopy(transformerlayers) for _ in range(num_layers)
+ ]
+ else:
+ assert isinstance(transformerlayers, list) and \
+ len(transformerlayers) == num_layers
+ self.num_layers = num_layers
+ self.layers = ModuleList()
+ for i in range(num_layers):
+ self.layers.append(build_transformer_layer(transformerlayers[i]))
+ self.embed_dims = self.layers[0].embed_dims
+ self.pre_norm = self.layers[0].pre_norm
+
+ def forward(self,
+ query,
+ key,
+ value,
+ query_pos=None,
+ key_pos=None,
+ attn_masks=None,
+ query_key_padding_mask=None,
+ key_padding_mask=None,
+ **kwargs):
+ """Forward function for `TransformerCoder`.
+
+ Args:
+ query (Tensor): Input query with shape
+ `(num_queries, bs, embed_dims)`.
+ key (Tensor): The key tensor with shape
+ `(num_keys, bs, embed_dims)`.
+ value (Tensor): The value tensor with shape
+ `(num_keys, bs, embed_dims)`.
+ query_pos (Tensor): The positional encoding for `query`.
+ Default: None.
+ key_pos (Tensor): The positional encoding for `key`.
+ Default: None.
+ attn_masks (List[Tensor], optional): Each element is 2D Tensor
+ which is used in calculation of corresponding attention in
+ operation_order. Default: None.
+            query_key_padding_mask (Tensor): ByteTensor for `query`, with
+                shape [bs, num_queries]. Only used in self-attention.
+                Default: None.
+            key_padding_mask (Tensor): ByteTensor for `key`, with
+                shape [bs, num_keys]. Default: None.
+
+ Returns:
+ Tensor: results with shape [num_queries, bs, embed_dims].
+ """
+ for layer in self.layers:
+ query = layer(
+ query,
+ key,
+ value,
+ query_pos=query_pos,
+ key_pos=key_pos,
+ attn_masks=attn_masks,
+ query_key_padding_mask=query_key_padding_mask,
+ key_padding_mask=key_padding_mask,
+ **kwargs)
+ return query
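A hedged sketch of wiring these bricks together: one post-norm, encoder-style layer built from config through `build_transformer_layer`, using the `MultiheadAttention` and `FFN` modules registered above (tensor sizes are illustrative):

import torch
from annotator.uniformer.mmcv.cnn.bricks.transformer import build_transformer_layer

layer_cfg = dict(
    type='BaseTransformerLayer',
    attn_cfgs=dict(type='MultiheadAttention', embed_dims=256, num_heads=8),
    ffn_cfgs=dict(type='FFN', embed_dims=256, feedforward_channels=1024),
    operation_order=('self_attn', 'norm', 'ffn', 'norm'))

layer = build_transformer_layer(layer_cfg)
query = torch.randn(100, 2, 256)   # [num_queries, bs, embed_dims], batch_first=False
out = layer(query)
print(out.shape)                   # torch.Size([100, 2, 256])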
diff --git a/annotator/uniformer/mmcv/cnn/bricks/upsample.py b/annotator/uniformer/mmcv/cnn/bricks/upsample.py
new file mode 100644
index 0000000000000000000000000000000000000000..a1a353767d0ce8518f0d7289bed10dba0178ed12
--- /dev/null
+++ b/annotator/uniformer/mmcv/cnn/bricks/upsample.py
@@ -0,0 +1,84 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import torch.nn as nn
+import torch.nn.functional as F
+
+from ..utils import xavier_init
+from .registry import UPSAMPLE_LAYERS
+
+UPSAMPLE_LAYERS.register_module('nearest', module=nn.Upsample)
+UPSAMPLE_LAYERS.register_module('bilinear', module=nn.Upsample)
+
+
+@UPSAMPLE_LAYERS.register_module(name='pixel_shuffle')
+class PixelShufflePack(nn.Module):
+ """Pixel Shuffle upsample layer.
+
+ This module packs `F.pixel_shuffle()` and a nn.Conv2d module together to
+ achieve a simple upsampling with pixel shuffle.
+
+ Args:
+ in_channels (int): Number of input channels.
+ out_channels (int): Number of output channels.
+ scale_factor (int): Upsample ratio.
+ upsample_kernel (int): Kernel size of the conv layer to expand the
+ channels.
+ """
+
+ def __init__(self, in_channels, out_channels, scale_factor,
+ upsample_kernel):
+ super(PixelShufflePack, self).__init__()
+ self.in_channels = in_channels
+ self.out_channels = out_channels
+ self.scale_factor = scale_factor
+ self.upsample_kernel = upsample_kernel
+ self.upsample_conv = nn.Conv2d(
+ self.in_channels,
+ self.out_channels * scale_factor * scale_factor,
+ self.upsample_kernel,
+ padding=(self.upsample_kernel - 1) // 2)
+ self.init_weights()
+
+ def init_weights(self):
+ xavier_init(self.upsample_conv, distribution='uniform')
+
+ def forward(self, x):
+ x = self.upsample_conv(x)
+ x = F.pixel_shuffle(x, self.scale_factor)
+ return x
+
+
+def build_upsample_layer(cfg, *args, **kwargs):
+ """Build upsample layer.
+
+ Args:
+ cfg (dict): The upsample layer config, which should contain:
+
+ - type (str): Layer type.
+ - scale_factor (int): Upsample ratio, which is not applicable to
+ deconv.
+            - layer args: Args needed to instantiate an upsample layer.
+        args (argument list): Arguments passed to the ``__init__``
+            method of the corresponding upsample layer.
+        kwargs (keyword arguments): Keyword arguments passed to the
+            ``__init__`` method of the corresponding upsample layer.
+
+ Returns:
+ nn.Module: Created upsample layer.
+ """
+ if not isinstance(cfg, dict):
+ raise TypeError(f'cfg must be a dict, but got {type(cfg)}')
+ if 'type' not in cfg:
+ raise KeyError(
+ f'the cfg dict must contain the key "type", but got {cfg}')
+ cfg_ = cfg.copy()
+
+ layer_type = cfg_.pop('type')
+ if layer_type not in UPSAMPLE_LAYERS:
+ raise KeyError(f'Unrecognized upsample type {layer_type}')
+ else:
+ upsample = UPSAMPLE_LAYERS.get(layer_type)
+
+ if upsample is nn.Upsample:
+ cfg_['mode'] = layer_type
+ layer = upsample(*args, **kwargs, **cfg_)
+ return layer
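A hedged sketch of the registered `pixel_shuffle` entry: the conv expands channels by `scale_factor**2` and `F.pixel_shuffle` folds them back into spatial resolution:

import torch
from annotator.uniformer.mmcv.cnn.bricks.upsample import build_upsample_layer

up = build_upsample_layer(
    dict(type='pixel_shuffle', in_channels=16, out_channels=16,
         scale_factor=2, upsample_kernel=3))
x = torch.randn(1, 16, 32, 32)
print(up(x).shape)  # torch.Size([1, 16, 64, 64])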
diff --git a/annotator/uniformer/mmcv/cnn/bricks/wrappers.py b/annotator/uniformer/mmcv/cnn/bricks/wrappers.py
new file mode 100644
index 0000000000000000000000000000000000000000..8aebf67bf52355a513f21756ee74fe510902d075
--- /dev/null
+++ b/annotator/uniformer/mmcv/cnn/bricks/wrappers.py
@@ -0,0 +1,180 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+r"""Modified from https://github.com/facebookresearch/detectron2/blob/master/detectron2/layers/wrappers.py # noqa: E501
+
+Wrap some nn modules to support empty tensor input. Currently, these wrappers
+are mainly used in mask heads like fcn_mask_head and maskiou_heads since mask
+heads are trained on only positive RoIs.
+"""
+import math
+
+import torch
+import torch.nn as nn
+from torch.nn.modules.utils import _pair, _triple
+
+from .registry import CONV_LAYERS, UPSAMPLE_LAYERS
+
+if torch.__version__ == 'parrots':
+ TORCH_VERSION = torch.__version__
+else:
+ # torch.__version__ could be 1.3.1+cu92, we only need the first two
+ # for comparison
+ TORCH_VERSION = tuple(int(x) for x in torch.__version__.split('.')[:2])
+
+
+def obsolete_torch_version(torch_version, version_threshold):
+ return torch_version == 'parrots' or torch_version <= version_threshold
+
+
+class NewEmptyTensorOp(torch.autograd.Function):
+
+ @staticmethod
+ def forward(ctx, x, new_shape):
+ ctx.shape = x.shape
+ return x.new_empty(new_shape)
+
+ @staticmethod
+ def backward(ctx, grad):
+ shape = ctx.shape
+ return NewEmptyTensorOp.apply(grad, shape), None
+
+
+@CONV_LAYERS.register_module('Conv', force=True)
+class Conv2d(nn.Conv2d):
+
+ def forward(self, x):
+ if x.numel() == 0 and obsolete_torch_version(TORCH_VERSION, (1, 4)):
+ out_shape = [x.shape[0], self.out_channels]
+ for i, k, p, s, d in zip(x.shape[-2:], self.kernel_size,
+ self.padding, self.stride, self.dilation):
+ o = (i + 2 * p - (d * (k - 1) + 1)) // s + 1
+ out_shape.append(o)
+ empty = NewEmptyTensorOp.apply(x, out_shape)
+ if self.training:
+ # produce dummy gradient to avoid DDP warning.
+ dummy = sum(x.view(-1)[0] for x in self.parameters()) * 0.0
+ return empty + dummy
+ else:
+ return empty
+
+ return super().forward(x)
+
+
+@CONV_LAYERS.register_module('Conv3d', force=True)
+class Conv3d(nn.Conv3d):
+
+ def forward(self, x):
+ if x.numel() == 0 and obsolete_torch_version(TORCH_VERSION, (1, 4)):
+ out_shape = [x.shape[0], self.out_channels]
+ for i, k, p, s, d in zip(x.shape[-3:], self.kernel_size,
+ self.padding, self.stride, self.dilation):
+ o = (i + 2 * p - (d * (k - 1) + 1)) // s + 1
+ out_shape.append(o)
+ empty = NewEmptyTensorOp.apply(x, out_shape)
+ if self.training:
+ # produce dummy gradient to avoid DDP warning.
+ dummy = sum(x.view(-1)[0] for x in self.parameters()) * 0.0
+ return empty + dummy
+ else:
+ return empty
+
+ return super().forward(x)
+
+
+@CONV_LAYERS.register_module()
+@CONV_LAYERS.register_module('deconv')
+@UPSAMPLE_LAYERS.register_module('deconv', force=True)
+class ConvTranspose2d(nn.ConvTranspose2d):
+
+ def forward(self, x):
+ if x.numel() == 0 and obsolete_torch_version(TORCH_VERSION, (1, 4)):
+ out_shape = [x.shape[0], self.out_channels]
+ for i, k, p, s, d, op in zip(x.shape[-2:], self.kernel_size,
+ self.padding, self.stride,
+ self.dilation, self.output_padding):
+ out_shape.append((i - 1) * s - 2 * p + (d * (k - 1) + 1) + op)
+ empty = NewEmptyTensorOp.apply(x, out_shape)
+ if self.training:
+ # produce dummy gradient to avoid DDP warning.
+ dummy = sum(x.view(-1)[0] for x in self.parameters()) * 0.0
+ return empty + dummy
+ else:
+ return empty
+
+ return super().forward(x)
+
+
+@CONV_LAYERS.register_module()
+@CONV_LAYERS.register_module('deconv3d')
+@UPSAMPLE_LAYERS.register_module('deconv3d', force=True)
+class ConvTranspose3d(nn.ConvTranspose3d):
+
+ def forward(self, x):
+ if x.numel() == 0 and obsolete_torch_version(TORCH_VERSION, (1, 4)):
+ out_shape = [x.shape[0], self.out_channels]
+ for i, k, p, s, d, op in zip(x.shape[-3:], self.kernel_size,
+ self.padding, self.stride,
+ self.dilation, self.output_padding):
+ out_shape.append((i - 1) * s - 2 * p + (d * (k - 1) + 1) + op)
+ empty = NewEmptyTensorOp.apply(x, out_shape)
+ if self.training:
+ # produce dummy gradient to avoid DDP warning.
+ dummy = sum(x.view(-1)[0] for x in self.parameters()) * 0.0
+ return empty + dummy
+ else:
+ return empty
+
+ return super().forward(x)
+
+
+class MaxPool2d(nn.MaxPool2d):
+
+ def forward(self, x):
+ # PyTorch 1.9 does not support empty tensor inference yet
+ if x.numel() == 0 and obsolete_torch_version(TORCH_VERSION, (1, 9)):
+ out_shape = list(x.shape[:2])
+ for i, k, p, s, d in zip(x.shape[-2:], _pair(self.kernel_size),
+ _pair(self.padding), _pair(self.stride),
+ _pair(self.dilation)):
+ o = (i + 2 * p - (d * (k - 1) + 1)) / s + 1
+ o = math.ceil(o) if self.ceil_mode else math.floor(o)
+ out_shape.append(o)
+ empty = NewEmptyTensorOp.apply(x, out_shape)
+ return empty
+
+ return super().forward(x)
+
+
+class MaxPool3d(nn.MaxPool3d):
+
+ def forward(self, x):
+ # PyTorch 1.9 does not support empty tensor inference yet
+ if x.numel() == 0 and obsolete_torch_version(TORCH_VERSION, (1, 9)):
+ out_shape = list(x.shape[:2])
+ for i, k, p, s, d in zip(x.shape[-3:], _triple(self.kernel_size),
+ _triple(self.padding),
+ _triple(self.stride),
+ _triple(self.dilation)):
+ o = (i + 2 * p - (d * (k - 1) + 1)) / s + 1
+ o = math.ceil(o) if self.ceil_mode else math.floor(o)
+ out_shape.append(o)
+ empty = NewEmptyTensorOp.apply(x, out_shape)
+ return empty
+
+ return super().forward(x)
+
+
+class Linear(torch.nn.Linear):
+
+ def forward(self, x):
+        # empty tensor forward of Linear layer is supported in PyTorch 1.6
+ if x.numel() == 0 and obsolete_torch_version(TORCH_VERSION, (1, 5)):
+ out_shape = [x.shape[0], self.out_features]
+ empty = NewEmptyTensorOp.apply(x, out_shape)
+ if self.training:
+ # produce dummy gradient to avoid DDP warning.
+ dummy = sum(x.view(-1)[0] for x in self.parameters()) * 0.0
+ return empty + dummy
+ else:
+ return empty
+
+ return super().forward(x)
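The empty-tensor branches above reproduce the usual convolution output-size arithmetic; a standalone check of that formula against a real (non-empty) conv with made-up sizes:

# o = (i + 2*p - (d*(k - 1) + 1)) // s + 1, compared against an actual conv.
import torch
import torch.nn as nn

i, k, p, s, d = 17, 3, 1, 2, 1
conv = nn.Conv2d(4, 8, kernel_size=k, stride=s, padding=p, dilation=d)
o_formula = (i + 2 * p - (d * (k - 1) + 1)) // s + 1
o_real = conv(torch.randn(1, 4, i, i)).shape[-1]
print(o_formula, o_real)  # 9 9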
diff --git a/annotator/uniformer/mmcv/cnn/builder.py b/annotator/uniformer/mmcv/cnn/builder.py
new file mode 100644
index 0000000000000000000000000000000000000000..7567316c566bd3aca6d8f65a84b00e9e890948a7
--- /dev/null
+++ b/annotator/uniformer/mmcv/cnn/builder.py
@@ -0,0 +1,30 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from ..runner import Sequential
+from ..utils import Registry, build_from_cfg
+
+
+def build_model_from_cfg(cfg, registry, default_args=None):
+ """Build a PyTorch model from config dict(s). Different from
+ ``build_from_cfg``, if cfg is a list, a ``nn.Sequential`` will be built.
+
+ Args:
+        cfg (dict, list[dict]): The config of modules; it is either a config
+            dict or a list of config dicts. If cfg is a list,
+            the built modules will be wrapped with ``nn.Sequential``.
+ registry (:obj:`Registry`): A registry the module belongs to.
+ default_args (dict, optional): Default arguments to build the module.
+ Defaults to None.
+
+ Returns:
+ nn.Module: A built nn module.
+ """
+ if isinstance(cfg, list):
+ modules = [
+ build_from_cfg(cfg_, registry, default_args) for cfg_ in cfg
+ ]
+ return Sequential(*modules)
+ else:
+ return build_from_cfg(cfg, registry, default_args)
+
+
+MODELS = Registry('model', build_func=build_model_from_cfg)
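A hedged sketch of `build_model_from_cfg`: a list of cfgs yields a `Sequential` of built modules. The `ConvReLU` block below is a hypothetical example registered only for this snippet:

import torch.nn as nn
from annotator.uniformer.mmcv.cnn.builder import MODELS, build_model_from_cfg

@MODELS.register_module()
class ConvReLU(nn.Module):            # hypothetical example block
    def __init__(self, in_ch, out_ch):
        super().__init__()
        self.body = nn.Sequential(nn.Conv2d(in_ch, out_ch, 3, padding=1),
                                  nn.ReLU(inplace=True))

    def forward(self, x):
        return self.body(x)

cfgs = [dict(type='ConvReLU', in_ch=3, out_ch=16),
        dict(type='ConvReLU', in_ch=16, out_ch=32)]
model = build_model_from_cfg(cfgs, MODELS)
print(type(model).__name__)  # Sequential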
diff --git a/annotator/uniformer/mmcv/cnn/resnet.py b/annotator/uniformer/mmcv/cnn/resnet.py
new file mode 100644
index 0000000000000000000000000000000000000000..1cb3ac057ee2d52c46fc94685b5d4e698aad8d5f
--- /dev/null
+++ b/annotator/uniformer/mmcv/cnn/resnet.py
@@ -0,0 +1,316 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import logging
+
+import torch.nn as nn
+import torch.utils.checkpoint as cp
+
+from .utils import constant_init, kaiming_init
+
+
+def conv3x3(in_planes, out_planes, stride=1, dilation=1):
+ """3x3 convolution with padding."""
+ return nn.Conv2d(
+ in_planes,
+ out_planes,
+ kernel_size=3,
+ stride=stride,
+ padding=dilation,
+ dilation=dilation,
+ bias=False)
+
+
+class BasicBlock(nn.Module):
+ expansion = 1
+
+ def __init__(self,
+ inplanes,
+ planes,
+ stride=1,
+ dilation=1,
+ downsample=None,
+ style='pytorch',
+ with_cp=False):
+ super(BasicBlock, self).__init__()
+ assert style in ['pytorch', 'caffe']
+ self.conv1 = conv3x3(inplanes, planes, stride, dilation)
+ self.bn1 = nn.BatchNorm2d(planes)
+ self.relu = nn.ReLU(inplace=True)
+ self.conv2 = conv3x3(planes, planes)
+ self.bn2 = nn.BatchNorm2d(planes)
+ self.downsample = downsample
+ self.stride = stride
+ self.dilation = dilation
+ assert not with_cp
+
+ def forward(self, x):
+ residual = x
+
+ out = self.conv1(x)
+ out = self.bn1(out)
+ out = self.relu(out)
+
+ out = self.conv2(out)
+ out = self.bn2(out)
+
+ if self.downsample is not None:
+ residual = self.downsample(x)
+
+ out += residual
+ out = self.relu(out)
+
+ return out
+
+
+class Bottleneck(nn.Module):
+ expansion = 4
+
+ def __init__(self,
+ inplanes,
+ planes,
+ stride=1,
+ dilation=1,
+ downsample=None,
+ style='pytorch',
+ with_cp=False):
+ """Bottleneck block.
+
+ If style is "pytorch", the stride-two layer is the 3x3 conv layer, if
+ it is "caffe", the stride-two layer is the first 1x1 conv layer.
+ """
+ super(Bottleneck, self).__init__()
+ assert style in ['pytorch', 'caffe']
+ if style == 'pytorch':
+ conv1_stride = 1
+ conv2_stride = stride
+ else:
+ conv1_stride = stride
+ conv2_stride = 1
+ self.conv1 = nn.Conv2d(
+ inplanes, planes, kernel_size=1, stride=conv1_stride, bias=False)
+ self.conv2 = nn.Conv2d(
+ planes,
+ planes,
+ kernel_size=3,
+ stride=conv2_stride,
+ padding=dilation,
+ dilation=dilation,
+ bias=False)
+
+ self.bn1 = nn.BatchNorm2d(planes)
+ self.bn2 = nn.BatchNorm2d(planes)
+ self.conv3 = nn.Conv2d(
+ planes, planes * self.expansion, kernel_size=1, bias=False)
+ self.bn3 = nn.BatchNorm2d(planes * self.expansion)
+ self.relu = nn.ReLU(inplace=True)
+ self.downsample = downsample
+ self.stride = stride
+ self.dilation = dilation
+ self.with_cp = with_cp
+
+ def forward(self, x):
+
+ def _inner_forward(x):
+ residual = x
+
+ out = self.conv1(x)
+ out = self.bn1(out)
+ out = self.relu(out)
+
+ out = self.conv2(out)
+ out = self.bn2(out)
+ out = self.relu(out)
+
+ out = self.conv3(out)
+ out = self.bn3(out)
+
+ if self.downsample is not None:
+ residual = self.downsample(x)
+
+ out += residual
+
+ return out
+
+ if self.with_cp and x.requires_grad:
+ out = cp.checkpoint(_inner_forward, x)
+ else:
+ out = _inner_forward(x)
+
+ out = self.relu(out)
+
+ return out
+
+
+def make_res_layer(block,
+ inplanes,
+ planes,
+ blocks,
+ stride=1,
+ dilation=1,
+ style='pytorch',
+ with_cp=False):
+ downsample = None
+ if stride != 1 or inplanes != planes * block.expansion:
+ downsample = nn.Sequential(
+ nn.Conv2d(
+ inplanes,
+ planes * block.expansion,
+ kernel_size=1,
+ stride=stride,
+ bias=False),
+ nn.BatchNorm2d(planes * block.expansion),
+ )
+
+ layers = []
+ layers.append(
+ block(
+ inplanes,
+ planes,
+ stride,
+ dilation,
+ downsample,
+ style=style,
+ with_cp=with_cp))
+ inplanes = planes * block.expansion
+ for _ in range(1, blocks):
+ layers.append(
+ block(inplanes, planes, 1, dilation, style=style, with_cp=with_cp))
+
+ return nn.Sequential(*layers)
+
+
+class ResNet(nn.Module):
+ """ResNet backbone.
+
+ Args:
+ depth (int): Depth of resnet, from {18, 34, 50, 101, 152}.
+ num_stages (int): Resnet stages, normally 4.
+ strides (Sequence[int]): Strides of the first block of each stage.
+ dilations (Sequence[int]): Dilation of each stage.
+ out_indices (Sequence[int]): Output from which stages.
+ style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two
+ layer is the 3x3 conv layer, otherwise the stride-two layer is
+ the first 1x1 conv layer.
+ frozen_stages (int): Stages to be frozen (all param fixed). -1 means
+ not freezing any parameters.
+ bn_eval (bool): Whether to set BN layers as eval mode, namely, freeze
+ running stats (mean and var).
+ bn_frozen (bool): Whether to freeze weight and bias of BN layers.
+ with_cp (bool): Use checkpoint or not. Using checkpoint will save some
+ memory while slowing down the training speed.
+ """
+
+ arch_settings = {
+ 18: (BasicBlock, (2, 2, 2, 2)),
+ 34: (BasicBlock, (3, 4, 6, 3)),
+ 50: (Bottleneck, (3, 4, 6, 3)),
+ 101: (Bottleneck, (3, 4, 23, 3)),
+ 152: (Bottleneck, (3, 8, 36, 3))
+ }
+
+ def __init__(self,
+ depth,
+ num_stages=4,
+ strides=(1, 2, 2, 2),
+ dilations=(1, 1, 1, 1),
+ out_indices=(0, 1, 2, 3),
+ style='pytorch',
+ frozen_stages=-1,
+ bn_eval=True,
+ bn_frozen=False,
+ with_cp=False):
+ super(ResNet, self).__init__()
+ if depth not in self.arch_settings:
+ raise KeyError(f'invalid depth {depth} for resnet')
+ assert num_stages >= 1 and num_stages <= 4
+ block, stage_blocks = self.arch_settings[depth]
+ stage_blocks = stage_blocks[:num_stages]
+ assert len(strides) == len(dilations) == num_stages
+ assert max(out_indices) < num_stages
+
+ self.out_indices = out_indices
+ self.style = style
+ self.frozen_stages = frozen_stages
+ self.bn_eval = bn_eval
+ self.bn_frozen = bn_frozen
+ self.with_cp = with_cp
+
+ self.inplanes = 64
+ self.conv1 = nn.Conv2d(
+ 3, 64, kernel_size=7, stride=2, padding=3, bias=False)
+ self.bn1 = nn.BatchNorm2d(64)
+ self.relu = nn.ReLU(inplace=True)
+ self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
+
+ self.res_layers = []
+ for i, num_blocks in enumerate(stage_blocks):
+ stride = strides[i]
+ dilation = dilations[i]
+ planes = 64 * 2**i
+ res_layer = make_res_layer(
+ block,
+ self.inplanes,
+ planes,
+ num_blocks,
+ stride=stride,
+ dilation=dilation,
+ style=self.style,
+ with_cp=with_cp)
+ self.inplanes = planes * block.expansion
+ layer_name = f'layer{i + 1}'
+ self.add_module(layer_name, res_layer)
+ self.res_layers.append(layer_name)
+
+ self.feat_dim = block.expansion * 64 * 2**(len(stage_blocks) - 1)
+
+ def init_weights(self, pretrained=None):
+ if isinstance(pretrained, str):
+ logger = logging.getLogger()
+ from ..runner import load_checkpoint
+ load_checkpoint(self, pretrained, strict=False, logger=logger)
+ elif pretrained is None:
+ for m in self.modules():
+ if isinstance(m, nn.Conv2d):
+ kaiming_init(m)
+ elif isinstance(m, nn.BatchNorm2d):
+ constant_init(m, 1)
+ else:
+ raise TypeError('pretrained must be a str or None')
+
+ def forward(self, x):
+ x = self.conv1(x)
+ x = self.bn1(x)
+ x = self.relu(x)
+ x = self.maxpool(x)
+ outs = []
+ for i, layer_name in enumerate(self.res_layers):
+ res_layer = getattr(self, layer_name)
+ x = res_layer(x)
+ if i in self.out_indices:
+ outs.append(x)
+ if len(outs) == 1:
+ return outs[0]
+ else:
+ return tuple(outs)
+
+ def train(self, mode=True):
+ super(ResNet, self).train(mode)
+ if self.bn_eval:
+ for m in self.modules():
+ if isinstance(m, nn.BatchNorm2d):
+ m.eval()
+ if self.bn_frozen:
+ for params in m.parameters():
+ params.requires_grad = False
+ if mode and self.frozen_stages >= 0:
+ for param in self.conv1.parameters():
+ param.requires_grad = False
+ for param in self.bn1.parameters():
+ param.requires_grad = False
+ self.bn1.eval()
+ self.bn1.weight.requires_grad = False
+ self.bn1.bias.requires_grad = False
+ for i in range(1, self.frozen_stages + 1):
+ mod = getattr(self, f'layer{i}')
+ mod.eval()
+ for param in mod.parameters():
+ param.requires_grad = False
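+
+
+# Example (illustrative sketch): build a ResNet-50 backbone as defined above
+# and run a dummy forward pass; the input shape is an arbitrary choice.
+# >>> import torch
+# >>> backbone = ResNet(depth=50, out_indices=(0, 1, 2, 3))
+# >>> backbone.init_weights(pretrained=None)
+# >>> feats = backbone(torch.randn(1, 3, 224, 224))
+# >>> [f.shape[1] for f in feats]  # channel dims of the four stage outputs
+# [256, 512, 1024, 2048]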
diff --git a/annotator/uniformer/mmcv/cnn/utils/__init__.py b/annotator/uniformer/mmcv/cnn/utils/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..a263e31c1e3977712827ca229bbc04910b4e928e
--- /dev/null
+++ b/annotator/uniformer/mmcv/cnn/utils/__init__.py
@@ -0,0 +1,19 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from .flops_counter import get_model_complexity_info
+from .fuse_conv_bn import fuse_conv_bn
+from .sync_bn import revert_sync_batchnorm
+from .weight_init import (INITIALIZERS, Caffe2XavierInit, ConstantInit,
+ KaimingInit, NormalInit, PretrainedInit,
+ TruncNormalInit, UniformInit, XavierInit,
+ bias_init_with_prob, caffe2_xavier_init,
+ constant_init, initialize, kaiming_init, normal_init,
+ trunc_normal_init, uniform_init, xavier_init)
+
+__all__ = [
+ 'get_model_complexity_info', 'bias_init_with_prob', 'caffe2_xavier_init',
+ 'constant_init', 'kaiming_init', 'normal_init', 'trunc_normal_init',
+ 'uniform_init', 'xavier_init', 'fuse_conv_bn', 'initialize',
+ 'INITIALIZERS', 'ConstantInit', 'XavierInit', 'NormalInit',
+ 'TruncNormalInit', 'UniformInit', 'KaimingInit', 'PretrainedInit',
+ 'Caffe2XavierInit', 'revert_sync_batchnorm'
+]
diff --git a/annotator/uniformer/mmcv/cnn/utils/__pycache__/__init__.cpython-310.pyc b/annotator/uniformer/mmcv/cnn/utils/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..da69b37c875ea11d1d06ee02eef7a1c5434fdf8a
Binary files /dev/null and b/annotator/uniformer/mmcv/cnn/utils/__pycache__/__init__.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/cnn/utils/__pycache__/__init__.cpython-38.pyc b/annotator/uniformer/mmcv/cnn/utils/__pycache__/__init__.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5bdda355b50b1d0acd75bc4a5c466a6d678d5180
Binary files /dev/null and b/annotator/uniformer/mmcv/cnn/utils/__pycache__/__init__.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/cnn/utils/__pycache__/flops_counter.cpython-310.pyc b/annotator/uniformer/mmcv/cnn/utils/__pycache__/flops_counter.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..126bce606074416deaf6b3cd98bfaf63eeedc010
Binary files /dev/null and b/annotator/uniformer/mmcv/cnn/utils/__pycache__/flops_counter.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/cnn/utils/__pycache__/flops_counter.cpython-38.pyc b/annotator/uniformer/mmcv/cnn/utils/__pycache__/flops_counter.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4b96ed77ea6b8aaaff747b916f31e55c43f62b13
Binary files /dev/null and b/annotator/uniformer/mmcv/cnn/utils/__pycache__/flops_counter.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/cnn/utils/__pycache__/fuse_conv_bn.cpython-310.pyc b/annotator/uniformer/mmcv/cnn/utils/__pycache__/fuse_conv_bn.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2271dac4fca6f0125dfe077bd89665e69d35a44b
Binary files /dev/null and b/annotator/uniformer/mmcv/cnn/utils/__pycache__/fuse_conv_bn.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/cnn/utils/__pycache__/fuse_conv_bn.cpython-38.pyc b/annotator/uniformer/mmcv/cnn/utils/__pycache__/fuse_conv_bn.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f6457c55b212f8e48832b909069d1b7a372a4b4d
Binary files /dev/null and b/annotator/uniformer/mmcv/cnn/utils/__pycache__/fuse_conv_bn.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/cnn/utils/__pycache__/sync_bn.cpython-310.pyc b/annotator/uniformer/mmcv/cnn/utils/__pycache__/sync_bn.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..48627e3774ffac741ef0b995ab0d1b616610138c
Binary files /dev/null and b/annotator/uniformer/mmcv/cnn/utils/__pycache__/sync_bn.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/cnn/utils/__pycache__/sync_bn.cpython-38.pyc b/annotator/uniformer/mmcv/cnn/utils/__pycache__/sync_bn.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a59b4e6b963a313193c52c45bd2ed961d1f1c674
Binary files /dev/null and b/annotator/uniformer/mmcv/cnn/utils/__pycache__/sync_bn.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/cnn/utils/__pycache__/weight_init.cpython-310.pyc b/annotator/uniformer/mmcv/cnn/utils/__pycache__/weight_init.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8a9625e503bef8b5a5bc256f077cffdd9aa4a045
Binary files /dev/null and b/annotator/uniformer/mmcv/cnn/utils/__pycache__/weight_init.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/cnn/utils/__pycache__/weight_init.cpython-38.pyc b/annotator/uniformer/mmcv/cnn/utils/__pycache__/weight_init.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0f9210aca26a13b86c0d74fb2fc8fcca285f31c8
Binary files /dev/null and b/annotator/uniformer/mmcv/cnn/utils/__pycache__/weight_init.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/cnn/utils/flops_counter.py b/annotator/uniformer/mmcv/cnn/utils/flops_counter.py
new file mode 100644
index 0000000000000000000000000000000000000000..d10af5feca7f4b8c0ba359b7b1c826f754e048be
--- /dev/null
+++ b/annotator/uniformer/mmcv/cnn/utils/flops_counter.py
@@ -0,0 +1,599 @@
+# Modified from flops-counter.pytorch by Vladislav Sovrasov
+# original repo: https://github.com/sovrasov/flops-counter.pytorch
+
+# MIT License
+
+# Copyright (c) 2018 Vladislav Sovrasov
+
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+import sys
+from functools import partial
+
+import numpy as np
+import torch
+import torch.nn as nn
+
+import annotator.uniformer.mmcv as mmcv
+
+
+def get_model_complexity_info(model,
+ input_shape,
+ print_per_layer_stat=True,
+ as_strings=True,
+ input_constructor=None,
+ flush=False,
+ ost=sys.stdout):
+ """Get complexity information of a model.
+
+ This method can calculate FLOPs and parameter counts of a model with
+ corresponding input shape. It can also print complexity information for
+ each layer in a model.
+
+ Supported layers are listed below:
+ - Convolutions: ``nn.Conv1d``, ``nn.Conv2d``, ``nn.Conv3d``.
+ - Activations: ``nn.ReLU``, ``nn.PReLU``, ``nn.ELU``, ``nn.LeakyReLU``,
+ ``nn.ReLU6``.
+ - Poolings: ``nn.MaxPool1d``, ``nn.MaxPool2d``, ``nn.MaxPool3d``,
+ ``nn.AvgPool1d``, ``nn.AvgPool2d``, ``nn.AvgPool3d``,
+ ``nn.AdaptiveMaxPool1d``, ``nn.AdaptiveMaxPool2d``,
+ ``nn.AdaptiveMaxPool3d``, ``nn.AdaptiveAvgPool1d``,
+ ``nn.AdaptiveAvgPool2d``, ``nn.AdaptiveAvgPool3d``.
+ - BatchNorms: ``nn.BatchNorm1d``, ``nn.BatchNorm2d``,
+ ``nn.BatchNorm3d``, ``nn.GroupNorm``, ``nn.InstanceNorm1d``,
+ ``InstanceNorm2d``, ``InstanceNorm3d``, ``nn.LayerNorm``.
+ - Linear: ``nn.Linear``.
+ - Deconvolution: ``nn.ConvTranspose2d``.
+ - Upsample: ``nn.Upsample``.
+
+ Args:
+ model (nn.Module): The model for complexity calculation.
+ input_shape (tuple): Input shape used for calculation.
+ print_per_layer_stat (bool): Whether to print complexity information
+ for each layer in a model. Default: True.
+ as_strings (bool): Output FLOPs and params counts in a string form.
+ Default: True.
+ input_constructor (None | callable): If specified, it takes a callable
+ method that generates input. Otherwise, it will generate a random
+ tensor with input shape to calculate FLOPs. Default: None.
+ flush (bool): same as that in :func:`print`. Default: False.
+ ost (stream): same as ``file`` param in :func:`print`.
+ Default: sys.stdout.
+
+ Returns:
+ tuple[float | str]: If ``as_strings`` is set to True, it will return
+ FLOPs and parameter counts in a string format. Otherwise, it will
+ return them as float numbers.
+ """
+ assert type(input_shape) is tuple
+ assert len(input_shape) >= 1
+ assert isinstance(model, nn.Module)
+ flops_model = add_flops_counting_methods(model)
+ flops_model.eval()
+ flops_model.start_flops_count()
+ if input_constructor:
+ input = input_constructor(input_shape)
+ _ = flops_model(**input)
+ else:
+ try:
+ batch = torch.ones(()).new_empty(
+ (1, *input_shape),
+ dtype=next(flops_model.parameters()).dtype,
+ device=next(flops_model.parameters()).device)
+ except StopIteration:
+ # Avoid StopIteration for models which have no parameters,
+ # like `nn.ReLU()`, `nn.AvgPool2d`, etc.
+ batch = torch.ones(()).new_empty((1, *input_shape))
+
+ _ = flops_model(batch)
+
+ flops_count, params_count = flops_model.compute_average_flops_cost()
+ if print_per_layer_stat:
+ print_model_with_flops(
+ flops_model, flops_count, params_count, ost=ost, flush=flush)
+ flops_model.stop_flops_count()
+
+ if as_strings:
+ return flops_to_string(flops_count), params_to_string(params_count)
+
+ return flops_count, params_count
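+
+
+# Example (illustrative sketch): count FLOPs and parameters of a model defined
+# elsewhere; `MyModel` and the (3, 224, 224) input shape are placeholders.
+# >>> model = MyModel()
+# >>> flops, params = get_model_complexity_info(
+# ...     model, (3, 224, 224), as_strings=True, print_per_layer_stat=False)
+# >>> print(f'FLOPs: {flops}, Params: {params}')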
+
+
+def flops_to_string(flops, units='GFLOPs', precision=2):
+ """Convert FLOPs number into a string.
+
+ Note that here we count one multiply-add as one FLOP.
+
+ Args:
+ flops (float): FLOPs number to be converted.
+ units (str | None): Converted FLOPs units. Options are None, 'GFLOPs',
+ 'MFLOPs', 'KFLOPs', 'FLOPs'. If set to None, it will automatically
+ choose the most suitable unit for FLOPs. Default: 'GFLOPs'.
+ precision (int): Digit number after the decimal point. Default: 2.
+
+ Returns:
+ str: The converted FLOPs number with units.
+
+ Examples:
+ >>> flops_to_string(1e9)
+ '1.0 GFLOPs'
+ >>> flops_to_string(2e5, 'MFLOPs')
+ '0.2 MFLOPs'
+ >>> flops_to_string(3e-9, None)
+ '3e-09 FLOPs'
+ """
+ if units is None:
+ if flops // 10**9 > 0:
+ return str(round(flops / 10.**9, precision)) + ' GFLOPs'
+ elif flops // 10**6 > 0:
+ return str(round(flops / 10.**6, precision)) + ' MFLOPs'
+ elif flops // 10**3 > 0:
+ return str(round(flops / 10.**3, precision)) + ' KFLOPs'
+ else:
+ return str(flops) + ' FLOPs'
+ else:
+ if units == 'GFLOPs':
+ return str(round(flops / 10.**9, precision)) + ' ' + units
+ elif units == 'MFLOPs':
+ return str(round(flops / 10.**6, precision)) + ' ' + units
+ elif units == 'KFLOPs':
+ return str(round(flops / 10.**3, precision)) + ' ' + units
+ else:
+ return str(flops) + ' FLOPs'
+
+
+def params_to_string(num_params, units=None, precision=2):
+ """Convert parameter number into a string.
+
+ Args:
+ num_params (float): Parameter number to be converted.
+ units (str | None): Converted parameter number units. Options are
+ None, 'M', 'K' and ''. If set to None, it will automatically choose
+ the most suitable unit for the parameter number. Default: None.
+ precision (int): Digit number after the decimal point. Default: 2.
+
+ Returns:
+ str: The converted parameter number with units.
+
+ Examples:
+ >>> params_to_string(1e9)
+ '1000.0 M'
+ >>> params_to_string(2e5)
+ '200.0 k'
+ >>> params_to_string(3e-9)
+ '3e-09'
+ """
+ if units is None:
+ if num_params // 10**6 > 0:
+ return str(round(num_params / 10**6, precision)) + ' M'
+ elif num_params // 10**3:
+ return str(round(num_params / 10**3, precision)) + ' k'
+ else:
+ return str(num_params)
+ else:
+ if units == 'M':
+ return str(round(num_params / 10.**6, precision)) + ' ' + units
+ elif units == 'K':
+ return str(round(num_params / 10.**3, precision)) + ' ' + units
+ else:
+ return str(num_params)
+
+
+def print_model_with_flops(model,
+ total_flops,
+ total_params,
+ units='GFLOPs',
+ precision=3,
+ ost=sys.stdout,
+ flush=False):
+ """Print a model with FLOPs for each layer.
+
+ Args:
+ model (nn.Module): The model to be printed.
+ total_flops (float): Total FLOPs of the model.
+ total_params (float): Total parameter counts of the model.
+ units (str | None): Converted FLOPs units. Default: 'GFLOPs'.
+ precision (int): Digit number after the decimal point. Default: 3.
+ ost (stream): same as `file` param in :func:`print`.
+ Default: sys.stdout.
+ flush (bool): same as that in :func:`print`. Default: False.
+
+ Example:
+ >>> class ExampleModel(nn.Module):
+
+ >>> def __init__(self):
+ >>> super().__init__()
+ >>> self.conv1 = nn.Conv2d(3, 8, 3)
+ >>> self.conv2 = nn.Conv2d(8, 256, 3)
+ >>> self.conv3 = nn.Conv2d(256, 8, 3)
+ >>> self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
+ >>> self.flatten = nn.Flatten()
+ >>> self.fc = nn.Linear(8, 1)
+
+ >>> def forward(self, x):
+ >>> x = self.conv1(x)
+ >>> x = self.conv2(x)
+ >>> x = self.conv3(x)
+ >>> x = self.avg_pool(x)
+ >>> x = self.flatten(x)
+ >>> x = self.fc(x)
+ >>> return x
+
+ >>> model = ExampleModel()
+ >>> x = (3, 16, 16)
+ to print the complexity information state for each layer, you can use
+ >>> get_model_complexity_info(model, x)
+ or directly use
+ >>> print_model_with_flops(model, 4579784.0, 37361)
+ ExampleModel(
+ 0.037 M, 100.000% Params, 0.005 GFLOPs, 100.000% FLOPs,
+ (conv1): Conv2d(0.0 M, 0.600% Params, 0.0 GFLOPs, 0.959% FLOPs, 3, 8, kernel_size=(3, 3), stride=(1, 1)) # noqa: E501
+ (conv2): Conv2d(0.019 M, 50.020% Params, 0.003 GFLOPs, 58.760% FLOPs, 8, 256, kernel_size=(3, 3), stride=(1, 1))
+ (conv3): Conv2d(0.018 M, 49.356% Params, 0.002 GFLOPs, 40.264% FLOPs, 256, 8, kernel_size=(3, 3), stride=(1, 1))
+ (avg_pool): AdaptiveAvgPool2d(0.0 M, 0.000% Params, 0.0 GFLOPs, 0.017% FLOPs, output_size=(1, 1))
+ (flatten): Flatten(0.0 M, 0.000% Params, 0.0 GFLOPs, 0.000% FLOPs, )
+ (fc): Linear(0.0 M, 0.024% Params, 0.0 GFLOPs, 0.000% FLOPs, in_features=8, out_features=1, bias=True)
+ )
+ """
+
+ def accumulate_params(self):
+ if is_supported_instance(self):
+ return self.__params__
+ else:
+ sum = 0
+ for m in self.children():
+ sum += m.accumulate_params()
+ return sum
+
+ def accumulate_flops(self):
+ if is_supported_instance(self):
+ return self.__flops__ / model.__batch_counter__
+ else:
+ sum = 0
+ for m in self.children():
+ sum += m.accumulate_flops()
+ return sum
+
+ def flops_repr(self):
+ accumulated_num_params = self.accumulate_params()
+ accumulated_flops_cost = self.accumulate_flops()
+ return ', '.join([
+ params_to_string(
+ accumulated_num_params, units='M', precision=precision),
+ '{:.3%} Params'.format(accumulated_num_params / total_params),
+ flops_to_string(
+ accumulated_flops_cost, units=units, precision=precision),
+ '{:.3%} FLOPs'.format(accumulated_flops_cost / total_flops),
+ self.original_extra_repr()
+ ])
+
+ def add_extra_repr(m):
+ m.accumulate_flops = accumulate_flops.__get__(m)
+ m.accumulate_params = accumulate_params.__get__(m)
+ flops_extra_repr = flops_repr.__get__(m)
+ if m.extra_repr != flops_extra_repr:
+ m.original_extra_repr = m.extra_repr
+ m.extra_repr = flops_extra_repr
+ assert m.extra_repr != m.original_extra_repr
+
+ def del_extra_repr(m):
+ if hasattr(m, 'original_extra_repr'):
+ m.extra_repr = m.original_extra_repr
+ del m.original_extra_repr
+ if hasattr(m, 'accumulate_flops'):
+ del m.accumulate_flops
+
+ model.apply(add_extra_repr)
+ print(model, file=ost, flush=flush)
+ model.apply(del_extra_repr)
+
+
+def get_model_parameters_number(model):
+ """Calculate parameter number of a model.
+
+ Args:
+ model (nn.module): The model for parameter number calculation.
+
+ Returns:
+ float: Parameter number of the model.
+ """
+ num_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
+ return num_params
+
+
+def add_flops_counting_methods(net_main_module):
+ # adding additional methods to the existing module object,
+ # this is done this way so that each function has access to self object
+ net_main_module.start_flops_count = start_flops_count.__get__(
+ net_main_module)
+ net_main_module.stop_flops_count = stop_flops_count.__get__(
+ net_main_module)
+ net_main_module.reset_flops_count = reset_flops_count.__get__(
+ net_main_module)
+ net_main_module.compute_average_flops_cost = compute_average_flops_cost.__get__( # noqa: E501
+ net_main_module)
+
+ net_main_module.reset_flops_count()
+
+ return net_main_module
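+
+
+# The counting lifecycle provided by the methods above is, roughly:
+# start_flops_count() registers forward hooks, a forward pass accumulates the
+# per-module __flops__ counters, compute_average_flops_cost() divides by the
+# batch counter, and stop_flops_count() removes the hooks. A minimal sketch
+# (model and dummy_input are placeholders):
+# >>> model = add_flops_counting_methods(model).eval()
+# >>> model.start_flops_count()
+# >>> _ = model(dummy_input)
+# >>> flops, params = model.compute_average_flops_cost()
+# >>> model.stop_flops_count()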
+
+
+def compute_average_flops_cost(self):
+ """Compute average FLOPs cost.
+
+ A method to compute average FLOPs cost, which will be available after
+ `add_flops_counting_methods()` is called on a desired net object.
+
+ Returns:
+ float: Current mean flops consumption per image.
+ """
+ batches_count = self.__batch_counter__
+ flops_sum = 0
+ for module in self.modules():
+ if is_supported_instance(module):
+ flops_sum += module.__flops__
+ params_sum = get_model_parameters_number(self)
+ return flops_sum / batches_count, params_sum
+
+
+def start_flops_count(self):
+ """Activate the computation of mean flops consumption per image.
+
+ A method to activate the computation of mean flops consumption per image,
+ which will be available after ``add_flops_counting_methods()`` is called on
+ a desired net object. It should be called before running the network.
+ """
+ add_batch_counter_hook_function(self)
+
+ def add_flops_counter_hook_function(module):
+ if is_supported_instance(module):
+ if hasattr(module, '__flops_handle__'):
+ return
+
+ else:
+ handle = module.register_forward_hook(
+ get_modules_mapping()[type(module)])
+
+ module.__flops_handle__ = handle
+
+ self.apply(partial(add_flops_counter_hook_function))
+
+
+def stop_flops_count(self):
+ """Stop computing the mean flops consumption per image.
+
+ A method to stop computing the mean flops consumption per image, which will
+ be available after ``add_flops_counting_methods()`` is called on a desired
+ net object. It can be called to pause the computation at any time.
+ """
+ remove_batch_counter_hook_function(self)
+ self.apply(remove_flops_counter_hook_function)
+
+
+def reset_flops_count(self):
+ """Reset statistics computed so far.
+
+ A method to reset computed statistics, which will be available after
+ `add_flops_counting_methods()` is called on a desired net object.
+ """
+ add_batch_counter_variables_or_reset(self)
+ self.apply(add_flops_counter_variable_or_reset)
+
+
+# ---- Internal functions
+def empty_flops_counter_hook(module, input, output):
+ module.__flops__ += 0
+
+
+def upsample_flops_counter_hook(module, input, output):
+ output_size = output[0]
+ batch_size = output_size.shape[0]
+ output_elements_count = batch_size
+ for val in output_size.shape[1:]:
+ output_elements_count *= val
+ module.__flops__ += int(output_elements_count)
+
+
+def relu_flops_counter_hook(module, input, output):
+ active_elements_count = output.numel()
+ module.__flops__ += int(active_elements_count)
+
+
+def linear_flops_counter_hook(module, input, output):
+ input = input[0]
+ output_last_dim = output.shape[
+ -1] # pytorch checks dimensions, so here we don't care much
+ module.__flops__ += int(np.prod(input.shape) * output_last_dim)
+
+
+def pool_flops_counter_hook(module, input, output):
+ input = input[0]
+ module.__flops__ += int(np.prod(input.shape))
+
+
+def norm_flops_counter_hook(module, input, output):
+ input = input[0]
+
+ batch_flops = np.prod(input.shape)
+ if (getattr(module, 'affine', False)
+ or getattr(module, 'elementwise_affine', False)):
+ batch_flops *= 2
+ module.__flops__ += int(batch_flops)
+
+
+def deconv_flops_counter_hook(conv_module, input, output):
+ # Can have multiple inputs, getting the first one
+ input = input[0]
+
+ batch_size = input.shape[0]
+ input_height, input_width = input.shape[2:]
+
+ kernel_height, kernel_width = conv_module.kernel_size
+ in_channels = conv_module.in_channels
+ out_channels = conv_module.out_channels
+ groups = conv_module.groups
+
+ filters_per_channel = out_channels // groups
+ conv_per_position_flops = (
+ kernel_height * kernel_width * in_channels * filters_per_channel)
+
+ active_elements_count = batch_size * input_height * input_width
+ overall_conv_flops = conv_per_position_flops * active_elements_count
+ bias_flops = 0
+ if conv_module.bias is not None:
+ output_height, output_width = output.shape[2:]
+ bias_flops = out_channels * batch_size * output_height * output_width
+ overall_flops = overall_conv_flops + bias_flops
+
+ conv_module.__flops__ += int(overall_flops)
+
+
+def conv_flops_counter_hook(conv_module, input, output):
+ # Can have multiple inputs, getting the first one
+ input = input[0]
+
+ batch_size = input.shape[0]
+ output_dims = list(output.shape[2:])
+
+ kernel_dims = list(conv_module.kernel_size)
+ in_channels = conv_module.in_channels
+ out_channels = conv_module.out_channels
+ groups = conv_module.groups
+
+ filters_per_channel = out_channels // groups
+ conv_per_position_flops = int(
+ np.prod(kernel_dims)) * in_channels * filters_per_channel
+
+ active_elements_count = batch_size * int(np.prod(output_dims))
+
+ overall_conv_flops = conv_per_position_flops * active_elements_count
+
+ bias_flops = 0
+
+ if conv_module.bias is not None:
+
+ bias_flops = out_channels * active_elements_count
+
+ overall_flops = overall_conv_flops + bias_flops
+
+ conv_module.__flops__ += int(overall_flops)
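+
+
+# Worked example of the rule above (a multiply-add is counted as one FLOP):
+# a 3x3 Conv2d with 64 input and 128 output channels, groups=1 and a 56x56
+# output has per-position cost 3*3*64*128 = 73,728 and 1*56*56 = 3,136 active
+# positions, i.e. about 2.31e8 FLOPs per image, plus 128 * 3,136 bias FLOPs
+# when a bias is present. The layer sizes are purely illustrative.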
+
+
+def batch_counter_hook(module, input, output):
+ batch_size = 1
+ if len(input) > 0:
+ # Can have multiple inputs, getting the first one
+ input = input[0]
+ batch_size = len(input)
+ else:
+ print('Warning! No positional inputs found for a module, '
+ 'assuming batch size is 1.')
+ module.__batch_counter__ += batch_size
+
+
+def add_batch_counter_variables_or_reset(module):
+
+ module.__batch_counter__ = 0
+
+
+def add_batch_counter_hook_function(module):
+ if hasattr(module, '__batch_counter_handle__'):
+ return
+
+ handle = module.register_forward_hook(batch_counter_hook)
+ module.__batch_counter_handle__ = handle
+
+
+def remove_batch_counter_hook_function(module):
+ if hasattr(module, '__batch_counter_handle__'):
+ module.__batch_counter_handle__.remove()
+ del module.__batch_counter_handle__
+
+
+def add_flops_counter_variable_or_reset(module):
+ if is_supported_instance(module):
+ if hasattr(module, '__flops__') or hasattr(module, '__params__'):
+ print('Warning: variables __flops__ or __params__ are already '
+ 'defined for the module ' + type(module).__name__ +
+ '. ptflops can affect your code!')
+ module.__flops__ = 0
+ module.__params__ = get_model_parameters_number(module)
+
+
+def is_supported_instance(module):
+ if type(module) in get_modules_mapping():
+ return True
+ return False
+
+
+def remove_flops_counter_hook_function(module):
+ if is_supported_instance(module):
+ if hasattr(module, '__flops_handle__'):
+ module.__flops_handle__.remove()
+ del module.__flops_handle__
+
+
+def get_modules_mapping():
+ return {
+ # convolutions
+ nn.Conv1d: conv_flops_counter_hook,
+ nn.Conv2d: conv_flops_counter_hook,
+ mmcv.cnn.bricks.Conv2d: conv_flops_counter_hook,
+ nn.Conv3d: conv_flops_counter_hook,
+ mmcv.cnn.bricks.Conv3d: conv_flops_counter_hook,
+ # activations
+ nn.ReLU: relu_flops_counter_hook,
+ nn.PReLU: relu_flops_counter_hook,
+ nn.ELU: relu_flops_counter_hook,
+ nn.LeakyReLU: relu_flops_counter_hook,
+ nn.ReLU6: relu_flops_counter_hook,
+ # poolings
+ nn.MaxPool1d: pool_flops_counter_hook,
+ nn.AvgPool1d: pool_flops_counter_hook,
+ nn.AvgPool2d: pool_flops_counter_hook,
+ nn.MaxPool2d: pool_flops_counter_hook,
+ mmcv.cnn.bricks.MaxPool2d: pool_flops_counter_hook,
+ nn.MaxPool3d: pool_flops_counter_hook,
+ mmcv.cnn.bricks.MaxPool3d: pool_flops_counter_hook,
+ nn.AvgPool3d: pool_flops_counter_hook,
+ nn.AdaptiveMaxPool1d: pool_flops_counter_hook,
+ nn.AdaptiveAvgPool1d: pool_flops_counter_hook,
+ nn.AdaptiveMaxPool2d: pool_flops_counter_hook,
+ nn.AdaptiveAvgPool2d: pool_flops_counter_hook,
+ nn.AdaptiveMaxPool3d: pool_flops_counter_hook,
+ nn.AdaptiveAvgPool3d: pool_flops_counter_hook,
+ # normalizations
+ nn.BatchNorm1d: norm_flops_counter_hook,
+ nn.BatchNorm2d: norm_flops_counter_hook,
+ nn.BatchNorm3d: norm_flops_counter_hook,
+ nn.GroupNorm: norm_flops_counter_hook,
+ nn.InstanceNorm1d: norm_flops_counter_hook,
+ nn.InstanceNorm2d: norm_flops_counter_hook,
+ nn.InstanceNorm3d: norm_flops_counter_hook,
+ nn.LayerNorm: norm_flops_counter_hook,
+ # FC
+ nn.Linear: linear_flops_counter_hook,
+ mmcv.cnn.bricks.Linear: linear_flops_counter_hook,
+ # Upscale
+ nn.Upsample: upsample_flops_counter_hook,
+ # Deconvolution
+ nn.ConvTranspose2d: deconv_flops_counter_hook,
+ mmcv.cnn.bricks.ConvTranspose2d: deconv_flops_counter_hook,
+ }
diff --git a/annotator/uniformer/mmcv/cnn/utils/fuse_conv_bn.py b/annotator/uniformer/mmcv/cnn/utils/fuse_conv_bn.py
new file mode 100644
index 0000000000000000000000000000000000000000..cb7076f80bf37f7931185bf0293ffcc1ce19c8ef
--- /dev/null
+++ b/annotator/uniformer/mmcv/cnn/utils/fuse_conv_bn.py
@@ -0,0 +1,59 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import torch
+import torch.nn as nn
+
+
+def _fuse_conv_bn(conv, bn):
+ """Fuse conv and bn into one module.
+
+ Args:
+ conv (nn.Module): Conv to be fused.
+ bn (nn.Module): BN to be fused.
+
+ Returns:
+ nn.Module: Fused module.
+ """
+ conv_w = conv.weight
+ conv_b = conv.bias if conv.bias is not None else torch.zeros_like(
+ bn.running_mean)
+
+ factor = bn.weight / torch.sqrt(bn.running_var + bn.eps)
+ conv.weight = nn.Parameter(conv_w *
+ factor.reshape([conv.out_channels, 1, 1, 1]))
+ conv.bias = nn.Parameter((conv_b - bn.running_mean) * factor + bn.bias)
+ return conv
+
+
+def fuse_conv_bn(module):
+ """Recursively fuse conv and bn in a module.
+
+ During inference, batch norm layers stop updating their running statistics
+ and only apply the stored per-channel mean and variance, which makes it
+ possible to fuse them into the preceding conv layers to save computation
+ and simplify the network structure.
+
+ Args:
+ module (nn.Module): Module to be fused.
+
+ Returns:
+ nn.Module: Fused module.
+ """
+ last_conv = None
+ last_conv_name = None
+
+ for name, child in module.named_children():
+ if isinstance(child,
+ (nn.modules.batchnorm._BatchNorm, nn.SyncBatchNorm)):
+ if last_conv is None: # only fuse BN that is after Conv
+ continue
+ fused_conv = _fuse_conv_bn(last_conv, child)
+ module._modules[last_conv_name] = fused_conv
+ # To reduce changes, set BN as Identity instead of deleting it.
+ module._modules[name] = nn.Identity()
+ last_conv = None
+ elif isinstance(child, nn.Conv2d):
+ last_conv = child
+ last_conv_name = name
+ else:
+ fuse_conv_bn(child)
+ return module
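+
+
+# Example (illustrative sketch; `model` is a placeholder): fusing folds each BN
+# into its preceding conv via w' = w * gamma / sqrt(var + eps) and
+# b' = (b - mean) * gamma / sqrt(var + eps) + beta, so inference outputs are
+# unchanged while the BN layers become no-op Identity modules.
+# >>> model.eval()  # fuse for inference only, with frozen running stats
+# >>> fused_model = fuse_conv_bn(model)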
diff --git a/annotator/uniformer/mmcv/cnn/utils/sync_bn.py b/annotator/uniformer/mmcv/cnn/utils/sync_bn.py
new file mode 100644
index 0000000000000000000000000000000000000000..f78f39181d75bb85c53e8c7c8eaf45690e9f0bee
--- /dev/null
+++ b/annotator/uniformer/mmcv/cnn/utils/sync_bn.py
@@ -0,0 +1,59 @@
+import torch
+
+import annotator.uniformer.mmcv as mmcv
+
+
+class _BatchNormXd(torch.nn.modules.batchnorm._BatchNorm):
+ """A general BatchNorm layer without input dimension check.
+
+ Reproduced from @kapily's work:
+ (https://github.com/pytorch/pytorch/issues/41081#issuecomment-783961547)
+ The only difference between BatchNorm1d, BatchNorm2d, BatchNorm3d, etc. is
+ `_check_input_dim`, which is designed for tensor sanity checks.
+ The check has been bypassed in this class for the convenience of converting
+ SyncBatchNorm.
+ """
+
+ def _check_input_dim(self, input):
+ return
+
+
+def revert_sync_batchnorm(module):
+ """Helper function to convert all `SyncBatchNorm` (SyncBN) and
+ `mmcv.ops.sync_bn.SyncBatchNorm` (MMSyncBN) layers in the model to
+ `BatchNormXd` layers.
+
+ Adapted from @kapily's work:
+ (https://github.com/pytorch/pytorch/issues/41081#issuecomment-783961547)
+
+ Args:
+ module (nn.Module): The module containing `SyncBatchNorm` layers.
+
+ Returns:
+ module_output: The converted module with `BatchNormXd` layers.
+ """
+ module_output = module
+ module_checklist = [torch.nn.modules.batchnorm.SyncBatchNorm]
+ if hasattr(mmcv, 'ops'):
+ module_checklist.append(mmcv.ops.SyncBatchNorm)
+ if isinstance(module, tuple(module_checklist)):
+ module_output = _BatchNormXd(module.num_features, module.eps,
+ module.momentum, module.affine,
+ module.track_running_stats)
+ if module.affine:
+ # no_grad() may not be needed here but
+ # just to be consistent with `convert_sync_batchnorm()`
+ with torch.no_grad():
+ module_output.weight = module.weight
+ module_output.bias = module.bias
+ module_output.running_mean = module.running_mean
+ module_output.running_var = module.running_var
+ module_output.num_batches_tracked = module.num_batches_tracked
+ module_output.training = module.training
+ # qconfig exists in quantized models
+ if hasattr(module, 'qconfig'):
+ module_output.qconfig = module.qconfig
+ for name, child in module.named_children():
+ module_output.add_module(name, revert_sync_batchnorm(child))
+ del module
+ return module_output
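+
+
+# Example (illustrative sketch; `model` is a placeholder): SyncBatchNorm needs
+# an initialized process group, so reverting to the dimension-agnostic
+# _BatchNormXd lets a checkpoint trained with SyncBN run on a single GPU or
+# on CPU without any distributed setup.
+# >>> model = revert_sync_batchnorm(model)
+# >>> out = model(inputs)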
diff --git a/annotator/uniformer/mmcv/cnn/utils/weight_init.py b/annotator/uniformer/mmcv/cnn/utils/weight_init.py
new file mode 100644
index 0000000000000000000000000000000000000000..287a1d0bffe26e023029d48634d9b761deda7ba4
--- /dev/null
+++ b/annotator/uniformer/mmcv/cnn/utils/weight_init.py
@@ -0,0 +1,684 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import copy
+import math
+import warnings
+
+import numpy as np
+import torch
+import torch.nn as nn
+from torch import Tensor
+
+from annotator.uniformer.mmcv.utils import Registry, build_from_cfg, get_logger, print_log
+
+INITIALIZERS = Registry('initializer')
+
+
+def update_init_info(module, init_info):
+ """Update the `_params_init_info` in the module if the values of the
+ parameters have been changed.
+
+ Args:
+ module (obj:`nn.Module`): The module of PyTorch with a user-defined
+ attribute `_params_init_info` which records the initialization
+ information.
+ init_info (str): The string that describes the initialization.
+ """
+ assert hasattr(
+ module,
+ '_params_init_info'), f'Can not find `_params_init_info` in {module}'
+ for name, param in module.named_parameters():
+
+ assert param in module._params_init_info, (
+ f'Find a new :obj:`Parameter` '
+ f'named `{name}` during executing the '
+ f'`init_weights` of '
+ f'`{module.__class__.__name__}`. '
+ f'Please do not add or '
+ f'replace parameters during executing '
+ f'the `init_weights`. ')
+
+ # The parameter has been changed during executing the
+ # `init_weights` of module
+ mean_value = param.data.mean()
+ if module._params_init_info[param]['tmp_mean_value'] != mean_value:
+ module._params_init_info[param]['init_info'] = init_info
+ module._params_init_info[param]['tmp_mean_value'] = mean_value
+
+
+def constant_init(module, val, bias=0):
+ if hasattr(module, 'weight') and module.weight is not None:
+ nn.init.constant_(module.weight, val)
+ if hasattr(module, 'bias') and module.bias is not None:
+ nn.init.constant_(module.bias, bias)
+
+
+def xavier_init(module, gain=1, bias=0, distribution='normal'):
+ assert distribution in ['uniform', 'normal']
+ if hasattr(module, 'weight') and module.weight is not None:
+ if distribution == 'uniform':
+ nn.init.xavier_uniform_(module.weight, gain=gain)
+ else:
+ nn.init.xavier_normal_(module.weight, gain=gain)
+ if hasattr(module, 'bias') and module.bias is not None:
+ nn.init.constant_(module.bias, bias)
+
+
+def normal_init(module, mean=0, std=1, bias=0):
+ if hasattr(module, 'weight') and module.weight is not None:
+ nn.init.normal_(module.weight, mean, std)
+ if hasattr(module, 'bias') and module.bias is not None:
+ nn.init.constant_(module.bias, bias)
+
+
+def trunc_normal_init(module: nn.Module,
+ mean: float = 0,
+ std: float = 1,
+ a: float = -2,
+ b: float = 2,
+ bias: float = 0) -> None:
+ if hasattr(module, 'weight') and module.weight is not None:
+ trunc_normal_(module.weight, mean, std, a, b) # type: ignore
+ if hasattr(module, 'bias') and module.bias is not None:
+ nn.init.constant_(module.bias, bias) # type: ignore
+
+
+def uniform_init(module, a=0, b=1, bias=0):
+ if hasattr(module, 'weight') and module.weight is not None:
+ nn.init.uniform_(module.weight, a, b)
+ if hasattr(module, 'bias') and module.bias is not None:
+ nn.init.constant_(module.bias, bias)
+
+
+def kaiming_init(module,
+ a=0,
+ mode='fan_out',
+ nonlinearity='relu',
+ bias=0,
+ distribution='normal'):
+ assert distribution in ['uniform', 'normal']
+ if hasattr(module, 'weight') and module.weight is not None:
+ if distribution == 'uniform':
+ nn.init.kaiming_uniform_(
+ module.weight, a=a, mode=mode, nonlinearity=nonlinearity)
+ else:
+ nn.init.kaiming_normal_(
+ module.weight, a=a, mode=mode, nonlinearity=nonlinearity)
+ if hasattr(module, 'bias') and module.bias is not None:
+ nn.init.constant_(module.bias, bias)
+
+
+def caffe2_xavier_init(module, bias=0):
+ # `XavierFill` in Caffe2 corresponds to `kaiming_uniform_` in PyTorch
+ # Acknowledgment to FAIR's internal code
+ kaiming_init(
+ module,
+ a=1,
+ mode='fan_in',
+ nonlinearity='leaky_relu',
+ bias=bias,
+ distribution='uniform')
+
+
+def bias_init_with_prob(prior_prob):
+ """Initialize conv/fc bias value according to a given probability value."""
+ bias_init = float(-np.log((1 - prior_prob) / prior_prob))
+ return bias_init
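+
+
+# Worked example: with prior_prob = 0.01 the returned bias is
+# -log((1 - 0.01) / 0.01) = -log(99), about -4.595, so a sigmoid over the
+# initial logits predicts roughly 0.01, the usual focal-loss class prior.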
+
+
+def _get_bases_name(m):
+ return [b.__name__ for b in m.__class__.__bases__]
+
+
+class BaseInit(object):
+
+ def __init__(self, *, bias=0, bias_prob=None, layer=None):
+ self.wholemodule = False
+ if not isinstance(bias, (int, float)):
+ raise TypeError(f'bias must be a number, but got a {type(bias)}')
+
+ if bias_prob is not None:
+ if not isinstance(bias_prob, float):
+ raise TypeError(f'bias_prob type must be float, \
+ but got {type(bias_prob)}')
+
+ if layer is not None:
+ if not isinstance(layer, (str, list)):
+ raise TypeError(f'layer must be a str or a list of str, \
+ but got a {type(layer)}')
+ else:
+ layer = []
+
+ if bias_prob is not None:
+ self.bias = bias_init_with_prob(bias_prob)
+ else:
+ self.bias = bias
+ self.layer = [layer] if isinstance(layer, str) else layer
+
+ def _get_init_info(self):
+ info = f'{self.__class__.__name__}, bias={self.bias}'
+ return info
+
+
+@INITIALIZERS.register_module(name='Constant')
+class ConstantInit(BaseInit):
+ """Initialize module parameters with constant values.
+
+ Args:
+ val (int | float): the value to fill the weights in the module with
+ bias (int | float): the value to fill the bias. Defaults to 0.
+ bias_prob (float, optional): the probability for bias initialization.
+ Defaults to None.
+ layer (str | list[str], optional): the layer(s) to be initialized.
+ Defaults to None.
+ """
+
+ def __init__(self, val, **kwargs):
+ super().__init__(**kwargs)
+ self.val = val
+
+ def __call__(self, module):
+
+ def init(m):
+ if self.wholemodule:
+ constant_init(m, self.val, self.bias)
+ else:
+ layername = m.__class__.__name__
+ basesname = _get_bases_name(m)
+ if len(set(self.layer) & set([layername] + basesname)):
+ constant_init(m, self.val, self.bias)
+
+ module.apply(init)
+ if hasattr(module, '_params_init_info'):
+ update_init_info(module, init_info=self._get_init_info())
+
+ def _get_init_info(self):
+ info = f'{self.__class__.__name__}: val={self.val}, bias={self.bias}'
+ return info
+
+
+@INITIALIZERS.register_module(name='Xavier')
+class XavierInit(BaseInit):
+ r"""Initialize module parameters with values according to the method
+ described in `Understanding the difficulty of training deep feedforward
+ neural networks` - Glorot, X. & Bengio, Y. (2010).
+
+ Args:
+ gain (int | float): an optional scaling factor. Defaults to 1.
+ bias (int | float): the value to fill the bias. Defaults to 0.
+ bias_prob (float, optional): the probability for bias initialization.
+ Defaults to None.
+ distribution (str): distribution either be ``'normal'``
+ or ``'uniform'``. Defaults to ``'normal'``.
+ layer (str | list[str], optional): the layer(s) to be initialized.
+ Defaults to None.
+ """
+
+ def __init__(self, gain=1, distribution='normal', **kwargs):
+ super().__init__(**kwargs)
+ self.gain = gain
+ self.distribution = distribution
+
+ def __call__(self, module):
+
+ def init(m):
+ if self.wholemodule:
+ xavier_init(m, self.gain, self.bias, self.distribution)
+ else:
+ layername = m.__class__.__name__
+ basesname = _get_bases_name(m)
+ if len(set(self.layer) & set([layername] + basesname)):
+ xavier_init(m, self.gain, self.bias, self.distribution)
+
+ module.apply(init)
+ if hasattr(module, '_params_init_info'):
+ update_init_info(module, init_info=self._get_init_info())
+
+ def _get_init_info(self):
+ info = f'{self.__class__.__name__}: gain={self.gain}, ' \
+ f'distribution={self.distribution}, bias={self.bias}'
+ return info
+
+
+@INITIALIZERS.register_module(name='Normal')
+class NormalInit(BaseInit):
+ r"""Initialize module parameters with the values drawn from the normal
+ distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`.
+
+ Args:
+ mean (int | float): the mean of the normal distribution. Defaults to 0.
+ std (int | float): the standard deviation of the normal distribution.
+ Defaults to 1.
+ bias (int | float): the value to fill the bias. Defaults to 0.
+ bias_prob (float, optional): the probability for bias initialization.
+ Defaults to None.
+ layer (str | list[str], optional): the layer(s) to be initialized.
+ Defaults to None.
+
+ """
+
+ def __init__(self, mean=0, std=1, **kwargs):
+ super().__init__(**kwargs)
+ self.mean = mean
+ self.std = std
+
+ def __call__(self, module):
+
+ def init(m):
+ if self.wholemodule:
+ normal_init(m, self.mean, self.std, self.bias)
+ else:
+ layername = m.__class__.__name__
+ basesname = _get_bases_name(m)
+ if len(set(self.layer) & set([layername] + basesname)):
+ normal_init(m, self.mean, self.std, self.bias)
+
+ module.apply(init)
+ if hasattr(module, '_params_init_info'):
+ update_init_info(module, init_info=self._get_init_info())
+
+ def _get_init_info(self):
+ info = f'{self.__class__.__name__}: mean={self.mean},' \
+ f' std={self.std}, bias={self.bias}'
+ return info
+
+
+@INITIALIZERS.register_module(name='TruncNormal')
+class TruncNormalInit(BaseInit):
+ r"""Initialize module parameters with the values drawn from the normal
+ distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`, with values
+ outside :math:`[a, b]` redrawn until they are within the bounds.
+
+ Args:
+ mean (float): the mean of the normal distribution. Defaults to 0.
+ std (float): the standard deviation of the normal distribution.
+ Defaults to 1.
+ a (float): The minimum cutoff value.
+ b (float): The maximum cutoff value.
+ bias (float): the value to fill the bias. Defaults to 0.
+ bias_prob (float, optional): the probability for bias initialization.
+ Defaults to None.
+ layer (str | list[str], optional): the layer(s) to be initialized.
+ Defaults to None.
+
+ """
+
+ def __init__(self,
+ mean: float = 0,
+ std: float = 1,
+ a: float = -2,
+ b: float = 2,
+ **kwargs) -> None:
+ super().__init__(**kwargs)
+ self.mean = mean
+ self.std = std
+ self.a = a
+ self.b = b
+
+ def __call__(self, module: nn.Module) -> None:
+
+ def init(m):
+ if self.wholemodule:
+ trunc_normal_init(m, self.mean, self.std, self.a, self.b,
+ self.bias)
+ else:
+ layername = m.__class__.__name__
+ basesname = _get_bases_name(m)
+ if len(set(self.layer) & set([layername] + basesname)):
+ trunc_normal_init(m, self.mean, self.std, self.a, self.b,
+ self.bias)
+
+ module.apply(init)
+ if hasattr(module, '_params_init_info'):
+ update_init_info(module, init_info=self._get_init_info())
+
+ def _get_init_info(self):
+ info = f'{self.__class__.__name__}: a={self.a}, b={self.b},' \
+ f' mean={self.mean}, std={self.std}, bias={self.bias}'
+ return info
+
+
+@INITIALIZERS.register_module(name='Uniform')
+class UniformInit(BaseInit):
+ r"""Initialize module parameters with values drawn from the uniform
+ distribution :math:`\mathcal{U}(a, b)`.
+
+ Args:
+ a (int | float): the lower bound of the uniform distribution.
+ Defaults to 0.
+ b (int | float): the upper bound of the uniform distribution.
+ Defaults to 1.
+ bias (int | float): the value to fill the bias. Defaults to 0.
+ bias_prob (float, optional): the probability for bias initialization.
+ Defaults to None.
+ layer (str | list[str], optional): the layer(s) to be initialized.
+ Defaults to None.
+ """
+
+ def __init__(self, a=0, b=1, **kwargs):
+ super().__init__(**kwargs)
+ self.a = a
+ self.b = b
+
+ def __call__(self, module):
+
+ def init(m):
+ if self.wholemodule:
+ uniform_init(m, self.a, self.b, self.bias)
+ else:
+ layername = m.__class__.__name__
+ basesname = _get_bases_name(m)
+ if len(set(self.layer) & set([layername] + basesname)):
+ uniform_init(m, self.a, self.b, self.bias)
+
+ module.apply(init)
+ if hasattr(module, '_params_init_info'):
+ update_init_info(module, init_info=self._get_init_info())
+
+ def _get_init_info(self):
+ info = f'{self.__class__.__name__}: a={self.a},' \
+ f' b={self.b}, bias={self.bias}'
+ return info
+
+
+@INITIALIZERS.register_module(name='Kaiming')
+class KaimingInit(BaseInit):
+ r"""Initialize module parameters with the values according to the method
+ described in `Delving deep into rectifiers: Surpassing human-level
+ performance on ImageNet classification` - He, K. et al. (2015).
+
+ Args:
+ a (int | float): the negative slope of the rectifier used after this
+ layer (only used with ``'leaky_relu'``). Defaults to 0.
+ mode (str): either ``'fan_in'`` or ``'fan_out'``. Choosing
+ ``'fan_in'`` preserves the magnitude of the variance of the weights
+ in the forward pass. Choosing ``'fan_out'`` preserves the
+ magnitudes in the backwards pass. Defaults to ``'fan_out'``.
+ nonlinearity (str): the non-linear function (`nn.functional` name),
+ recommended to use only with ``'relu'`` or ``'leaky_relu'``.
+ Defaults to 'relu'.
+ bias (int | float): the value to fill the bias. Defaults to 0.
+ bias_prob (float, optional): the probability for bias initialization.
+ Defaults to None.
+ distribution (str): distribution either be ``'normal'`` or
+ ``'uniform'``. Defaults to ``'normal'``.
+ layer (str | list[str], optional): the layer(s) to be initialized.
+ Defaults to None.
+ """
+
+ def __init__(self,
+ a=0,
+ mode='fan_out',
+ nonlinearity='relu',
+ distribution='normal',
+ **kwargs):
+ super().__init__(**kwargs)
+ self.a = a
+ self.mode = mode
+ self.nonlinearity = nonlinearity
+ self.distribution = distribution
+
+ def __call__(self, module):
+
+ def init(m):
+ if self.wholemodule:
+ kaiming_init(m, self.a, self.mode, self.nonlinearity,
+ self.bias, self.distribution)
+ else:
+ layername = m.__class__.__name__
+ basesname = _get_bases_name(m)
+ if len(set(self.layer) & set([layername] + basesname)):
+ kaiming_init(m, self.a, self.mode, self.nonlinearity,
+ self.bias, self.distribution)
+
+ module.apply(init)
+ if hasattr(module, '_params_init_info'):
+ update_init_info(module, init_info=self._get_init_info())
+
+ def _get_init_info(self):
+ info = f'{self.__class__.__name__}: a={self.a}, mode={self.mode}, ' \
+ f'nonlinearity={self.nonlinearity}, ' \
+ f'distribution={self.distribution}, bias={self.bias}'
+ return info
+
+
+@INITIALIZERS.register_module(name='Caffe2Xavier')
+class Caffe2XavierInit(KaimingInit):
+ # `XavierFill` in Caffe2 corresponds to `kaiming_uniform_` in PyTorch
+ # Acknowledgment to FAIR's internal code
+ def __init__(self, **kwargs):
+ super().__init__(
+ a=1,
+ mode='fan_in',
+ nonlinearity='leaky_relu',
+ distribution='uniform',
+ **kwargs)
+
+ def __call__(self, module):
+ super().__call__(module)
+
+
+@INITIALIZERS.register_module(name='Pretrained')
+class PretrainedInit(object):
+ """Initialize module by loading a pretrained model.
+
+ Args:
+ checkpoint (str): the checkpoint file of the pretrained model to be
+ loaded.
+ prefix (str, optional): the prefix of a sub-module in the pretrained
+ model. It is for loading a part of the pretrained model to
+ initialize. For example, if we would like to only load the
+ backbone of a detector model, we can set ``prefix='backbone.'``.
+ Defaults to None.
+ map_location (str): map tensors into proper locations.
+ """
+
+ def __init__(self, checkpoint, prefix=None, map_location=None):
+ self.checkpoint = checkpoint
+ self.prefix = prefix
+ self.map_location = map_location
+
+ def __call__(self, module):
+ from annotator.uniformer.mmcv.runner import (_load_checkpoint_with_prefix, load_checkpoint,
+ load_state_dict)
+ logger = get_logger('mmcv')
+ if self.prefix is None:
+ print_log(f'load model from: {self.checkpoint}', logger=logger)
+ load_checkpoint(
+ module,
+ self.checkpoint,
+ map_location=self.map_location,
+ strict=False,
+ logger=logger)
+ else:
+ print_log(
+ f'load {self.prefix} in model from: {self.checkpoint}',
+ logger=logger)
+ state_dict = _load_checkpoint_with_prefix(
+ self.prefix, self.checkpoint, map_location=self.map_location)
+ load_state_dict(module, state_dict, strict=False, logger=logger)
+
+ if hasattr(module, '_params_init_info'):
+ update_init_info(module, init_info=self._get_init_info())
+
+ def _get_init_info(self):
+ info = f'{self.__class__.__name__}: load from {self.checkpoint}'
+ return info
+
+
+def _initialize(module, cfg, wholemodule=False):
+ func = build_from_cfg(cfg, INITIALIZERS)
+ # The wholemodule flag is for override mode: there is no layer key in
+ # override, so the initializer assigns init values to the whole module
+ # named in override.
+ func.wholemodule = wholemodule
+ func(module)
+
+
+def _initialize_override(module, override, cfg):
+ if not isinstance(override, (dict, list)):
+ raise TypeError(f'override must be a dict or a list of dict, \
+ but got {type(override)}')
+
+ override = [override] if isinstance(override, dict) else override
+
+ for override_ in override:
+
+ cp_override = copy.deepcopy(override_)
+ name = cp_override.pop('name', None)
+ if name is None:
+ raise ValueError('`override` must contain the key "name", '
+ f'but got {cp_override}')
+ # if override only has name key, it means use args in init_cfg
+ if not cp_override:
+ cp_override.update(cfg)
+ # if override has the name key and other args but misses the type key,
+ # it will raise an error
+ elif 'type' not in cp_override.keys():
+ raise ValueError(
+ f'`override` need "type" key, but got {cp_override}')
+
+ if hasattr(module, name):
+ _initialize(getattr(module, name), cp_override, wholemodule=True)
+ else:
+ raise RuntimeError(f'module did not have attribute {name}, '
+ f'but init_cfg is {cp_override}.')
+
+
+def initialize(module, init_cfg):
+ """Initialize a module.
+
+ Args:
+ module (``torch.nn.Module``): the module will be initialized.
+ init_cfg (dict | list[dict]): initialization configuration dict to
+ define initializer. OpenMMLab has implemented 6 initializers
+ including ``Constant``, ``Xavier``, ``Normal``, ``Uniform``,
+ ``Kaiming``, and ``Pretrained``.
+ Example:
+ >>> module = nn.Linear(2, 3, bias=True)
+ >>> init_cfg = dict(type='Constant', layer='Linear', val=1, bias=2)
+ >>> initialize(module, init_cfg)
+
+ >>> module = nn.Sequential(nn.Conv1d(3, 1, 3), nn.Linear(1,2))
+ >>> # define key ``'layer'`` for initializing layer with different
+ >>> # configuration
+ >>> init_cfg = [dict(type='Constant', layer='Conv1d', val=1),
+ dict(type='Constant', layer='Linear', val=2)]
+ >>> initialize(module, init_cfg)
+
+ >>> # define key``'override'`` to initialize some specific part in
+ >>> # module
+ >>> class FooNet(nn.Module):
+ >>> def __init__(self):
+ >>> super().__init__()
+ >>> self.feat = nn.Conv2d(3, 16, 3)
+ >>> self.reg = nn.Conv2d(16, 10, 3)
+ >>> self.cls = nn.Conv2d(16, 5, 3)
+ >>> model = FooNet()
+ >>> init_cfg = dict(type='Constant', val=1, bias=2, layer='Conv2d',
+ >>> override=dict(type='Constant', name='reg', val=3, bias=4))
+ >>> initialize(model, init_cfg)
+
+ >>> model = ResNet(depth=50)
+ >>> # Initialize weights with the pretrained model.
+ >>> init_cfg = dict(type='Pretrained',
+ checkpoint='torchvision://resnet50')
+ >>> initialize(model, init_cfg)
+
+ >>> # Initialize weights of a sub-module with the specific part of
+ >>> # a pretrained model by using "prefix".
+ >>> url = 'http://download.openmmlab.com/mmdetection/v2.0/retinanet/'\
+ >>> 'retinanet_r50_fpn_1x_coco/'\
+ >>> 'retinanet_r50_fpn_1x_coco_20200130-c2398f9e.pth'
+ >>> init_cfg = dict(type='Pretrained',
+ checkpoint=url, prefix='backbone.')
+ """
+ if not isinstance(init_cfg, (dict, list)):
+ raise TypeError(f'init_cfg must be a dict or a list of dict, \
+ but got {type(init_cfg)}')
+
+ if isinstance(init_cfg, dict):
+ init_cfg = [init_cfg]
+
+ for cfg in init_cfg:
+ # should deeply copy the original config because cfg may be used by
+ # other modules, e.g., one init_cfg shared by multiple bottleneck
+ # blocks, the expected cfg will be changed after pop and will change
+ # the initialization behavior of other modules
+ cp_cfg = copy.deepcopy(cfg)
+ override = cp_cfg.pop('override', None)
+ _initialize(module, cp_cfg)
+
+ if override is not None:
+ cp_cfg.pop('layer', None)
+ _initialize_override(module, override, cp_cfg)
+ else:
+ # All attributes in module have same initialization.
+ pass
+
+
+def _no_grad_trunc_normal_(tensor: Tensor, mean: float, std: float, a: float,
+ b: float) -> Tensor:
+ # Method based on
+ # https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
+ # Modified from
+ # https://github.com/pytorch/pytorch/blob/master/torch/nn/init.py
+ def norm_cdf(x):
+ # Computes standard normal cumulative distribution function
+ return (1. + math.erf(x / math.sqrt(2.))) / 2.
+
+ if (mean < a - 2 * std) or (mean > b + 2 * std):
+ warnings.warn(
+ 'mean is more than 2 std from [a, b] in nn.init.trunc_normal_. '
+ 'The distribution of values may be incorrect.',
+ stacklevel=2)
+
+ with torch.no_grad():
+ # Values are generated by using a truncated uniform distribution and
+ # then using the inverse CDF for the normal distribution.
+ # Get upper and lower cdf values
+ lower = norm_cdf((a - mean) / std)
+ upper = norm_cdf((b - mean) / std)
+
+ # Uniformly fill tensor with values from [lower, upper], then translate
+ # to [2lower-1, 2upper-1].
+ tensor.uniform_(2 * lower - 1, 2 * upper - 1)
+
+ # Use inverse cdf transform for normal distribution to get truncated
+ # standard normal
+ tensor.erfinv_()
+
+ # Transform to proper mean, std
+ tensor.mul_(std * math.sqrt(2.))
+ tensor.add_(mean)
+
+ # Clamp to ensure it's in the proper range
+ tensor.clamp_(min=a, max=b)
+ return tensor
+
+
+def trunc_normal_(tensor: Tensor,
+ mean: float = 0.,
+ std: float = 1.,
+ a: float = -2.,
+ b: float = 2.) -> Tensor:
+ r"""Fills the input Tensor with values drawn from a truncated
+ normal distribution. The values are effectively drawn from the
+ normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`
+ with values outside :math:`[a, b]` redrawn until they are within
+ the bounds. The method used for generating the random values works
+ best when :math:`a \leq \text{mean} \leq b`.
+
+ Modified from
+ https://github.com/pytorch/pytorch/blob/master/torch/nn/init.py
+
+ Args:
+ tensor (``torch.Tensor``): an n-dimensional `torch.Tensor`.
+ mean (float): the mean of the normal distribution.
+ std (float): the standard deviation of the normal distribution.
+ a (float): the minimum cutoff value.
+ b (float): the maximum cutoff value.
+ """
+ return _no_grad_trunc_normal_(tensor, mean, std, a, b)
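+
+
+# Example (illustrative sketch; the std value is arbitrary): fill a weight
+# tensor in place with samples from N(0, 0.02^2) truncated to [-2, 2].
+# >>> w = torch.empty(3, 5)
+# >>> trunc_normal_(w, mean=0., std=0.02)
+# >>> bool(w.abs().max() <= 2.0)
+# True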
diff --git a/annotator/uniformer/mmcv/cnn/vgg.py b/annotator/uniformer/mmcv/cnn/vgg.py
new file mode 100644
index 0000000000000000000000000000000000000000..8778b649561a45a9652b1a15a26c2d171e58f3e1
--- /dev/null
+++ b/annotator/uniformer/mmcv/cnn/vgg.py
@@ -0,0 +1,175 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import logging
+
+import torch.nn as nn
+
+from .utils import constant_init, kaiming_init, normal_init
+
+
+def conv3x3(in_planes, out_planes, dilation=1):
+ """3x3 convolution with padding."""
+ return nn.Conv2d(
+ in_planes,
+ out_planes,
+ kernel_size=3,
+ padding=dilation,
+ dilation=dilation)
+
+
+def make_vgg_layer(inplanes,
+ planes,
+ num_blocks,
+ dilation=1,
+ with_bn=False,
+ ceil_mode=False):
+ layers = []
+ for _ in range(num_blocks):
+ layers.append(conv3x3(inplanes, planes, dilation))
+ if with_bn:
+ layers.append(nn.BatchNorm2d(planes))
+ layers.append(nn.ReLU(inplace=True))
+ inplanes = planes
+ layers.append(nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=ceil_mode))
+
+ return layers
+
+
+class VGG(nn.Module):
+ """VGG backbone.
+
+ Args:
+ depth (int): Depth of vgg, from {11, 13, 16, 19}.
+ with_bn (bool): Use BatchNorm or not.
+ num_classes (int): number of classes for classification.
+ num_stages (int): VGG stages, normally 5.
+ dilations (Sequence[int]): Dilation of each stage.
+ out_indices (Sequence[int]): Output from which stages.
+ frozen_stages (int): Stages to be frozen (all param fixed). -1 means
+ not freezing any parameters.
+ bn_eval (bool): Whether to set BN layers as eval mode, namely, freeze
+ running stats (mean and var).
+ bn_frozen (bool): Whether to freeze weight and bias of BN layers.
+ """
+
+ arch_settings = {
+ 11: (1, 1, 2, 2, 2),
+ 13: (2, 2, 2, 2, 2),
+ 16: (2, 2, 3, 3, 3),
+ 19: (2, 2, 4, 4, 4)
+ }
+
+ def __init__(self,
+ depth,
+ with_bn=False,
+ num_classes=-1,
+ num_stages=5,
+ dilations=(1, 1, 1, 1, 1),
+ out_indices=(0, 1, 2, 3, 4),
+ frozen_stages=-1,
+ bn_eval=True,
+ bn_frozen=False,
+ ceil_mode=False,
+ with_last_pool=True):
+ super(VGG, self).__init__()
+ if depth not in self.arch_settings:
+ raise KeyError(f'invalid depth {depth} for vgg')
+ assert num_stages >= 1 and num_stages <= 5
+ stage_blocks = self.arch_settings[depth]
+ self.stage_blocks = stage_blocks[:num_stages]
+ assert len(dilations) == num_stages
+ assert max(out_indices) <= num_stages
+
+ self.num_classes = num_classes
+ self.out_indices = out_indices
+ self.frozen_stages = frozen_stages
+ self.bn_eval = bn_eval
+ self.bn_frozen = bn_frozen
+
+ self.inplanes = 3
+ start_idx = 0
+ vgg_layers = []
+ self.range_sub_modules = []
+ for i, num_blocks in enumerate(self.stage_blocks):
+ num_modules = num_blocks * (2 + with_bn) + 1
+ end_idx = start_idx + num_modules
+ dilation = dilations[i]
+ planes = 64 * 2**i if i < 4 else 512
+ vgg_layer = make_vgg_layer(
+ self.inplanes,
+ planes,
+ num_blocks,
+ dilation=dilation,
+ with_bn=with_bn,
+ ceil_mode=ceil_mode)
+ vgg_layers.extend(vgg_layer)
+ self.inplanes = planes
+ self.range_sub_modules.append([start_idx, end_idx])
+ start_idx = end_idx
+ if not with_last_pool:
+ vgg_layers.pop(-1)
+ self.range_sub_modules[-1][1] -= 1
+ self.module_name = 'features'
+ self.add_module(self.module_name, nn.Sequential(*vgg_layers))
+
+ if self.num_classes > 0:
+ self.classifier = nn.Sequential(
+ nn.Linear(512 * 7 * 7, 4096),
+ nn.ReLU(True),
+ nn.Dropout(),
+ nn.Linear(4096, 4096),
+ nn.ReLU(True),
+ nn.Dropout(),
+ nn.Linear(4096, num_classes),
+ )
+
+ def init_weights(self, pretrained=None):
+ if isinstance(pretrained, str):
+ logger = logging.getLogger()
+ from ..runner import load_checkpoint
+ load_checkpoint(self, pretrained, strict=False, logger=logger)
+ elif pretrained is None:
+ for m in self.modules():
+ if isinstance(m, nn.Conv2d):
+ kaiming_init(m)
+ elif isinstance(m, nn.BatchNorm2d):
+ constant_init(m, 1)
+ elif isinstance(m, nn.Linear):
+ normal_init(m, std=0.01)
+ else:
+ raise TypeError('pretrained must be a str or None')
+
+ def forward(self, x):
+ outs = []
+ vgg_layers = getattr(self, self.module_name)
+ for i in range(len(self.stage_blocks)):
+ for j in range(*self.range_sub_modules[i]):
+ vgg_layer = vgg_layers[j]
+ x = vgg_layer(x)
+ if i in self.out_indices:
+ outs.append(x)
+ if self.num_classes > 0:
+ x = x.view(x.size(0), -1)
+ x = self.classifier(x)
+ outs.append(x)
+ if len(outs) == 1:
+ return outs[0]
+ else:
+ return tuple(outs)
+
+ def train(self, mode=True):
+ super(VGG, self).train(mode)
+ if self.bn_eval:
+ for m in self.modules():
+ if isinstance(m, nn.BatchNorm2d):
+ m.eval()
+ if self.bn_frozen:
+ for params in m.parameters():
+ params.requires_grad = False
+ vgg_layers = getattr(self, self.module_name)
+ if mode and self.frozen_stages >= 0:
+ for i in range(self.frozen_stages):
+ for j in range(*self.range_sub_modules[i]):
+ mod = vgg_layers[j]
+ mod.eval()
+ for param in mod.parameters():
+ param.requires_grad = False
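A minimal sketch of how the VGG backbone added above might be constructed and run, assuming the vendored annotator.uniformer.mmcv package and its dependencies are importable; the input size and stage indices are illustrative.

import torch
from annotator.uniformer.mmcv.cnn.vgg import VGG

# VGG-16 feature extractor: no classifier head, return the last three stages
model = VGG(depth=16, num_classes=-1, out_indices=(2, 3, 4))
model.init_weights()  # Kaiming for conv, constant for BN, normal for linear
feats = model(torch.randn(1, 3, 224, 224))
print([f.shape for f in feats])  # one feature map per requested stage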
diff --git a/annotator/uniformer/mmcv/engine/__init__.py b/annotator/uniformer/mmcv/engine/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..3193b7f664e19ce2458d81c836597fa22e4bb082
--- /dev/null
+++ b/annotator/uniformer/mmcv/engine/__init__.py
@@ -0,0 +1,8 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from .test import (collect_results_cpu, collect_results_gpu, multi_gpu_test,
+ single_gpu_test)
+
+__all__ = [
+ 'collect_results_cpu', 'collect_results_gpu', 'multi_gpu_test',
+ 'single_gpu_test'
+]
diff --git a/annotator/uniformer/mmcv/engine/test.py b/annotator/uniformer/mmcv/engine/test.py
new file mode 100644
index 0000000000000000000000000000000000000000..8dbeef271db634ec2dadfda3bc0b5ef9c7a677ff
--- /dev/null
+++ b/annotator/uniformer/mmcv/engine/test.py
@@ -0,0 +1,202 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import os.path as osp
+import pickle
+import shutil
+import tempfile
+import time
+
+import torch
+import torch.distributed as dist
+
+import annotator.uniformer.mmcv as mmcv
+from annotator.uniformer.mmcv.runner import get_dist_info
+
+
+def single_gpu_test(model, data_loader):
+ """Test model with a single gpu.
+
+ This method tests the model with a single gpu and displays a test progress bar.
+
+ Args:
+ model (nn.Module): Model to be tested.
+ data_loader (DataLoader): PyTorch data loader.
+
+ Returns:
+ list: The prediction results.
+ """
+ model.eval()
+ results = []
+ dataset = data_loader.dataset
+ prog_bar = mmcv.ProgressBar(len(dataset))
+ for data in data_loader:
+ with torch.no_grad():
+ result = model(return_loss=False, **data)
+ results.extend(result)
+
+ # Assume result has the same length as batch_size
+ # refer to https://github.com/open-mmlab/mmcv/issues/985
+ batch_size = len(result)
+ for _ in range(batch_size):
+ prog_bar.update()
+ return results
+
+
+def multi_gpu_test(model, data_loader, tmpdir=None, gpu_collect=False):
+ """Test model with multiple gpus.
+
+ This method tests the model with multiple gpus and collects the results
+ under two different modes: gpu and cpu. By setting
+ ``gpu_collect=True``, it encodes results to gpu tensors and uses gpu
+ communication to collect them. Under cpu mode it saves the results on
+ different gpus to ``tmpdir`` and collects them on the rank 0 worker.
+
+ Args:
+ model (nn.Module): Model to be tested.
+ data_loader (DataLoader): PyTorch data loader.
+ tmpdir (str): Path of directory to save the temporary results from
+ different gpus under cpu mode.
+ gpu_collect (bool): Option to use either gpu or cpu to collect results.
+
+ Returns:
+ list: The prediction results.
+ """
+ model.eval()
+ results = []
+ dataset = data_loader.dataset
+ rank, world_size = get_dist_info()
+ if rank == 0:
+ prog_bar = mmcv.ProgressBar(len(dataset))
+ time.sleep(2) # This line can prevent deadlock problem in some cases.
+ for i, data in enumerate(data_loader):
+ with torch.no_grad():
+ result = model(return_loss=False, **data)
+ results.extend(result)
+
+ if rank == 0:
+ batch_size = len(result)
+ batch_size_all = batch_size * world_size
+ if batch_size_all + prog_bar.completed > len(dataset):
+ batch_size_all = len(dataset) - prog_bar.completed
+ for _ in range(batch_size_all):
+ prog_bar.update()
+
+ # collect results from all ranks
+ if gpu_collect:
+ results = collect_results_gpu(results, len(dataset))
+ else:
+ results = collect_results_cpu(results, len(dataset), tmpdir)
+ return results
+
+
+def collect_results_cpu(result_part, size, tmpdir=None):
+ """Collect results under cpu mode.
+
+ Under cpu mode, this function will save the results on different gpus to
+ ``tmpdir`` and collect them on the rank 0 worker.
+
+ Args:
+ result_part (list): Result list containing result parts
+ to be collected.
+ size (int): Size of the results, commonly equal to length of
+ the results.
+ tmpdir (str | None): Temporary directory to store the collected
+ results. If set to None, a random temporary directory will be
+ created for it.
+
+ Returns:
+ list: The collected results.
+ """
+ rank, world_size = get_dist_info()
+ # create a tmp dir if it is not specified
+ if tmpdir is None:
+ MAX_LEN = 512
+ # 32 is whitespace
+ dir_tensor = torch.full((MAX_LEN, ),
+ 32,
+ dtype=torch.uint8,
+ device='cuda')
+ if rank == 0:
+ mmcv.mkdir_or_exist('.dist_test')
+ tmpdir = tempfile.mkdtemp(dir='.dist_test')
+ tmpdir = torch.tensor(
+ bytearray(tmpdir.encode()), dtype=torch.uint8, device='cuda')
+ dir_tensor[:len(tmpdir)] = tmpdir
+ dist.broadcast(dir_tensor, 0)
+ tmpdir = dir_tensor.cpu().numpy().tobytes().decode().rstrip()
+ else:
+ mmcv.mkdir_or_exist(tmpdir)
+ # dump the part result to the dir
+ mmcv.dump(result_part, osp.join(tmpdir, f'part_{rank}.pkl'))
+ dist.barrier()
+ # collect all parts
+ if rank != 0:
+ return None
+ else:
+ # load results of all parts from tmp dir
+ part_list = []
+ for i in range(world_size):
+ part_file = osp.join(tmpdir, f'part_{i}.pkl')
+ part_result = mmcv.load(part_file)
+ # When data is severely insufficient, an empty part_result
+ # on a certain gpu could make the overall outputs empty.
+ if part_result:
+ part_list.append(part_result)
+ # sort the results
+ ordered_results = []
+ for res in zip(*part_list):
+ ordered_results.extend(list(res))
+ # the dataloader may pad some samples
+ ordered_results = ordered_results[:size]
+ # remove tmp dir
+ shutil.rmtree(tmpdir)
+ return ordered_results
+
+
+def collect_results_gpu(result_part, size):
+ """Collect results under gpu mode.
+
+ On gpu mode, this function will encode results to gpu tensors and use gpu
+ communication for results collection.
+
+ Args:
+ result_part (list): Result list containing result parts
+ to be collected.
+ size (int): Size of the results, commonly equal to length of
+ the results.
+
+ Returns:
+ list: The collected results.
+ """
+ rank, world_size = get_dist_info()
+ # dump result part to tensor with pickle
+ part_tensor = torch.tensor(
+ bytearray(pickle.dumps(result_part)), dtype=torch.uint8, device='cuda')
+ # gather all result part tensor shape
+ shape_tensor = torch.tensor(part_tensor.shape, device='cuda')
+ shape_list = [shape_tensor.clone() for _ in range(world_size)]
+ dist.all_gather(shape_list, shape_tensor)
+ # padding result part tensor to max length
+ shape_max = torch.tensor(shape_list).max()
+ part_send = torch.zeros(shape_max, dtype=torch.uint8, device='cuda')
+ part_send[:shape_tensor[0]] = part_tensor
+ part_recv_list = [
+ part_tensor.new_zeros(shape_max) for _ in range(world_size)
+ ]
+ # gather all result part
+ dist.all_gather(part_recv_list, part_send)
+
+ if rank == 0:
+ part_list = []
+ for recv, shape in zip(part_recv_list, shape_list):
+ part_result = pickle.loads(recv[:shape[0]].cpu().numpy().tobytes())
+ # When data is severely insufficient, an empty part_result
+ # on a certain gpu could make the overall outputs empty.
+ if part_result:
+ part_list.append(part_result)
+ # sort the results
+ ordered_results = []
+ for res in zip(*part_list):
+ ordered_results.extend(list(res))
+ # the dataloader may pad some samples
+ ordered_results = ordered_results[:size]
+ return ordered_results
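The ordering step shared by ``collect_results_cpu`` and ``collect_results_gpu`` can be shown standalone. Assuming the usual ``DistributedSampler`` interleaving (rank ``r`` holds samples ``r, r + world_size, ...``, an assumption not stated in the diff), ``zip(*part_list)`` restores dataset order and the final slice drops the sampler's padding.

part_list = [
    ['s0', 's2', 's4'],  # results gathered from rank 0
    ['s1', 's3', 's4'],  # rank 1; the last sample is duplicated padding
]
ordered_results = []
for res in zip(*part_list):
    ordered_results.extend(list(res))
ordered_results = ordered_results[:5]  # dataset size is 5
print(ordered_results)  # ['s0', 's1', 's2', 's3', 's4']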
diff --git a/annotator/uniformer/mmcv/fileio/__init__.py b/annotator/uniformer/mmcv/fileio/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..2051b85f7e59bff7bdbaa131849ce8cd31f059a4
--- /dev/null
+++ b/annotator/uniformer/mmcv/fileio/__init__.py
@@ -0,0 +1,11 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from .file_client import BaseStorageBackend, FileClient
+from .handlers import BaseFileHandler, JsonHandler, PickleHandler, YamlHandler
+from .io import dump, load, register_handler
+from .parse import dict_from_file, list_from_file
+
+__all__ = [
+ 'BaseStorageBackend', 'FileClient', 'load', 'dump', 'register_handler',
+ 'BaseFileHandler', 'JsonHandler', 'PickleHandler', 'YamlHandler',
+ 'list_from_file', 'dict_from_file'
+]
diff --git a/annotator/uniformer/mmcv/fileio/__pycache__/__init__.cpython-310.pyc b/annotator/uniformer/mmcv/fileio/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..93afe1f95307d05903aef7ebc0fa8ce5d9a44a4c
Binary files /dev/null and b/annotator/uniformer/mmcv/fileio/__pycache__/__init__.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/fileio/__pycache__/__init__.cpython-38.pyc b/annotator/uniformer/mmcv/fileio/__pycache__/__init__.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..52d6188a1475fed013ce434e66f8748aba2e51aa
Binary files /dev/null and b/annotator/uniformer/mmcv/fileio/__pycache__/__init__.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/fileio/__pycache__/file_client.cpython-310.pyc b/annotator/uniformer/mmcv/fileio/__pycache__/file_client.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..88d62ecc1e8687fbb4be095b6be0ece8be5eedba
Binary files /dev/null and b/annotator/uniformer/mmcv/fileio/__pycache__/file_client.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/fileio/__pycache__/file_client.cpython-38.pyc b/annotator/uniformer/mmcv/fileio/__pycache__/file_client.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b2b6d2eeabe4093cb322a62a830485335186d32e
Binary files /dev/null and b/annotator/uniformer/mmcv/fileio/__pycache__/file_client.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/fileio/__pycache__/io.cpython-310.pyc b/annotator/uniformer/mmcv/fileio/__pycache__/io.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1ef8c1349e92e9cb092b46eb14abf2a30389d796
Binary files /dev/null and b/annotator/uniformer/mmcv/fileio/__pycache__/io.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/fileio/__pycache__/io.cpython-38.pyc b/annotator/uniformer/mmcv/fileio/__pycache__/io.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bcff67f6101664c2ed0835e5326434cbc1ab6362
Binary files /dev/null and b/annotator/uniformer/mmcv/fileio/__pycache__/io.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/fileio/__pycache__/parse.cpython-310.pyc b/annotator/uniformer/mmcv/fileio/__pycache__/parse.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..395367e307bce5d9f676d39f91d35f4691737724
Binary files /dev/null and b/annotator/uniformer/mmcv/fileio/__pycache__/parse.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/fileio/__pycache__/parse.cpython-38.pyc b/annotator/uniformer/mmcv/fileio/__pycache__/parse.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e3ccc1711cc051f08e1be1c5256aa158b544e519
Binary files /dev/null and b/annotator/uniformer/mmcv/fileio/__pycache__/parse.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/fileio/file_client.py b/annotator/uniformer/mmcv/fileio/file_client.py
new file mode 100644
index 0000000000000000000000000000000000000000..950f0c1aeab14b8e308a7455ccd64a95b5d98add
--- /dev/null
+++ b/annotator/uniformer/mmcv/fileio/file_client.py
@@ -0,0 +1,1148 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import inspect
+import os
+import os.path as osp
+import re
+import tempfile
+import warnings
+from abc import ABCMeta, abstractmethod
+from contextlib import contextmanager
+from pathlib import Path
+from typing import Iterable, Iterator, Optional, Tuple, Union
+from urllib.request import urlopen
+
+import annotator.uniformer.mmcv as mmcv
+from annotator.uniformer.mmcv.utils.misc import has_method
+from annotator.uniformer.mmcv.utils.path import is_filepath
+
+
+class BaseStorageBackend(metaclass=ABCMeta):
+ """Abstract class of storage backends.
+
+ All backends need to implement two apis: ``get()`` and ``get_text()``.
+ ``get()`` reads the file as a byte stream and ``get_text()`` reads the file
+ as text.
+ """
+
+ # a flag to indicate whether the backend can create a symlink for a file
+ _allow_symlink = False
+
+ @property
+ def name(self):
+ return self.__class__.__name__
+
+ @property
+ def allow_symlink(self):
+ return self._allow_symlink
+
+ @abstractmethod
+ def get(self, filepath):
+ pass
+
+ @abstractmethod
+ def get_text(self, filepath):
+ pass
+
+
+class CephBackend(BaseStorageBackend):
+ """Ceph storage backend (for internal use).
+
+ Args:
+ path_mapping (dict|None): path mapping dict from local path to Petrel
+ path. When ``path_mapping={'src': 'dst'}``, ``src`` in ``filepath``
+ will be replaced by ``dst``. Default: None.
+
+ .. warning::
+ :class:`mmcv.fileio.file_client.CephBackend` will be deprecated,
+ please use :class:`mmcv.fileio.file_client.PetrelBackend` instead.
+ """
+
+ def __init__(self, path_mapping=None):
+ try:
+ import ceph
+ except ImportError:
+ raise ImportError('Please install ceph to enable CephBackend.')
+
+ warnings.warn(
+ 'CephBackend will be deprecated, please use PetrelBackend instead')
+ self._client = ceph.S3Client()
+ assert isinstance(path_mapping, dict) or path_mapping is None
+ self.path_mapping = path_mapping
+
+ def get(self, filepath):
+ filepath = str(filepath)
+ if self.path_mapping is not None:
+ for k, v in self.path_mapping.items():
+ filepath = filepath.replace(k, v)
+ value = self._client.Get(filepath)
+ value_buf = memoryview(value)
+ return value_buf
+
+ def get_text(self, filepath, encoding=None):
+ raise NotImplementedError
+
+
+class PetrelBackend(BaseStorageBackend):
+ """Petrel storage backend (for internal use).
+
+ PetrelBackend supports reading and writing data to multiple clusters.
+ If the file path contains the cluster name, PetrelBackend will read data
+ from specified cluster or write data to it. Otherwise, PetrelBackend will
+ access the default cluster.
+
+ Args:
+ path_mapping (dict, optional): Path mapping dict from local path to
+ Petrel path. When ``path_mapping={'src': 'dst'}``, ``src`` in
+ ``filepath`` will be replaced by ``dst``. Default: None.
+ enable_mc (bool, optional): Whether to enable memcached support.
+ Default: True.
+
+ Examples:
+ >>> filepath1 = 's3://path/of/file'
+ >>> filepath2 = 'cluster-name:s3://path/of/file'
+ >>> client = PetrelBackend()
+ >>> client.get(filepath1) # get data from default cluster
+ >>> client.get(filepath2) # get data from 'cluster-name' cluster
+ """
+
+ def __init__(self,
+ path_mapping: Optional[dict] = None,
+ enable_mc: bool = True):
+ try:
+ from petrel_client import client
+ except ImportError:
+ raise ImportError('Please install petrel_client to enable '
+ 'PetrelBackend.')
+
+ self._client = client.Client(enable_mc=enable_mc)
+ assert isinstance(path_mapping, dict) or path_mapping is None
+ self.path_mapping = path_mapping
+
+ def _map_path(self, filepath: Union[str, Path]) -> str:
+ """Map ``filepath`` to a string path whose prefix will be replaced by
+ :attr:`self.path_mapping`.
+
+ Args:
+ filepath (str): Path to be mapped.
+ """
+ filepath = str(filepath)
+ if self.path_mapping is not None:
+ for k, v in self.path_mapping.items():
+ filepath = filepath.replace(k, v)
+ return filepath
+
+ def _format_path(self, filepath: str) -> str:
+ """Convert a ``filepath`` to standard format of petrel oss.
+
+ If the ``filepath`` is concatenated by ``os.path.join``, in a Windows
+ environment, the ``filepath`` will be the format of
+ 's3://bucket_name\\image.jpg'. By invoking :meth:`_format_path`, the
+ above ``filepath`` will be converted to 's3://bucket_name/image.jpg'.
+
+ Args:
+ filepath (str): Path to be formatted.
+ """
+ return re.sub(r'\\+', '/', filepath)
+
+ def get(self, filepath: Union[str, Path]) -> memoryview:
+ """Read data from a given ``filepath`` with 'rb' mode.
+
+ Args:
+ filepath (str or Path): Path to read data.
+
+ Returns:
+ memoryview: A memory view of expected bytes object to avoid
+ copying. The memoryview object can be converted to bytes by
+ ``value_buf.tobytes()``.
+ """
+ filepath = self._map_path(filepath)
+ filepath = self._format_path(filepath)
+ value = self._client.Get(filepath)
+ value_buf = memoryview(value)
+ return value_buf
+
+ def get_text(self,
+ filepath: Union[str, Path],
+ encoding: str = 'utf-8') -> str:
+ """Read data from a given ``filepath`` with 'r' mode.
+
+ Args:
+ filepath (str or Path): Path to read data.
+ encoding (str): The encoding format used to open the ``filepath``.
+ Default: 'utf-8'.
+
+ Returns:
+ str: Expected text reading from ``filepath``.
+ """
+ return str(self.get(filepath), encoding=encoding)
+
+ def put(self, obj: bytes, filepath: Union[str, Path]) -> None:
+ """Save data to a given ``filepath``.
+
+ Args:
+ obj (bytes): Data to be saved.
+ filepath (str or Path): Path to write data.
+ """
+ filepath = self._map_path(filepath)
+ filepath = self._format_path(filepath)
+ self._client.put(filepath, obj)
+
+ def put_text(self,
+ obj: str,
+ filepath: Union[str, Path],
+ encoding: str = 'utf-8') -> None:
+ """Save data to a given ``filepath``.
+
+ Args:
+ obj (str): Data to be written.
+ filepath (str or Path): Path to write data.
+ encoding (str): The encoding format used to encode the ``obj``.
+ Default: 'utf-8'.
+ """
+ self.put(bytes(obj, encoding=encoding), filepath)
+
+ def remove(self, filepath: Union[str, Path]) -> None:
+ """Remove a file.
+
+ Args:
+ filepath (str or Path): Path to be removed.
+ """
+ if not has_method(self._client, 'delete'):
+ raise NotImplementedError(
+ ('Current version of Petrel Python SDK has not supported '
+ 'the `delete` method, please use a higher version or dev'
+ ' branch instead.'))
+
+ filepath = self._map_path(filepath)
+ filepath = self._format_path(filepath)
+ self._client.delete(filepath)
+
+ def exists(self, filepath: Union[str, Path]) -> bool:
+ """Check whether a file path exists.
+
+ Args:
+ filepath (str or Path): Path to be checked whether exists.
+
+ Returns:
+ bool: Return ``True`` if ``filepath`` exists, ``False`` otherwise.
+ """
+ if not (has_method(self._client, 'contains')
+ and has_method(self._client, 'isdir')):
+ raise NotImplementedError(
+ ('Current version of Petrel Python SDK has not supported '
+ 'the `contains` and `isdir` methods, please use a higher'
+ 'version or dev branch instead.'))
+
+ filepath = self._map_path(filepath)
+ filepath = self._format_path(filepath)
+ return self._client.contains(filepath) or self._client.isdir(filepath)
+
+ def isdir(self, filepath: Union[str, Path]) -> bool:
+ """Check whether a file path is a directory.
+
+ Args:
+ filepath (str or Path): Path to be checked whether it is a
+ directory.
+
+ Returns:
+ bool: Return ``True`` if ``filepath`` points to a directory,
+ ``False`` otherwise.
+ """
+ if not has_method(self._client, 'isdir'):
+ raise NotImplementedError(
+ ('Current version of Petrel Python SDK has not supported '
+ 'the `isdir` method, please use a higher version or dev'
+ ' branch instead.'))
+
+ filepath = self._map_path(filepath)
+ filepath = self._format_path(filepath)
+ return self._client.isdir(filepath)
+
+ def isfile(self, filepath: Union[str, Path]) -> bool:
+ """Check whether a file path is a file.
+
+ Args:
+ filepath (str or Path): Path to be checked whether it is a file.
+
+ Returns:
+ bool: Return ``True`` if ``filepath`` points to a file, ``False``
+ otherwise.
+ """
+ if not has_method(self._client, 'contains'):
+ raise NotImplementedError(
+ ('Current version of Petrel Python SDK has not supported '
+ 'the `contains` method, please use a higher version or '
+ 'dev branch instead.'))
+
+ filepath = self._map_path(filepath)
+ filepath = self._format_path(filepath)
+ return self._client.contains(filepath)
+
+ def join_path(self, filepath: Union[str, Path],
+ *filepaths: Union[str, Path]) -> str:
+ """Concatenate all file paths.
+
+ Args:
+ filepath (str or Path): Path to be concatenated.
+
+ Returns:
+ str: The result after concatenation.
+ """
+ filepath = self._format_path(self._map_path(filepath))
+ if filepath.endswith('/'):
+ filepath = filepath[:-1]
+ formatted_paths = [filepath]
+ for path in filepaths:
+ formatted_paths.append(self._format_path(self._map_path(path)))
+ return '/'.join(formatted_paths)
+
+ @contextmanager
+ def get_local_path(self, filepath: Union[str, Path]) -> Iterable[str]:
+ """Download a file from ``filepath`` and return a temporary path.
+
+ ``get_local_path`` is decorated by :meth:`contextlib.contextmanager`. It
+ can be called with a ``with`` statement, and when exiting the
+ ``with`` statement, the temporary path will be released.
+
+ Args:
+ filepath (str | Path): Download a file from ``filepath``.
+
+ Examples:
+ >>> client = PetrelBackend()
+ >>> # After exiting the ``with`` clause,
+ >>> # the path will be removed
+ >>> with client.get_local_path('s3://path/of/your/file') as path:
+ ... # do something here
+
+ Yields:
+ Iterable[str]: Only yield one temporary path.
+ """
+ filepath = self._map_path(filepath)
+ filepath = self._format_path(filepath)
+ assert self.isfile(filepath)
+ try:
+ f = tempfile.NamedTemporaryFile(delete=False)
+ f.write(self.get(filepath))
+ f.close()
+ yield f.name
+ finally:
+ os.remove(f.name)
+
+ def list_dir_or_file(self,
+ dir_path: Union[str, Path],
+ list_dir: bool = True,
+ list_file: bool = True,
+ suffix: Optional[Union[str, Tuple[str]]] = None,
+ recursive: bool = False) -> Iterator[str]:
+ """Scan a directory to find the interested directories or files in
+ arbitrary order.
+
+ Note:
+ Petrel has no concept of directories but it simulates the directory
+ hierarchy in the filesystem through public prefixes. In addition,
+ if the returned path ends with '/', it means the path is a public
+ prefix which is a logical directory.
+
+ Note:
+ :meth:`list_dir_or_file` returns the path relative to ``dir_path``.
+ In addition, the returned directory path will not contain the
+ suffix '/', which is consistent with other backends.
+
+ Args:
+ dir_path (str | Path): Path of the directory.
+ list_dir (bool): List the directories. Default: True.
+ list_file (bool): List the path of files. Default: True.
+ suffix (str or tuple[str], optional): File suffix
+ that we are interested in. Default: None.
+ recursive (bool): If set to True, recursively scan the
+ directory. Default: False.
+
+ Yields:
+ Iterable[str]: A relative path to ``dir_path``.
+ """
+ if not has_method(self._client, 'list'):
+ raise NotImplementedError(
+ ('Current version of Petrel Python SDK has not supported '
+ 'the `list` method, please use a higher version or dev'
+ ' branch instead.'))
+
+ dir_path = self._map_path(dir_path)
+ dir_path = self._format_path(dir_path)
+ if list_dir and suffix is not None:
+ raise TypeError(
+ '`list_dir` should be False when `suffix` is not None')
+
+ if (suffix is not None) and not isinstance(suffix, (str, tuple)):
+ raise TypeError('`suffix` must be a string or tuple of strings')
+
+ # Petrel's simulated directory hierarchy assumes that directory paths
+ # should end with `/`
+ if not dir_path.endswith('/'):
+ dir_path += '/'
+
+ root = dir_path
+
+ def _list_dir_or_file(dir_path, list_dir, list_file, suffix,
+ recursive):
+ for path in self._client.list(dir_path):
+ # the `self.isdir` is not used here to determine whether path
+ # is a directory, because `self.isdir` relies on
+ # `self._client.list`
+ if path.endswith('/'): # a directory path
+ next_dir_path = self.join_path(dir_path, path)
+ if list_dir:
+ # get the relative path and exclude the last
+ # character '/'
+ rel_dir = next_dir_path[len(root):-1]
+ yield rel_dir
+ if recursive:
+ yield from _list_dir_or_file(next_dir_path, list_dir,
+ list_file, suffix,
+ recursive)
+ else: # a file path
+ absolute_path = self.join_path(dir_path, path)
+ rel_path = absolute_path[len(root):]
+ if (suffix is None
+ or rel_path.endswith(suffix)) and list_file:
+ yield rel_path
+
+ return _list_dir_or_file(dir_path, list_dir, list_file, suffix,
+ recursive)
+
+
+class MemcachedBackend(BaseStorageBackend):
+ """Memcached storage backend.
+
+ Attributes:
+ server_list_cfg (str): Config file for memcached server list.
+ client_cfg (str): Config file for memcached client.
+ sys_path (str | None): Additional path to be appended to `sys.path`.
+ Default: None.
+ """
+
+ def __init__(self, server_list_cfg, client_cfg, sys_path=None):
+ if sys_path is not None:
+ import sys
+ sys.path.append(sys_path)
+ try:
+ import mc
+ except ImportError:
+ raise ImportError(
+ 'Please install memcached to enable MemcachedBackend.')
+
+ self.server_list_cfg = server_list_cfg
+ self.client_cfg = client_cfg
+ self._client = mc.MemcachedClient.GetInstance(self.server_list_cfg,
+ self.client_cfg)
+ # mc.pyvector serves as a pointer to a memory cache
+ self._mc_buffer = mc.pyvector()
+
+ def get(self, filepath):
+ filepath = str(filepath)
+ import mc
+ self._client.Get(filepath, self._mc_buffer)
+ value_buf = mc.ConvertBuffer(self._mc_buffer)
+ return value_buf
+
+ def get_text(self, filepath, encoding=None):
+ raise NotImplementedError
+
+
+class LmdbBackend(BaseStorageBackend):
+ """Lmdb storage backend.
+
+ Args:
+ db_path (str): Lmdb database path.
+ readonly (bool, optional): Lmdb environment parameter. If True,
+ disallow any write operations. Default: True.
+ lock (bool, optional): Lmdb environment parameter. If False, when
+ concurrent access occurs, do not lock the database. Default: False.
+ readahead (bool, optional): Lmdb environment parameter. If False,
+ disable the OS filesystem readahead mechanism, which may improve
+ random read performance when a database is larger than RAM.
+ Default: False.
+
+ Attributes:
+ db_path (str): Lmdb database path.
+ """
+
+ def __init__(self,
+ db_path,
+ readonly=True,
+ lock=False,
+ readahead=False,
+ **kwargs):
+ try:
+ import lmdb
+ except ImportError:
+ raise ImportError('Please install lmdb to enable LmdbBackend.')
+
+ self.db_path = str(db_path)
+ self._client = lmdb.open(
+ self.db_path,
+ readonly=readonly,
+ lock=lock,
+ readahead=readahead,
+ **kwargs)
+
+ def get(self, filepath):
+ """Get values according to the filepath.
+
+ Args:
+ filepath (str | obj:`Path`): Here, filepath is the lmdb key.
+ """
+ filepath = str(filepath)
+ with self._client.begin(write=False) as txn:
+ value_buf = txn.get(filepath.encode('ascii'))
+ return value_buf
+
+ def get_text(self, filepath, encoding=None):
+ raise NotImplementedError
+
+
+class HardDiskBackend(BaseStorageBackend):
+ """Raw hard disks storage backend."""
+
+ _allow_symlink = True
+
+ def get(self, filepath: Union[str, Path]) -> bytes:
+ """Read data from a given ``filepath`` with 'rb' mode.
+
+ Args:
+ filepath (str or Path): Path to read data.
+
+ Returns:
+ bytes: Expected bytes object.
+ """
+ with open(filepath, 'rb') as f:
+ value_buf = f.read()
+ return value_buf
+
+ def get_text(self,
+ filepath: Union[str, Path],
+ encoding: str = 'utf-8') -> str:
+ """Read data from a given ``filepath`` with 'r' mode.
+
+ Args:
+ filepath (str or Path): Path to read data.
+ encoding (str): The encoding format used to open the ``filepath``.
+ Default: 'utf-8'.
+
+ Returns:
+ str: Expected text reading from ``filepath``.
+ """
+ with open(filepath, 'r', encoding=encoding) as f:
+ value_buf = f.read()
+ return value_buf
+
+ def put(self, obj: bytes, filepath: Union[str, Path]) -> None:
+ """Write data to a given ``filepath`` with 'wb' mode.
+
+ Note:
+ ``put`` will create a directory if the directory of ``filepath``
+ does not exist.
+
+ Args:
+ obj (bytes): Data to be written.
+ filepath (str or Path): Path to write data.
+ """
+ mmcv.mkdir_or_exist(osp.dirname(filepath))
+ with open(filepath, 'wb') as f:
+ f.write(obj)
+
+ def put_text(self,
+ obj: str,
+ filepath: Union[str, Path],
+ encoding: str = 'utf-8') -> None:
+ """Write data to a given ``filepath`` with 'w' mode.
+
+ Note:
+ ``put_text`` will create a directory if the directory of
+ ``filepath`` does not exist.
+
+ Args:
+ obj (str): Data to be written.
+ filepath (str or Path): Path to write data.
+ encoding (str): The encoding format used to open the ``filepath``.
+ Default: 'utf-8'.
+ """
+ mmcv.mkdir_or_exist(osp.dirname(filepath))
+ with open(filepath, 'w', encoding=encoding) as f:
+ f.write(obj)
+
+ def remove(self, filepath: Union[str, Path]) -> None:
+ """Remove a file.
+
+ Args:
+ filepath (str or Path): Path to be removed.
+ """
+ os.remove(filepath)
+
+ def exists(self, filepath: Union[str, Path]) -> bool:
+ """Check whether a file path exists.
+
+ Args:
+ filepath (str or Path): Path to be checked whether exists.
+
+ Returns:
+ bool: Return ``True`` if ``filepath`` exists, ``False`` otherwise.
+ """
+ return osp.exists(filepath)
+
+ def isdir(self, filepath: Union[str, Path]) -> bool:
+ """Check whether a file path is a directory.
+
+ Args:
+ filepath (str or Path): Path to be checked whether it is a
+ directory.
+
+ Returns:
+ bool: Return ``True`` if ``filepath`` points to a directory,
+ ``False`` otherwise.
+ """
+ return osp.isdir(filepath)
+
+ def isfile(self, filepath: Union[str, Path]) -> bool:
+ """Check whether a file path is a file.
+
+ Args:
+ filepath (str or Path): Path to be checked whether it is a file.
+
+ Returns:
+ bool: Return ``True`` if ``filepath`` points to a file, ``False``
+ otherwise.
+ """
+ return osp.isfile(filepath)
+
+ def join_path(self, filepath: Union[str, Path],
+ *filepaths: Union[str, Path]) -> str:
+ """Concatenate all file paths.
+
+ Join one or more filepath components intelligently. The return value
+ is the concatenation of filepath and any members of *filepaths.
+
+ Args:
+ filepath (str or Path): Path to be concatenated.
+
+ Returns:
+ str: The result of concatenation.
+ """
+ return osp.join(filepath, *filepaths)
+
+ @contextmanager
+ def get_local_path(
+ self, filepath: Union[str, Path]) -> Iterable[Union[str, Path]]:
+ """Only for unified API and do nothing."""
+ yield filepath
+
+ def list_dir_or_file(self,
+ dir_path: Union[str, Path],
+ list_dir: bool = True,
+ list_file: bool = True,
+ suffix: Optional[Union[str, Tuple[str]]] = None,
+ recursive: bool = False) -> Iterator[str]:
+ """Scan a directory to find the interested directories or files in
+ arbitrary order.
+
+ Note:
+ :meth:`list_dir_or_file` returns the path relative to ``dir_path``.
+
+ Args:
+ dir_path (str | Path): Path of the directory.
+ list_dir (bool): List the directories. Default: True.
+ list_file (bool): List the path of files. Default: True.
+ suffix (str or tuple[str], optional): File suffix
+ that we are interested in. Default: None.
+ recursive (bool): If set to True, recursively scan the
+ directory. Default: False.
+
+ Yields:
+ Iterable[str]: A relative path to ``dir_path``.
+ """
+ if list_dir and suffix is not None:
+ raise TypeError('`suffix` should be None when `list_dir` is True')
+
+ if (suffix is not None) and not isinstance(suffix, (str, tuple)):
+ raise TypeError('`suffix` must be a string or tuple of strings')
+
+ root = dir_path
+
+ def _list_dir_or_file(dir_path, list_dir, list_file, suffix,
+ recursive):
+ for entry in os.scandir(dir_path):
+ if not entry.name.startswith('.') and entry.is_file():
+ rel_path = osp.relpath(entry.path, root)
+ if (suffix is None
+ or rel_path.endswith(suffix)) and list_file:
+ yield rel_path
+ elif osp.isdir(entry.path):
+ if list_dir:
+ rel_dir = osp.relpath(entry.path, root)
+ yield rel_dir
+ if recursive:
+ yield from _list_dir_or_file(entry.path, list_dir,
+ list_file, suffix,
+ recursive)
+
+ return _list_dir_or_file(dir_path, list_dir, list_file, suffix,
+ recursive)
+
+
+class HTTPBackend(BaseStorageBackend):
+ """HTTP and HTTPS storage bachend."""
+
+ def get(self, filepath):
+ value_buf = urlopen(filepath).read()
+ return value_buf
+
+ def get_text(self, filepath, encoding='utf-8'):
+ value_buf = urlopen(filepath).read()
+ return value_buf.decode(encoding)
+
+ @contextmanager
+ def get_local_path(self, filepath: str) -> Iterable[str]:
+ """Download a file from ``filepath``.
+
+ ``get_local_path`` is decorated by :meth:`contextlib.contextmanager`. It
+ can be called with a ``with`` statement, and when exiting the
+ ``with`` statement, the temporary path will be released.
+
+ Args:
+ filepath (str): Download a file from ``filepath``.
+
+ Examples:
+ >>> client = HTTPBackend()
+ >>> # After exiting the ``with`` clause,
+ >>> # the path will be removed
+ >>> with client.get_local_path('http://path/of/your/file') as path:
+ ... # do something here
+ """
+ try:
+ f = tempfile.NamedTemporaryFile(delete=False)
+ f.write(self.get(filepath))
+ f.close()
+ yield f.name
+ finally:
+ os.remove(f.name)
+
+
+class FileClient:
+ """A general file client to access files in different backends.
+
+ The client loads a file or text from a specified backend given its path
+ and returns it as a binary or text object. There are two ways to choose
+ a backend: the backend name and the path prefix. Although both of them
+ can be used to choose a storage backend, ``backend`` has a higher
+ priority, that is, if both are set, the storage backend will be chosen
+ by the backend argument. If both are `None`, the disk backend will be
+ chosen. Note that other backend accessors can also be registered with a
+ given name, prefixes, and backend class. In addition, the singleton
+ pattern is used to avoid repeated object creation: if the arguments are
+ the same, the same object will be returned.
+
+ Args:
+ backend (str, optional): The storage backend type. Options are "disk",
+ "ceph", "memcached", "lmdb", "http" and "petrel". Default: None.
+ prefix (str, optional): The prefix of the registered storage backend.
+ Options are "s3", "http", "https". Default: None.
+
+ Examples:
+ >>> # only set backend
+ >>> file_client = FileClient(backend='petrel')
+ >>> # only set prefix
+ >>> file_client = FileClient(prefix='s3')
+ >>> # set both backend and prefix but use backend to choose client
+ >>> file_client = FileClient(backend='petrel', prefix='s3')
+ >>> # if the arguments are the same, the same object is returned
+ >>> file_client1 = FileClient(backend='petrel')
+ >>> file_client1 is file_client
+ True
+
+ Attributes:
+ client (:obj:`BaseStorageBackend`): The backend object.
+ """
+
+ _backends = {
+ 'disk': HardDiskBackend,
+ 'ceph': CephBackend,
+ 'memcached': MemcachedBackend,
+ 'lmdb': LmdbBackend,
+ 'petrel': PetrelBackend,
+ 'http': HTTPBackend,
+ }
+ # This collection is used to record the overridden backends; when a
+ # backend appears in the collection, the singleton pattern is disabled for
+ # that backend, because otherwise the object returned would be the
+ # backend instance created before the override.
+ _overridden_backends = set()
+ _prefix_to_backends = {
+ 's3': PetrelBackend,
+ 'http': HTTPBackend,
+ 'https': HTTPBackend,
+ }
+ _overridden_prefixes = set()
+
+ _instances = {}
+
+ def __new__(cls, backend=None, prefix=None, **kwargs):
+ if backend is None and prefix is None:
+ backend = 'disk'
+ if backend is not None and backend not in cls._backends:
+ raise ValueError(
+ f'Backend {backend} is not supported. Currently supported ones'
+ f' are {list(cls._backends.keys())}')
+ if prefix is not None and prefix not in cls._prefix_to_backends:
+ raise ValueError(
+ f'prefix {prefix} is not supported. Currently supported ones '
+ f'are {list(cls._prefix_to_backends.keys())}')
+
+ # concatenate the arguments to a unique key for determining whether
+ # objects with the same arguments were created
+ arg_key = f'{backend}:{prefix}'
+ for key, value in kwargs.items():
+ arg_key += f':{key}:{value}'
+
+ # if a backend was overridden, it will create a new object
+ if (arg_key in cls._instances
+ and backend not in cls._overridden_backends
+ and prefix not in cls._overridden_prefixes):
+ _instance = cls._instances[arg_key]
+ else:
+ # create a new object and put it into _instances
+ _instance = super().__new__(cls)
+ if backend is not None:
+ _instance.client = cls._backends[backend](**kwargs)
+ else:
+ _instance.client = cls._prefix_to_backends[prefix](**kwargs)
+
+ cls._instances[arg_key] = _instance
+
+ return _instance
+
+ @property
+ def name(self):
+ return self.client.name
+
+ @property
+ def allow_symlink(self):
+ return self.client.allow_symlink
+
+ @staticmethod
+ def parse_uri_prefix(uri: Union[str, Path]) -> Optional[str]:
+ """Parse the prefix of a uri.
+
+ Args:
+ uri (str | Path): Uri to be parsed that contains the file prefix.
+
+ Examples:
+ >>> FileClient.parse_uri_prefix('s3://path/of/your/file')
+ 's3'
+
+ Returns:
+ str | None: Return the prefix of uri if the uri contains '://'
+ else ``None``.
+ """
+ assert is_filepath(uri)
+ uri = str(uri)
+ if '://' not in uri:
+ return None
+ else:
+ prefix, _ = uri.split('://')
+ # In the case of PetrelBackend, the prefix may contain the cluster
+ # name like clusterName:s3
+ if ':' in prefix:
+ _, prefix = prefix.split(':')
+ return prefix
+
+ @classmethod
+ def infer_client(cls,
+ file_client_args: Optional[dict] = None,
+ uri: Optional[Union[str, Path]] = None) -> 'FileClient':
+ """Infer a suitable file client based on the URI and arguments.
+
+ Args:
+ file_client_args (dict, optional): Arguments to instantiate a
+ FileClient. Default: None.
+ uri (str | Path, optional): Uri to be parsed that contains the file
+ prefix. Default: None.
+
+ Examples:
+ >>> uri = 's3://path/of/your/file'
+ >>> file_client = FileClient.infer_client(uri=uri)
+ >>> file_client_args = {'backend': 'petrel'}
+ >>> file_client = FileClient.infer_client(file_client_args)
+
+ Returns:
+ FileClient: Instantiated FileClient object.
+ """
+ assert file_client_args is not None or uri is not None
+ if file_client_args is None:
+ file_prefix = cls.parse_uri_prefix(uri) # type: ignore
+ return cls(prefix=file_prefix)
+ else:
+ return cls(**file_client_args)
+
+ @classmethod
+ def _register_backend(cls, name, backend, force=False, prefixes=None):
+ if not isinstance(name, str):
+ raise TypeError('the backend name should be a string, '
+ f'but got {type(name)}')
+ if not inspect.isclass(backend):
+ raise TypeError(
+ f'backend should be a class but got {type(backend)}')
+ if not issubclass(backend, BaseStorageBackend):
+ raise TypeError(
+ f'backend {backend} is not a subclass of BaseStorageBackend')
+ if not force and name in cls._backends:
+ raise KeyError(
+ f'{name} is already registered as a storage backend, '
+ 'add "force=True" if you want to override it')
+
+ if name in cls._backends and force:
+ cls._overridden_backends.add(name)
+ cls._backends[name] = backend
+
+ if prefixes is not None:
+ if isinstance(prefixes, str):
+ prefixes = [prefixes]
+ else:
+ assert isinstance(prefixes, (list, tuple))
+ for prefix in prefixes:
+ if prefix not in cls._prefix_to_backends:
+ cls._prefix_to_backends[prefix] = backend
+ elif (prefix in cls._prefix_to_backends) and force:
+ cls._overridden_prefixes.add(prefix)
+ cls._prefix_to_backends[prefix] = backend
+ else:
+ raise KeyError(
+ f'{prefix} is already registered as a storage backend,'
+ ' add "force=True" if you want to override it')
+
+ @classmethod
+ def register_backend(cls, name, backend=None, force=False, prefixes=None):
+ """Register a backend to FileClient.
+
+ This method can be used as a normal class method or a decorator.
+
+ .. code-block:: python
+
+ class NewBackend(BaseStorageBackend):
+
+ def get(self, filepath):
+ return filepath
+
+ def get_text(self, filepath):
+ return filepath
+
+ FileClient.register_backend('new', NewBackend)
+
+ or
+
+ .. code-block:: python
+
+ @FileClient.register_backend('new')
+ class NewBackend(BaseStorageBackend):
+
+ def get(self, filepath):
+ return filepath
+
+ def get_text(self, filepath):
+ return filepath
+
+ Args:
+ name (str): The name of the registered backend.
+ backend (class, optional): The backend class to be registered,
+ which must be a subclass of :class:`BaseStorageBackend`.
+ When this method is used as a decorator, backend is None.
+ Defaults to None.
+ force (bool, optional): Whether to override the backend if the name
+ has already been registered. Defaults to False.
+ prefixes (str or list[str] or tuple[str], optional): The prefixes
+ of the registered storage backend. Default: None.
+ `New in version 1.3.15.`
+ """
+ if backend is not None:
+ cls._register_backend(
+ name, backend, force=force, prefixes=prefixes)
+ return
+
+ def _register(backend_cls):
+ cls._register_backend(
+ name, backend_cls, force=force, prefixes=prefixes)
+ return backend_cls
+
+ return _register
+
+ def get(self, filepath: Union[str, Path]) -> Union[bytes, memoryview]:
+ """Read data from a given ``filepath`` with 'rb' mode.
+
+ Note:
+ There are two types of return values for ``get``, one is ``bytes``
+ and the other is ``memoryview``. The advantage of using memoryview
+ is that you can avoid copying, and if you want to convert it to
+ ``bytes``, you can use ``.tobytes()``.
+
+ Args:
+ filepath (str or Path): Path to read data.
+
+ Returns:
+ bytes | memoryview: Expected bytes object or a memory view of the
+ bytes object.
+ """
+ return self.client.get(filepath)
+
+ def get_text(self, filepath: Union[str, Path], encoding='utf-8') -> str:
+ """Read data from a given ``filepath`` with 'r' mode.
+
+ Args:
+ filepath (str or Path): Path to read data.
+ encoding (str): The encoding format used to open the ``filepath``.
+ Default: 'utf-8'.
+
+ Returns:
+ str: Expected text reading from ``filepath``.
+ """
+ return self.client.get_text(filepath, encoding)
+
+ def put(self, obj: bytes, filepath: Union[str, Path]) -> None:
+ """Write data to a given ``filepath`` with 'wb' mode.
+
+ Note:
+ ``put`` should create a directory if the directory of ``filepath``
+ does not exist.
+
+ Args:
+ obj (bytes): Data to be written.
+ filepath (str or Path): Path to write data.
+ """
+ self.client.put(obj, filepath)
+
+ def put_text(self, obj: str, filepath: Union[str, Path]) -> None:
+ """Write data to a given ``filepath`` with 'w' mode.
+
+ Note:
+ ``put_text`` should create a directory if the directory of
+ ``filepath`` does not exist.
+
+ Args:
+ obj (str): Data to be written.
+ filepath (str or Path): Path to write data. The text encoding is
+ handled by the underlying backend's ``put_text`` (which defaults
+ to 'utf-8').
+ """
+ self.client.put_text(obj, filepath)
+
+ def remove(self, filepath: Union[str, Path]) -> None:
+ """Remove a file.
+
+ Args:
+ filepath (str, Path): Path to be removed.
+ """
+ self.client.remove(filepath)
+
+ def exists(self, filepath: Union[str, Path]) -> bool:
+ """Check whether a file path exists.
+
+ Args:
+ filepath (str or Path): Path to be checked whether exists.
+
+ Returns:
+ bool: Return ``True`` if ``filepath`` exists, ``False`` otherwise.
+ """
+ return self.client.exists(filepath)
+
+ def isdir(self, filepath: Union[str, Path]) -> bool:
+ """Check whether a file path is a directory.
+
+ Args:
+ filepath (str or Path): Path to be checked whether it is a
+ directory.
+
+ Returns:
+ bool: Return ``True`` if ``filepath`` points to a directory,
+ ``False`` otherwise.
+ """
+ return self.client.isdir(filepath)
+
+ def isfile(self, filepath: Union[str, Path]) -> bool:
+ """Check whether a file path is a file.
+
+ Args:
+ filepath (str or Path): Path to be checked whether it is a file.
+
+ Returns:
+ bool: Return ``True`` if ``filepath`` points to a file, ``False``
+ otherwise.
+ """
+ return self.client.isfile(filepath)
+
+ def join_path(self, filepath: Union[str, Path],
+ *filepaths: Union[str, Path]) -> str:
+ """Concatenate all file paths.
+
+ Join one or more filepath components intelligently. The return value
+ is the concatenation of filepath and any members of *filepaths.
+
+ Args:
+ filepath (str or Path): Path to be concatenated.
+
+ Returns:
+ str: The result of concatenation.
+ """
+ return self.client.join_path(filepath, *filepaths)
+
+ @contextmanager
+ def get_local_path(self, filepath: Union[str, Path]) -> Iterable[str]:
+ """Download data from ``filepath`` and write the data to local path.
+
+ ``get_local_path`` is decorated by :meth:`contextlib.contextmanager`. It
+ can be called with a ``with`` statement, and when exiting the
+ ``with`` statement, the temporary path will be released.
+
+ Note:
+ If the ``filepath`` is a local path, just return itself.
+
+ .. warning::
+ ``get_local_path`` is an experimental interface that may change in
+ the future.
+
+ Args:
+ filepath (str or Path): Path to be read data.
+
+ Examples:
+ >>> file_client = FileClient(prefix='s3')
+ >>> with file_client.get_local_path('s3://bucket/abc.jpg') as path:
+ ... # do something here
+
+ Yields:
+ Iterable[str]: Only yield one path.
+ """
+ with self.client.get_local_path(str(filepath)) as local_path:
+ yield local_path
+
+ def list_dir_or_file(self,
+ dir_path: Union[str, Path],
+ list_dir: bool = True,
+ list_file: bool = True,
+ suffix: Optional[Union[str, Tuple[str]]] = None,
+ recursive: bool = False) -> Iterator[str]:
+ """Scan a directory to find the interested directories or files in
+ arbitrary order.
+
+ Note:
+ :meth:`list_dir_or_file` returns the path relative to ``dir_path``.
+
+ Args:
+ dir_path (str | Path): Path of the directory.
+ list_dir (bool): List the directories. Default: True.
+ list_file (bool): List the path of files. Default: True.
+ suffix (str or tuple[str], optional): File suffix
+ that we are interested in. Default: None.
+ recursive (bool): If set to True, recursively scan the
+ directory. Default: False.
+
+ Yields:
+ Iterable[str]: A relative path to ``dir_path``.
+ """
+ yield from self.client.list_dir_or_file(dir_path, list_dir, list_file,
+ suffix, recursive)
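A small sketch of the ``FileClient`` API added above: the default disk backend, the singleton behaviour, and registering a custom backend bound to a new URI prefix. The ``EchoBackend`` class and the ``/tmp`` path are illustrative only and are not part of the diff.

from annotator.uniformer.mmcv.fileio import BaseStorageBackend, FileClient

client = FileClient(backend='disk')
client.put_text('hello', '/tmp/fileclient_demo/a.txt')  # creates the directory
print(client.get_text('/tmp/fileclient_demo/a.txt'))    # hello
print(FileClient(backend='disk') is client)             # True (singleton)

@FileClient.register_backend('echo', prefixes='echo')
class EchoBackend(BaseStorageBackend):
    """Toy backend that just returns the path it was asked for."""

    def get(self, filepath):
        return filepath.encode()

    def get_text(self, filepath, encoding='utf-8'):
        return filepath

echo_client = FileClient.infer_client(uri='echo://anything')
print(echo_client.get_text('echo://x'))  # echo://x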
diff --git a/annotator/uniformer/mmcv/fileio/handlers/__init__.py b/annotator/uniformer/mmcv/fileio/handlers/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..aa24d91972837b8756b225f4879bac20436eb72a
--- /dev/null
+++ b/annotator/uniformer/mmcv/fileio/handlers/__init__.py
@@ -0,0 +1,7 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from .base import BaseFileHandler
+from .json_handler import JsonHandler
+from .pickle_handler import PickleHandler
+from .yaml_handler import YamlHandler
+
+__all__ = ['BaseFileHandler', 'JsonHandler', 'PickleHandler', 'YamlHandler']
diff --git a/annotator/uniformer/mmcv/fileio/handlers/__pycache__/__init__.cpython-310.pyc b/annotator/uniformer/mmcv/fileio/handlers/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ab86f4613357acdd6ff45a8d5a7d20c8e7251138
Binary files /dev/null and b/annotator/uniformer/mmcv/fileio/handlers/__pycache__/__init__.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/fileio/handlers/__pycache__/__init__.cpython-38.pyc b/annotator/uniformer/mmcv/fileio/handlers/__pycache__/__init__.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e685a922da511bdc946d903539837060deb67a72
Binary files /dev/null and b/annotator/uniformer/mmcv/fileio/handlers/__pycache__/__init__.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/fileio/handlers/__pycache__/base.cpython-310.pyc b/annotator/uniformer/mmcv/fileio/handlers/__pycache__/base.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0ea3b922122e610450ce30c0274f58a259fd7b3d
Binary files /dev/null and b/annotator/uniformer/mmcv/fileio/handlers/__pycache__/base.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/fileio/handlers/__pycache__/base.cpython-38.pyc b/annotator/uniformer/mmcv/fileio/handlers/__pycache__/base.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7dafd9f98772538dda77483991315cd4b42941a6
Binary files /dev/null and b/annotator/uniformer/mmcv/fileio/handlers/__pycache__/base.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/fileio/handlers/__pycache__/json_handler.cpython-310.pyc b/annotator/uniformer/mmcv/fileio/handlers/__pycache__/json_handler.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..df2fc2b0c98727784dc778193d9e6ccacd8bfcf6
Binary files /dev/null and b/annotator/uniformer/mmcv/fileio/handlers/__pycache__/json_handler.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/fileio/handlers/__pycache__/json_handler.cpython-38.pyc b/annotator/uniformer/mmcv/fileio/handlers/__pycache__/json_handler.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..31557d390d157cae4059f35d86a62b3fe11d8283
Binary files /dev/null and b/annotator/uniformer/mmcv/fileio/handlers/__pycache__/json_handler.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/fileio/handlers/__pycache__/pickle_handler.cpython-310.pyc b/annotator/uniformer/mmcv/fileio/handlers/__pycache__/pickle_handler.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..067638774bc1d51a105effb82ef0a4735416cf83
Binary files /dev/null and b/annotator/uniformer/mmcv/fileio/handlers/__pycache__/pickle_handler.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/fileio/handlers/__pycache__/pickle_handler.cpython-38.pyc b/annotator/uniformer/mmcv/fileio/handlers/__pycache__/pickle_handler.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..40c6b0392bb827951c0576c88dd25e9fe3a1a206
Binary files /dev/null and b/annotator/uniformer/mmcv/fileio/handlers/__pycache__/pickle_handler.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/fileio/handlers/__pycache__/yaml_handler.cpython-310.pyc b/annotator/uniformer/mmcv/fileio/handlers/__pycache__/yaml_handler.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..be99adce4d83ff1215a3363776ba8f71629d1ae5
Binary files /dev/null and b/annotator/uniformer/mmcv/fileio/handlers/__pycache__/yaml_handler.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/fileio/handlers/__pycache__/yaml_handler.cpython-38.pyc b/annotator/uniformer/mmcv/fileio/handlers/__pycache__/yaml_handler.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d24a548683198e1c2972f20653f89ebe9b247a2b
Binary files /dev/null and b/annotator/uniformer/mmcv/fileio/handlers/__pycache__/yaml_handler.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/fileio/handlers/base.py b/annotator/uniformer/mmcv/fileio/handlers/base.py
new file mode 100644
index 0000000000000000000000000000000000000000..288878bc57282fbb2f12b32290152ca8e9d3cab0
--- /dev/null
+++ b/annotator/uniformer/mmcv/fileio/handlers/base.py
@@ -0,0 +1,30 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from abc import ABCMeta, abstractmethod
+
+
+class BaseFileHandler(metaclass=ABCMeta):
+ # `str_like` is a flag indicating whether the file object type is
+ # str-like or bytes-like. Pickle only processes bytes-like objects
+ # while json only processes str-like objects. If it is str-like,
+ # `StringIO` will be used to process the buffer.
+ str_like = True
+
+ @abstractmethod
+ def load_from_fileobj(self, file, **kwargs):
+ pass
+
+ @abstractmethod
+ def dump_to_fileobj(self, obj, file, **kwargs):
+ pass
+
+ @abstractmethod
+ def dump_to_str(self, obj, **kwargs):
+ pass
+
+ def load_from_path(self, filepath, mode='r', **kwargs):
+ with open(filepath, mode) as f:
+ return self.load_from_fileobj(f, **kwargs)
+
+ def dump_to_path(self, obj, filepath, mode='w', **kwargs):
+ with open(filepath, mode) as f:
+ self.dump_to_fileobj(obj, f, **kwargs)
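As a sketch, a minimal concrete handler built on the base class above. ``TxtHandler`` and the ``/tmp`` path are illustrative; this diff does not register such a handler anywhere.

from annotator.uniformer.mmcv.fileio.handlers import BaseFileHandler

class TxtHandler(BaseFileHandler):
    str_like = True  # io.load/io.dump would route the buffer through StringIO

    def load_from_fileobj(self, file, **kwargs):
        return file.read()

    def dump_to_fileobj(self, obj, file, **kwargs):
        file.write(str(obj))

    def dump_to_str(self, obj, **kwargs):
        return str(obj)

handler = TxtHandler()
handler.dump_to_path('hello', '/tmp/demo.txt')   # base class opens with mode='w'
print(handler.load_from_path('/tmp/demo.txt'))   # hello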
diff --git a/annotator/uniformer/mmcv/fileio/handlers/json_handler.py b/annotator/uniformer/mmcv/fileio/handlers/json_handler.py
new file mode 100644
index 0000000000000000000000000000000000000000..18d4f15f74139d20adff18b20be5529c592a66b6
--- /dev/null
+++ b/annotator/uniformer/mmcv/fileio/handlers/json_handler.py
@@ -0,0 +1,36 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import json
+
+import numpy as np
+
+from .base import BaseFileHandler
+
+
+def set_default(obj):
+ """Set default json values for non-serializable values.
+
+ It helps convert ``set``, ``range`` and ``np.ndarray`` data types to list.
+ It also converts ``np.generic`` (including ``np.int32``, ``np.float32``,
+ etc.) into plain numbers of python built-in types.
+ """
+ if isinstance(obj, (set, range)):
+ return list(obj)
+ elif isinstance(obj, np.ndarray):
+ return obj.tolist()
+ elif isinstance(obj, np.generic):
+ return obj.item()
+ raise TypeError(f'{type(obj)} is unsupported for json dump')
+
+
+class JsonHandler(BaseFileHandler):
+
+ def load_from_fileobj(self, file):
+ return json.load(file)
+
+ def dump_to_fileobj(self, obj, file, **kwargs):
+ kwargs.setdefault('default', set_default)
+ json.dump(obj, file, **kwargs)
+
+ def dump_to_str(self, obj, **kwargs):
+ kwargs.setdefault('default', set_default)
+ return json.dumps(obj, **kwargs)
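A quick sketch of the ``set_default`` hook in action through ``JsonHandler``; the payload values are illustrative.

import numpy as np

from annotator.uniformer.mmcv.fileio.handlers import JsonHandler

handler = JsonHandler()
payload = {'ids': {1, 2}, 'span': range(3), 'score': np.float32(0.5)}
# sets/ranges become lists, numpy scalars become plain python numbers
print(handler.dump_to_str(payload))
# e.g. {"ids": [1, 2], "span": [0, 1, 2], "score": 0.5}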
diff --git a/annotator/uniformer/mmcv/fileio/handlers/pickle_handler.py b/annotator/uniformer/mmcv/fileio/handlers/pickle_handler.py
new file mode 100644
index 0000000000000000000000000000000000000000..b37c79bed4ef9fd8913715e62dbe3fc5cafdc3aa
--- /dev/null
+++ b/annotator/uniformer/mmcv/fileio/handlers/pickle_handler.py
@@ -0,0 +1,28 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import pickle
+
+from .base import BaseFileHandler
+
+
+class PickleHandler(BaseFileHandler):
+
+ str_like = False
+
+ def load_from_fileobj(self, file, **kwargs):
+ return pickle.load(file, **kwargs)
+
+ def load_from_path(self, filepath, **kwargs):
+ return super(PickleHandler, self).load_from_path(
+ filepath, mode='rb', **kwargs)
+
+ def dump_to_str(self, obj, **kwargs):
+ kwargs.setdefault('protocol', 2)
+ return pickle.dumps(obj, **kwargs)
+
+ def dump_to_fileobj(self, obj, file, **kwargs):
+ kwargs.setdefault('protocol', 2)
+ pickle.dump(obj, file, **kwargs)
+
+ def dump_to_path(self, obj, filepath, **kwargs):
+ super(PickleHandler, self).dump_to_path(
+ obj, filepath, mode='wb', **kwargs)
diff --git a/annotator/uniformer/mmcv/fileio/handlers/yaml_handler.py b/annotator/uniformer/mmcv/fileio/handlers/yaml_handler.py
new file mode 100644
index 0000000000000000000000000000000000000000..c5aa2eea1e8c76f8baf753d1c8c959dee665e543
--- /dev/null
+++ b/annotator/uniformer/mmcv/fileio/handlers/yaml_handler.py
@@ -0,0 +1,24 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import yaml
+
+try:
+ from yaml import CLoader as Loader, CDumper as Dumper
+except ImportError:
+ from yaml import Loader, Dumper
+
+from .base import BaseFileHandler # isort:skip
+
+
+class YamlHandler(BaseFileHandler):
+
+ def load_from_fileobj(self, file, **kwargs):
+ kwargs.setdefault('Loader', Loader)
+ return yaml.load(file, **kwargs)
+
+ def dump_to_fileobj(self, obj, file, **kwargs):
+ kwargs.setdefault('Dumper', Dumper)
+ yaml.dump(obj, file, **kwargs)
+
+ def dump_to_str(self, obj, **kwargs):
+ kwargs.setdefault('Dumper', Dumper)
+ return yaml.dump(obj, **kwargs)
diff --git a/annotator/uniformer/mmcv/fileio/io.py b/annotator/uniformer/mmcv/fileio/io.py
new file mode 100644
index 0000000000000000000000000000000000000000..aaefde58aa3ea5b58f86249ce7e1c40c186eb8dd
--- /dev/null
+++ b/annotator/uniformer/mmcv/fileio/io.py
@@ -0,0 +1,151 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from io import BytesIO, StringIO
+from pathlib import Path
+
+from ..utils import is_list_of, is_str
+from .file_client import FileClient
+from .handlers import BaseFileHandler, JsonHandler, PickleHandler, YamlHandler
+
+file_handlers = {
+ 'json': JsonHandler(),
+ 'yaml': YamlHandler(),
+ 'yml': YamlHandler(),
+ 'pickle': PickleHandler(),
+ 'pkl': PickleHandler()
+}
+
+
+def load(file, file_format=None, file_client_args=None, **kwargs):
+ """Load data from json/yaml/pickle files.
+
+ This method provides a unified api for loading data from serialized files.
+
+ Note:
+ In v1.3.16 and later, ``load`` supports loading data from serialized
+ files that can be stored in different backends.
+
+ Args:
+ file (str or :obj:`Path` or file-like object): Filename or a file-like
+ object.
+ file_format (str, optional): If not specified, the file format will be
+ inferred from the file extension, otherwise use the specified one.
+ Currently supported formats include "json", "yaml/yml" and
+ "pickle/pkl".
+ file_client_args (dict, optional): Arguments to instantiate a
+ FileClient. See :class:`mmcv.fileio.FileClient` for details.
+ Default: None.
+
+ Examples:
+ >>> load('/path/of/your/file') # file is storaged in disk
+ >>> load('https://path/of/your/file') # file is storaged in Internet
+ >>> load('s3://path/of/your/file') # file is storaged in petrel
+
+ Returns:
+ The content from the file.
+ """
+ if isinstance(file, Path):
+ file = str(file)
+ if file_format is None and is_str(file):
+ file_format = file.split('.')[-1]
+ if file_format not in file_handlers:
+ raise TypeError(f'Unsupported format: {file_format}')
+
+ handler = file_handlers[file_format]
+ if is_str(file):
+ file_client = FileClient.infer_client(file_client_args, file)
+ if handler.str_like:
+ with StringIO(file_client.get_text(file)) as f:
+ obj = handler.load_from_fileobj(f, **kwargs)
+ else:
+ with BytesIO(file_client.get(file)) as f:
+ obj = handler.load_from_fileobj(f, **kwargs)
+ elif hasattr(file, 'read'):
+ obj = handler.load_from_fileobj(file, **kwargs)
+ else:
+ raise TypeError('"file" must be a filepath str or a file-object')
+ return obj
+
+
+def dump(obj, file=None, file_format=None, file_client_args=None, **kwargs):
+ """Dump data to json/yaml/pickle strings or files.
+
+ This method provides a unified api for dumping data as strings or to files,
+ and also supports custom arguments for each file format.
+
+ Note:
+ In v1.3.16 and later, ``dump`` supports dumping data as strings or to
+ files that can be saved to different backends.
+
+ Args:
+ obj (any): The python object to be dumped.
+ file (str or :obj:`Path` or file-like object, optional): If not
+ specified, then the object is dumped to a str, otherwise to a file
+ specified by the filename or file-like object.
+ file_format (str, optional): Same as :func:`load`.
+ file_client_args (dict, optional): Arguments to instantiate a
+ FileClient. See :class:`mmcv.fileio.FileClient` for details.
+ Default: None.
+
+ Examples:
+ >>> dump('hello world', '/path/of/your/file') # disk
+ >>> dump('hello world', 's3://path/of/your/file') # ceph or petrel
+
+ Returns:
+ bool: True for success, False otherwise.
+ """
+ if isinstance(file, Path):
+ file = str(file)
+ if file_format is None:
+ if is_str(file):
+ file_format = file.split('.')[-1]
+ elif file is None:
+ raise ValueError(
+ 'file_format must be specified since file is None')
+ if file_format not in file_handlers:
+ raise TypeError(f'Unsupported format: {file_format}')
+
+ handler = file_handlers[file_format]
+ if file is None:
+ return handler.dump_to_str(obj, **kwargs)
+ elif is_str(file):
+ file_client = FileClient.infer_client(file_client_args, file)
+ if handler.str_like:
+ with StringIO() as f:
+ handler.dump_to_fileobj(obj, f, **kwargs)
+ file_client.put_text(f.getvalue(), file)
+ else:
+ with BytesIO() as f:
+ handler.dump_to_fileobj(obj, f, **kwargs)
+ file_client.put(f.getvalue(), file)
+ elif hasattr(file, 'write'):
+ handler.dump_to_fileobj(obj, file, **kwargs)
+ else:
+ raise TypeError('"file" must be a filename str or a file-object')
+
+
+def _register_handler(handler, file_formats):
+ """Register a handler for some file extensions.
+
+ Args:
+ handler (:obj:`BaseFileHandler`): Handler to be registered.
+ file_formats (str or list[str]): File formats to be handled by this
+ handler.
+ """
+ if not isinstance(handler, BaseFileHandler):
+ raise TypeError(
+ f'handler must be a child of BaseFileHandler, not {type(handler)}')
+ if isinstance(file_formats, str):
+ file_formats = [file_formats]
+ if not is_list_of(file_formats, str):
+ raise TypeError('file_formats must be a str or a list of str')
+ for ext in file_formats:
+ file_handlers[ext] = handler
+
+
+def register_handler(file_formats, **kwargs):
+
+ def wrap(cls):
+ _register_handler(cls(**kwargs), file_formats)
+ return cls
+
+ return wrap
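+
+
+# Usage sketch (illustrative only): registering a handler for a new extension
+# with the decorator above; ``TxtHandler`` is a hypothetical example class.
+# >>> @register_handler('txt')
+# ... class TxtHandler(BaseFileHandler):
+# ...     def load_from_fileobj(self, file):
+# ...         return file.read()
+# ...     def dump_to_fileobj(self, obj, file):
+# ...         file.write(str(obj))
+# ...     def dump_to_str(self, obj):
+# ...         return str(obj)
+# >>> load('notes.txt')  # now dispatched to TxtHandler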
diff --git a/annotator/uniformer/mmcv/fileio/parse.py b/annotator/uniformer/mmcv/fileio/parse.py
new file mode 100644
index 0000000000000000000000000000000000000000..f60f0d611b8d75692221d0edd7dc993b0a6445c9
--- /dev/null
+++ b/annotator/uniformer/mmcv/fileio/parse.py
@@ -0,0 +1,97 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+
+from io import StringIO
+
+from .file_client import FileClient
+
+
+def list_from_file(filename,
+ prefix='',
+ offset=0,
+ max_num=0,
+ encoding='utf-8',
+ file_client_args=None):
+ """Load a text file and parse the content as a list of strings.
+
+ Note:
+ In v1.3.16 and later, ``list_from_file`` supports loading a text file
+ which can be stored in different backends and parsing the content as
+ a list of strings.
+
+ Args:
+ filename (str): Filename.
+ prefix (str): The prefix to be inserted at the beginning of each item.
+ offset (int): The number of lines to skip from the beginning.
+ max_num (int): The maximum number of lines to be read;
+ zero or negative values mean no limit.
+ encoding (str): Encoding used to open the file. Default utf-8.
+ file_client_args (dict, optional): Arguments to instantiate a
+ FileClient. See :class:`mmcv.fileio.FileClient` for details.
+ Default: None.
+
+ Examples:
+ >>> list_from_file('/path/of/your/file') # disk
+ ['hello', 'world']
+ >>> list_from_file('s3://path/of/your/file') # ceph or petrel
+ ['hello', 'world']
+
+ Returns:
+ list[str]: A list of strings.
+ """
+ cnt = 0
+ item_list = []
+ file_client = FileClient.infer_client(file_client_args, filename)
+ with StringIO(file_client.get_text(filename, encoding)) as f:
+ for _ in range(offset):
+ f.readline()
+ for line in f:
+ if 0 < max_num <= cnt:
+ break
+ item_list.append(prefix + line.rstrip('\n\r'))
+ cnt += 1
+ return item_list
+
+
+def dict_from_file(filename,
+ key_type=str,
+ encoding='utf-8',
+ file_client_args=None):
+ """Load a text file and parse the content as a dict.
+
+ Each line of the text file is split into two or more columns by
+ whitespace or tabs. The first column will be parsed as dict keys, and
+ the following columns will be parsed as dict values.
+
+ Note:
+ In v1.3.16 and later, ``dict_from_file`` supports loading a text file
+ which can be stored in different backends and parsing the content as
+ a dict.
+
+ Args:
+ filename (str): Filename.
+ key_type (type): Type of the dict keys. str is used by default and
+ type conversion will be performed if specified.
+ encoding (str): Encoding used to open the file. Default utf-8.
+ file_client_args (dict, optional): Arguments to instantiate a
+ FileClient. See :class:`mmcv.fileio.FileClient` for details.
+ Default: None.
+
+ Examples:
+ >>> dict_from_file('/path/of/your/file') # disk
+ {'key1': 'value1', 'key2': 'value2'}
+ >>> dict_from_file('s3://path/of/your/file') # ceph or petrel
+ {'key1': 'value1', 'key2': 'value2'}
+
+ Returns:
+ dict: The parsed contents.
+ """
+ mapping = {}
+ file_client = FileClient.infer_client(file_client_args, filename)
+ with StringIO(file_client.get_text(filename, encoding)) as f:
+ for line in f:
+ items = line.rstrip('\n').split()
+ assert len(items) >= 2
+ key = key_type(items[0])
+ val = items[1:] if len(items) > 2 else items[1]
+ mapping[key] = val
+ return mapping
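+
+
+# Usage sketch (illustrative only): ``key_type`` converts the first column and
+# lines with more than two columns map to lists. Assuming 'mapping.txt'
+# contains "1 cat" and "2 dog tiger" on two lines:
+# >>> dict_from_file('mapping.txt', key_type=int)
+# {1: 'cat', 2: ['dog', 'tiger']}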
diff --git a/annotator/uniformer/mmcv/image/__init__.py b/annotator/uniformer/mmcv/image/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..d0051d609d3de4e7562e3fe638335c66617c4d91
--- /dev/null
+++ b/annotator/uniformer/mmcv/image/__init__.py
@@ -0,0 +1,28 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from .colorspace import (bgr2gray, bgr2hls, bgr2hsv, bgr2rgb, bgr2ycbcr,
+ gray2bgr, gray2rgb, hls2bgr, hsv2bgr, imconvert,
+ rgb2bgr, rgb2gray, rgb2ycbcr, ycbcr2bgr, ycbcr2rgb)
+from .geometric import (cutout, imcrop, imflip, imflip_, impad,
+ impad_to_multiple, imrescale, imresize, imresize_like,
+ imresize_to_multiple, imrotate, imshear, imtranslate,
+ rescale_size)
+from .io import imfrombytes, imread, imwrite, supported_backends, use_backend
+from .misc import tensor2imgs
+from .photometric import (adjust_brightness, adjust_color, adjust_contrast,
+ adjust_lighting, adjust_sharpness, auto_contrast,
+ clahe, imdenormalize, imequalize, iminvert,
+ imnormalize, imnormalize_, lut_transform, posterize,
+ solarize)
+
+__all__ = [
+ 'bgr2gray', 'bgr2hls', 'bgr2hsv', 'bgr2rgb', 'gray2bgr', 'gray2rgb',
+ 'hls2bgr', 'hsv2bgr', 'imconvert', 'rgb2bgr', 'rgb2gray', 'imrescale',
+ 'imresize', 'imresize_like', 'imresize_to_multiple', 'rescale_size',
+ 'imcrop', 'imflip', 'imflip_', 'impad', 'impad_to_multiple', 'imrotate',
+ 'imfrombytes', 'imread', 'imwrite', 'supported_backends', 'use_backend',
+ 'imdenormalize', 'imnormalize', 'imnormalize_', 'iminvert', 'posterize',
+ 'solarize', 'rgb2ycbcr', 'bgr2ycbcr', 'ycbcr2rgb', 'ycbcr2bgr',
+ 'tensor2imgs', 'imshear', 'imtranslate', 'adjust_color', 'imequalize',
+ 'adjust_brightness', 'adjust_contrast', 'lut_transform', 'clahe',
+ 'adjust_sharpness', 'auto_contrast', 'cutout', 'adjust_lighting'
+]
diff --git a/annotator/uniformer/mmcv/image/__pycache__/__init__.cpython-310.pyc b/annotator/uniformer/mmcv/image/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0cbd1e65ac17d54b4030c08fdab4db8562f2c2cf
Binary files /dev/null and b/annotator/uniformer/mmcv/image/__pycache__/__init__.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/image/__pycache__/__init__.cpython-38.pyc b/annotator/uniformer/mmcv/image/__pycache__/__init__.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..38fb300166846141f4bb79dba3d20083b9be3d75
Binary files /dev/null and b/annotator/uniformer/mmcv/image/__pycache__/__init__.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/image/__pycache__/colorspace.cpython-310.pyc b/annotator/uniformer/mmcv/image/__pycache__/colorspace.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e211201cc73cff06c2bca8667d744d5cf2d1b5a8
Binary files /dev/null and b/annotator/uniformer/mmcv/image/__pycache__/colorspace.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/image/__pycache__/colorspace.cpython-38.pyc b/annotator/uniformer/mmcv/image/__pycache__/colorspace.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..87266d1679fa94ebfacb5e53be71fa280b0657d9
Binary files /dev/null and b/annotator/uniformer/mmcv/image/__pycache__/colorspace.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/image/__pycache__/geometric.cpython-310.pyc b/annotator/uniformer/mmcv/image/__pycache__/geometric.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e175845192872b4a39680fb6a5fc88bdca8e4958
Binary files /dev/null and b/annotator/uniformer/mmcv/image/__pycache__/geometric.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/image/__pycache__/geometric.cpython-38.pyc b/annotator/uniformer/mmcv/image/__pycache__/geometric.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..99ce071bca64a65f2db8bf22f8715391dbd8f831
Binary files /dev/null and b/annotator/uniformer/mmcv/image/__pycache__/geometric.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/image/__pycache__/io.cpython-310.pyc b/annotator/uniformer/mmcv/image/__pycache__/io.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cdc86b0f4046506b4269f11cb1ce66878f12a766
Binary files /dev/null and b/annotator/uniformer/mmcv/image/__pycache__/io.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/image/__pycache__/io.cpython-38.pyc b/annotator/uniformer/mmcv/image/__pycache__/io.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3c80d973e67d07c72e702ff0132a8def69bc6937
Binary files /dev/null and b/annotator/uniformer/mmcv/image/__pycache__/io.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/image/__pycache__/misc.cpython-310.pyc b/annotator/uniformer/mmcv/image/__pycache__/misc.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..365f82c14aab1c2bf7ebcf4715fc5c46a72e184e
Binary files /dev/null and b/annotator/uniformer/mmcv/image/__pycache__/misc.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/image/__pycache__/misc.cpython-38.pyc b/annotator/uniformer/mmcv/image/__pycache__/misc.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d690e07841c42b3f1f0e823d71a3696f9ad0718e
Binary files /dev/null and b/annotator/uniformer/mmcv/image/__pycache__/misc.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/image/__pycache__/photometric.cpython-310.pyc b/annotator/uniformer/mmcv/image/__pycache__/photometric.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c5ae7212a0e99571c0fe5888f437461ca1d6031f
Binary files /dev/null and b/annotator/uniformer/mmcv/image/__pycache__/photometric.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/image/__pycache__/photometric.cpython-38.pyc b/annotator/uniformer/mmcv/image/__pycache__/photometric.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..423b6bdbd36c12b55ae8d15fa3db792c8b6d9109
Binary files /dev/null and b/annotator/uniformer/mmcv/image/__pycache__/photometric.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/image/colorspace.py b/annotator/uniformer/mmcv/image/colorspace.py
new file mode 100644
index 0000000000000000000000000000000000000000..814533952fdfda23d67cb6a3073692d8c1156add
--- /dev/null
+++ b/annotator/uniformer/mmcv/image/colorspace.py
@@ -0,0 +1,306 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import cv2
+import numpy as np
+
+
+def imconvert(img, src, dst):
+ """Convert an image from the src colorspace to dst colorspace.
+
+ Args:
+ img (ndarray): The input image.
+ src (str): The source colorspace, e.g., 'rgb', 'hsv'.
+ dst (str): The destination colorspace, e.g., 'rgb', 'hsv'.
+
+ Returns:
+ ndarray: The converted image.
+ """
+ code = getattr(cv2, f'COLOR_{src.upper()}2{dst.upper()}')
+ out_img = cv2.cvtColor(img, code)
+ return out_img
+
+
+def bgr2gray(img, keepdim=False):
+ """Convert a BGR image to grayscale image.
+
+ Args:
+ img (ndarray): The input image.
+ keepdim (bool): If False (by default), then return the grayscale image
+ with 2 dims, otherwise 3 dims.
+
+ Returns:
+ ndarray: The converted grayscale image.
+ """
+ out_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
+ if keepdim:
+ out_img = out_img[..., None]
+ return out_img
+
+
+def rgb2gray(img, keepdim=False):
+ """Convert a RGB image to grayscale image.
+
+ Args:
+ img (ndarray): The input image.
+ keepdim (bool): If False (by default), then return the grayscale image
+ with 2 dims, otherwise 3 dims.
+
+ Returns:
+ ndarray: The converted grayscale image.
+ """
+ out_img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
+ if keepdim:
+ out_img = out_img[..., None]
+ return out_img
+
+
+def gray2bgr(img):
+ """Convert a grayscale image to BGR image.
+
+ Args:
+ img (ndarray): The input image.
+
+ Returns:
+ ndarray: The converted BGR image.
+ """
+ img = img[..., None] if img.ndim == 2 else img
+ out_img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
+ return out_img
+
+
+def gray2rgb(img):
+ """Convert a grayscale image to RGB image.
+
+ Args:
+ img (ndarray): The input image.
+
+ Returns:
+ ndarray: The converted RGB image.
+ """
+ img = img[..., None] if img.ndim == 2 else img
+ out_img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
+ return out_img
+
+
+def _convert_input_type_range(img):
+ """Convert the type and range of the input image.
+
+ It converts the input image to np.float32 type and range of [0, 1].
+ It is mainly used for pre-processing the input image in colorspace
+ conversion functions such as rgb2ycbcr and ycbcr2rgb.
+
+ Args:
+ img (ndarray): The input image. It accepts:
+ 1. np.uint8 type with range [0, 255];
+ 2. np.float32 type with range [0, 1].
+
+ Returns:
+ (ndarray): The converted image with type of np.float32 and range of
+ [0, 1].
+ """
+ img_type = img.dtype
+ img = img.astype(np.float32)
+ if img_type == np.float32:
+ pass
+ elif img_type == np.uint8:
+ img /= 255.
+ else:
+ raise TypeError('The img type should be np.float32 or np.uint8, '
+ f'but got {img_type}')
+ return img
+
+
+def _convert_output_type_range(img, dst_type):
+ """Convert the type and range of the image according to dst_type.
+
+ It converts the image to desired type and range. If `dst_type` is np.uint8,
+ images will be converted to np.uint8 type with range [0, 255]. If
+ `dst_type` is np.float32, it converts the image to np.float32 type with
+ range [0, 1].
+ It is mainly used for post-processing images in colorspace conversion
+ functions such as rgb2ycbcr and ycbcr2rgb.
+
+ Args:
+ img (ndarray): The image to be converted with np.float32 type and
+ range [0, 255].
+ dst_type (np.uint8 | np.float32): If dst_type is np.uint8, it
+ converts the image to np.uint8 type with range [0, 255]. If
+ dst_type is np.float32, it converts the image to np.float32 type
+ with range [0, 1].
+
+ Returns:
+ (ndarray): The converted image with desired type and range.
+ """
+ if dst_type not in (np.uint8, np.float32):
+ raise TypeError('The dst_type should be np.float32 or np.uint8, '
+ f'but got {dst_type}')
+ if dst_type == np.uint8:
+ img = img.round()
+ else:
+ img /= 255.
+ return img.astype(dst_type)
+
+
+def rgb2ycbcr(img, y_only=False):
+ """Convert a RGB image to YCbCr image.
+
+ This function produces the same results as Matlab's `rgb2ycbcr` function.
+ It implements the ITU-R BT.601 conversion for standard-definition
+ television. See more details in
+ https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion.
+
+ It differs from a similar function in cv2.cvtColor: `RGB <-> YCrCb`.
+ In OpenCV, it implements a JPEG conversion. See more details in
+ https://en.wikipedia.org/wiki/YCbCr#JPEG_conversion.
+
+ Args:
+ img (ndarray): The input image. It accepts:
+ 1. np.uint8 type with range [0, 255];
+ 2. np.float32 type with range [0, 1].
+ y_only (bool): Whether to only return Y channel. Default: False.
+
+ Returns:
+ ndarray: The converted YCbCr image. The output image has the same type
+ and range as input image.
+ """
+ img_type = img.dtype
+ img = _convert_input_type_range(img)
+ if y_only:
+ out_img = np.dot(img, [65.481, 128.553, 24.966]) + 16.0
+ else:
+ out_img = np.matmul(
+ img, [[65.481, -37.797, 112.0], [128.553, -74.203, -93.786],
+ [24.966, 112.0, -18.214]]) + [16, 128, 128]
+ out_img = _convert_output_type_range(out_img, img_type)
+ return out_img
+
+
+def bgr2ycbcr(img, y_only=False):
+ """Convert a BGR image to YCbCr image.
+
+ The bgr version of rgb2ycbcr.
+ It implements the ITU-R BT.601 conversion for standard-definition
+ television. See more details in
+ https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion.
+
+ It differs from a similar function in cv2.cvtColor: `BGR <-> YCrCb`.
+ In OpenCV, it implements a JPEG conversion. See more details in
+ https://en.wikipedia.org/wiki/YCbCr#JPEG_conversion.
+
+ Args:
+ img (ndarray): The input image. It accepts:
+ 1. np.uint8 type with range [0, 255];
+ 2. np.float32 type with range [0, 1].
+ y_only (bool): Whether to only return Y channel. Default: False.
+
+ Returns:
+ ndarray: The converted YCbCr image. The output image has the same type
+ and range as input image.
+ """
+ img_type = img.dtype
+ img = _convert_input_type_range(img)
+ if y_only:
+ out_img = np.dot(img, [24.966, 128.553, 65.481]) + 16.0
+ else:
+ out_img = np.matmul(
+ img, [[24.966, 112.0, -18.214], [128.553, -74.203, -93.786],
+ [65.481, -37.797, 112.0]]) + [16, 128, 128]
+ out_img = _convert_output_type_range(out_img, img_type)
+ return out_img
+
+
+def ycbcr2rgb(img):
+ """Convert a YCbCr image to RGB image.
+
+ This function produces the same results as Matlab's ycbcr2rgb function.
+ It implements the ITU-R BT.601 conversion for standard-definition
+ television. See more details in
+ https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion.
+
+ It differs from a similar function in cv2.cvtColor: `YCrCb <-> RGB`.
+ In OpenCV, it implements a JPEG conversion. See more details in
+ https://en.wikipedia.org/wiki/YCbCr#JPEG_conversion.
+
+ Args:
+ img (ndarray): The input image. It accepts:
+ 1. np.uint8 type with range [0, 255];
+ 2. np.float32 type with range [0, 1].
+
+ Returns:
+ ndarray: The converted RGB image. The output image has the same type
+ and range as input image.
+ """
+ img_type = img.dtype
+ img = _convert_input_type_range(img) * 255
+ out_img = np.matmul(img, [[0.00456621, 0.00456621, 0.00456621],
+ [0, -0.00153632, 0.00791071],
+ [0.00625893, -0.00318811, 0]]) * 255.0 + [
+ -222.921, 135.576, -276.836
+ ]
+ out_img = _convert_output_type_range(out_img, img_type)
+ return out_img
+
+
+def ycbcr2bgr(img):
+ """Convert a YCbCr image to BGR image.
+
+ The bgr version of ycbcr2rgb.
+ It implements the ITU-R BT.601 conversion for standard-definition
+ television. See more details in
+ https://en.wikipedia.org/wiki/YCbCr#ITU-R_BT.601_conversion.
+
+ It differs from a similar function in cv2.cvtColor: `YCrCb <-> BGR`.
+ In OpenCV, it implements a JPEG conversion. See more details in
+ https://en.wikipedia.org/wiki/YCbCr#JPEG_conversion.
+
+ Args:
+ img (ndarray): The input image. It accepts:
+ 1. np.uint8 type with range [0, 255];
+ 2. np.float32 type with range [0, 1].
+
+ Returns:
+ ndarray: The converted BGR image. The output image has the same type
+ and range as input image.
+ """
+ img_type = img.dtype
+ img = _convert_input_type_range(img) * 255
+ out_img = np.matmul(img, [[0.00456621, 0.00456621, 0.00456621],
+ [0.00791071, -0.00153632, 0],
+ [0, -0.00318811, 0.00625893]]) * 255.0 + [
+ -276.836, 135.576, -222.921
+ ]
+ out_img = _convert_output_type_range(out_img, img_type)
+ return out_img
+
+
+def convert_color_factory(src, dst):
+
+ code = getattr(cv2, f'COLOR_{src.upper()}2{dst.upper()}')
+
+ def convert_color(img):
+ out_img = cv2.cvtColor(img, code)
+ return out_img
+
+ convert_color.__doc__ = f"""Convert a {src.upper()} image to {dst.upper()}
+ image.
+
+ Args:
+ img (ndarray or str): The input image.
+
+ Returns:
+ ndarray: The converted {dst.upper()} image.
+ """
+
+ return convert_color
+
+
+bgr2rgb = convert_color_factory('bgr', 'rgb')
+
+rgb2bgr = convert_color_factory('rgb', 'bgr')
+
+bgr2hsv = convert_color_factory('bgr', 'hsv')
+
+hsv2bgr = convert_color_factory('hsv', 'bgr')
+
+bgr2hls = convert_color_factory('bgr', 'hls')
+
+hls2bgr = convert_color_factory('hls', 'bgr')
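+
+
+# Usage sketch (illustrative only): the factory-generated converters and the
+# BT.601 helpers accept uint8 (range [0, 255]) or float32 (range [0, 1])
+# arrays; the input below is made up.
+# >>> img = np.random.randint(0, 256, (4, 4, 3), dtype=np.uint8)
+# >>> rgb = bgr2rgb(img)                # channel reorder via cv2
+# >>> y = rgb2ycbcr(rgb, y_only=True)   # uint8 in, uint8 out, shape (4, 4)
+# >>> back = ycbcr2rgb(rgb2ycbcr(rgb))  # approximate round trip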
diff --git a/annotator/uniformer/mmcv/image/geometric.py b/annotator/uniformer/mmcv/image/geometric.py
new file mode 100644
index 0000000000000000000000000000000000000000..cf97c201cb4e43796c911919d03fb26a07ed817d
--- /dev/null
+++ b/annotator/uniformer/mmcv/image/geometric.py
@@ -0,0 +1,728 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import numbers
+
+import cv2
+import numpy as np
+
+from ..utils import to_2tuple
+from .io import imread_backend
+
+try:
+ from PIL import Image
+except ImportError:
+ Image = None
+
+
+def _scale_size(size, scale):
+ """Rescale a size by a ratio.
+
+ Args:
+ size (tuple[int]): (w, h).
+ scale (float | tuple(float)): Scaling factor.
+
+ Returns:
+ tuple[int]: scaled size.
+ """
+ if isinstance(scale, (float, int)):
+ scale = (scale, scale)
+ w, h = size
+ return int(w * float(scale[0]) + 0.5), int(h * float(scale[1]) + 0.5)
+
+
+cv2_interp_codes = {
+ 'nearest': cv2.INTER_NEAREST,
+ 'bilinear': cv2.INTER_LINEAR,
+ 'bicubic': cv2.INTER_CUBIC,
+ 'area': cv2.INTER_AREA,
+ 'lanczos': cv2.INTER_LANCZOS4
+}
+
+if Image is not None:
+ pillow_interp_codes = {
+ 'nearest': Image.NEAREST,
+ 'bilinear': Image.BILINEAR,
+ 'bicubic': Image.BICUBIC,
+ 'box': Image.BOX,
+ 'lanczos': Image.LANCZOS,
+ 'hamming': Image.HAMMING
+ }
+
+
+def imresize(img,
+ size,
+ return_scale=False,
+ interpolation='bilinear',
+ out=None,
+ backend=None):
+ """Resize image to a given size.
+
+ Args:
+ img (ndarray): The input image.
+ size (tuple[int]): Target size (w, h).
+ return_scale (bool): Whether to return `w_scale` and `h_scale`.
+ interpolation (str): Interpolation method, accepted values are
+ "nearest", "bilinear", "bicubic", "area", "lanczos" for 'cv2'
+ backend, "nearest", "bilinear" for 'pillow' backend.
+ out (ndarray): The output destination.
+ backend (str | None): The image resize backend type. Options are `cv2`,
+ `pillow`, `None`. If backend is None, the global imread_backend
+ specified by ``mmcv.use_backend()`` will be used. Default: None.
+
+ Returns:
+ tuple | ndarray: (`resized_img`, `w_scale`, `h_scale`) or
+ `resized_img`.
+ """
+ h, w = img.shape[:2]
+ if backend is None:
+ backend = imread_backend
+ if backend not in ['cv2', 'pillow']:
+ raise ValueError(f'backend: {backend} is not supported for resize.'
+ f"Supported backends are 'cv2', 'pillow'")
+
+ if backend == 'pillow':
+ assert img.dtype == np.uint8, 'Pillow backend only supports uint8 type'
+ pil_image = Image.fromarray(img)
+ pil_image = pil_image.resize(size, pillow_interp_codes[interpolation])
+ resized_img = np.array(pil_image)
+ else:
+ resized_img = cv2.resize(
+ img, size, dst=out, interpolation=cv2_interp_codes[interpolation])
+ if not return_scale:
+ return resized_img
+ else:
+ w_scale = size[0] / w
+ h_scale = size[1] / h
+ return resized_img, w_scale, h_scale
+
+
+def imresize_to_multiple(img,
+ divisor,
+ size=None,
+ scale_factor=None,
+ keep_ratio=False,
+ return_scale=False,
+ interpolation='bilinear',
+ out=None,
+ backend=None):
+ """Resize image according to a given size or scale factor and then rounds
+ up the the resized or rescaled image size to the nearest value that can be
+ divided by the divisor.
+
+ Args:
+ img (ndarray): The input image.
+ divisor (int | tuple): Resized image size will be a multiple of
+ divisor. If divisor is a tuple, divisor should be
+ (w_divisor, h_divisor).
+ size (None | int | tuple[int]): Target size (w, h). Default: None.
+ scale_factor (None | float | tuple[float]): Multiplier for spatial
+ size. Should match input size if it is a tuple and the 2D style is
+ (w_scale_factor, h_scale_factor). Default: None.
+ keep_ratio (bool): Whether to keep the aspect ratio when resizing the
+ image. Default: False.
+ return_scale (bool): Whether to return `w_scale` and `h_scale`.
+ interpolation (str): Interpolation method, accepted values are
+ "nearest", "bilinear", "bicubic", "area", "lanczos" for 'cv2'
+ backend, "nearest", "bilinear" for 'pillow' backend.
+ out (ndarray): The output destination.
+ backend (str | None): The image resize backend type. Options are `cv2`,
+ `pillow`, `None`. If backend is None, the global imread_backend
+ specified by ``mmcv.use_backend()`` will be used. Default: None.
+
+ Returns:
+ tuple | ndarray: (`resized_img`, `w_scale`, `h_scale`) or
+ `resized_img`.
+ """
+ h, w = img.shape[:2]
+ if size is not None and scale_factor is not None:
+ raise ValueError('only one of size or scale_factor should be defined')
+ elif size is None and scale_factor is None:
+ raise ValueError('one of size or scale_factor should be defined')
+ elif size is not None:
+ size = to_2tuple(size)
+ if keep_ratio:
+ size = rescale_size((w, h), size, return_scale=False)
+ else:
+ size = _scale_size((w, h), scale_factor)
+
+ divisor = to_2tuple(divisor)
+ size = tuple([int(np.ceil(s / d)) * d for s, d in zip(size, divisor)])
+ resized_img, w_scale, h_scale = imresize(
+ img,
+ size,
+ return_scale=True,
+ interpolation=interpolation,
+ out=out,
+ backend=backend)
+ if return_scale:
+ return resized_img, w_scale, h_scale
+ else:
+ return resized_img
+
+
+def imresize_like(img,
+ dst_img,
+ return_scale=False,
+ interpolation='bilinear',
+ backend=None):
+ """Resize image to the same size of a given image.
+
+ Args:
+ img (ndarray): The input image.
+ dst_img (ndarray): The target image.
+ return_scale (bool): Whether to return `w_scale` and `h_scale`.
+ interpolation (str): Same as :func:`resize`.
+ backend (str | None): Same as :func:`resize`.
+
+ Returns:
+ tuple or ndarray: (`resized_img`, `w_scale`, `h_scale`) or
+ `resized_img`.
+ """
+ h, w = dst_img.shape[:2]
+ return imresize(img, (w, h), return_scale, interpolation, backend=backend)
+
+
+def rescale_size(old_size, scale, return_scale=False):
+ """Calculate the new size to be rescaled to.
+
+ Args:
+ old_size (tuple[int]): The old size (w, h) of image.
+ scale (float | tuple[int]): The scaling factor or maximum size.
+ If it is a float number, then the image will be rescaled by this
+ factor, else if it is a tuple of 2 integers, then the image will
+ be rescaled as large as possible within the scale.
+ return_scale (bool): Whether to return the scaling factor besides the
+ rescaled image size.
+
+ Returns:
+ tuple[int]: The new rescaled image size.
+ """
+ w, h = old_size
+ if isinstance(scale, (float, int)):
+ if scale <= 0:
+ raise ValueError(f'Invalid scale {scale}, must be positive.')
+ scale_factor = scale
+ elif isinstance(scale, tuple):
+ max_long_edge = max(scale)
+ max_short_edge = min(scale)
+ scale_factor = min(max_long_edge / max(h, w),
+ max_short_edge / min(h, w))
+ else:
+ raise TypeError(
+ f'Scale must be a number or tuple of int, but got {type(scale)}')
+
+ new_size = _scale_size((w, h), scale_factor)
+
+ if return_scale:
+ return new_size, scale_factor
+ else:
+ return new_size
+
+
+def imrescale(img,
+ scale,
+ return_scale=False,
+ interpolation='bilinear',
+ backend=None):
+ """Resize image while keeping the aspect ratio.
+
+ Args:
+ img (ndarray): The input image.
+ scale (float | tuple[int]): The scaling factor or maximum size.
+ If it is a float number, then the image will be rescaled by this
+ factor, else if it is a tuple of 2 integers, then the image will
+ be rescaled as large as possible within the scale.
+ return_scale (bool): Whether to return the scaling factor besides the
+ rescaled image.
+ interpolation (str): Same as :func:`resize`.
+ backend (str | None): Same as :func:`resize`.
+
+ Returns:
+ ndarray: The rescaled image.
+ """
+ h, w = img.shape[:2]
+ new_size, scale_factor = rescale_size((w, h), scale, return_scale=True)
+ rescaled_img = imresize(
+ img, new_size, interpolation=interpolation, backend=backend)
+ if return_scale:
+ return rescaled_img, scale_factor
+ else:
+ return rescaled_img
+
+
+def imflip(img, direction='horizontal'):
+ """Flip an image horizontally or vertically.
+
+ Args:
+ img (ndarray): Image to be flipped.
+ direction (str): The flip direction, either "horizontal" or
+ "vertical" or "diagonal".
+
+ Returns:
+ ndarray: The flipped image.
+ """
+ assert direction in ['horizontal', 'vertical', 'diagonal']
+ if direction == 'horizontal':
+ return np.flip(img, axis=1)
+ elif direction == 'vertical':
+ return np.flip(img, axis=0)
+ else:
+ return np.flip(img, axis=(0, 1))
+
+
+def imflip_(img, direction='horizontal'):
+ """Inplace flip an image horizontally or vertically.
+
+ Args:
+ img (ndarray): Image to be flipped.
+ direction (str): The flip direction, either "horizontal" or
+ "vertical" or "diagonal".
+
+ Returns:
+ ndarray: The flipped image (inplace).
+ """
+ assert direction in ['horizontal', 'vertical', 'diagonal']
+ if direction == 'horizontal':
+ return cv2.flip(img, 1, img)
+ elif direction == 'vertical':
+ return cv2.flip(img, 0, img)
+ else:
+ return cv2.flip(img, -1, img)
+
+
+def imrotate(img,
+ angle,
+ center=None,
+ scale=1.0,
+ border_value=0,
+ interpolation='bilinear',
+ auto_bound=False):
+ """Rotate an image.
+
+ Args:
+ img (ndarray): Image to be rotated.
+ angle (float): Rotation angle in degrees, positive values mean
+ clockwise rotation.
+ center (tuple[float], optional): Center point (w, h) of the rotation in
+ the source image. If not specified, the center of the image will be
+ used.
+ scale (float): Isotropic scale factor.
+ border_value (int): Border value.
+ interpolation (str): Same as :func:`resize`.
+ auto_bound (bool): Whether to adjust the image size to cover the whole
+ rotated image.
+
+ Returns:
+ ndarray: The rotated image.
+ """
+ if center is not None and auto_bound:
+ raise ValueError('`auto_bound` conflicts with `center`')
+ h, w = img.shape[:2]
+ if center is None:
+ center = ((w - 1) * 0.5, (h - 1) * 0.5)
+ assert isinstance(center, tuple)
+
+ matrix = cv2.getRotationMatrix2D(center, -angle, scale)
+ if auto_bound:
+ cos = np.abs(matrix[0, 0])
+ sin = np.abs(matrix[0, 1])
+ new_w = h * sin + w * cos
+ new_h = h * cos + w * sin
+ matrix[0, 2] += (new_w - w) * 0.5
+ matrix[1, 2] += (new_h - h) * 0.5
+ w = int(np.round(new_w))
+ h = int(np.round(new_h))
+ rotated = cv2.warpAffine(
+ img,
+ matrix, (w, h),
+ flags=cv2_interp_codes[interpolation],
+ borderValue=border_value)
+ return rotated
+
+
+def bbox_clip(bboxes, img_shape):
+ """Clip bboxes to fit the image shape.
+
+ Args:
+ bboxes (ndarray): Shape (..., 4*k)
+ img_shape (tuple[int]): (height, width) of the image.
+
+ Returns:
+ ndarray: Clipped bboxes.
+ """
+ assert bboxes.shape[-1] % 4 == 0
+ cmin = np.empty(bboxes.shape[-1], dtype=bboxes.dtype)
+ cmin[0::2] = img_shape[1] - 1
+ cmin[1::2] = img_shape[0] - 1
+ clipped_bboxes = np.maximum(np.minimum(bboxes, cmin), 0)
+ return clipped_bboxes
+
+
+def bbox_scaling(bboxes, scale, clip_shape=None):
+ """Scaling bboxes w.r.t the box center.
+
+ Args:
+ bboxes (ndarray): Shape(..., 4).
+ scale (float): Scaling factor.
+ clip_shape (tuple[int], optional): If specified, bboxes that exceed the
+ boundary will be clipped according to the given shape (h, w).
+
+ Returns:
+ ndarray: Scaled bboxes.
+ """
+ if float(scale) == 1.0:
+ scaled_bboxes = bboxes.copy()
+ else:
+ w = bboxes[..., 2] - bboxes[..., 0] + 1
+ h = bboxes[..., 3] - bboxes[..., 1] + 1
+ dw = (w * (scale - 1)) * 0.5
+ dh = (h * (scale - 1)) * 0.5
+ scaled_bboxes = bboxes + np.stack((-dw, -dh, dw, dh), axis=-1)
+ if clip_shape is not None:
+ return bbox_clip(scaled_bboxes, clip_shape)
+ else:
+ return scaled_bboxes
+
+
+def imcrop(img, bboxes, scale=1.0, pad_fill=None):
+ """Crop image patches.
+
+ 3 steps: scale the bboxes -> clip bboxes -> crop and pad.
+
+ Args:
+ img (ndarray): Image to be cropped.
+ bboxes (ndarray): Shape (k, 4) or (4, ), location of cropped bboxes.
+ scale (float, optional): Scale ratio of bboxes, the default value
+ 1.0 means no scaling.
+ pad_fill (Number | list[Number]): Value to be filled for padding.
+ Default: None, which means no padding.
+
+ Returns:
+ list[ndarray] | ndarray: The cropped image patches.
+ """
+ chn = 1 if img.ndim == 2 else img.shape[2]
+ if pad_fill is not None:
+ if isinstance(pad_fill, (int, float)):
+ pad_fill = [pad_fill for _ in range(chn)]
+ assert len(pad_fill) == chn
+
+ _bboxes = bboxes[None, ...] if bboxes.ndim == 1 else bboxes
+ scaled_bboxes = bbox_scaling(_bboxes, scale).astype(np.int32)
+ clipped_bbox = bbox_clip(scaled_bboxes, img.shape)
+
+ patches = []
+ for i in range(clipped_bbox.shape[0]):
+ x1, y1, x2, y2 = tuple(clipped_bbox[i, :])
+ if pad_fill is None:
+ patch = img[y1:y2 + 1, x1:x2 + 1, ...]
+ else:
+ _x1, _y1, _x2, _y2 = tuple(scaled_bboxes[i, :])
+ if chn == 1:
+ patch_shape = (_y2 - _y1 + 1, _x2 - _x1 + 1)
+ else:
+ patch_shape = (_y2 - _y1 + 1, _x2 - _x1 + 1, chn)
+ patch = np.array(
+ pad_fill, dtype=img.dtype) * np.ones(
+ patch_shape, dtype=img.dtype)
+ x_start = 0 if _x1 >= 0 else -_x1
+ y_start = 0 if _y1 >= 0 else -_y1
+ w = x2 - x1 + 1
+ h = y2 - y1 + 1
+ patch[y_start:y_start + h, x_start:x_start + w,
+ ...] = img[y1:y1 + h, x1:x1 + w, ...]
+ patches.append(patch)
+
+ if bboxes.ndim == 1:
+ return patches[0]
+ else:
+ return patches
+
+
+def impad(img,
+ *,
+ shape=None,
+ padding=None,
+ pad_val=0,
+ padding_mode='constant'):
+ """Pad the given image to a certain shape or pad on all sides with
+ specified padding mode and padding value.
+
+ Args:
+ img (ndarray): Image to be padded.
+ shape (tuple[int]): Expected padding shape (h, w). Default: None.
+ padding (int or tuple[int]): Padding on each border. If a single int is
+ provided this is used to pad all borders. If tuple of length 2 is
+ provided this is the padding on left/right and top/bottom
+ respectively. If a tuple of length 4 is provided this is the
+ padding for the left, top, right and bottom borders respectively.
+ Default: None. Note that `shape` and `padding` can not be both
+ set.
+ pad_val (Number | Sequence[Number]): Values to be filled in padding
+ areas when padding_mode is 'constant'. Default: 0.
+ padding_mode (str): Type of padding. Should be: constant, edge,
+ reflect or symmetric. Default: constant.
+
+ - constant: pads with a constant value, this value is specified
+ with pad_val.
+ - edge: pads with the last value at the edge of the image.
+ - reflect: pads with reflection of image without repeating the
+ last value on the edge. For example, padding [1, 2, 3, 4]
+ with 2 elements on both sides in reflect mode will result
+ in [3, 2, 1, 2, 3, 4, 3, 2].
+ - symmetric: pads with reflection of image repeating the last
+ value on the edge. For example, padding [1, 2, 3, 4] with
+ 2 elements on both sides in symmetric mode will result in
+ [2, 1, 1, 2, 3, 4, 4, 3]
+
+ Returns:
+ ndarray: The padded image.
+ """
+
+ assert (shape is not None) ^ (padding is not None)
+ if shape is not None:
+ padding = (0, 0, shape[1] - img.shape[1], shape[0] - img.shape[0])
+
+ # check pad_val
+ if isinstance(pad_val, tuple):
+ assert len(pad_val) == img.shape[-1]
+ elif not isinstance(pad_val, numbers.Number):
+ raise TypeError('pad_val must be an int or a tuple. '
+ f'But received {type(pad_val)}')
+
+ # check padding
+ if isinstance(padding, tuple) and len(padding) in [2, 4]:
+ if len(padding) == 2:
+ padding = (padding[0], padding[1], padding[0], padding[1])
+ elif isinstance(padding, numbers.Number):
+ padding = (padding, padding, padding, padding)
+ else:
+ raise ValueError('Padding must be an int or a 2- or 4-element tuple. '
+ f'But received {padding}')
+
+ # check padding mode
+ assert padding_mode in ['constant', 'edge', 'reflect', 'symmetric']
+
+ border_type = {
+ 'constant': cv2.BORDER_CONSTANT,
+ 'edge': cv2.BORDER_REPLICATE,
+ 'reflect': cv2.BORDER_REFLECT_101,
+ 'symmetric': cv2.BORDER_REFLECT
+ }
+ img = cv2.copyMakeBorder(
+ img,
+ padding[1],
+ padding[3],
+ padding[0],
+ padding[2],
+ border_type[padding_mode],
+ value=pad_val)
+
+ return img
+
+
+def impad_to_multiple(img, divisor, pad_val=0):
+ """Pad an image to ensure each edge to be multiple to some number.
+
+ Args:
+ img (ndarray): Image to be padded.
+ divisor (int): Padded image edges will be multiples of divisor.
+ pad_val (Number | Sequence[Number]): Same as :func:`impad`.
+
+ Returns:
+ ndarray: The padded image.
+ """
+ pad_h = int(np.ceil(img.shape[0] / divisor)) * divisor
+ pad_w = int(np.ceil(img.shape[1] / divisor)) * divisor
+ return impad(img, shape=(pad_h, pad_w), pad_val=pad_val)
+
+
+def cutout(img, shape, pad_val=0):
+ """Randomly cut out a rectangle from the original img.
+
+ Args:
+ img (ndarray): Image to be cutout.
+ shape (int | tuple[int]): Expected cutout shape (h, w). If given as an
+ int, the value will be used for both h and w.
+ pad_val (int | float | tuple[int | float]): Values to be filled in the
+ cut area. Defaults to 0.
+
+ Returns:
+ ndarray: The cutout image.
+ """
+
+ channels = 1 if img.ndim == 2 else img.shape[2]
+ if isinstance(shape, int):
+ cut_h, cut_w = shape, shape
+ else:
+ assert isinstance(shape, tuple) and len(shape) == 2, \
+ f'shape must be an int or a tuple with length 2, but got type ' \
+ f'{type(shape)} instead.'
+ cut_h, cut_w = shape
+ if isinstance(pad_val, (int, float)):
+ pad_val = tuple([pad_val] * channels)
+ elif isinstance(pad_val, tuple):
+ assert len(pad_val) == channels, \
+ 'Expected the number of elements in tuple to equal the channels ' \
+ 'of input image. Found {} vs {}'.format(
+ len(pad_val), channels)
+ else:
+ raise TypeError(f'Invalid type {type(pad_val)} for `pad_val`')
+
+ img_h, img_w = img.shape[:2]
+ y0 = np.random.uniform(img_h)
+ x0 = np.random.uniform(img_w)
+
+ y1 = int(max(0, y0 - cut_h / 2.))
+ x1 = int(max(0, x0 - cut_w / 2.))
+ y2 = min(img_h, y1 + cut_h)
+ x2 = min(img_w, x1 + cut_w)
+
+ if img.ndim == 2:
+ patch_shape = (y2 - y1, x2 - x1)
+ else:
+ patch_shape = (y2 - y1, x2 - x1, channels)
+
+ img_cutout = img.copy()
+ patch = np.array(
+ pad_val, dtype=img.dtype) * np.ones(
+ patch_shape, dtype=img.dtype)
+ img_cutout[y1:y2, x1:x2, ...] = patch
+
+ return img_cutout
+
+
+def _get_shear_matrix(magnitude, direction='horizontal'):
+ """Generate the shear matrix for transformation.
+
+ Args:
+ magnitude (int | float): The magnitude used for shear.
+ direction (str): The flip direction, either "horizontal"
+ or "vertical".
+
+ Returns:
+ ndarray: The shear matrix with dtype float32.
+ """
+ if direction == 'horizontal':
+ shear_matrix = np.float32([[1, magnitude, 0], [0, 1, 0]])
+ elif direction == 'vertical':
+ shear_matrix = np.float32([[1, 0, 0], [magnitude, 1, 0]])
+ return shear_matrix
+
+
+def imshear(img,
+ magnitude,
+ direction='horizontal',
+ border_value=0,
+ interpolation='bilinear'):
+ """Shear an image.
+
+ Args:
+ img (ndarray): Image to be sheared with format (h, w)
+ or (h, w, c).
+ magnitude (int | float): The magnitude used for shear.
+ direction (str): The flip direction, either "horizontal"
+ or "vertical".
+ border_value (int | tuple[int]): Value used in case of a
+ constant border.
+ interpolation (str): Same as :func:`resize`.
+
+ Returns:
+ ndarray: The sheared image.
+ """
+ assert direction in ['horizontal',
+ 'vertical'], f'Invalid direction: {direction}'
+ height, width = img.shape[:2]
+ if img.ndim == 2:
+ channels = 1
+ elif img.ndim == 3:
+ channels = img.shape[-1]
+ if isinstance(border_value, int):
+ border_value = tuple([border_value] * channels)
+ elif isinstance(border_value, tuple):
+ assert len(border_value) == channels, \
+ 'Expected the number of elements in tuple to equal the channels ' \
+ 'of input image. Found {} vs {}'.format(
+ len(border_value), channels)
+ else:
+ raise ValueError(
+ f'Invalid type {type(border_value)} for `border_value`')
+ shear_matrix = _get_shear_matrix(magnitude, direction)
+ sheared = cv2.warpAffine(
+ img,
+ shear_matrix,
+ (width, height),
+ # Note: when the number of elements in `border_value` is greater
+ # than 3 (e.g. shearing masks with more than 3 channels),
+ # `cv2.warpAffine` will raise a TypeError.
+ # Here simply slice the first 3 values in `border_value`.
+ borderValue=border_value[:3],
+ flags=cv2_interp_codes[interpolation])
+ return sheared
+
+
+def _get_translate_matrix(offset, direction='horizontal'):
+ """Generate the translate matrix.
+
+ Args:
+ offset (int | float): The offset used for translate.
+ direction (str): The translate direction, either
+ "horizontal" or "vertical".
+
+ Returns:
+ ndarray: The translate matrix with dtype float32.
+ """
+ if direction == 'horizontal':
+ translate_matrix = np.float32([[1, 0, offset], [0, 1, 0]])
+ elif direction == 'vertical':
+ translate_matrix = np.float32([[1, 0, 0], [0, 1, offset]])
+ return translate_matrix
+
+
+def imtranslate(img,
+ offset,
+ direction='horizontal',
+ border_value=0,
+ interpolation='bilinear'):
+ """Translate an image.
+
+ Args:
+ img (ndarray): Image to be translated with format
+ (h, w) or (h, w, c).
+ offset (int | float): The offset used for translate.
+ direction (str): The translate direction, either "horizontal"
+ or "vertical".
+ border_value (int | tuple[int]): Value used in case of a
+ constant border.
+ interpolation (str): Same as :func:`resize`.
+
+ Returns:
+ ndarray: The translated image.
+ """
+ assert direction in ['horizontal',
+ 'vertical'], f'Invalid direction: {direction}'
+ height, width = img.shape[:2]
+ if img.ndim == 2:
+ channels = 1
+ elif img.ndim == 3:
+ channels = img.shape[-1]
+ if isinstance(border_value, int):
+ border_value = tuple([border_value] * channels)
+ elif isinstance(border_value, tuple):
+ assert len(border_value) == channels, \
+ 'Expected the number of elements in tuple to equal the channels ' \
+ 'of input image. Found {} vs {}'.format(
+ len(border_value), channels)
+ else:
+ raise ValueError(
+ f'Invalid type {type(border_value)} for `border_value`.')
+ translate_matrix = _get_translate_matrix(offset, direction)
+ translated = cv2.warpAffine(
+ img,
+ translate_matrix,
+ (width, height),
+ # Note: when the number of elements in `border_value` is greater
+ # than 3 (e.g. translating masks with more than 3 channels),
+ # `cv2.warpAffine` will raise a TypeError.
+ # Here simply slice the first 3 values in `border_value`.
+ borderValue=border_value[:3],
+ flags=cv2_interp_codes[interpolation])
+ return translated
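+
+
+# Usage sketch (illustrative only): a typical keep-ratio resize followed by
+# padding to a stride multiple; the shapes are made-up examples.
+# >>> img = np.zeros((480, 640, 3), dtype=np.uint8)
+# >>> small, scale = imrescale(img, (800, 1333), return_scale=True)
+# >>> padded = impad_to_multiple(small, divisor=32)
+# >>> padded.shape[0] % 32 == 0 and padded.shape[1] % 32 == 0
+# True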
diff --git a/annotator/uniformer/mmcv/image/io.py b/annotator/uniformer/mmcv/image/io.py
new file mode 100644
index 0000000000000000000000000000000000000000..d3fa2e8cc06b1a7b0b69de6406980b15d61a1e5d
--- /dev/null
+++ b/annotator/uniformer/mmcv/image/io.py
@@ -0,0 +1,258 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import io
+import os.path as osp
+from pathlib import Path
+
+import cv2
+import numpy as np
+from cv2 import (IMREAD_COLOR, IMREAD_GRAYSCALE, IMREAD_IGNORE_ORIENTATION,
+ IMREAD_UNCHANGED)
+
+from annotator.uniformer.mmcv.utils import check_file_exist, is_str, mkdir_or_exist
+
+try:
+ from turbojpeg import TJCS_RGB, TJPF_BGR, TJPF_GRAY, TurboJPEG
+except ImportError:
+ TJCS_RGB = TJPF_GRAY = TJPF_BGR = TurboJPEG = None
+
+try:
+ from PIL import Image, ImageOps
+except ImportError:
+ Image = None
+
+try:
+ import tifffile
+except ImportError:
+ tifffile = None
+
+jpeg = None
+supported_backends = ['cv2', 'turbojpeg', 'pillow', 'tifffile']
+
+imread_flags = {
+ 'color': IMREAD_COLOR,
+ 'grayscale': IMREAD_GRAYSCALE,
+ 'unchanged': IMREAD_UNCHANGED,
+ 'color_ignore_orientation': IMREAD_IGNORE_ORIENTATION | IMREAD_COLOR,
+ 'grayscale_ignore_orientation':
+ IMREAD_IGNORE_ORIENTATION | IMREAD_GRAYSCALE
+}
+
+imread_backend = 'cv2'
+
+
+def use_backend(backend):
+ """Select a backend for image decoding.
+
+ Args:
+ backend (str): The image decoding backend type. Options are `cv2`,
+ `pillow`, `turbojpeg` (see https://github.com/lilohuang/PyTurboJPEG)
+ and `tifffile`. `turbojpeg` is faster but only supports the `.jpeg`
+ file format.
+ """
+ assert backend in supported_backends
+ global imread_backend
+ imread_backend = backend
+ if imread_backend == 'turbojpeg':
+ if TurboJPEG is None:
+ raise ImportError('`PyTurboJPEG` is not installed')
+ global jpeg
+ if jpeg is None:
+ jpeg = TurboJPEG()
+ elif imread_backend == 'pillow':
+ if Image is None:
+ raise ImportError('`Pillow` is not installed')
+ elif imread_backend == 'tifffile':
+ if tifffile is None:
+ raise ImportError('`tifffile` is not installed')
+
+
+def _jpegflag(flag='color', channel_order='bgr'):
+ channel_order = channel_order.lower()
+ if channel_order not in ['rgb', 'bgr']:
+ raise ValueError('channel order must be either "rgb" or "bgr"')
+
+ if flag == 'color':
+ if channel_order == 'bgr':
+ return TJPF_BGR
+ elif channel_order == 'rgb':
+ return TJCS_RGB
+ elif flag == 'grayscale':
+ return TJPF_GRAY
+ else:
+ raise ValueError('flag must be "color" or "grayscale"')
+
+
+def _pillow2array(img, flag='color', channel_order='bgr'):
+ """Convert a pillow image to numpy array.
+
+ Args:
+ img (:obj:`PIL.Image.Image`): The image loaded using PIL
+ flag (str): Flags specifying the color type of a loaded image,
+ candidates are 'color', 'grayscale' and 'unchanged'.
+ Default to 'color'.
+ channel_order (str): The channel order of the output image array,
+ candidates are 'bgr' and 'rgb'. Default to 'bgr'.
+
+ Returns:
+ np.ndarray: The converted numpy array
+ """
+ channel_order = channel_order.lower()
+ if channel_order not in ['rgb', 'bgr']:
+ raise ValueError('channel order must be either "rgb" or "bgr"')
+
+ if flag == 'unchanged':
+ array = np.array(img)
+ if array.ndim >= 3 and array.shape[2] >= 3: # color image
+ array[:, :, :3] = array[:, :, (2, 1, 0)] # RGB to BGR
+ else:
+ # Handle exif orientation tag
+ if flag in ['color', 'grayscale']:
+ img = ImageOps.exif_transpose(img)
+ # If the image mode is not 'RGB', convert it to 'RGB' first.
+ if img.mode != 'RGB':
+ if img.mode != 'LA':
+ # Most formats except 'LA' can be directly converted to RGB
+ img = img.convert('RGB')
+ else:
+ # When the mode is 'LA', the default conversion will fill in
+ # the canvas with black, which sometimes shadows black objects
+ # in the foreground.
+ #
+ # Therefore, a neutral fill color (124, 117, 104) is used for the canvas.
+ img_rgba = img.convert('RGBA')
+ img = Image.new('RGB', img_rgba.size, (124, 117, 104))
+ img.paste(img_rgba, mask=img_rgba.split()[3]) # 3 is alpha
+ if flag in ['color', 'color_ignore_orientation']:
+ array = np.array(img)
+ if channel_order != 'rgb':
+ array = array[:, :, ::-1] # RGB to BGR
+ elif flag in ['grayscale', 'grayscale_ignore_orientation']:
+ img = img.convert('L')
+ array = np.array(img)
+ else:
+ raise ValueError(
+ 'flag must be "color", "grayscale", "unchanged", '
+ f'"color_ignore_orientation" or "grayscale_ignore_orientation"'
+ f' but got {flag}')
+ return array
+
+
+def imread(img_or_path, flag='color', channel_order='bgr', backend=None):
+ """Read an image.
+
+ Args:
+ img_or_path (ndarray or str or Path): Either a numpy array or str or
+ pathlib.Path. If it is a numpy array (loaded image), then
+ it will be returned as is.
+ flag (str): Flags specifying the color type of a loaded image,
+ candidates are `color`, `grayscale`, `unchanged`,
+ `color_ignore_orientation` and `grayscale_ignore_orientation`.
+ By default, `cv2` and `pillow` backend would rotate the image
+ according to its EXIF info unless called with `unchanged` or
+ `*_ignore_orientation` flags. `turbojpeg` and `tifffile` backend
+ always ignore image's EXIF info regardless of the flag.
+ The `turbojpeg` backend only supports `color` and `grayscale`.
+ channel_order (str): Order of channel, candidates are `bgr` and `rgb`.
+ backend (str | None): The image decoding backend type. Options are
+ `cv2`, `pillow`, `turbojpeg`, `tifffile`, `None`.
+ If backend is None, the global imread_backend specified by
+ ``mmcv.use_backend()`` will be used. Default: None.
+
+ Returns:
+ ndarray: Loaded image array.
+ """
+
+ if backend is None:
+ backend = imread_backend
+ if backend not in supported_backends:
+ raise ValueError(f'backend: {backend} is not supported. Supported '
+ "backends are 'cv2', 'turbojpeg', 'pillow'")
+ if isinstance(img_or_path, Path):
+ img_or_path = str(img_or_path)
+
+ if isinstance(img_or_path, np.ndarray):
+ return img_or_path
+ elif is_str(img_or_path):
+ check_file_exist(img_or_path,
+ f'img file does not exist: {img_or_path}')
+ if backend == 'turbojpeg':
+ with open(img_or_path, 'rb') as in_file:
+ img = jpeg.decode(in_file.read(),
+ _jpegflag(flag, channel_order))
+ if img.shape[-1] == 1:
+ img = img[:, :, 0]
+ return img
+ elif backend == 'pillow':
+ img = Image.open(img_or_path)
+ img = _pillow2array(img, flag, channel_order)
+ return img
+ elif backend == 'tifffile':
+ img = tifffile.imread(img_or_path)
+ return img
+ else:
+ flag = imread_flags[flag] if is_str(flag) else flag
+ img = cv2.imread(img_or_path, flag)
+ if flag == IMREAD_COLOR and channel_order == 'rgb':
+ cv2.cvtColor(img, cv2.COLOR_BGR2RGB, img)
+ return img
+ else:
+ raise TypeError('"img" must be a numpy array or a str or '
+ 'a pathlib.Path object')
+
+
+def imfrombytes(content, flag='color', channel_order='bgr', backend=None):
+ """Read an image from bytes.
+
+ Args:
+ content (bytes): Image bytes got from files or other streams.
+ flag (str): Same as :func:`imread`.
+ backend (str | None): The image decoding backend type. Options are
+ `cv2`, `pillow`, `turbojpeg`, `None`. If backend is None, the
+ global imread_backend specified by ``mmcv.use_backend()`` will be
+ used. Default: None.
+
+ Returns:
+ ndarray: Loaded image array.
+ """
+
+ if backend is None:
+ backend = imread_backend
+ if backend not in supported_backends:
+ raise ValueError(f'backend: {backend} is not supported. Supported '
+ "backends are 'cv2', 'turbojpeg', 'pillow'")
+ if backend == 'turbojpeg':
+ img = jpeg.decode(content, _jpegflag(flag, channel_order))
+ if img.shape[-1] == 1:
+ img = img[:, :, 0]
+ return img
+ elif backend == 'pillow':
+ buff = io.BytesIO(content)
+ img = Image.open(buff)
+ img = _pillow2array(img, flag, channel_order)
+ return img
+ else:
+ img_np = np.frombuffer(content, np.uint8)
+ flag = imread_flags[flag] if is_str(flag) else flag
+ img = cv2.imdecode(img_np, flag)
+ if flag == IMREAD_COLOR and channel_order == 'rgb':
+ cv2.cvtColor(img, cv2.COLOR_BGR2RGB, img)
+ return img
+
+
+def imwrite(img, file_path, params=None, auto_mkdir=True):
+ """Write image to file.
+
+ Args:
+ img (ndarray): Image array to be written.
+ file_path (str): Image file path.
+ params (None or list): Same as opencv :func:`imwrite` interface.
+ auto_mkdir (bool): If the parent folder of `file_path` does not exist,
+ whether to create it automatically.
+
+ Returns:
+ bool: Successful or not.
+ """
+ if auto_mkdir:
+ dir_name = osp.abspath(osp.dirname(file_path))
+ mkdir_or_exist(dir_name)
+ return cv2.imwrite(file_path, img, params)
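+
+
+# Usage sketch (illustrative only): switching the global decoding backend and
+# reading in RGB order; 'demo.jpg' and '/tmp/out.png' are placeholder paths.
+# >>> use_backend('cv2')
+# >>> img = imread('demo.jpg', flag='color', channel_order='rgb')
+# >>> imwrite(img[..., ::-1], '/tmp/out.png')  # imwrite expects BGR
+# True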
diff --git a/annotator/uniformer/mmcv/image/misc.py b/annotator/uniformer/mmcv/image/misc.py
new file mode 100644
index 0000000000000000000000000000000000000000..3e61f05e3b05e4c7b40de4eb6c8eb100e6da41d0
--- /dev/null
+++ b/annotator/uniformer/mmcv/image/misc.py
@@ -0,0 +1,44 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import numpy as np
+
+import annotator.uniformer.mmcv as mmcv
+
+try:
+ import torch
+except ImportError:
+ torch = None
+
+
+def tensor2imgs(tensor, mean=(0, 0, 0), std=(1, 1, 1), to_rgb=True):
+ """Convert tensor to 3-channel images.
+
+ Args:
+ tensor (torch.Tensor): Tensor that contains multiple images, shape (
+ N, C, H, W).
+ mean (tuple[float], optional): Mean of images. Defaults to (0, 0, 0).
+ std (tuple[float], optional): Standard deviation of images.
+ Defaults to (1, 1, 1).
+ to_rgb (bool, optional): Whether the tensor was converted to RGB
+ format in the first place. If so, convert it back to BGR.
+ Defaults to True.
+
+ Returns:
+ list[np.ndarray]: A list that contains multiple images.
+ """
+
+ if torch is None:
+ raise RuntimeError('pytorch is not installed')
+ assert torch.is_tensor(tensor) and tensor.ndim == 4
+ assert len(mean) == 3
+ assert len(std) == 3
+
+ num_imgs = tensor.size(0)
+ mean = np.array(mean, dtype=np.float32)
+ std = np.array(std, dtype=np.float32)
+ imgs = []
+ for img_id in range(num_imgs):
+ img = tensor[img_id, ...].cpu().numpy().transpose(1, 2, 0)
+ img = mmcv.imdenormalize(
+ img, mean, std, to_bgr=to_rgb).astype(np.uint8)
+ imgs.append(np.ascontiguousarray(img))
+ return imgs
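+
+
+# Usage sketch (illustrative only): converting a batch of normalized tensors
+# back to uint8 BGR images; the tensor and statistics are made up.
+# >>> t = torch.randn(2, 3, 32, 32)
+# >>> imgs = tensor2imgs(t, mean=(123.675, 116.28, 103.53),
+# ...                    std=(58.395, 57.12, 57.375), to_rgb=True)
+# >>> len(imgs), imgs[0].shape
+# (2, (32, 32, 3))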
diff --git a/annotator/uniformer/mmcv/image/photometric.py b/annotator/uniformer/mmcv/image/photometric.py
new file mode 100644
index 0000000000000000000000000000000000000000..5085d012019c0cbf56f66f421a378278c1a058ae
--- /dev/null
+++ b/annotator/uniformer/mmcv/image/photometric.py
@@ -0,0 +1,428 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import cv2
+import numpy as np
+
+from ..utils import is_tuple_of
+from .colorspace import bgr2gray, gray2bgr
+
+
+def imnormalize(img, mean, std, to_rgb=True):
+ """Normalize an image with mean and std.
+
+ Args:
+ img (ndarray): Image to be normalized.
+ mean (ndarray): The mean to be used for normalize.
+ std (ndarray): The std to be used for normalize.
+ to_rgb (bool): Whether to convert to rgb.
+
+ Returns:
+ ndarray: The normalized image.
+ """
+ img = img.copy().astype(np.float32)
+ return imnormalize_(img, mean, std, to_rgb)
+
+
+def imnormalize_(img, mean, std, to_rgb=True):
+ """Inplace normalize an image with mean and std.
+
+ Args:
+ img (ndarray): Image to be normalized.
+ mean (ndarray): The mean to be used for normalize.
+ std (ndarray): The std to be used for normalize.
+ to_rgb (bool): Whether to convert to rgb.
+
+ Returns:
+ ndarray: The normalized image.
+ """
+ # cv2 inplace normalization does not accept uint8
+ assert img.dtype != np.uint8
+ mean = np.float64(mean.reshape(1, -1))
+ stdinv = 1 / np.float64(std.reshape(1, -1))
+ if to_rgb:
+ cv2.cvtColor(img, cv2.COLOR_BGR2RGB, img) # inplace
+ cv2.subtract(img, mean, img) # inplace
+ cv2.multiply(img, stdinv, img) # inplace
+ return img
+
+
+def imdenormalize(img, mean, std, to_bgr=True):
+ assert img.dtype != np.uint8
+ mean = mean.reshape(1, -1).astype(np.float64)
+ std = std.reshape(1, -1).astype(np.float64)
+ img = cv2.multiply(img, std) # make a copy
+ cv2.add(img, mean, img) # inplace
+ if to_bgr:
+ cv2.cvtColor(img, cv2.COLOR_RGB2BGR, img) # inplace
+ return img
+
+
+def iminvert(img):
+ """Invert (negate) an image.
+
+ Args:
+ img (ndarray): Image to be inverted.
+
+ Returns:
+ ndarray: The inverted image.
+ """
+ return np.full_like(img, 255) - img
+
+
+def solarize(img, thr=128):
+ """Solarize an image (invert all pixel values above a threshold)
+
+ Args:
+ img (ndarray): Image to be solarized.
+ thr (int): Threshold for solarizing (0 - 255).
+
+ Returns:
+ ndarray: The solarized image.
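+
+ Example (values chosen only for illustration):
+ >>> img = np.array([[100, 200]], dtype=np.uint8)
+ >>> solarize(img).tolist()  # 200 >= 128, so it becomes 255 - 200
+ [[100, 55]]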
+ """
+ img = np.where(img < thr, img, 255 - img)
+ return img
+
+
+def posterize(img, bits):
+ """Posterize an image (reduce the number of bits for each color channel)
+
+ Args:
+ img (ndarray): Image to be posterized.
+ bits (int): Number of bits (1 to 8) to use for posterizing.
+
+ Returns:
+ ndarray: The posterized image.
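+
+ Example (values chosen only for illustration):
+ >>> img = np.array([[173, 9]], dtype=np.uint8)
+ >>> posterize(img, bits=2).tolist()  # keep only the top 2 bits
+ [[128, 0]]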
+ """
+ shift = 8 - bits
+ img = np.left_shift(np.right_shift(img, shift), shift)
+ return img
+
+
+def adjust_color(img, alpha=1, beta=None, gamma=0):
+ r"""It blends the source image and its gray image:
+
+ .. math::
+ output = img * alpha + gray\_img * beta + gamma
+
+ Args:
+ img (ndarray): The input source image.
+ alpha (int | float): Weight for the source image. Default 1.
+ beta (int | float): Weight for the converted gray image.
+ If None, it's assigned the value (1 - `alpha`).
+ gamma (int | float): Scalar added to each sum.
+ Same as :func:`cv2.addWeighted`. Default 0.
+
+ Returns:
+ ndarray: Colored image which has the same size and dtype as input.
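+
+ Example (an illustrative sketch; alpha=0.5 gives an equal blend of the
+ image and its gray version):
+ >>> img = np.random.randint(0, 256, (4, 4, 3), dtype=np.uint8)
+ >>> out = adjust_color(img, alpha=0.5)
+ >>> out.shape == img.shape and out.dtype == img.dtype
+ True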
+ """
+ gray_img = bgr2gray(img)
+ gray_img = np.tile(gray_img[..., None], [1, 1, 3])
+ if beta is None:
+ beta = 1 - alpha
+ colored_img = cv2.addWeighted(img, alpha, gray_img, beta, gamma)
+ if not colored_img.dtype == np.uint8:
+ # Note: when the dtype of `img` is not the default `np.uint8`
+ # (e.g. np.float32), the values in `colored_img` returned by cv2
+ # are not guaranteed to be in the range [0, 255], so clipping
+ # is needed here.
+ colored_img = np.clip(colored_img, 0, 255)
+ return colored_img
+
+
+def imequalize(img):
+ """Equalize the image histogram.
+
+ This function applies a non-linear mapping to the input image,
+ in order to create a uniform distribution of grayscale values
+ in the output image.
+
+ Args:
+ img (ndarray): Image to be equalized.
+
+ Returns:
+ ndarray: The equalized image.
+ """
+
+ def _scale_channel(im, c):
+ """Scale the data in the corresponding channel."""
+ im = im[:, :, c]
+ # Compute the histogram of the image channel.
+ histo = np.histogram(im, 256, (0, 255))[0]
+ # For computing the step, filter out the nonzeros.
+ nonzero_histo = histo[histo > 0]
+ step = (np.sum(nonzero_histo) - nonzero_histo[-1]) // 255
+ if not step:
+ lut = np.array(range(256))
+ else:
+ # Compute the cumulative sum, shifted by step // 2
+ # and then normalized by step.
+ lut = (np.cumsum(histo) + (step // 2)) // step
+ # Shift lut, prepending with 0.
+ lut = np.concatenate([[0], lut[:-1]], 0)
+ # handle potential integer overflow
+ lut[lut > 255] = 255
+ # If step is zero, return the original image.
+ # Otherwise, index from lut.
+ return np.where(np.equal(step, 0), im, lut[im])
+
+ # Scales each channel independently and then stacks
+ # the result.
+ s1 = _scale_channel(img, 0)
+ s2 = _scale_channel(img, 1)
+ s3 = _scale_channel(img, 2)
+ equalized_img = np.stack([s1, s2, s3], axis=-1)
+ return equalized_img.astype(img.dtype)
+
+
+def adjust_brightness(img, factor=1.):
+ """Adjust image brightness.
+
+ This function controls the brightness of an image. An
+ enhancement factor of 0.0 gives a black image.
+ A factor of 1.0 gives the original image. This function
+ blends the source image and the degenerated black image:
+
+ .. math::
+ output = img * factor + degenerated * (1 - factor)
+
+ Args:
+ img (ndarray): Image to be brightened.
+ factor (float): A value that controls the enhancement. A factor of
+ 1.0 returns the original image, lower factors mean less enhancement
+ (brightness, contrast, etc.), and higher factors mean more.
+ Default 1.
+
+ Returns:
+ ndarray: The brightened image.
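+
+ Example (an illustrative sketch; factor=0.5 halves pixel intensities):
+ >>> img = np.full((2, 2, 3), 100, dtype=np.uint8)
+ >>> int(adjust_brightness(img, factor=0.5)[0, 0, 0])
+ 50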
+ """
+ degenerated = np.zeros_like(img)
+ # Note: manually convert the dtype to np.float32 to achieve results
+ # as close as possible to PIL.ImageEnhance.Brightness.
+ # Set beta=1-factor and gamma=0.
+ brightened_img = cv2.addWeighted(
+ img.astype(np.float32), factor, degenerated.astype(np.float32),
+ 1 - factor, 0)
+ brightened_img = np.clip(brightened_img, 0, 255)
+ return brightened_img.astype(img.dtype)
+
+
+def adjust_contrast(img, factor=1.):
+ """Adjust image contrast.
+
+ This function controls the contrast of an image. An
+ enhancement factor of 0.0 gives a solid grey
+ image. A factor of 1.0 gives the original image. It
+ blends the source image and the degenerated mean image:
+
+ .. math::
+ output = img * factor + degenerated * (1 - factor)
+
+ Args:
+ img (ndarray): Image to be contrasted. BGR order.
+ factor (float): Same as :func:`mmcv.adjust_brightness`.
+
+ Returns:
+ ndarray: The contrasted image.
+ """
+ gray_img = bgr2gray(img)
+ hist = np.histogram(gray_img, 256, (0, 255))[0]
+ mean = round(np.sum(gray_img) / np.sum(hist))
+ degenerated = (np.ones_like(img[..., 0]) * mean).astype(img.dtype)
+ degenerated = gray2bgr(degenerated)
+ contrasted_img = cv2.addWeighted(
+ img.astype(np.float32), factor, degenerated.astype(np.float32),
+ 1 - factor, 0)
+ contrasted_img = np.clip(contrasted_img, 0, 255)
+ return contrasted_img.astype(img.dtype)
+
+
+def auto_contrast(img, cutoff=0):
+ """Auto adjust image contrast.
+
+ This function maximizes (normalizes) image contrast by first removing
+ the cutoff percent of the lightest and darkest pixels from the histogram,
+ and then remapping the image so that the darkest remaining pixel becomes
+ black (0) and the lightest becomes white (255).
+
+ Args:
+ img (ndarray): Image to be contrasted. BGR order.
+ cutoff (int | float | tuple): The cutoff percent of the lightest and
+ darkest pixels to be removed. If given as tuple, it shall be
+ (low, high). Otherwise, the single value will be used for both.
+ Defaults to 0.
+
+ Returns:
+ ndarray: The contrasted image.
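+
+ Example (an illustrative sketch; a 1% cutoff on a random image):
+ >>> img = np.random.randint(0, 256, (16, 16, 3), dtype=np.uint8)
+ >>> auto_contrast(img, cutoff=1).shape
+ (16, 16, 3)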
+ """
+
+ def _auto_contrast_channel(im, c, cutoff):
+ im = im[:, :, c]
+ # Compute the histogram of the image channel.
+ histo = np.histogram(im, 256, (0, 255))[0]
+ # Remove cut-off percent pixels from histo
+ histo_sum = np.cumsum(histo)
+ cut_low = histo_sum[-1] * cutoff[0] // 100
+ cut_high = histo_sum[-1] - histo_sum[-1] * cutoff[1] // 100
+ histo_sum = np.clip(histo_sum, cut_low, cut_high) - cut_low
+ histo = np.concatenate([[histo_sum[0]], np.diff(histo_sum)], 0)
+
+ # Compute mapping
+ low, high = np.nonzero(histo)[0][0], np.nonzero(histo)[0][-1]
+ # If all the values have been cut off, return the origin img
+ if low >= high:
+ return im
+ scale = 255.0 / (high - low)
+ offset = -low * scale
+ lut = np.array(range(256))
+ lut = lut * scale + offset
+ lut = np.clip(lut, 0, 255)
+ return lut[im]
+
+ if isinstance(cutoff, (int, float)):
+ cutoff = (cutoff, cutoff)
+ else:
+ assert isinstance(cutoff, tuple), 'cutoff must be of type int, ' \
+ f'float or tuple, but got {type(cutoff)} instead.'
+ # Auto adjusts contrast for each channel independently and then stacks
+ # the result.
+ s1 = _auto_contrast_channel(img, 0, cutoff)
+ s2 = _auto_contrast_channel(img, 1, cutoff)
+ s3 = _auto_contrast_channel(img, 2, cutoff)
+ contrasted_img = np.stack([s1, s2, s3], axis=-1)
+ return contrasted_img.astype(img.dtype)
+
+
+def adjust_sharpness(img, factor=1., kernel=None):
+ """Adjust image sharpness.
+
+ This function controls the sharpness of an image. An
+ enhancement factor of 0.0 gives a blurred image. A
+ factor of 1.0 gives the original image. And a factor
+ of 2.0 gives a sharpened image. It blends the source
+ image and the degenerated mean image:
+
+ .. math::
+ output = img * factor + degenerated * (1 - factor)
+
+ Args:
+ img (ndarray): Image to be sharpened. BGR order.
+ factor (float): Same as :func:`mmcv.adjust_brightness`.
+ kernel (np.ndarray, optional): Filter kernel to be applied on the img
+ to obtain the degenerated img. Defaults to None.
+
+ Note:
+ No value sanity check is enforced on the kernel set by users. So with
+ an inappropriate kernel, the ``adjust_sharpness`` may fail to perform
+ the function its name indicates but end up performing whatever
+ transform determined by the kernel.
+
+ Returns:
+ ndarray: The sharpened image.
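+
+ Example (an illustrative sketch using the default smoothing kernel):
+ >>> img = np.random.randint(0, 256, (8, 8, 3), dtype=np.uint8)
+ >>> out = adjust_sharpness(img, factor=2.0)
+ >>> out.shape == img.shape and out.dtype == img.dtype
+ True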
+ """
+
+ if kernel is None:
+ # adopted from PIL.ImageFilter.SMOOTH
+ kernel = np.array([[1., 1., 1.], [1., 5., 1.], [1., 1., 1.]]) / 13
+ assert isinstance(kernel, np.ndarray), \
+ f'kernel must be of type np.ndarray, but got {type(kernel)} instead.'
+ assert kernel.ndim == 2, \
+ f'kernel must have a dimension of 2, but got {kernel.ndim} instead.'
+
+ degenerated = cv2.filter2D(img, -1, kernel)
+ sharpened_img = cv2.addWeighted(
+ img.astype(np.float32), factor, degenerated.astype(np.float32),
+ 1 - factor, 0)
+ sharpened_img = np.clip(sharpened_img, 0, 255)
+ return sharpened_img.astype(img.dtype)
+
+
+def adjust_lighting(img, eigval, eigvec, alphastd=0.1, to_rgb=True):
+ """AlexNet-style PCA jitter.
+
+ This data augmentation is proposed in `ImageNet Classification with Deep
+ Convolutional Neural Networks
+ `_.
+
+ Args:
+ img (ndarray): Image to be adjusted lighting. BGR order.
+ eigval (ndarray): the eigenvalues of the covariance matrix of pixel
+ values.
+ eigvec (ndarray): the eigenvectors of the covariance matrix of pixel
+ values.
+ alphastd (float): The standard deviation for distribution of alpha.
+ Defaults to 0.1
+ to_rgb (bool): Whether to convert img to rgb.
+
+ Returns:
+ ndarray: The adjusted image.
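+
+ Example (a minimal sketch; the eigenvalues/eigenvectors below are
+ arbitrary placeholders rather than statistics of a real dataset):
+ >>> img = np.random.randint(0, 256, (4, 4, 3), dtype=np.uint8)
+ >>> eigval = np.array([0.2, 0.1, 0.05])
+ >>> eigvec = np.eye(3)  # placeholder eigenvectors
+ >>> adjust_lighting(img, eigval, eigvec, alphastd=0.1).shape
+ (4, 4, 3)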
+ """
+ assert isinstance(eigval, np.ndarray) and isinstance(eigvec, np.ndarray), \
+ f'eigval and eigvec should both be of type np.ndarray, got ' \
+ f'{type(eigval)} and {type(eigvec)} instead.'
+
+ assert eigval.ndim == 1 and eigvec.ndim == 2
+ assert eigvec.shape == (3, eigval.shape[0])
+ n_eigval = eigval.shape[0]
+ assert isinstance(alphastd, float), 'alphastd should be of type float, ' \
+ f'got {type(alphastd)} instead.'
+
+ img = img.copy().astype(np.float32)
+ if to_rgb:
+ cv2.cvtColor(img, cv2.COLOR_BGR2RGB, img) # inplace
+
+ alpha = np.random.normal(0, alphastd, n_eigval)
+ alter = eigvec \
+ * np.broadcast_to(alpha.reshape(1, n_eigval), (3, n_eigval)) \
+ * np.broadcast_to(eigval.reshape(1, n_eigval), (3, n_eigval))
+ alter = np.broadcast_to(alter.sum(axis=1).reshape(1, 1, 3), img.shape)
+ img_adjusted = img + alter
+ return img_adjusted
+
+
+def lut_transform(img, lut_table):
+ """Transform array by look-up table.
+
+ The function lut_transform fills the output array with values from the
+ look-up table. Indices of the entries are taken from the input array.
+
+ Args:
+ img (ndarray): Image to be transformed.
+ lut_table (ndarray): look-up table of 256 elements; in case of
+ multi-channel input array, the table should either have a single
+ channel (in this case the same table is used for all channels) or
+ the same number of channels as in the input array.
+
+ Returns:
+ ndarray: The transformed image.
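+
+ Example (an illustrative sketch; an inverting look-up table):
+ >>> img = np.array([[0, 128, 255]], dtype=np.uint8)
+ >>> lut = np.arange(255, -1, -1, dtype=np.uint8)  # maps v -> 255 - v
+ >>> lut_transform(img, lut).tolist()
+ [[255, 127, 0]]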
+ """
+ assert isinstance(img, np.ndarray)
+ assert 0 <= np.min(img) and np.max(img) <= 255
+ assert isinstance(lut_table, np.ndarray)
+ assert lut_table.shape == (256, )
+
+ return cv2.LUT(np.array(img, dtype=np.uint8), lut_table)
+
+
+def clahe(img, clip_limit=40.0, tile_grid_size=(8, 8)):
+ """Use CLAHE method to process the image.
+
+ See `ZUIDERVELD, K. Contrast Limited Adaptive Histogram Equalization[J].
+ Graphics Gems, 1994: 474-485.` for more information.
+
+ Args:
+ img (ndarray): Image to be processed.
+ clip_limit (float): Threshold for contrast limiting. Default: 40.0.
+ tile_grid_size (tuple[int]): Size of grid for histogram equalization.
+ Input image will be divided into equally sized rectangular tiles.
+ It defines the number of tiles in row and column. Default: (8, 8).
+
+ Returns:
+ ndarray: The processed image.
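+
+ Example (an illustrative sketch; a random single-channel image):
+ >>> img = np.random.randint(0, 256, (64, 64), dtype=np.uint8)
+ >>> clahe(img, clip_limit=40.0, tile_grid_size=(8, 8)).shape
+ (64, 64)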
+ """
+ assert isinstance(img, np.ndarray)
+ assert img.ndim == 2
+ assert isinstance(clip_limit, (float, int))
+ assert is_tuple_of(tile_grid_size, int)
+ assert len(tile_grid_size) == 2
+
+ clahe = cv2.createCLAHE(clip_limit, tile_grid_size)
+ return clahe.apply(np.array(img, dtype=np.uint8))
diff --git a/annotator/uniformer/mmcv/model_zoo/deprecated.json b/annotator/uniformer/mmcv/model_zoo/deprecated.json
new file mode 100644
index 0000000000000000000000000000000000000000..25cf6f28caecc22a77e3136fefa6b8dfc0e6cb5b
--- /dev/null
+++ b/annotator/uniformer/mmcv/model_zoo/deprecated.json
@@ -0,0 +1,6 @@
+{
+ "resnet50_caffe": "detectron/resnet50_caffe",
+ "resnet50_caffe_bgr": "detectron2/resnet50_caffe_bgr",
+ "resnet101_caffe": "detectron/resnet101_caffe",
+ "resnet101_caffe_bgr": "detectron2/resnet101_caffe_bgr"
+}
diff --git a/annotator/uniformer/mmcv/model_zoo/mmcls.json b/annotator/uniformer/mmcv/model_zoo/mmcls.json
new file mode 100644
index 0000000000000000000000000000000000000000..bdb311d9fe6d9f317290feedc9e37236c6cf6e8f
--- /dev/null
+++ b/annotator/uniformer/mmcv/model_zoo/mmcls.json
@@ -0,0 +1,31 @@
+{
+ "vgg11": "https://download.openmmlab.com/mmclassification/v0/vgg/vgg11_batch256_imagenet_20210208-4271cd6c.pth",
+ "vgg13": "https://download.openmmlab.com/mmclassification/v0/vgg/vgg13_batch256_imagenet_20210208-4d1d6080.pth",
+ "vgg16": "https://download.openmmlab.com/mmclassification/v0/vgg/vgg16_batch256_imagenet_20210208-db26f1a5.pth",
+ "vgg19": "https://download.openmmlab.com/mmclassification/v0/vgg/vgg19_batch256_imagenet_20210208-e6920e4a.pth",
+ "vgg11_bn": "https://download.openmmlab.com/mmclassification/v0/vgg/vgg11_bn_batch256_imagenet_20210207-f244902c.pth",
+ "vgg13_bn": "https://download.openmmlab.com/mmclassification/v0/vgg/vgg13_bn_batch256_imagenet_20210207-1a8b7864.pth",
+ "vgg16_bn": "https://download.openmmlab.com/mmclassification/v0/vgg/vgg16_bn_batch256_imagenet_20210208-7e55cd29.pth",
+ "vgg19_bn": "https://download.openmmlab.com/mmclassification/v0/vgg/vgg19_bn_batch256_imagenet_20210208-da620c4f.pth",
+ "resnet18": "https://download.openmmlab.com/mmclassification/v0/resnet/resnet18_batch256_imagenet_20200708-34ab8f90.pth",
+ "resnet34": "https://download.openmmlab.com/mmclassification/v0/resnet/resnet34_batch256_imagenet_20200708-32ffb4f7.pth",
+ "resnet50": "https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_batch256_imagenet_20200708-cfb998bf.pth",
+ "resnet101": "https://download.openmmlab.com/mmclassification/v0/resnet/resnet101_batch256_imagenet_20200708-753f3608.pth",
+ "resnet152": "https://download.openmmlab.com/mmclassification/v0/resnet/resnet152_batch256_imagenet_20200708-ec25b1f9.pth",
+ "resnet50_v1d": "https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1d50_batch256_imagenet_20200708-1ad0ce94.pth",
+ "resnet101_v1d": "https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1d101_batch256_imagenet_20200708-9cb302ef.pth",
+ "resnet152_v1d": "https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1d152_batch256_imagenet_20200708-e79cb6a2.pth",
+ "resnext50_32x4d": "https://download.openmmlab.com/mmclassification/v0/resnext/resnext50_32x4d_b32x8_imagenet_20210429-56066e27.pth",
+ "resnext101_32x4d": "https://download.openmmlab.com/mmclassification/v0/resnext/resnext101_32x4d_b32x8_imagenet_20210506-e0fa3dd5.pth",
+ "resnext101_32x8d": "https://download.openmmlab.com/mmclassification/v0/resnext/resnext101_32x8d_b32x8_imagenet_20210506-23a247d5.pth",
+ "resnext152_32x4d": "https://download.openmmlab.com/mmclassification/v0/resnext/resnext152_32x4d_b32x8_imagenet_20210524-927787be.pth",
+ "se-resnet50": "https://download.openmmlab.com/mmclassification/v0/se-resnet/se-resnet50_batch256_imagenet_20200804-ae206104.pth",
+ "se-resnet101": "https://download.openmmlab.com/mmclassification/v0/se-resnet/se-resnet101_batch256_imagenet_20200804-ba5b51d4.pth",
+ "resnest50": "https://download.openmmlab.com/mmclassification/v0/resnest/resnest50_imagenet_converted-1ebf0afe.pth",
+ "resnest101": "https://download.openmmlab.com/mmclassification/v0/resnest/resnest101_imagenet_converted-032caa52.pth",
+ "resnest200": "https://download.openmmlab.com/mmclassification/v0/resnest/resnest200_imagenet_converted-581a60f2.pth",
+ "resnest269": "https://download.openmmlab.com/mmclassification/v0/resnest/resnest269_imagenet_converted-59930960.pth",
+ "shufflenet_v1": "https://download.openmmlab.com/mmclassification/v0/shufflenet_v1/shufflenet_v1_batch1024_imagenet_20200804-5d6cec73.pth",
+ "shufflenet_v2": "https://download.openmmlab.com/mmclassification/v0/shufflenet_v2/shufflenet_v2_batch1024_imagenet_20200812-5bf4721e.pth",
+ "mobilenet_v2": "https://download.openmmlab.com/mmclassification/v0/mobilenet_v2/mobilenet_v2_batch256_imagenet_20200708-3b2dc3af.pth"
+}
diff --git a/annotator/uniformer/mmcv/model_zoo/open_mmlab.json b/annotator/uniformer/mmcv/model_zoo/open_mmlab.json
new file mode 100644
index 0000000000000000000000000000000000000000..8311db4feef92faa0841c697d75efbee8430c3a0
--- /dev/null
+++ b/annotator/uniformer/mmcv/model_zoo/open_mmlab.json
@@ -0,0 +1,50 @@
+{
+ "vgg16_caffe": "https://download.openmmlab.com/pretrain/third_party/vgg16_caffe-292e1171.pth",
+ "detectron/resnet50_caffe": "https://download.openmmlab.com/pretrain/third_party/resnet50_caffe-788b5fa3.pth",
+ "detectron2/resnet50_caffe": "https://download.openmmlab.com/pretrain/third_party/resnet50_msra-5891d200.pth",
+ "detectron/resnet101_caffe": "https://download.openmmlab.com/pretrain/third_party/resnet101_caffe-3ad79236.pth",
+ "detectron2/resnet101_caffe": "https://download.openmmlab.com/pretrain/third_party/resnet101_msra-6cc46731.pth",
+ "detectron2/resnext101_32x8d": "https://download.openmmlab.com/pretrain/third_party/resnext101_32x8d-1516f1aa.pth",
+ "resnext50_32x4d": "https://download.openmmlab.com/pretrain/third_party/resnext50-32x4d-0ab1a123.pth",
+ "resnext101_32x4d": "https://download.openmmlab.com/pretrain/third_party/resnext101_32x4d-a5af3160.pth",
+ "resnext101_64x4d": "https://download.openmmlab.com/pretrain/third_party/resnext101_64x4d-ee2c6f71.pth",
+ "contrib/resnet50_gn": "https://download.openmmlab.com/pretrain/third_party/resnet50_gn_thangvubk-ad1730dd.pth",
+ "detectron/resnet50_gn": "https://download.openmmlab.com/pretrain/third_party/resnet50_gn-9186a21c.pth",
+ "detectron/resnet101_gn": "https://download.openmmlab.com/pretrain/third_party/resnet101_gn-cac0ab98.pth",
+ "jhu/resnet50_gn_ws": "https://download.openmmlab.com/pretrain/third_party/resnet50_gn_ws-15beedd8.pth",
+ "jhu/resnet101_gn_ws": "https://download.openmmlab.com/pretrain/third_party/resnet101_gn_ws-3e3c308c.pth",
+ "jhu/resnext50_32x4d_gn_ws": "https://download.openmmlab.com/pretrain/third_party/resnext50_32x4d_gn_ws-0d87ac85.pth",
+ "jhu/resnext101_32x4d_gn_ws": "https://download.openmmlab.com/pretrain/third_party/resnext101_32x4d_gn_ws-34ac1a9e.pth",
+ "jhu/resnext50_32x4d_gn": "https://download.openmmlab.com/pretrain/third_party/resnext50_32x4d_gn-c7e8b754.pth",
+ "jhu/resnext101_32x4d_gn": "https://download.openmmlab.com/pretrain/third_party/resnext101_32x4d_gn-ac3bb84e.pth",
+ "msra/hrnetv2_w18_small": "https://download.openmmlab.com/pretrain/third_party/hrnetv2_w18_small-b5a04e21.pth",
+ "msra/hrnetv2_w18": "https://download.openmmlab.com/pretrain/third_party/hrnetv2_w18-00eb2006.pth",
+ "msra/hrnetv2_w32": "https://download.openmmlab.com/pretrain/third_party/hrnetv2_w32-dc9eeb4f.pth",
+ "msra/hrnetv2_w40": "https://download.openmmlab.com/pretrain/third_party/hrnetv2_w40-ed0b031c.pth",
+ "msra/hrnetv2_w48": "https://download.openmmlab.com/pretrain/third_party/hrnetv2_w48-d2186c55.pth",
+ "bninception_caffe": "https://download.openmmlab.com/pretrain/third_party/bn_inception_caffe-ed2e8665.pth",
+ "kin400/i3d_r50_f32s2_k400": "https://download.openmmlab.com/pretrain/third_party/i3d_r50_f32s2_k400-2c57e077.pth",
+ "kin400/nl3d_r50_f32s2_k400": "https://download.openmmlab.com/pretrain/third_party/nl3d_r50_f32s2_k400-fa7e7caa.pth",
+ "res2net101_v1d_26w_4s": "https://download.openmmlab.com/pretrain/third_party/res2net101_v1d_26w_4s_mmdetv2-f0a600f9.pth",
+ "regnetx_400mf": "https://download.openmmlab.com/pretrain/third_party/regnetx_400mf-a5b10d96.pth",
+ "regnetx_800mf": "https://download.openmmlab.com/pretrain/third_party/regnetx_800mf-1f4be4c7.pth",
+ "regnetx_1.6gf": "https://download.openmmlab.com/pretrain/third_party/regnetx_1.6gf-5791c176.pth",
+ "regnetx_3.2gf": "https://download.openmmlab.com/pretrain/third_party/regnetx_3.2gf-c2599b0f.pth",
+ "regnetx_4.0gf": "https://download.openmmlab.com/pretrain/third_party/regnetx_4.0gf-a88f671e.pth",
+ "regnetx_6.4gf": "https://download.openmmlab.com/pretrain/third_party/regnetx_6.4gf-006af45d.pth",
+ "regnetx_8.0gf": "https://download.openmmlab.com/pretrain/third_party/regnetx_8.0gf-3c68abe7.pth",
+ "regnetx_12gf": "https://download.openmmlab.com/pretrain/third_party/regnetx_12gf-4c2a3350.pth",
+ "resnet18_v1c": "https://download.openmmlab.com/pretrain/third_party/resnet18_v1c-b5776b93.pth",
+ "resnet50_v1c": "https://download.openmmlab.com/pretrain/third_party/resnet50_v1c-2cccc1ad.pth",
+ "resnet101_v1c": "https://download.openmmlab.com/pretrain/third_party/resnet101_v1c-e67eebb6.pth",
+ "mmedit/vgg16": "https://download.openmmlab.com/mmediting/third_party/vgg_state_dict.pth",
+ "mmedit/res34_en_nomixup": "https://download.openmmlab.com/mmediting/third_party/model_best_resnet34_En_nomixup.pth",
+ "mmedit/mobilenet_v2": "https://download.openmmlab.com/mmediting/third_party/mobilenet_v2.pth",
+ "contrib/mobilenet_v3_large": "https://download.openmmlab.com/pretrain/third_party/mobilenet_v3_large-bc2c3fd3.pth",
+ "contrib/mobilenet_v3_small": "https://download.openmmlab.com/pretrain/third_party/mobilenet_v3_small-47085aa1.pth",
+ "resnest50": "https://download.openmmlab.com/pretrain/third_party/resnest50_d2-7497a55b.pth",
+ "resnest101": "https://download.openmmlab.com/pretrain/third_party/resnest101_d2-f3b931b2.pth",
+ "resnest200": "https://download.openmmlab.com/pretrain/third_party/resnest200_d2-ca88e41f.pth",
+ "darknet53": "https://download.openmmlab.com/pretrain/third_party/darknet53-a628ea1b.pth",
+ "mmdet/mobilenet_v2": "https://download.openmmlab.com/mmdetection/v2.0/third_party/mobilenet_v2_batch256_imagenet-ff34753d.pth"
+}
diff --git a/annotator/uniformer/mmcv/ops/__init__.py b/annotator/uniformer/mmcv/ops/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..999e090a458ee148ceca0649f1e3806a40e909bd
--- /dev/null
+++ b/annotator/uniformer/mmcv/ops/__init__.py
@@ -0,0 +1,81 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from .assign_score_withk import assign_score_withk
+from .ball_query import ball_query
+from .bbox import bbox_overlaps
+from .border_align import BorderAlign, border_align
+from .box_iou_rotated import box_iou_rotated
+from .carafe import CARAFE, CARAFENaive, CARAFEPack, carafe, carafe_naive
+from .cc_attention import CrissCrossAttention
+from .contour_expand import contour_expand
+from .corner_pool import CornerPool
+from .correlation import Correlation
+from .deform_conv import DeformConv2d, DeformConv2dPack, deform_conv2d
+from .deform_roi_pool import (DeformRoIPool, DeformRoIPoolPack,
+ ModulatedDeformRoIPoolPack, deform_roi_pool)
+from .deprecated_wrappers import Conv2d_deprecated as Conv2d
+from .deprecated_wrappers import ConvTranspose2d_deprecated as ConvTranspose2d
+from .deprecated_wrappers import Linear_deprecated as Linear
+from .deprecated_wrappers import MaxPool2d_deprecated as MaxPool2d
+from .focal_loss import (SigmoidFocalLoss, SoftmaxFocalLoss,
+ sigmoid_focal_loss, softmax_focal_loss)
+from .furthest_point_sample import (furthest_point_sample,
+ furthest_point_sample_with_dist)
+from .fused_bias_leakyrelu import FusedBiasLeakyReLU, fused_bias_leakyrelu
+from .gather_points import gather_points
+from .group_points import GroupAll, QueryAndGroup, grouping_operation
+from .info import (get_compiler_version, get_compiling_cuda_version,
+ get_onnxruntime_op_path)
+from .iou3d import boxes_iou_bev, nms_bev, nms_normal_bev
+from .knn import knn
+from .masked_conv import MaskedConv2d, masked_conv2d
+from .modulated_deform_conv import (ModulatedDeformConv2d,
+ ModulatedDeformConv2dPack,
+ modulated_deform_conv2d)
+from .multi_scale_deform_attn import MultiScaleDeformableAttention
+from .nms import batched_nms, nms, nms_match, nms_rotated, soft_nms
+from .pixel_group import pixel_group
+from .point_sample import (SimpleRoIAlign, point_sample,
+ rel_roi_point_to_rel_img_point)
+from .points_in_boxes import (points_in_boxes_all, points_in_boxes_cpu,
+ points_in_boxes_part)
+from .points_sampler import PointsSampler
+from .psa_mask import PSAMask
+from .roi_align import RoIAlign, roi_align
+from .roi_align_rotated import RoIAlignRotated, roi_align_rotated
+from .roi_pool import RoIPool, roi_pool
+from .roiaware_pool3d import RoIAwarePool3d
+from .roipoint_pool3d import RoIPointPool3d
+from .saconv import SAConv2d
+from .scatter_points import DynamicScatter, dynamic_scatter
+from .sync_bn import SyncBatchNorm
+from .three_interpolate import three_interpolate
+from .three_nn import three_nn
+from .tin_shift import TINShift, tin_shift
+from .upfirdn2d import upfirdn2d
+from .voxelize import Voxelization, voxelization
+
+__all__ = [
+ 'bbox_overlaps', 'CARAFE', 'CARAFENaive', 'CARAFEPack', 'carafe',
+ 'carafe_naive', 'CornerPool', 'DeformConv2d', 'DeformConv2dPack',
+ 'deform_conv2d', 'DeformRoIPool', 'DeformRoIPoolPack',
+ 'ModulatedDeformRoIPoolPack', 'deform_roi_pool', 'SigmoidFocalLoss',
+ 'SoftmaxFocalLoss', 'sigmoid_focal_loss', 'softmax_focal_loss',
+ 'get_compiler_version', 'get_compiling_cuda_version',
+ 'get_onnxruntime_op_path', 'MaskedConv2d', 'masked_conv2d',
+ 'ModulatedDeformConv2d', 'ModulatedDeformConv2dPack',
+ 'modulated_deform_conv2d', 'batched_nms', 'nms', 'soft_nms', 'nms_match',
+ 'RoIAlign', 'roi_align', 'RoIPool', 'roi_pool', 'SyncBatchNorm', 'Conv2d',
+ 'ConvTranspose2d', 'Linear', 'MaxPool2d', 'CrissCrossAttention', 'PSAMask',
+ 'point_sample', 'rel_roi_point_to_rel_img_point', 'SimpleRoIAlign',
+ 'SAConv2d', 'TINShift', 'tin_shift', 'assign_score_withk',
+ 'box_iou_rotated', 'RoIPointPool3d', 'nms_rotated', 'knn', 'ball_query',
+ 'upfirdn2d', 'FusedBiasLeakyReLU', 'fused_bias_leakyrelu',
+ 'RoIAlignRotated', 'roi_align_rotated', 'pixel_group', 'QueryAndGroup',
+ 'GroupAll', 'grouping_operation', 'contour_expand', 'three_nn',
+ 'three_interpolate', 'MultiScaleDeformableAttention', 'BorderAlign',
+ 'border_align', 'gather_points', 'furthest_point_sample',
+ 'furthest_point_sample_with_dist', 'PointsSampler', 'Correlation',
+ 'boxes_iou_bev', 'nms_bev', 'nms_normal_bev', 'Voxelization',
+ 'voxelization', 'dynamic_scatter', 'DynamicScatter', 'RoIAwarePool3d',
+ 'points_in_boxes_part', 'points_in_boxes_cpu', 'points_in_boxes_all'
+]
diff --git a/annotator/uniformer/mmcv/ops/__pycache__/__init__.cpython-310.pyc b/annotator/uniformer/mmcv/ops/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4a13086548c8769965ac14d5dc27f1fc720e9139
Binary files /dev/null and b/annotator/uniformer/mmcv/ops/__pycache__/__init__.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/ops/__pycache__/__init__.cpython-38.pyc b/annotator/uniformer/mmcv/ops/__pycache__/__init__.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a3bc24b5235a23898709c6da1daf4b4401b774a1
Binary files /dev/null and b/annotator/uniformer/mmcv/ops/__pycache__/__init__.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/ops/__pycache__/assign_score_withk.cpython-310.pyc b/annotator/uniformer/mmcv/ops/__pycache__/assign_score_withk.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..11ac3b4238aa884b4b4b44b33f3160e007df4be0
Binary files /dev/null and b/annotator/uniformer/mmcv/ops/__pycache__/assign_score_withk.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/ops/__pycache__/assign_score_withk.cpython-38.pyc b/annotator/uniformer/mmcv/ops/__pycache__/assign_score_withk.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..98b9f5ac220eccffc55ac03a0c7b9c5cf460fa2e
Binary files /dev/null and b/annotator/uniformer/mmcv/ops/__pycache__/assign_score_withk.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/ops/assign_score_withk.py b/annotator/uniformer/mmcv/ops/assign_score_withk.py
new file mode 100644
index 0000000000000000000000000000000000000000..4906adaa2cffd1b46912fbe7d4f87ef2f9fa0012
--- /dev/null
+++ b/annotator/uniformer/mmcv/ops/assign_score_withk.py
@@ -0,0 +1,123 @@
+from torch.autograd import Function
+
+from ..utils import ext_loader
+
+ext_module = ext_loader.load_ext(
+ '_ext', ['assign_score_withk_forward', 'assign_score_withk_backward'])
+
+
+class AssignScoreWithK(Function):
+ r"""Perform weighted sum to generate output features according to scores.
+ Modified from `PAConv `_.
+
+ This is a memory-efficient CUDA implementation of the assign_scores
+ operation, which first transforms all point features with the weight
+ bank, then assembles neighbor features with ``knn_idx`` and performs a
+ weighted sum using ``scores``.
+
+ See the `paper `_ appendix Sec. D for
+ more detailed descriptions.
+
+ Note:
+ This implementation assumes using ``neighbor`` kernel input, which is
+ (point_features - center_features, point_features).
+ See https://github.com/CVMI-Lab/PAConv/blob/main/scene_seg/model/
+ pointnet2/paconv.py#L128 for more details.
+ """
+
+ @staticmethod
+ def forward(ctx,
+ scores,
+ point_features,
+ center_features,
+ knn_idx,
+ aggregate='sum'):
+ """
+ Args:
+ scores (torch.Tensor): (B, npoint, K, M), predicted scores to
+ aggregate weight matrices in the weight bank.
+ ``npoint`` is the number of sampled centers.
+ ``K`` is the number of queried neighbors.
+ ``M`` is the number of weight matrices in the weight bank.
+ point_features (torch.Tensor): (B, N, M, out_dim)
+ Pre-computed point features to be aggregated.
+ center_features (torch.Tensor): (B, N, M, out_dim)
+ Pre-computed center features to be aggregated.
+ knn_idx (torch.Tensor): (B, npoint, K), index of sampled kNN.
+ We assume the first idx in each row is the idx of the center.
+ aggregate (str, optional): Aggregation method.
+ Can be 'sum', 'avg' or 'max'. Default: 'sum'.
+
+ Returns:
+ torch.Tensor: (B, out_dim, npoint, K), the aggregated features.
+ """
+ agg = {'sum': 0, 'avg': 1, 'max': 2}
+
+ B, N, M, out_dim = point_features.size()
+ _, npoint, K, _ = scores.size()
+
+ output = point_features.new_zeros((B, out_dim, npoint, K))
+ ext_module.assign_score_withk_forward(
+ point_features.contiguous(),
+ center_features.contiguous(),
+ scores.contiguous(),
+ knn_idx.contiguous(),
+ output,
+ B=B,
+ N0=N,
+ N1=npoint,
+ M=M,
+ K=K,
+ O=out_dim,
+ aggregate=agg[aggregate])
+
+ ctx.save_for_backward(output, point_features, center_features, scores,
+ knn_idx)
+ ctx.agg = agg[aggregate]
+
+ return output
+
+ @staticmethod
+ def backward(ctx, grad_out):
+ """
+ Args:
+ grad_out (torch.Tensor): (B, out_dim, npoint, K)
+
+ Returns:
+ grad_scores (torch.Tensor): (B, npoint, K, M)
+ grad_point_features (torch.Tensor): (B, N, M, out_dim)
+ grad_center_features (torch.Tensor): (B, N, M, out_dim)
+ """
+ _, point_features, center_features, scores, knn_idx = ctx.saved_tensors
+
+ agg = ctx.agg
+
+ B, N, M, out_dim = point_features.size()
+ _, npoint, K, _ = scores.size()
+
+ grad_point_features = point_features.new_zeros(point_features.shape)
+ grad_center_features = center_features.new_zeros(center_features.shape)
+ grad_scores = scores.new_zeros(scores.shape)
+
+ ext_module.assign_score_withk_backward(
+ grad_out.contiguous(),
+ point_features.contiguous(),
+ center_features.contiguous(),
+ scores.contiguous(),
+ knn_idx.contiguous(),
+ grad_point_features,
+ grad_center_features,
+ grad_scores,
+ B=B,
+ N0=N,
+ N1=npoint,
+ M=M,
+ K=K,
+ O=out_dim,
+ aggregate=agg)
+
+ return grad_scores, grad_point_features, \
+ grad_center_features, None, None
+
+
+assign_score_withk = AssignScoreWithK.apply
diff --git a/annotator/uniformer/mmcv/ops/ball_query.py b/annotator/uniformer/mmcv/ops/ball_query.py
new file mode 100644
index 0000000000000000000000000000000000000000..d0466847c6e5c1239e359a0397568413ebc1504a
--- /dev/null
+++ b/annotator/uniformer/mmcv/ops/ball_query.py
@@ -0,0 +1,55 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import torch
+from torch.autograd import Function
+
+from ..utils import ext_loader
+
+ext_module = ext_loader.load_ext('_ext', ['ball_query_forward'])
+
+
+class BallQuery(Function):
+ """Find nearby points in spherical space."""
+
+ @staticmethod
+ def forward(ctx, min_radius: float, max_radius: float, sample_num: int,
+ xyz: torch.Tensor, center_xyz: torch.Tensor) -> torch.Tensor:
+ """
+ Args:
+ min_radius (float): minimum radius of the balls.
+ max_radius (float): maximum radius of the balls.
+ sample_num (int): maximum number of features in the balls.
+ xyz (Tensor): (B, N, 3) xyz coordinates of the features.
+ center_xyz (Tensor): (B, npoint, 3) centers of the ball query.
+
+ Returns:
+ Tensor: (B, npoint, nsample) tensor with the indices of
+ the features that form the query balls.
+ """
+ assert center_xyz.is_contiguous()
+ assert xyz.is_contiguous()
+ assert min_radius < max_radius
+
+ B, N, _ = xyz.size()
+ npoint = center_xyz.size(1)
+ idx = xyz.new_zeros(B, npoint, sample_num, dtype=torch.int)
+
+ ext_module.ball_query_forward(
+ center_xyz,
+ xyz,
+ idx,
+ b=B,
+ n=N,
+ m=npoint,
+ min_radius=min_radius,
+ max_radius=max_radius,
+ nsample=sample_num)
+ if torch.__version__ != 'parrots':
+ ctx.mark_non_differentiable(idx)
+ return idx
+
+ @staticmethod
+ def backward(ctx, a=None):
+ return None, None, None, None
+
+
+ball_query = BallQuery.apply
diff --git a/annotator/uniformer/mmcv/ops/bbox.py b/annotator/uniformer/mmcv/ops/bbox.py
new file mode 100644
index 0000000000000000000000000000000000000000..0c4d58b6c91f652933974f519acd3403a833e906
--- /dev/null
+++ b/annotator/uniformer/mmcv/ops/bbox.py
@@ -0,0 +1,72 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from ..utils import ext_loader
+
+ext_module = ext_loader.load_ext('_ext', ['bbox_overlaps'])
+
+
+def bbox_overlaps(bboxes1, bboxes2, mode='iou', aligned=False, offset=0):
+ """Calculate overlap between two set of bboxes.
+
+ If ``aligned`` is ``False``, then calculate the ious between each bbox
+ of bboxes1 and bboxes2, otherwise the ious between each aligned pair of
+ bboxes1 and bboxes2.
+
+ Args:
+ bboxes1 (Tensor): shape (m, 4) in (x1, y1, x2, y2) format or empty.
+ bboxes2 (Tensor): shape (n, 4) in (x1, y1, x2, y2) format or empty.
+ If aligned is ``True``, then m and n must be equal.
+ mode (str): "iou" (intersection over union) or iof (intersection over
+ foreground).
+
+ Returns:
+ ious(Tensor): shape (m, n) if aligned == False else shape (m, 1)
+
+ Example:
+ >>> bboxes1 = torch.FloatTensor([
+ >>> [0, 0, 10, 10],
+ >>> [10, 10, 20, 20],
+ >>> [32, 32, 38, 42],
+ >>> ])
+ >>> bboxes2 = torch.FloatTensor([
+ >>> [0, 0, 10, 20],
+ >>> [0, 10, 10, 19],
+ >>> [10, 10, 20, 20],
+ >>> ])
+ >>> bbox_overlaps(bboxes1, bboxes2)
+ tensor([[0.5000, 0.0000, 0.0000],
+ [0.0000, 0.0000, 1.0000],
+ [0.0000, 0.0000, 0.0000]])
+
+ Example:
+ >>> empty = torch.FloatTensor([])
+ >>> nonempty = torch.FloatTensor([
+ >>> [0, 0, 10, 9],
+ >>> ])
+ >>> assert tuple(bbox_overlaps(empty, nonempty).shape) == (0, 1)
+ >>> assert tuple(bbox_overlaps(nonempty, empty).shape) == (1, 0)
+ >>> assert tuple(bbox_overlaps(empty, empty).shape) == (0, 0)
+ """
+
+ mode_dict = {'iou': 0, 'iof': 1}
+ assert mode in mode_dict.keys()
+ mode_flag = mode_dict[mode]
+ # Either the boxes are empty or the length of boxes' last dimension is 4
+ assert (bboxes1.size(-1) == 4 or bboxes1.size(0) == 0)
+ assert (bboxes2.size(-1) == 4 or bboxes2.size(0) == 0)
+ assert offset == 1 or offset == 0
+
+ rows = bboxes1.size(0)
+ cols = bboxes2.size(0)
+ if aligned:
+ assert rows == cols
+
+ if rows * cols == 0:
+ return bboxes1.new(rows, 1) if aligned else bboxes1.new(rows, cols)
+
+ if aligned:
+ ious = bboxes1.new_zeros(rows)
+ else:
+ ious = bboxes1.new_zeros((rows, cols))
+ ext_module.bbox_overlaps(
+ bboxes1, bboxes2, ious, mode=mode_flag, aligned=aligned, offset=offset)
+ return ious
diff --git a/annotator/uniformer/mmcv/ops/border_align.py b/annotator/uniformer/mmcv/ops/border_align.py
new file mode 100644
index 0000000000000000000000000000000000000000..ff305be328e9b0a15e1bbb5e6b41beb940f55c81
--- /dev/null
+++ b/annotator/uniformer/mmcv/ops/border_align.py
@@ -0,0 +1,109 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+# modified from
+# https://github.com/Megvii-BaseDetection/cvpods/blob/master/cvpods/layers/border_align.py
+
+import torch
+import torch.nn as nn
+from torch.autograd import Function
+from torch.autograd.function import once_differentiable
+
+from ..utils import ext_loader
+
+ext_module = ext_loader.load_ext(
+ '_ext', ['border_align_forward', 'border_align_backward'])
+
+
+class BorderAlignFunction(Function):
+
+ @staticmethod
+ def symbolic(g, input, boxes, pool_size):
+ return g.op(
+ 'mmcv::MMCVBorderAlign', input, boxes, pool_size_i=pool_size)
+
+ @staticmethod
+ def forward(ctx, input, boxes, pool_size):
+ ctx.pool_size = pool_size
+ ctx.input_shape = input.size()
+
+ assert boxes.ndim == 3, 'boxes must be with shape [B, H*W, 4]'
+ assert boxes.size(2) == 4, \
+ 'the last dimension of boxes must be (x1, y1, x2, y2)'
+ assert input.size(1) % 4 == 0, \
+ 'the channel for input feature must be divisible by factor 4'
+
+ # [B, C//4, H*W, 4]
+ output_shape = (input.size(0), input.size(1) // 4, boxes.size(1), 4)
+ output = input.new_zeros(output_shape)
+ # `argmax_idx` only used for backward
+ argmax_idx = input.new_zeros(output_shape).to(torch.int)
+
+ ext_module.border_align_forward(
+ input, boxes, output, argmax_idx, pool_size=ctx.pool_size)
+
+ ctx.save_for_backward(boxes, argmax_idx)
+ return output
+
+ @staticmethod
+ @once_differentiable
+ def backward(ctx, grad_output):
+ boxes, argmax_idx = ctx.saved_tensors
+ grad_input = grad_output.new_zeros(ctx.input_shape)
+ # complex head architecture may cause grad_output uncontiguous
+ grad_output = grad_output.contiguous()
+ ext_module.border_align_backward(
+ grad_output,
+ boxes,
+ argmax_idx,
+ grad_input,
+ pool_size=ctx.pool_size)
+ return grad_input, None, None
+
+
+border_align = BorderAlignFunction.apply
+
+
+class BorderAlign(nn.Module):
+ r"""Border align pooling layer.
+
+ Applies border_align over the input feature based on predicted bboxes.
+ The details were described in the paper
+ `BorderDet: Border Feature for Dense Object Detection
+ `_.
+
+ For each border line (e.g. top, left, bottom or right) of each box,
+ border_align does the following:
+ 1. uniformly samples `pool_size`+1 positions on this line, involving \
+ the start and end points.
+ 2. the corresponding features on these points are computed by \
+ bilinear interpolation.
+ 3. max pooling over all the `pool_size`+1 positions are used for \
+ computing pooled feature.
+
+ Args:
+ pool_size (int): number of positions sampled over the boxes' borders
+ (e.g. top, bottom, left, right).
+
+ """
+
+ def __init__(self, pool_size):
+ super(BorderAlign, self).__init__()
+ self.pool_size = pool_size
+
+ def forward(self, input, boxes):
+ """
+ Args:
+ input: Features with shape [N,4C,H,W]. Channels ranged in [0,C),
+ [C,2C), [2C,3C), [3C,4C) represent the top, left, bottom,
+ right features respectively.
+ boxes: Boxes with shape [N,H*W,4]. Coordinate format (x1,y1,x2,y2).
+
+ Returns:
+ Tensor: Pooled features with shape [N,C,H*W,4]. The order is
+ (top,left,bottom,right) for the last dimension.
+ """
+ return border_align(input, boxes, self.pool_size)
+
+ def __repr__(self):
+ s = self.__class__.__name__
+ s += f'(pool_size={self.pool_size})'
+ return s
diff --git a/annotator/uniformer/mmcv/ops/box_iou_rotated.py b/annotator/uniformer/mmcv/ops/box_iou_rotated.py
new file mode 100644
index 0000000000000000000000000000000000000000..2d78015e9c2a9e7a52859b4e18f84a9aa63481a0
--- /dev/null
+++ b/annotator/uniformer/mmcv/ops/box_iou_rotated.py
@@ -0,0 +1,45 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from ..utils import ext_loader
+
+ext_module = ext_loader.load_ext('_ext', ['box_iou_rotated'])
+
+
+def box_iou_rotated(bboxes1, bboxes2, mode='iou', aligned=False):
+ """Return intersection-over-union (Jaccard index) of boxes.
+
+ Both sets of boxes are expected to be in
+ (x_center, y_center, width, height, angle) format.
+
+ If ``aligned`` is ``False``, then calculate the ious between each bbox
+ of bboxes1 and bboxes2, otherwise the ious between each aligned pair of
+ bboxes1 and bboxes2.
+
+ Arguments:
+ bboxes1 (Tensor): rotated bboxes 1. \
+ It has shape (N, 5), indicating (x, y, w, h, theta) for each row.
+ Note that theta is in radian.
+ bboxes2 (Tensor): rotated bboxes 2. \
+ It has shape (M, 5), indicating (x, y, w, h, theta) for each row.
+ Note that theta is in radian.
+ mode (str): "iou" (intersection over union) or iof (intersection over
+ foreground).
+
+ Returns:
+ ious(Tensor): shape (N, M) if aligned == False else shape (N,)
+ """
+ assert mode in ['iou', 'iof']
+ mode_dict = {'iou': 0, 'iof': 1}
+ mode_flag = mode_dict[mode]
+ rows = bboxes1.size(0)
+ cols = bboxes2.size(0)
+ if aligned:
+ ious = bboxes1.new_zeros(rows)
+ else:
+ ious = bboxes1.new_zeros((rows * cols))
+ bboxes1 = bboxes1.contiguous()
+ bboxes2 = bboxes2.contiguous()
+ ext_module.box_iou_rotated(
+ bboxes1, bboxes2, ious, mode_flag=mode_flag, aligned=aligned)
+ if not aligned:
+ ious = ious.view(rows, cols)
+ return ious
diff --git a/annotator/uniformer/mmcv/ops/carafe.py b/annotator/uniformer/mmcv/ops/carafe.py
new file mode 100644
index 0000000000000000000000000000000000000000..5154cb3abfccfbbe0a1b2daa67018dbf80aaf6d2
--- /dev/null
+++ b/annotator/uniformer/mmcv/ops/carafe.py
@@ -0,0 +1,287 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from torch.autograd import Function
+from torch.nn.modules.module import Module
+
+from ..cnn import UPSAMPLE_LAYERS, normal_init, xavier_init
+from ..utils import ext_loader
+
+ext_module = ext_loader.load_ext('_ext', [
+ 'carafe_naive_forward', 'carafe_naive_backward', 'carafe_forward',
+ 'carafe_backward'
+])
+
+
+class CARAFENaiveFunction(Function):
+
+ @staticmethod
+ def symbolic(g, features, masks, kernel_size, group_size, scale_factor):
+ return g.op(
+ 'mmcv::MMCVCARAFENaive',
+ features,
+ masks,
+ kernel_size_i=kernel_size,
+ group_size_i=group_size,
+ scale_factor_f=scale_factor)
+
+ @staticmethod
+ def forward(ctx, features, masks, kernel_size, group_size, scale_factor):
+ assert scale_factor >= 1
+ assert masks.size(1) == kernel_size * kernel_size * group_size
+ assert masks.size(-1) == features.size(-1) * scale_factor
+ assert masks.size(-2) == features.size(-2) * scale_factor
+ assert features.size(1) % group_size == 0
+ assert (kernel_size - 1) % 2 == 0 and kernel_size >= 1
+ ctx.kernel_size = kernel_size
+ ctx.group_size = group_size
+ ctx.scale_factor = scale_factor
+ ctx.feature_size = features.size()
+ ctx.mask_size = masks.size()
+
+ n, c, h, w = features.size()
+ output = features.new_zeros((n, c, h * scale_factor, w * scale_factor))
+ ext_module.carafe_naive_forward(
+ features,
+ masks,
+ output,
+ kernel_size=kernel_size,
+ group_size=group_size,
+ scale_factor=scale_factor)
+
+ if features.requires_grad or masks.requires_grad:
+ ctx.save_for_backward(features, masks)
+ return output
+
+ @staticmethod
+ def backward(ctx, grad_output):
+ assert grad_output.is_cuda
+
+ features, masks = ctx.saved_tensors
+ kernel_size = ctx.kernel_size
+ group_size = ctx.group_size
+ scale_factor = ctx.scale_factor
+
+ grad_input = torch.zeros_like(features)
+ grad_masks = torch.zeros_like(masks)
+ ext_module.carafe_naive_backward(
+ grad_output.contiguous(),
+ features,
+ masks,
+ grad_input,
+ grad_masks,
+ kernel_size=kernel_size,
+ group_size=group_size,
+ scale_factor=scale_factor)
+
+ return grad_input, grad_masks, None, None, None
+
+
+carafe_naive = CARAFENaiveFunction.apply
+
+
+class CARAFENaive(Module):
+
+ def __init__(self, kernel_size, group_size, scale_factor):
+ super(CARAFENaive, self).__init__()
+
+ assert isinstance(kernel_size, int) and isinstance(
+ group_size, int) and isinstance(scale_factor, int)
+ self.kernel_size = kernel_size
+ self.group_size = group_size
+ self.scale_factor = scale_factor
+
+ def forward(self, features, masks):
+ return carafe_naive(features, masks, self.kernel_size, self.group_size,
+ self.scale_factor)
+
+
+class CARAFEFunction(Function):
+
+ @staticmethod
+ def symbolic(g, features, masks, kernel_size, group_size, scale_factor):
+ return g.op(
+ 'mmcv::MMCVCARAFE',
+ features,
+ masks,
+ kernel_size_i=kernel_size,
+ group_size_i=group_size,
+ scale_factor_f=scale_factor)
+
+ @staticmethod
+ def forward(ctx, features, masks, kernel_size, group_size, scale_factor):
+ assert scale_factor >= 1
+ assert masks.size(1) == kernel_size * kernel_size * group_size
+ assert masks.size(-1) == features.size(-1) * scale_factor
+ assert masks.size(-2) == features.size(-2) * scale_factor
+ assert features.size(1) % group_size == 0
+ assert (kernel_size - 1) % 2 == 0 and kernel_size >= 1
+ ctx.kernel_size = kernel_size
+ ctx.group_size = group_size
+ ctx.scale_factor = scale_factor
+ ctx.feature_size = features.size()
+ ctx.mask_size = masks.size()
+
+ n, c, h, w = features.size()
+ output = features.new_zeros((n, c, h * scale_factor, w * scale_factor))
+ routput = features.new_zeros(output.size(), requires_grad=False)
+ rfeatures = features.new_zeros(features.size(), requires_grad=False)
+ rmasks = masks.new_zeros(masks.size(), requires_grad=False)
+ ext_module.carafe_forward(
+ features,
+ masks,
+ rfeatures,
+ routput,
+ rmasks,
+ output,
+ kernel_size=kernel_size,
+ group_size=group_size,
+ scale_factor=scale_factor)
+
+ if features.requires_grad or masks.requires_grad:
+ ctx.save_for_backward(features, masks, rfeatures)
+ return output
+
+ @staticmethod
+ def backward(ctx, grad_output):
+ assert grad_output.is_cuda
+
+ features, masks, rfeatures = ctx.saved_tensors
+ kernel_size = ctx.kernel_size
+ group_size = ctx.group_size
+ scale_factor = ctx.scale_factor
+
+ rgrad_output = torch.zeros_like(grad_output, requires_grad=False)
+ rgrad_input_hs = torch.zeros_like(grad_output, requires_grad=False)
+ rgrad_input = torch.zeros_like(features, requires_grad=False)
+ rgrad_masks = torch.zeros_like(masks, requires_grad=False)
+ grad_input = torch.zeros_like(features, requires_grad=False)
+ grad_masks = torch.zeros_like(masks, requires_grad=False)
+ ext_module.carafe_backward(
+ grad_output.contiguous(),
+ rfeatures,
+ masks,
+ rgrad_output,
+ rgrad_input_hs,
+ rgrad_input,
+ rgrad_masks,
+ grad_input,
+ grad_masks,
+ kernel_size=kernel_size,
+ group_size=group_size,
+ scale_factor=scale_factor)
+ return grad_input, grad_masks, None, None, None
+
+
+carafe = CARAFEFunction.apply
+
+
+class CARAFE(Module):
+ """ CARAFE: Content-Aware ReAssembly of FEatures
+
+ Please refer to https://arxiv.org/abs/1905.02188 for more details.
+
+ Args:
+ kernel_size (int): reassemble kernel size
+ group_size (int): reassemble group size
+ scale_factor (int): upsample ratio
+
+ Returns:
+ upsampled feature map
+ """
+
+ def __init__(self, kernel_size, group_size, scale_factor):
+ super(CARAFE, self).__init__()
+
+ assert isinstance(kernel_size, int) and isinstance(
+ group_size, int) and isinstance(scale_factor, int)
+ self.kernel_size = kernel_size
+ self.group_size = group_size
+ self.scale_factor = scale_factor
+
+ def forward(self, features, masks):
+ return carafe(features, masks, self.kernel_size, self.group_size,
+ self.scale_factor)
+
+
+@UPSAMPLE_LAYERS.register_module(name='carafe')
+class CARAFEPack(nn.Module):
+ """A unified package of CARAFE upsampler that contains: 1) channel
+ compressor 2) content encoder 3) CARAFE op.
+
+ Official implementation of ICCV 2019 paper
+ CARAFE: Content-Aware ReAssembly of FEatures
+ Please refer to https://arxiv.org/abs/1905.02188 for more details.
+
+ Args:
+ channels (int): input feature channels
+ scale_factor (int): upsample ratio
+ up_kernel (int): kernel size of CARAFE op
+ up_group (int): group size of CARAFE op
+ encoder_kernel (int): kernel size of content encoder
+ encoder_dilation (int): dilation of content encoder
+ compressed_channels (int): output channels of channels compressor
+
+ Returns:
+ upsampled feature map
+ """
+
+ def __init__(self,
+ channels,
+ scale_factor,
+ up_kernel=5,
+ up_group=1,
+ encoder_kernel=3,
+ encoder_dilation=1,
+ compressed_channels=64):
+ super(CARAFEPack, self).__init__()
+ self.channels = channels
+ self.scale_factor = scale_factor
+ self.up_kernel = up_kernel
+ self.up_group = up_group
+ self.encoder_kernel = encoder_kernel
+ self.encoder_dilation = encoder_dilation
+ self.compressed_channels = compressed_channels
+ self.channel_compressor = nn.Conv2d(channels, self.compressed_channels,
+ 1)
+ self.content_encoder = nn.Conv2d(
+ self.compressed_channels,
+ self.up_kernel * self.up_kernel * self.up_group *
+ self.scale_factor * self.scale_factor,
+ self.encoder_kernel,
+ padding=int((self.encoder_kernel - 1) * self.encoder_dilation / 2),
+ dilation=self.encoder_dilation,
+ groups=1)
+ self.init_weights()
+
+ def init_weights(self):
+ for m in self.modules():
+ if isinstance(m, nn.Conv2d):
+ xavier_init(m, distribution='uniform')
+ normal_init(self.content_encoder, std=0.001)
+
+ def kernel_normalizer(self, mask):
+ mask = F.pixel_shuffle(mask, self.scale_factor)
+ n, mask_c, h, w = mask.size()
+ # use float division explicitly,
+ # to avoid inconsistency while exporting to onnx
+ mask_channel = int(mask_c / float(self.up_kernel**2))
+ mask = mask.view(n, mask_channel, -1, h, w)
+
+ mask = F.softmax(mask, dim=2, dtype=mask.dtype)
+ mask = mask.view(n, mask_c, h, w).contiguous()
+
+ return mask
+
+ def feature_reassemble(self, x, mask):
+ x = carafe(x, mask, self.up_kernel, self.up_group, self.scale_factor)
+ return x
+
+ def forward(self, x):
+ compressed_x = self.channel_compressor(x)
+ mask = self.content_encoder(compressed_x)
+ mask = self.kernel_normalizer(mask)
+
+ x = self.feature_reassemble(x, mask)
+ return x
diff --git a/annotator/uniformer/mmcv/ops/cc_attention.py b/annotator/uniformer/mmcv/ops/cc_attention.py
new file mode 100644
index 0000000000000000000000000000000000000000..9207aa95e6730bd9b3362dee612059a5f0ce1c5e
--- /dev/null
+++ b/annotator/uniformer/mmcv/ops/cc_attention.py
@@ -0,0 +1,83 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+from annotator.uniformer.mmcv.cnn import PLUGIN_LAYERS, Scale
+
+
+def NEG_INF_DIAG(n, device):
+ """Returns a diagonal matrix of size [n, n].
+
+ The diagonal entries are all "-inf". This is used to avoid counting
+ the overlapping element of the criss-cross attention twice.
+ """
+ return torch.diag(torch.tensor(float('-inf')).to(device).repeat(n), 0)
+
+
+@PLUGIN_LAYERS.register_module()
+class CrissCrossAttention(nn.Module):
+ """Criss-Cross Attention Module.
+
+ .. note::
+ Before v1.3.13, we use a CUDA op. Since v1.3.13, we switch
+ to a pure PyTorch and equivalent implementation. For more
+ details, please refer to https://github.com/open-mmlab/mmcv/pull/1201.
+
+ Speed comparison for one forward pass
+
+ - Input size: [2,512,97,97]
+ - Device: 1 NVIDIA GeForce RTX 2080 Ti
+
+ +-----------------------+---------------+------------+---------------+
+ | |PyTorch version|CUDA version|Relative speed |
+ +=======================+===============+============+===============+
+ |with torch.no_grad() |0.00554402 s |0.0299619 s |5.4x |
+ +-----------------------+---------------+------------+---------------+
+ |without torch.no_grad()|0.00562803 s |0.0301349 s |5.4x |
+ +-----------------------+---------------+------------+---------------+
+
+ Args:
+ in_channels (int): Channels of the input feature map.
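+
+ Example (a minimal sketch; sizes are arbitrary):
+ >>> cca = CrissCrossAttention(in_channels=16)
+ >>> x = torch.rand(2, 16, 7, 7)
+ >>> cca(x).shape
+ torch.Size([2, 16, 7, 7])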
+ """
+
+ def __init__(self, in_channels):
+ super().__init__()
+ self.query_conv = nn.Conv2d(in_channels, in_channels // 8, 1)
+ self.key_conv = nn.Conv2d(in_channels, in_channels // 8, 1)
+ self.value_conv = nn.Conv2d(in_channels, in_channels, 1)
+ self.gamma = Scale(0.)
+ self.in_channels = in_channels
+
+ def forward(self, x):
+ """forward function of Criss-Cross Attention.
+
+ Args:
+ x (Tensor): Input feature. \
+ shape (batch_size, in_channels, height, width)
+ Returns:
+ Tensor: Output of the layer, with shape of \
+ (batch_size, in_channels, height, width)
+ """
+ B, C, H, W = x.size()
+ query = self.query_conv(x)
+ key = self.key_conv(x)
+ value = self.value_conv(x)
+ energy_H = torch.einsum('bchw,bciw->bwhi', query, key) + NEG_INF_DIAG(
+ H, query.device)
+ energy_H = energy_H.transpose(1, 2)
+ energy_W = torch.einsum('bchw,bchj->bhwj', query, key)
+ attn = F.softmax(
+ torch.cat([energy_H, energy_W], dim=-1), dim=-1) # [B,H,W,(H+W)]
+ out = torch.einsum('bciw,bhwi->bchw', value, attn[..., :H])
+ out += torch.einsum('bchj,bhwj->bchw', value, attn[..., H:])
+
+ out = self.gamma(out) + x
+ out = out.contiguous()
+
+ return out
+
+ def __repr__(self):
+ s = self.__class__.__name__
+ s += f'(in_channels={self.in_channels})'
+ return s
diff --git a/annotator/uniformer/mmcv/ops/contour_expand.py b/annotator/uniformer/mmcv/ops/contour_expand.py
new file mode 100644
index 0000000000000000000000000000000000000000..ea1111e1768b5f27e118bf7dbc0d9c70a7afd6d7
--- /dev/null
+++ b/annotator/uniformer/mmcv/ops/contour_expand.py
@@ -0,0 +1,49 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import numpy as np
+import torch
+
+from ..utils import ext_loader
+
+ext_module = ext_loader.load_ext('_ext', ['contour_expand'])
+
+
+def contour_expand(kernel_mask, internal_kernel_label, min_kernel_area,
+ kernel_num):
+ """Expand kernel contours so that foreground pixels are assigned into
+ instances.
+
+ Arguments:
+ kernel_mask (np.array or Tensor): The instance kernel mask with
+ size hxw.
+ internal_kernel_label (np.array or Tensor): The instance internal
+ kernel label with size hxw.
+ min_kernel_area (int): The minimum kernel area.
+ kernel_num (int): The instance kernel number.
+
+ Returns:
+ label (list): The instance index map with size hxw.
+ """
+ assert isinstance(kernel_mask, (torch.Tensor, np.ndarray))
+ assert isinstance(internal_kernel_label, (torch.Tensor, np.ndarray))
+ assert isinstance(min_kernel_area, int)
+ assert isinstance(kernel_num, int)
+
+ if isinstance(kernel_mask, np.ndarray):
+ kernel_mask = torch.from_numpy(kernel_mask)
+ if isinstance(internal_kernel_label, np.ndarray):
+ internal_kernel_label = torch.from_numpy(internal_kernel_label)
+
+ if torch.__version__ == 'parrots':
+ if kernel_mask.shape[0] == 0 or internal_kernel_label.shape[0] == 0:
+ label = []
+ else:
+ label = ext_module.contour_expand(
+ kernel_mask,
+ internal_kernel_label,
+ min_kernel_area=min_kernel_area,
+ kernel_num=kernel_num)
+ label = label.tolist()
+ else:
+ label = ext_module.contour_expand(kernel_mask, internal_kernel_label,
+ min_kernel_area, kernel_num)
+ return label
diff --git a/annotator/uniformer/mmcv/ops/corner_pool.py b/annotator/uniformer/mmcv/ops/corner_pool.py
new file mode 100644
index 0000000000000000000000000000000000000000..a33d798b43d405e4c86bee4cd6389be21ca9c637
--- /dev/null
+++ b/annotator/uniformer/mmcv/ops/corner_pool.py
@@ -0,0 +1,161 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import torch
+from torch import nn
+from torch.autograd import Function
+
+from ..utils import ext_loader
+
+ext_module = ext_loader.load_ext('_ext', [
+ 'top_pool_forward', 'top_pool_backward', 'bottom_pool_forward',
+ 'bottom_pool_backward', 'left_pool_forward', 'left_pool_backward',
+ 'right_pool_forward', 'right_pool_backward'
+])
+
+_mode_dict = {'top': 0, 'bottom': 1, 'left': 2, 'right': 3}
+
+
+class TopPoolFunction(Function):
+
+ @staticmethod
+ def symbolic(g, input):
+ output = g.op(
+ 'mmcv::MMCVCornerPool', input, mode_i=int(_mode_dict['top']))
+ return output
+
+ @staticmethod
+ def forward(ctx, input):
+ output = ext_module.top_pool_forward(input)
+ ctx.save_for_backward(input)
+ return output
+
+ @staticmethod
+ def backward(ctx, grad_output):
+ input, = ctx.saved_tensors
+ output = ext_module.top_pool_backward(input, grad_output)
+ return output
+
+
+class BottomPoolFunction(Function):
+
+ @staticmethod
+ def symbolic(g, input):
+ output = g.op(
+ 'mmcv::MMCVCornerPool', input, mode_i=int(_mode_dict['bottom']))
+ return output
+
+ @staticmethod
+ def forward(ctx, input):
+ output = ext_module.bottom_pool_forward(input)
+ ctx.save_for_backward(input)
+ return output
+
+ @staticmethod
+ def backward(ctx, grad_output):
+ input, = ctx.saved_tensors
+ output = ext_module.bottom_pool_backward(input, grad_output)
+ return output
+
+
+class LeftPoolFunction(Function):
+
+ @staticmethod
+ def symbolic(g, input):
+ output = g.op(
+ 'mmcv::MMCVCornerPool', input, mode_i=int(_mode_dict['left']))
+ return output
+
+ @staticmethod
+ def forward(ctx, input):
+ output = ext_module.left_pool_forward(input)
+ ctx.save_for_backward(input)
+ return output
+
+ @staticmethod
+ def backward(ctx, grad_output):
+ input, = ctx.saved_tensors
+ output = ext_module.left_pool_backward(input, grad_output)
+ return output
+
+
+class RightPoolFunction(Function):
+
+ @staticmethod
+ def symbolic(g, input):
+ output = g.op(
+ 'mmcv::MMCVCornerPool', input, mode_i=int(_mode_dict['right']))
+ return output
+
+ @staticmethod
+ def forward(ctx, input):
+ output = ext_module.right_pool_forward(input)
+ ctx.save_for_backward(input)
+ return output
+
+ @staticmethod
+ def backward(ctx, grad_output):
+ input, = ctx.saved_tensors
+ output = ext_module.right_pool_backward(input, grad_output)
+ return output
+
+
+class CornerPool(nn.Module):
+ """Corner Pooling.
+
+ Corner Pooling is a new type of pooling layer that helps a
+ convolutional network better localize corners of bounding boxes.
+
+ Please refer to https://arxiv.org/abs/1808.01244 for more details.
+ Code is modified from https://github.com/princeton-vl/CornerNet-Lite.
+
+ Args:
+ mode (str): Pooling orientation for the pooling layer.
+
+ - 'bottom': Bottom Pooling
+ - 'left': Left Pooling
+ - 'right': Right Pooling
+ - 'top': Top Pooling
+
+ Returns:
+ Feature map after pooling.
+ """
+
+ pool_functions = {
+ 'bottom': BottomPoolFunction,
+ 'left': LeftPoolFunction,
+ 'right': RightPoolFunction,
+ 'top': TopPoolFunction,
+ }
+
+ cummax_dim_flip = {
+ 'bottom': (2, False),
+ 'left': (3, True),
+ 'right': (3, False),
+ 'top': (2, True),
+ }
+
+ def __init__(self, mode):
+ super(CornerPool, self).__init__()
+ assert mode in self.pool_functions
+ self.mode = mode
+ self.corner_pool = self.pool_functions[mode]
+
+ def forward(self, x):
+ if torch.__version__ != 'parrots' and torch.__version__ >= '1.5.0':
+ if torch.onnx.is_in_onnx_export():
+ assert torch.__version__ >= '1.7.0', \
+ 'When `cummax` serves as an intermediate component whose '\
+ 'output is used as input to other modules, the PyTorch '\
+ 'version must be >= 1.7.0; otherwise an error such as '\
+ '`RuntimeError: tuple appears in op that does not forward '\
+ 'tuples, unsupported kind: prim::PythonOp` is raised.'
+
+ dim, flip = self.cummax_dim_flip[self.mode]
+ if flip:
+ x = x.flip(dim)
+ pool_tensor, _ = torch.cummax(x, dim=dim)
+ if flip:
+ pool_tensor = pool_tensor.flip(dim)
+ return pool_tensor
+ else:
+ return self.corner_pool.apply(x)
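
A small usage sketch for `CornerPool` above. When the version check selects the `torch.cummax` branch, the op runs without the compiled extension; shapes are illustrative.

import torch

from annotator.uniformer.mmcv.ops.corner_pool import CornerPool

x = torch.rand(2, 8, 16, 16)     # (B, C, H, W) feature map
top_pool = CornerPool('top')     # max over each pixel and everything below it
left_pool = CornerPool('left')   # max over each pixel and everything right of it

# Corner heads commonly sum two orthogonal poolings, e.g. for top-left corners.
top_left = top_pool(x) + left_pool(x)   # same shape as x: (2, 8, 16, 16)
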
diff --git a/annotator/uniformer/mmcv/ops/correlation.py b/annotator/uniformer/mmcv/ops/correlation.py
new file mode 100644
index 0000000000000000000000000000000000000000..3d0b79c301b29915dfaf4d2b1846c59be73127d3
--- /dev/null
+++ b/annotator/uniformer/mmcv/ops/correlation.py
@@ -0,0 +1,196 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import torch
+from torch import Tensor, nn
+from torch.autograd import Function
+from torch.autograd.function import once_differentiable
+from torch.nn.modules.utils import _pair
+
+from ..utils import ext_loader
+
+ext_module = ext_loader.load_ext(
+ '_ext', ['correlation_forward', 'correlation_backward'])
+
+
+class CorrelationFunction(Function):
+
+ @staticmethod
+ def forward(ctx,
+ input1,
+ input2,
+ kernel_size=1,
+ max_displacement=1,
+ stride=1,
+ padding=1,
+ dilation=1,
+ dilation_patch=1):
+
+ ctx.save_for_backward(input1, input2)
+
+ kH, kW = ctx.kernel_size = _pair(kernel_size)
+ patch_size = max_displacement * 2 + 1
+ ctx.patch_size = patch_size
+ dH, dW = ctx.stride = _pair(stride)
+ padH, padW = ctx.padding = _pair(padding)
+ dilationH, dilationW = ctx.dilation = _pair(dilation)
+ dilation_patchH, dilation_patchW = ctx.dilation_patch = _pair(
+ dilation_patch)
+
+ output_size = CorrelationFunction._output_size(ctx, input1)
+
+ output = input1.new_zeros(output_size)
+
+ ext_module.correlation_forward(
+ input1,
+ input2,
+ output,
+ kH=kH,
+ kW=kW,
+ patchH=patch_size,
+ patchW=patch_size,
+ padH=padH,
+ padW=padW,
+ dilationH=dilationH,
+ dilationW=dilationW,
+ dilation_patchH=dilation_patchH,
+ dilation_patchW=dilation_patchW,
+ dH=dH,
+ dW=dW)
+
+ return output
+
+ @staticmethod
+ @once_differentiable
+ def backward(ctx, grad_output):
+ input1, input2 = ctx.saved_tensors
+
+ kH, kW = ctx.kernel_size
+ patch_size = ctx.patch_size
+ padH, padW = ctx.padding
+ dilationH, dilationW = ctx.dilation
+ dilation_patchH, dilation_patchW = ctx.dilation_patch
+ dH, dW = ctx.stride
+ grad_input1 = torch.zeros_like(input1)
+ grad_input2 = torch.zeros_like(input2)
+
+ ext_module.correlation_backward(
+ grad_output,
+ input1,
+ input2,
+ grad_input1,
+ grad_input2,
+ kH=kH,
+ kW=kW,
+ patchH=patch_size,
+ patchW=patch_size,
+ padH=padH,
+ padW=padW,
+ dilationH=dilationH,
+ dilationW=dilationW,
+ dilation_patchH=dilation_patchH,
+ dilation_patchW=dilation_patchW,
+ dH=dH,
+ dW=dW)
+ return grad_input1, grad_input2, None, None, None, None, None, None
+
+ @staticmethod
+ def _output_size(ctx, input1):
+ iH, iW = input1.size(2), input1.size(3)
+ batch_size = input1.size(0)
+ kH, kW = ctx.kernel_size
+ patch_size = ctx.patch_size
+ dH, dW = ctx.stride
+ padH, padW = ctx.padding
+ dilationH, dilationW = ctx.dilation
+ dilatedKH = (kH - 1) * dilationH + 1
+ dilatedKW = (kW - 1) * dilationW + 1
+
+ oH = int((iH + 2 * padH - dilatedKH) / dH + 1)
+ oW = int((iW + 2 * padW - dilatedKW) / dW + 1)
+
+ output_size = (batch_size, patch_size, patch_size, oH, oW)
+ return output_size
+
+
+class Correlation(nn.Module):
+ r"""Correlation operator
+
+ This correlation operator works for optical flow correlation computation.
+
+ There are two batched tensors with shape :math:`(N, C, H, W)`,
+ and the correlation output's shape is :math:`(N, max\_displacement \times
+ 2 + 1, max\_displacement \times 2 + 1, H_{out}, W_{out})`
+
+ where
+
+ .. math::
+ H_{out} = \left\lfloor\frac{H_{in} + 2 \times padding -
+ dilation \times (kernel\_size - 1) - 1}
+ {stride} + 1\right\rfloor
+
+ .. math::
+ W_{out} = \left\lfloor\frac{W_{in} + 2 \times padding - dilation
+ \times (kernel\_size - 1) - 1}
+ {stride} + 1\right\rfloor
+
+ the correlation item :math:`(N_i, dy, dx)` is formed by taking the sliding
+ window convolution between input1 and shifted input2,
+
+ .. math::
+ Corr(N_i, dx, dy) =
+ \sum_{c=0}^{C-1}
+ input1(N_i, c) \star
+ \mathcal{S}(input2(N_i, c), dy, dx)
+
+ where :math:`\star` is the valid 2d sliding window convolution operator,
+ and :math:`\mathcal{S}` means shifting the input features (zero-padded at
+ the margins), and :math:`dx, dy` are the shifting distances, :math:`dx, dy \in
+ [-max\_displacement \times dilation\_patch, max\_displacement \times
+ dilation\_patch]`.
+
+ Args:
+ kernel_size (int): The size of the sliding window, i.e. the local
+ neighborhood around each center point that is involved in the
+ correlation computation. Defaults to 1.
+ max_displacement (int): The radius for computing correlation volume,
+ but the actual working space can be dilated by dilation_patch.
+ Defaults to 1.
+ stride (int): The stride of the sliding blocks in the input spatial
+ dimensions. Defaults to 1.
+ padding (int): Zero padding added to all four sides of the input1.
+ Defaults to 0.
+ dilation (int): The spacing within the local neighborhood that will be
+ involved in the correlation. Defaults to 1.
+ dilation_patch (int): The spacing between positions for which the
+ correlation is computed. Defaults to 1.
+ """
+
+ def __init__(self,
+ kernel_size: int = 1,
+ max_displacement: int = 1,
+ stride: int = 1,
+ padding: int = 0,
+ dilation: int = 1,
+ dilation_patch: int = 1) -> None:
+ super().__init__()
+ self.kernel_size = kernel_size
+ self.max_displacement = max_displacement
+ self.stride = stride
+ self.padding = padding
+ self.dilation = dilation
+ self.dilation_patch = dilation_patch
+
+ def forward(self, input1: Tensor, input2: Tensor) -> Tensor:
+ return CorrelationFunction.apply(input1, input2, self.kernel_size,
+ self.max_displacement, self.stride,
+ self.padding, self.dilation,
+ self.dilation_patch)
+
+ def __repr__(self) -> str:
+ s = self.__class__.__name__
+ s += f'(kernel_size={self.kernel_size}, '
+ s += f'max_displacement={self.max_displacement}, '
+ s += f'stride={self.stride}, '
+ s += f'padding={self.padding}, '
+ s += f'dilation={self.dilation}, '
+ s += f'dilation_patch={self.dilation_patch})'
+ return s
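
A short usage sketch for the `Correlation` module above. The op needs the compiled CUDA extension, so the tensors are placed on the GPU; shapes and the displacement value are illustrative.

import torch

from annotator.uniformer.mmcv.ops.correlation import Correlation

corr = Correlation(max_displacement=3)              # 7x7 displacement window
feat1 = torch.randn(2, 16, 24, 24, device='cuda')
feat2 = torch.randn(2, 16, 24, 24, device='cuda')

# Output is (N, 2*max_displacement+1, 2*max_displacement+1, H_out, W_out),
# i.e. one 7x7 cost map per spatial location here: (2, 7, 7, 24, 24).
cost_volume = corr(feat1, feat2)
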
diff --git a/annotator/uniformer/mmcv/ops/deform_conv.py b/annotator/uniformer/mmcv/ops/deform_conv.py
new file mode 100644
index 0000000000000000000000000000000000000000..a3f8c75ee774823eea334e3b3732af6a18f55038
--- /dev/null
+++ b/annotator/uniformer/mmcv/ops/deform_conv.py
@@ -0,0 +1,405 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from typing import Tuple, Union
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from torch import Tensor
+from torch.autograd import Function
+from torch.autograd.function import once_differentiable
+from torch.nn.modules.utils import _pair, _single
+
+from annotator.uniformer.mmcv.utils import deprecated_api_warning
+from ..cnn import CONV_LAYERS
+from ..utils import ext_loader, print_log
+
+ext_module = ext_loader.load_ext('_ext', [
+ 'deform_conv_forward', 'deform_conv_backward_input',
+ 'deform_conv_backward_parameters'
+])
+
+
+class DeformConv2dFunction(Function):
+
+ @staticmethod
+ def symbolic(g,
+ input,
+ offset,
+ weight,
+ stride,
+ padding,
+ dilation,
+ groups,
+ deform_groups,
+ bias=False,
+ im2col_step=32):
+ return g.op(
+ 'mmcv::MMCVDeformConv2d',
+ input,
+ offset,
+ weight,
+ stride_i=stride,
+ padding_i=padding,
+ dilation_i=dilation,
+ groups_i=groups,
+ deform_groups_i=deform_groups,
+ bias_i=bias,
+ im2col_step_i=im2col_step)
+
+ @staticmethod
+ def forward(ctx,
+ input,
+ offset,
+ weight,
+ stride=1,
+ padding=0,
+ dilation=1,
+ groups=1,
+ deform_groups=1,
+ bias=False,
+ im2col_step=32):
+ if input is not None and input.dim() != 4:
+ raise ValueError(
+ f'Expected 4D tensor as input, got {input.dim()}D '
+ 'tensor instead.')
+ assert bias is False, 'Only support bias is False.'
+ ctx.stride = _pair(stride)
+ ctx.padding = _pair(padding)
+ ctx.dilation = _pair(dilation)
+ ctx.groups = groups
+ ctx.deform_groups = deform_groups
+ ctx.im2col_step = im2col_step
+
+ # When pytorch version >= 1.6.0, amp is adopted for fp16 mode;
+ # amp won't cast the type of model (float32), but "offset" is cast
+ # to float16 by nn.Conv2d automatically, leading to the type
+ # mismatch with input (when it is float32) or weight.
+ # The flag for whether to use fp16 or amp is the type of "offset",
+ # we cast weight and input to temporarily support fp16 and amp
+ # whatever the pytorch version is.
+ input = input.type_as(offset)
+ weight = weight.type_as(input)
+ ctx.save_for_backward(input, offset, weight)
+
+ output = input.new_empty(
+ DeformConv2dFunction._output_size(ctx, input, weight))
+
+ ctx.bufs_ = [input.new_empty(0), input.new_empty(0)] # columns, ones
+
+ cur_im2col_step = min(ctx.im2col_step, input.size(0))
+ assert (input.size(0) %
+ cur_im2col_step) == 0, 'im2col step must divide batchsize'
+ ext_module.deform_conv_forward(
+ input,
+ weight,
+ offset,
+ output,
+ ctx.bufs_[0],
+ ctx.bufs_[1],
+ kW=weight.size(3),
+ kH=weight.size(2),
+ dW=ctx.stride[1],
+ dH=ctx.stride[0],
+ padW=ctx.padding[1],
+ padH=ctx.padding[0],
+ dilationW=ctx.dilation[1],
+ dilationH=ctx.dilation[0],
+ group=ctx.groups,
+ deformable_group=ctx.deform_groups,
+ im2col_step=cur_im2col_step)
+ return output
+
+ @staticmethod
+ @once_differentiable
+ def backward(ctx, grad_output):
+ input, offset, weight = ctx.saved_tensors
+
+ grad_input = grad_offset = grad_weight = None
+
+ cur_im2col_step = min(ctx.im2col_step, input.size(0))
+ assert (input.size(0) % cur_im2col_step
+ ) == 0, 'batch size must be divisible by im2col_step'
+
+ grad_output = grad_output.contiguous()
+ if ctx.needs_input_grad[0] or ctx.needs_input_grad[1]:
+ grad_input = torch.zeros_like(input)
+ grad_offset = torch.zeros_like(offset)
+ ext_module.deform_conv_backward_input(
+ input,
+ offset,
+ grad_output,
+ grad_input,
+ grad_offset,
+ weight,
+ ctx.bufs_[0],
+ kW=weight.size(3),
+ kH=weight.size(2),
+ dW=ctx.stride[1],
+ dH=ctx.stride[0],
+ padW=ctx.padding[1],
+ padH=ctx.padding[0],
+ dilationW=ctx.dilation[1],
+ dilationH=ctx.dilation[0],
+ group=ctx.groups,
+ deformable_group=ctx.deform_groups,
+ im2col_step=cur_im2col_step)
+
+ if ctx.needs_input_grad[2]:
+ grad_weight = torch.zeros_like(weight)
+ ext_module.deform_conv_backward_parameters(
+ input,
+ offset,
+ grad_output,
+ grad_weight,
+ ctx.bufs_[0],
+ ctx.bufs_[1],
+ kW=weight.size(3),
+ kH=weight.size(2),
+ dW=ctx.stride[1],
+ dH=ctx.stride[0],
+ padW=ctx.padding[1],
+ padH=ctx.padding[0],
+ dilationW=ctx.dilation[1],
+ dilationH=ctx.dilation[0],
+ group=ctx.groups,
+ deformable_group=ctx.deform_groups,
+ scale=1,
+ im2col_step=cur_im2col_step)
+
+ return grad_input, grad_offset, grad_weight, \
+ None, None, None, None, None, None, None
+
+ @staticmethod
+ def _output_size(ctx, input, weight):
+ channels = weight.size(0)
+ output_size = (input.size(0), channels)
+ for d in range(input.dim() - 2):
+ in_size = input.size(d + 2)
+ pad = ctx.padding[d]
+ kernel = ctx.dilation[d] * (weight.size(d + 2) - 1) + 1
+ stride_ = ctx.stride[d]
+ output_size += ((in_size + (2 * pad) - kernel) // stride_ + 1, )
+ if not all(map(lambda s: s > 0, output_size)):
+ raise ValueError(
+ 'convolution input is too small (output would be ' +
+ 'x'.join(map(str, output_size)) + ')')
+ return output_size
+
+
+deform_conv2d = DeformConv2dFunction.apply
+
+
+class DeformConv2d(nn.Module):
+ r"""Deformable 2D convolution.
+
+ Applies a deformable 2D convolution over an input signal composed of
+ several input planes. DeformConv2d was described in the paper
+ `Deformable Convolutional Networks
+ <https://arxiv.org/abs/1703.06211>`_.
+
+ Note:
+ The argument ``im2col_step`` was added in version 1.3.17, which means
+ the number of samples processed by the ``im2col_cuda_kernel`` per call.
+ It enables users to define ``batch_size`` and ``im2col_step`` more
+ flexibly and solves `issue mmcv#1440
+ <https://github.com/open-mmlab/mmcv/issues/1440>`_.
+
+ Args:
+ in_channels (int): Number of channels in the input image.
+ out_channels (int): Number of channels produced by the convolution.
+ kernel_size (int or tuple): Size of the convolving kernel.
+ stride (int or tuple): Stride of the convolution. Default: 1.
+ padding (int or tuple): Zero-padding added to both sides of the input.
+ Default: 0.
+ dilation (int or tuple): Spacing between kernel elements. Default: 1.
+ groups (int): Number of blocked connections from input channels to
+ output channels. Default: 1.
+ deform_groups (int): Number of deformable group partitions.
+ bias (bool): If True, adds a learnable bias to the output.
+ Default: False.
+ im2col_step (int): Number of samples processed by im2col_cuda_kernel
+ per call. It will work when ``batch_size`` > ``im2col_step``, but
+ ``batch_size`` must be divisible by ``im2col_step``. Default: 32.
+ `New in version 1.3.17.`
+ """
+
+ @deprecated_api_warning({'deformable_groups': 'deform_groups'},
+ cls_name='DeformConv2d')
+ def __init__(self,
+ in_channels: int,
+ out_channels: int,
+ kernel_size: Union[int, Tuple[int, ...]],
+ stride: Union[int, Tuple[int, ...]] = 1,
+ padding: Union[int, Tuple[int, ...]] = 0,
+ dilation: Union[int, Tuple[int, ...]] = 1,
+ groups: int = 1,
+ deform_groups: int = 1,
+ bias: bool = False,
+ im2col_step: int = 32) -> None:
+ super(DeformConv2d, self).__init__()
+
+ assert not bias, \
+ f'bias={bias} is not supported in DeformConv2d.'
+ assert in_channels % groups == 0, \
+ f'in_channels {in_channels} is not divisible by groups {groups}'
+ assert out_channels % groups == 0, \
+ f'out_channels {out_channels} is not divisible by ' \
+ f'groups {groups}'
+
+ self.in_channels = in_channels
+ self.out_channels = out_channels
+ self.kernel_size = _pair(kernel_size)
+ self.stride = _pair(stride)
+ self.padding = _pair(padding)
+ self.dilation = _pair(dilation)
+ self.groups = groups
+ self.deform_groups = deform_groups
+ self.im2col_step = im2col_step
+ # enable compatibility with nn.Conv2d
+ self.transposed = False
+ self.output_padding = _single(0)
+
+ # only weight, no bias
+ self.weight = nn.Parameter(
+ torch.Tensor(out_channels, in_channels // self.groups,
+ *self.kernel_size))
+
+ self.reset_parameters()
+
+ def reset_parameters(self):
+ # switch the initialization of `self.weight` to the standard kaiming
+ # method described in `Delving deep into rectifiers: Surpassing
+ # human-level performance on ImageNet classification` - He, K. et al.
+ # (2015), using a uniform distribution
+ nn.init.kaiming_uniform_(self.weight, nonlinearity='relu')
+
+ def forward(self, x: Tensor, offset: Tensor) -> Tensor:
+ """Deformable Convolutional forward function.
+
+ Args:
+ x (Tensor): Input feature, shape (B, C_in, H_in, W_in)
+ offset (Tensor): Offset for deformable convolution, shape
+ (B, deform_groups*kernel_size[0]*kernel_size[1]*2,
+ H_out, W_out), where H_out and W_out are equal to the
+ output's spatial size.
+
+ An offset is like `[y0, x0, y1, x1, y2, x2, ..., y8, x8]`.
+ The spatial arrangement is like:
+
+ .. code:: text
+
+ (x0, y0) (x1, y1) (x2, y2)
+ (x3, y3) (x4, y4) (x5, y5)
+ (x6, y6) (x7, y7) (x8, y8)
+
+ Returns:
+ Tensor: Output of the layer.
+ """
+ # To fix an assert error in deform_conv_cuda.cpp:128 when the
+ # input image is smaller than the kernel
+ input_pad = (x.size(2) < self.kernel_size[0]) or (x.size(3) <
+ self.kernel_size[1])
+ if input_pad:
+ pad_h = max(self.kernel_size[0] - x.size(2), 0)
+ pad_w = max(self.kernel_size[1] - x.size(3), 0)
+ x = F.pad(x, (0, pad_w, 0, pad_h), 'constant', 0).contiguous()
+ offset = F.pad(offset, (0, pad_w, 0, pad_h), 'constant', 0)
+ offset = offset.contiguous()
+ out = deform_conv2d(x, offset, self.weight, self.stride, self.padding,
+ self.dilation, self.groups, self.deform_groups,
+ False, self.im2col_step)
+ if input_pad:
+ out = out[:, :, :out.size(2) - pad_h, :out.size(3) -
+ pad_w].contiguous()
+ return out
+
+ def __repr__(self):
+ s = self.__class__.__name__
+ s += f'(in_channels={self.in_channels},\n'
+ s += f'out_channels={self.out_channels},\n'
+ s += f'kernel_size={self.kernel_size},\n'
+ s += f'stride={self.stride},\n'
+ s += f'padding={self.padding},\n'
+ s += f'dilation={self.dilation},\n'
+ s += f'groups={self.groups},\n'
+ s += f'deform_groups={self.deform_groups},\n'
+ # bias is not supported in DeformConv2d.
+ s += 'bias=False)'
+ return s
+
+
+@CONV_LAYERS.register_module('DCN')
+class DeformConv2dPack(DeformConv2d):
+ """A Deformable Conv Encapsulation that acts as normal Conv layers.
+
+ The offset tensor is like `[y0, x0, y1, x1, y2, x2, ..., y8, x8]`.
+ The spatial arrangement is like:
+
+ .. code:: text
+
+ (x0, y0) (x1, y1) (x2, y2)
+ (x3, y3) (x4, y4) (x5, y5)
+ (x6, y6) (x7, y7) (x8, y8)
+
+ Args:
+ in_channels (int): Same as nn.Conv2d.
+ out_channels (int): Same as nn.Conv2d.
+ kernel_size (int or tuple[int]): Same as nn.Conv2d.
+ stride (int or tuple[int]): Same as nn.Conv2d.
+ padding (int or tuple[int]): Same as nn.Conv2d.
+ dilation (int or tuple[int]): Same as nn.Conv2d.
+ groups (int): Same as nn.Conv2d.
+ bias (bool or str): If specified as `auto`, it will be decided by the
+ norm_cfg. Bias will be set as True if norm_cfg is None, otherwise
+ False.
+ """
+
+ _version = 2
+
+ def __init__(self, *args, **kwargs):
+ super(DeformConv2dPack, self).__init__(*args, **kwargs)
+ self.conv_offset = nn.Conv2d(
+ self.in_channels,
+ self.deform_groups * 2 * self.kernel_size[0] * self.kernel_size[1],
+ kernel_size=self.kernel_size,
+ stride=_pair(self.stride),
+ padding=_pair(self.padding),
+ dilation=_pair(self.dilation),
+ bias=True)
+ self.init_offset()
+
+ def init_offset(self):
+ self.conv_offset.weight.data.zero_()
+ self.conv_offset.bias.data.zero_()
+
+ def forward(self, x):
+ offset = self.conv_offset(x)
+ return deform_conv2d(x, offset, self.weight, self.stride, self.padding,
+ self.dilation, self.groups, self.deform_groups,
+ False, self.im2col_step)
+
+ def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
+ missing_keys, unexpected_keys, error_msgs):
+ version = local_metadata.get('version', None)
+
+ if version is None or version < 2:
+ # the key is different in early versions
+ # In version < 2, DeformConvPack loads previous benchmark models.
+ if (prefix + 'conv_offset.weight' not in state_dict
+ and prefix[:-1] + '_offset.weight' in state_dict):
+ state_dict[prefix + 'conv_offset.weight'] = state_dict.pop(
+ prefix[:-1] + '_offset.weight')
+ if (prefix + 'conv_offset.bias' not in state_dict
+ and prefix[:-1] + '_offset.bias' in state_dict):
+ state_dict[prefix +
+ 'conv_offset.bias'] = state_dict.pop(prefix[:-1] +
+ '_offset.bias')
+
+ if version is not None and version > 1:
+ print_log(
+ f'DeformConv2dPack {prefix.rstrip(".")} is upgraded to '
+ 'version 2.',
+ logger='root')
+
+ super()._load_from_state_dict(state_dict, prefix, local_metadata,
+ strict, missing_keys, unexpected_keys,
+ error_msgs)
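
A brief sketch of the two entry points above. `DeformConv2dPack` predicts its own offsets and can replace a plain `nn.Conv2d`, while `DeformConv2d` expects the offset field to be supplied explicitly. Both require the compiled CUDA extension; sizes are illustrative.

import torch

from annotator.uniformer.mmcv.ops.deform_conv import DeformConv2d, DeformConv2dPack

x = torch.randn(2, 16, 28, 28, device='cuda')

# Drop-in replacement: the internal conv_offset layer is zero-initialised,
# so this initially behaves like a regular 3x3 convolution.
dcn = DeformConv2dPack(16, 32, kernel_size=3, padding=1).cuda()
y = dcn(x)                                    # (2, 32, 28, 28)

# Raw op: offsets carry 2 values (dy, dx) per kernel sample and output pixel.
conv = DeformConv2d(16, 32, kernel_size=3, padding=1).cuda()
offset = torch.zeros(2, 2 * 3 * 3, 28, 28, device='cuda')
y_plain = conv(x, offset)                     # zero offsets -> ordinary conv
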
diff --git a/annotator/uniformer/mmcv/ops/deform_roi_pool.py b/annotator/uniformer/mmcv/ops/deform_roi_pool.py
new file mode 100644
index 0000000000000000000000000000000000000000..cc245ba91fee252226ba22e76bb94a35db9a629b
--- /dev/null
+++ b/annotator/uniformer/mmcv/ops/deform_roi_pool.py
@@ -0,0 +1,204 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from torch import nn
+from torch.autograd import Function
+from torch.autograd.function import once_differentiable
+from torch.nn.modules.utils import _pair
+
+from ..utils import ext_loader
+
+ext_module = ext_loader.load_ext(
+ '_ext', ['deform_roi_pool_forward', 'deform_roi_pool_backward'])
+
+
+class DeformRoIPoolFunction(Function):
+
+ @staticmethod
+ def symbolic(g, input, rois, offset, output_size, spatial_scale,
+ sampling_ratio, gamma):
+ return g.op(
+ 'mmcv::MMCVDeformRoIPool',
+ input,
+ rois,
+ offset,
+ pooled_height_i=output_size[0],
+ pooled_width_i=output_size[1],
+ spatial_scale_f=spatial_scale,
+ sampling_ratio_f=sampling_ratio,
+ gamma_f=gamma)
+
+ @staticmethod
+ def forward(ctx,
+ input,
+ rois,
+ offset,
+ output_size,
+ spatial_scale=1.0,
+ sampling_ratio=0,
+ gamma=0.1):
+ if offset is None:
+ offset = input.new_zeros(0)
+ ctx.output_size = _pair(output_size)
+ ctx.spatial_scale = float(spatial_scale)
+ ctx.sampling_ratio = int(sampling_ratio)
+ ctx.gamma = float(gamma)
+
+ assert rois.size(1) == 5, 'RoI must be (idx, x1, y1, x2, y2)!'
+
+ output_shape = (rois.size(0), input.size(1), ctx.output_size[0],
+ ctx.output_size[1])
+ output = input.new_zeros(output_shape)
+
+ ext_module.deform_roi_pool_forward(
+ input,
+ rois,
+ offset,
+ output,
+ pooled_height=ctx.output_size[0],
+ pooled_width=ctx.output_size[1],
+ spatial_scale=ctx.spatial_scale,
+ sampling_ratio=ctx.sampling_ratio,
+ gamma=ctx.gamma)
+
+ ctx.save_for_backward(input, rois, offset)
+ return output
+
+ @staticmethod
+ @once_differentiable
+ def backward(ctx, grad_output):
+ input, rois, offset = ctx.saved_tensors
+ grad_input = grad_output.new_zeros(input.shape)
+ grad_offset = grad_output.new_zeros(offset.shape)
+
+ ext_module.deform_roi_pool_backward(
+ grad_output,
+ input,
+ rois,
+ offset,
+ grad_input,
+ grad_offset,
+ pooled_height=ctx.output_size[0],
+ pooled_width=ctx.output_size[1],
+ spatial_scale=ctx.spatial_scale,
+ sampling_ratio=ctx.sampling_ratio,
+ gamma=ctx.gamma)
+ if grad_offset.numel() == 0:
+ grad_offset = None
+ return grad_input, None, grad_offset, None, None, None, None
+
+
+deform_roi_pool = DeformRoIPoolFunction.apply
+
+
+class DeformRoIPool(nn.Module):
+
+ def __init__(self,
+ output_size,
+ spatial_scale=1.0,
+ sampling_ratio=0,
+ gamma=0.1):
+ super(DeformRoIPool, self).__init__()
+ self.output_size = _pair(output_size)
+ self.spatial_scale = float(spatial_scale)
+ self.sampling_ratio = int(sampling_ratio)
+ self.gamma = float(gamma)
+
+ def forward(self, input, rois, offset=None):
+ return deform_roi_pool(input, rois, offset, self.output_size,
+ self.spatial_scale, self.sampling_ratio,
+ self.gamma)
+
+
+class DeformRoIPoolPack(DeformRoIPool):
+
+ def __init__(self,
+ output_size,
+ output_channels,
+ deform_fc_channels=1024,
+ spatial_scale=1.0,
+ sampling_ratio=0,
+ gamma=0.1):
+ super(DeformRoIPoolPack, self).__init__(output_size, spatial_scale,
+ sampling_ratio, gamma)
+
+ self.output_channels = output_channels
+ self.deform_fc_channels = deform_fc_channels
+
+ self.offset_fc = nn.Sequential(
+ nn.Linear(
+ self.output_size[0] * self.output_size[1] *
+ self.output_channels, self.deform_fc_channels),
+ nn.ReLU(inplace=True),
+ nn.Linear(self.deform_fc_channels, self.deform_fc_channels),
+ nn.ReLU(inplace=True),
+ nn.Linear(self.deform_fc_channels,
+ self.output_size[0] * self.output_size[1] * 2))
+ self.offset_fc[-1].weight.data.zero_()
+ self.offset_fc[-1].bias.data.zero_()
+
+ def forward(self, input, rois):
+ assert input.size(1) == self.output_channels
+ x = deform_roi_pool(input, rois, None, self.output_size,
+ self.spatial_scale, self.sampling_ratio,
+ self.gamma)
+ rois_num = rois.size(0)
+ offset = self.offset_fc(x.view(rois_num, -1))
+ offset = offset.view(rois_num, 2, self.output_size[0],
+ self.output_size[1])
+ return deform_roi_pool(input, rois, offset, self.output_size,
+ self.spatial_scale, self.sampling_ratio,
+ self.gamma)
+
+
+class ModulatedDeformRoIPoolPack(DeformRoIPool):
+
+ def __init__(self,
+ output_size,
+ output_channels,
+ deform_fc_channels=1024,
+ spatial_scale=1.0,
+ sampling_ratio=0,
+ gamma=0.1):
+ super(ModulatedDeformRoIPoolPack,
+ self).__init__(output_size, spatial_scale, sampling_ratio, gamma)
+
+ self.output_channels = output_channels
+ self.deform_fc_channels = deform_fc_channels
+
+ self.offset_fc = nn.Sequential(
+ nn.Linear(
+ self.output_size[0] * self.output_size[1] *
+ self.output_channels, self.deform_fc_channels),
+ nn.ReLU(inplace=True),
+ nn.Linear(self.deform_fc_channels, self.deform_fc_channels),
+ nn.ReLU(inplace=True),
+ nn.Linear(self.deform_fc_channels,
+ self.output_size[0] * self.output_size[1] * 2))
+ self.offset_fc[-1].weight.data.zero_()
+ self.offset_fc[-1].bias.data.zero_()
+
+ self.mask_fc = nn.Sequential(
+ nn.Linear(
+ self.output_size[0] * self.output_size[1] *
+ self.output_channels, self.deform_fc_channels),
+ nn.ReLU(inplace=True),
+ nn.Linear(self.deform_fc_channels,
+ self.output_size[0] * self.output_size[1] * 1),
+ nn.Sigmoid())
+ self.mask_fc[2].weight.data.zero_()
+ self.mask_fc[2].bias.data.zero_()
+
+ def forward(self, input, rois):
+ assert input.size(1) == self.output_channels
+ x = deform_roi_pool(input, rois, None, self.output_size,
+ self.spatial_scale, self.sampling_ratio,
+ self.gamma)
+ rois_num = rois.size(0)
+ offset = self.offset_fc(x.view(rois_num, -1))
+ offset = offset.view(rois_num, 2, self.output_size[0],
+ self.output_size[1])
+ mask = self.mask_fc(x.view(rois_num, -1))
+ mask = mask.view(rois_num, 1, self.output_size[0], self.output_size[1])
+ d = deform_roi_pool(input, rois, offset, self.output_size,
+ self.spatial_scale, self.sampling_ratio,
+ self.gamma)
+ return d * mask
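
A usage sketch for `DeformRoIPoolPack` above (CUDA extension required). The feature-map stride and RoI coordinates are illustrative assumptions.

import torch

from annotator.uniformer.mmcv.ops.deform_roi_pool import DeformRoIPoolPack

# Pool 7x7 bins from a stride-8 feature map; output_channels must match
# the input channel count (asserted in forward above).
pool = DeformRoIPoolPack(output_size=7, output_channels=16,
                         spatial_scale=1.0 / 8).cuda()

feats = torch.randn(1, 16, 40, 40, device='cuda')
# Each RoI is (batch_idx, x1, y1, x2, y2) in input-image coordinates.
rois = torch.tensor([[0., 16., 16., 200., 240.]], device='cuda')
pooled = pool(feats, rois)                    # (1, 16, 7, 7)
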
diff --git a/annotator/uniformer/mmcv/ops/deprecated_wrappers.py b/annotator/uniformer/mmcv/ops/deprecated_wrappers.py
new file mode 100644
index 0000000000000000000000000000000000000000..a2e593df9ee57637038683d7a1efaa347b2b69e7
--- /dev/null
+++ b/annotator/uniformer/mmcv/ops/deprecated_wrappers.py
@@ -0,0 +1,43 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+# This file is for backward compatibility.
+# Module wrappers for empty tensor have been moved to mmcv.cnn.bricks.
+import warnings
+
+from ..cnn.bricks.wrappers import Conv2d, ConvTranspose2d, Linear, MaxPool2d
+
+
+class Conv2d_deprecated(Conv2d):
+
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ warnings.warn(
+ 'Importing Conv2d wrapper from "mmcv.ops" will be deprecated in'
+ ' the future. Please import them from "mmcv.cnn" instead')
+
+
+class ConvTranspose2d_deprecated(ConvTranspose2d):
+
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ warnings.warn(
+ 'Importing ConvTranspose2d wrapper from "mmcv.ops" will be '
+ 'deprecated in the future. Please import them from "mmcv.cnn" '
+ 'instead')
+
+
+class MaxPool2d_deprecated(MaxPool2d):
+
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ warnings.warn(
+ 'Importing MaxPool2d wrapper from "mmcv.ops" will be deprecated in'
+ ' the future. Please import them from "mmcv.cnn" instead')
+
+
+class Linear_deprecated(Linear):
+
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ warnings.warn(
+ 'Importing Linear wrapper from "mmcv.ops" will be deprecated in'
+ ' the future. Please import them from "mmcv.cnn" instead')
diff --git a/annotator/uniformer/mmcv/ops/focal_loss.py b/annotator/uniformer/mmcv/ops/focal_loss.py
new file mode 100644
index 0000000000000000000000000000000000000000..763bc93bd2575c49ca8ccf20996bbd92d1e0d1a4
--- /dev/null
+++ b/annotator/uniformer/mmcv/ops/focal_loss.py
@@ -0,0 +1,212 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import torch
+import torch.nn as nn
+from torch.autograd import Function
+from torch.autograd.function import once_differentiable
+
+from ..utils import ext_loader
+
+ext_module = ext_loader.load_ext('_ext', [
+ 'sigmoid_focal_loss_forward', 'sigmoid_focal_loss_backward',
+ 'softmax_focal_loss_forward', 'softmax_focal_loss_backward'
+])
+
+
+class SigmoidFocalLossFunction(Function):
+
+ @staticmethod
+ def symbolic(g, input, target, gamma, alpha, weight, reduction):
+ return g.op(
+ 'mmcv::MMCVSigmoidFocalLoss',
+ input,
+ target,
+ gamma_f=gamma,
+ alpha_f=alpha,
+ weight_f=weight,
+ reduction_s=reduction)
+
+ @staticmethod
+ def forward(ctx,
+ input,
+ target,
+ gamma=2.0,
+ alpha=0.25,
+ weight=None,
+ reduction='mean'):
+
+ assert isinstance(target, (torch.LongTensor, torch.cuda.LongTensor))
+ assert input.dim() == 2
+ assert target.dim() == 1
+ assert input.size(0) == target.size(0)
+ if weight is None:
+ weight = input.new_empty(0)
+ else:
+ assert weight.dim() == 1
+ assert input.size(1) == weight.size(0)
+ ctx.reduction_dict = {'none': 0, 'mean': 1, 'sum': 2}
+ assert reduction in ctx.reduction_dict.keys()
+
+ ctx.gamma = float(gamma)
+ ctx.alpha = float(alpha)
+ ctx.reduction = ctx.reduction_dict[reduction]
+
+ output = input.new_zeros(input.size())
+
+ ext_module.sigmoid_focal_loss_forward(
+ input, target, weight, output, gamma=ctx.gamma, alpha=ctx.alpha)
+ if ctx.reduction == ctx.reduction_dict['mean']:
+ output = output.sum() / input.size(0)
+ elif ctx.reduction == ctx.reduction_dict['sum']:
+ output = output.sum()
+ ctx.save_for_backward(input, target, weight)
+ return output
+
+ @staticmethod
+ @once_differentiable
+ def backward(ctx, grad_output):
+ input, target, weight = ctx.saved_tensors
+
+ grad_input = input.new_zeros(input.size())
+
+ ext_module.sigmoid_focal_loss_backward(
+ input,
+ target,
+ weight,
+ grad_input,
+ gamma=ctx.gamma,
+ alpha=ctx.alpha)
+
+ grad_input *= grad_output
+ if ctx.reduction == ctx.reduction_dict['mean']:
+ grad_input /= input.size(0)
+ return grad_input, None, None, None, None, None
+
+
+sigmoid_focal_loss = SigmoidFocalLossFunction.apply
+
+
+class SigmoidFocalLoss(nn.Module):
+
+ def __init__(self, gamma, alpha, weight=None, reduction='mean'):
+ super(SigmoidFocalLoss, self).__init__()
+ self.gamma = gamma
+ self.alpha = alpha
+ self.register_buffer('weight', weight)
+ self.reduction = reduction
+
+ def forward(self, input, target):
+ return sigmoid_focal_loss(input, target, self.gamma, self.alpha,
+ self.weight, self.reduction)
+
+ def __repr__(self):
+ s = self.__class__.__name__
+ s += f'(gamma={self.gamma}, '
+ s += f'alpha={self.alpha}, '
+ s += f'reduction={self.reduction})'
+ return s
+
+
+class SoftmaxFocalLossFunction(Function):
+
+ @staticmethod
+ def symbolic(g, input, target, gamma, alpha, weight, reduction):
+ return g.op(
+ 'mmcv::MMCVSoftmaxFocalLoss',
+ input,
+ target,
+ gamma_f=gamma,
+ alpha_f=alpha,
+ weight_f=weight,
+ reduction_s=reduction)
+
+ @staticmethod
+ def forward(ctx,
+ input,
+ target,
+ gamma=2.0,
+ alpha=0.25,
+ weight=None,
+ reduction='mean'):
+
+ assert isinstance(target, (torch.LongTensor, torch.cuda.LongTensor))
+ assert input.dim() == 2
+ assert target.dim() == 1
+ assert input.size(0) == target.size(0)
+ if weight is None:
+ weight = input.new_empty(0)
+ else:
+ assert weight.dim() == 1
+ assert input.size(1) == weight.size(0)
+ ctx.reduction_dict = {'none': 0, 'mean': 1, 'sum': 2}
+ assert reduction in ctx.reduction_dict.keys()
+
+ ctx.gamma = float(gamma)
+ ctx.alpha = float(alpha)
+ ctx.reduction = ctx.reduction_dict[reduction]
+
+ channel_stats, _ = torch.max(input, dim=1)
+ input_softmax = input - channel_stats.unsqueeze(1).expand_as(input)
+ input_softmax.exp_()
+
+ channel_stats = input_softmax.sum(dim=1)
+ input_softmax /= channel_stats.unsqueeze(1).expand_as(input)
+
+ output = input.new_zeros(input.size(0))
+ ext_module.softmax_focal_loss_forward(
+ input_softmax,
+ target,
+ weight,
+ output,
+ gamma=ctx.gamma,
+ alpha=ctx.alpha)
+
+ if ctx.reduction == ctx.reduction_dict['mean']:
+ output = output.sum() / input.size(0)
+ elif ctx.reduction == ctx.reduction_dict['sum']:
+ output = output.sum()
+ ctx.save_for_backward(input_softmax, target, weight)
+ return output
+
+ @staticmethod
+ def backward(ctx, grad_output):
+ input_softmax, target, weight = ctx.saved_tensors
+ buff = input_softmax.new_zeros(input_softmax.size(0))
+ grad_input = input_softmax.new_zeros(input_softmax.size())
+
+ ext_module.softmax_focal_loss_backward(
+ input_softmax,
+ target,
+ weight,
+ buff,
+ grad_input,
+ gamma=ctx.gamma,
+ alpha=ctx.alpha)
+
+ grad_input *= grad_output
+ if ctx.reduction == ctx.reduction_dict['mean']:
+ grad_input /= input_softmax.size(0)
+ return grad_input, None, None, None, None, None
+
+
+softmax_focal_loss = SoftmaxFocalLossFunction.apply
+
+
+class SoftmaxFocalLoss(nn.Module):
+
+ def __init__(self, gamma, alpha, weight=None, reduction='mean'):
+ super(SoftmaxFocalLoss, self).__init__()
+ self.gamma = gamma
+ self.alpha = alpha
+ self.register_buffer('weight', weight)
+ self.reduction = reduction
+
+ def forward(self, input, target):
+ return softmax_focal_loss(input, target, self.gamma, self.alpha,
+ self.weight, self.reduction)
+
+ def __repr__(self):
+ s = self.__class__.__name__
+ s += f'(gamma={self.gamma}, '
+ s += f'alpha={self.alpha}, '
+ s += f'reduction={self.reduction})'
+ return s
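
A short sketch of the sigmoid variant above, run on CUDA tensors since the loss is computed by the `_ext` kernels. `target` holds integer class indices and must be a LongTensor, matching the asserts in `forward`; sizes are illustrative.

import torch

from annotator.uniformer.mmcv.ops.focal_loss import SigmoidFocalLoss

num_classes = 4
logits = torch.randn(8, num_classes, device='cuda', requires_grad=True)
target = torch.randint(0, num_classes, (8, ), device='cuda')  # (N, ) labels

criterion = SigmoidFocalLoss(gamma=2.0, alpha=0.25)   # default 'mean' reduction
loss = criterion(logits, target)                      # scalar
loss.backward()                                       # gradients w.r.t. logits
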
diff --git a/annotator/uniformer/mmcv/ops/furthest_point_sample.py b/annotator/uniformer/mmcv/ops/furthest_point_sample.py
new file mode 100644
index 0000000000000000000000000000000000000000..374b7a878f1972c183941af28ba1df216ac1a60f
--- /dev/null
+++ b/annotator/uniformer/mmcv/ops/furthest_point_sample.py
@@ -0,0 +1,83 @@
+import torch
+from torch.autograd import Function
+
+from ..utils import ext_loader
+
+ext_module = ext_loader.load_ext('_ext', [
+ 'furthest_point_sampling_forward',
+ 'furthest_point_sampling_with_dist_forward'
+])
+
+
+class FurthestPointSampling(Function):
+ """Uses iterative furthest point sampling to select a set of features whose
+ corresponding points have the furthest distance."""
+
+ @staticmethod
+ def forward(ctx, points_xyz: torch.Tensor,
+ num_points: int) -> torch.Tensor:
+ """
+ Args:
+ points_xyz (Tensor): (B, N, 3) where N > num_points.
+ num_points (int): Number of points in the sampled set.
+
+ Returns:
+ Tensor: (B, num_points) indices of the sampled points.
+ """
+ assert points_xyz.is_contiguous()
+
+ B, N = points_xyz.size()[:2]
+ output = torch.cuda.IntTensor(B, num_points)
+ temp = torch.cuda.FloatTensor(B, N).fill_(1e10)
+
+ ext_module.furthest_point_sampling_forward(
+ points_xyz,
+ temp,
+ output,
+ b=B,
+ n=N,
+ m=num_points,
+ )
+ if torch.__version__ != 'parrots':
+ ctx.mark_non_differentiable(output)
+ return output
+
+ @staticmethod
+ def backward(xyz, a=None):
+ return None, None
+
+
+class FurthestPointSamplingWithDist(Function):
+ """Uses iterative furthest point sampling to select a set of features whose
+ corresponding points have the furthest distance."""
+
+ @staticmethod
+ def forward(ctx, points_dist: torch.Tensor,
+ num_points: int) -> torch.Tensor:
+ """
+ Args:
+ points_dist (Tensor): (B, N, N) Distance between each point pair.
+ num_points (int): Number of points in the sampled set.
+
+ Returns:
+ Tensor: (B, num_points) indices of the sampled points.
+ """
+ assert points_dist.is_contiguous()
+
+ B, N, _ = points_dist.size()
+ output = points_dist.new_zeros([B, num_points], dtype=torch.int32)
+ temp = points_dist.new_zeros([B, N]).fill_(1e10)
+
+ ext_module.furthest_point_sampling_with_dist_forward(
+ points_dist, temp, output, b=B, n=N, m=num_points)
+ if torch.__version__ != 'parrots':
+ ctx.mark_non_differentiable(output)
+ return output
+
+ @staticmethod
+ def backward(xyz, a=None):
+ return None, None
+
+
+furthest_point_sample = FurthestPointSampling.apply
+furthest_point_sample_with_dist = FurthestPointSamplingWithDist.apply
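
A usage sketch for the point samplers above (CUDA extension required; the random point cloud is purely illustrative).

import torch

from annotator.uniformer.mmcv.ops.furthest_point_sample import furthest_point_sample

points_xyz = torch.rand(2, 1024, 3, device='cuda')    # (B, N, 3), contiguous
idx = furthest_point_sample(points_xyz, 256)          # (B, 256) int32 indices

# Gather the selected coordinates back out of the cloud.
idx_long = idx.long().unsqueeze(-1).expand(-1, -1, 3)
sampled_xyz = torch.gather(points_xyz, 1, idx_long)   # (2, 256, 3)
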
diff --git a/annotator/uniformer/mmcv/ops/fused_bias_leakyrelu.py b/annotator/uniformer/mmcv/ops/fused_bias_leakyrelu.py
new file mode 100644
index 0000000000000000000000000000000000000000..6d12508469c6c8fa1884debece44c58d158cb6fa
--- /dev/null
+++ b/annotator/uniformer/mmcv/ops/fused_bias_leakyrelu.py
@@ -0,0 +1,268 @@
+# modified from https://github.com/rosinality/stylegan2-pytorch/blob/master/op/fused_act.py # noqa:E501
+
+# Copyright (c) 2021, NVIDIA Corporation. All rights reserved.
+# NVIDIA Source Code License for StyleGAN2 with Adaptive Discriminator
+# Augmentation (ADA)
+# =======================================================================
+
+# 1. Definitions
+
+# "Licensor" means any person or entity that distributes its Work.
+
+# "Software" means the original work of authorship made available under
+# this License.
+
+# "Work" means the Software and any additions to or derivative works of
+# the Software that are made available under this License.
+
+# The terms "reproduce," "reproduction," "derivative works," and
+# "distribution" have the meaning as provided under U.S. copyright law;
+# provided, however, that for the purposes of this License, derivative
+# works shall not include works that remain separable from, or merely
+# link (or bind by name) to the interfaces of, the Work.
+
+# Works, including the Software, are "made available" under this License
+# by including in or with the Work either (a) a copyright notice
+# referencing the applicability of this License to the Work, or (b) a
+# copy of this License.
+
+# 2. License Grants
+
+# 2.1 Copyright Grant. Subject to the terms and conditions of this
+# License, each Licensor grants to you a perpetual, worldwide,
+# non-exclusive, royalty-free, copyright license to reproduce,
+# prepare derivative works of, publicly display, publicly perform,
+# sublicense and distribute its Work and any resulting derivative
+# works in any form.
+
+# 3. Limitations
+
+# 3.1 Redistribution. You may reproduce or distribute the Work only
+# if (a) you do so under this License, (b) you include a complete
+# copy of this License with your distribution, and (c) you retain
+# without modification any copyright, patent, trademark, or
+# attribution notices that are present in the Work.
+
+# 3.2 Derivative Works. You may specify that additional or different
+# terms apply to the use, reproduction, and distribution of your
+# derivative works of the Work ("Your Terms") only if (a) Your Terms
+# provide that the use limitation in Section 3.3 applies to your
+# derivative works, and (b) you identify the specific derivative
+# works that are subject to Your Terms. Notwithstanding Your Terms,
+# this License (including the redistribution requirements in Section
+# 3.1) will continue to apply to the Work itself.
+
+# 3.3 Use Limitation. The Work and any derivative works thereof only
+# may be used or intended for use non-commercially. Notwithstanding
+# the foregoing, NVIDIA and its affiliates may use the Work and any
+# derivative works commercially. As used herein, "non-commercially"
+# means for research or evaluation purposes only.
+
+# 3.4 Patent Claims. If you bring or threaten to bring a patent claim
+# against any Licensor (including any claim, cross-claim or
+# counterclaim in a lawsuit) to enforce any patents that you allege
+# are infringed by any Work, then your rights under this License from
+# such Licensor (including the grant in Section 2.1) will terminate
+# immediately.
+
+# 3.5 Trademarks. This License does not grant any rights to use any
+# Licensor’s or its affiliates’ names, logos, or trademarks, except
+# as necessary to reproduce the notices described in this License.
+
+# 3.6 Termination. If you violate any term of this License, then your
+# rights under this License (including the grant in Section 2.1) will
+# terminate immediately.
+
+# 4. Disclaimer of Warranty.
+
+# THE WORK IS PROVIDED "AS IS" WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WARRANTIES OR CONDITIONS OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE OR
+# NON-INFRINGEMENT. YOU BEAR THE RISK OF UNDERTAKING ANY ACTIVITIES UNDER
+# THIS LICENSE.
+
+# 5. Limitation of Liability.
+
+# EXCEPT AS PROHIBITED BY APPLICABLE LAW, IN NO EVENT AND UNDER NO LEGAL
+# THEORY, WHETHER IN TORT (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE
+# SHALL ANY LICENSOR BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY DIRECT,
+# INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF
+# OR RELATED TO THIS LICENSE, THE USE OR INABILITY TO USE THE WORK
+# (INCLUDING BUT NOT LIMITED TO LOSS OF GOODWILL, BUSINESS INTERRUPTION,
+# LOST PROFITS OR DATA, COMPUTER FAILURE OR MALFUNCTION, OR ANY OTHER
+# COMMERCIAL DAMAGES OR LOSSES), EVEN IF THE LICENSOR HAS BEEN ADVISED OF
+# THE POSSIBILITY OF SUCH DAMAGES.
+
+# =======================================================================
+
+import torch
+import torch.nn.functional as F
+from torch import nn
+from torch.autograd import Function
+
+from ..utils import ext_loader
+
+ext_module = ext_loader.load_ext('_ext', ['fused_bias_leakyrelu'])
+
+
+class FusedBiasLeakyReLUFunctionBackward(Function):
+ """Calculate second order deviation.
+
+ This function is to compute the second order deviation for the fused leaky
+ relu operation.
+ """
+
+ @staticmethod
+ def forward(ctx, grad_output, out, negative_slope, scale):
+ ctx.save_for_backward(out)
+ ctx.negative_slope = negative_slope
+ ctx.scale = scale
+
+ empty = grad_output.new_empty(0)
+
+ grad_input = ext_module.fused_bias_leakyrelu(
+ grad_output,
+ empty,
+ out,
+ act=3,
+ grad=1,
+ alpha=negative_slope,
+ scale=scale)
+
+ dim = [0]
+
+ if grad_input.ndim > 2:
+ dim += list(range(2, grad_input.ndim))
+
+ grad_bias = grad_input.sum(dim).detach()
+
+ return grad_input, grad_bias
+
+ @staticmethod
+ def backward(ctx, gradgrad_input, gradgrad_bias):
+ out, = ctx.saved_tensors
+
+ # The second order derivative, in fact, contains two parts, while
+ # the first part is zero. Thus, we directly consider the second part,
+ # which is similar to the first order derivative in implementation.
+ gradgrad_out = ext_module.fused_bias_leakyrelu(
+ gradgrad_input,
+ gradgrad_bias.to(out.dtype),
+ out,
+ act=3,
+ grad=1,
+ alpha=ctx.negative_slope,
+ scale=ctx.scale)
+
+ return gradgrad_out, None, None, None
+
+
+class FusedBiasLeakyReLUFunction(Function):
+
+ @staticmethod
+ def forward(ctx, input, bias, negative_slope, scale):
+ empty = input.new_empty(0)
+
+ out = ext_module.fused_bias_leakyrelu(
+ input,
+ bias,
+ empty,
+ act=3,
+ grad=0,
+ alpha=negative_slope,
+ scale=scale)
+ ctx.save_for_backward(out)
+ ctx.negative_slope = negative_slope
+ ctx.scale = scale
+
+ return out
+
+ @staticmethod
+ def backward(ctx, grad_output):
+ out, = ctx.saved_tensors
+
+ grad_input, grad_bias = FusedBiasLeakyReLUFunctionBackward.apply(
+ grad_output, out, ctx.negative_slope, ctx.scale)
+
+ return grad_input, grad_bias, None, None
+
+
+class FusedBiasLeakyReLU(nn.Module):
+ """Fused bias leaky ReLU.
+
+ This function is introduced in the StyleGAN2:
+ http://arxiv.org/abs/1912.04958
+
+ The bias term comes from the convolution operation. In addition, to keep
+ the variance of the feature map or gradients unchanged, they also adopt a
+ scale similar to Kaiming initialization. However, since the
+ :math:`1+\alpha^2` term is too small, we can just ignore it. Therefore, the
+ final scale is just :math:`\sqrt{2}`. Of course, you may change it to
+ your own scale.
+
+ TODO: Implement the CPU version.
+
+ Args:
+ num_channels (int): The channel number of the feature map.
+ negative_slope (float, optional): Same as nn.LeakyReLU.
+ Defaults to 0.2.
+ scale (float, optional): A scalar to adjust the variance of the feature
+ map. Defaults to 2**0.5.
+ """
+
+ def __init__(self, num_channels, negative_slope=0.2, scale=2**0.5):
+ super(FusedBiasLeakyReLU, self).__init__()
+
+ self.bias = nn.Parameter(torch.zeros(num_channels))
+ self.negative_slope = negative_slope
+ self.scale = scale
+
+ def forward(self, input):
+ return fused_bias_leakyrelu(input, self.bias, self.negative_slope,
+ self.scale)
+
+
+def fused_bias_leakyrelu(input, bias, negative_slope=0.2, scale=2**0.5):
+ """Fused bias leaky ReLU function.
+
+ This function is introduced in the StyleGAN2:
+ http://arxiv.org/abs/1912.04958
+
+ The bias term comes from the convolution operation. In addition, to keep
+ the variance of the feature map or gradients unchanged, they also adopt a
+ scale similar to Kaiming initialization. However, since the
+ :math:`1+\alpha^2` term is too small, we can just ignore it. Therefore, the
+ final scale is just :math:`\sqrt{2}`. Of course, you may change it to
+ your own scale.
+
+ Args:
+ input (torch.Tensor): Input feature map.
+ bias (nn.Parameter): The bias from convolution operation.
+ negative_slope (float, optional): Same as nn.LeakyReLU.
+ Defaults to 0.2.
+ scale (float, optional): A scalar to adjust the variance of the feature
+ map. Defaults to 2**0.5.
+
+ Returns:
+ torch.Tensor: Feature map after non-linear activation.
+ """
+
+ if not input.is_cuda:
+ return bias_leakyrelu_ref(input, bias, negative_slope, scale)
+
+ return FusedBiasLeakyReLUFunction.apply(input, bias.to(input.dtype),
+ negative_slope, scale)
+
+
+def bias_leakyrelu_ref(x, bias, negative_slope=0.2, scale=2**0.5):
+
+ if bias is not None:
+ assert bias.ndim == 1
+ assert bias.shape[0] == x.shape[1]
+ x = x + bias.reshape([-1 if i == 1 else 1 for i in range(x.ndim)])
+
+ x = F.leaky_relu(x, negative_slope)
+ if scale != 1:
+ x = x * scale
+
+ return x
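
A tiny sketch of the functional interface above. For CPU tensors it falls back to `bias_leakyrelu_ref`, so this snippet needs no compiled extension; shapes are illustrative.

import torch

from annotator.uniformer.mmcv.ops.fused_bias_leakyrelu import fused_bias_leakyrelu

x = torch.randn(2, 8, 16, 16)          # CPU tensor -> reference implementation
bias = torch.zeros(8)                  # one bias value per channel
out = fused_bias_leakyrelu(x, bias)    # leaky_relu(x + bias) scaled by 2 ** 0.5
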
diff --git a/annotator/uniformer/mmcv/ops/gather_points.py b/annotator/uniformer/mmcv/ops/gather_points.py
new file mode 100644
index 0000000000000000000000000000000000000000..f52f1677d8ea0facafc56a3672d37adb44677ff3
--- /dev/null
+++ b/annotator/uniformer/mmcv/ops/gather_points.py
@@ -0,0 +1,57 @@
+import torch
+from torch.autograd import Function
+
+from ..utils import ext_loader
+
+ext_module = ext_loader.load_ext(
+ '_ext', ['gather_points_forward', 'gather_points_backward'])
+
+
+class GatherPoints(Function):
+ """Gather points with given index."""
+
+ @staticmethod
+ def forward(ctx, features: torch.Tensor,
+ indices: torch.Tensor) -> torch.Tensor:
+ """
+ Args:
+ features (Tensor): (B, C, N) features to gather.
+ indices (Tensor): (B, M) where M is the number of points.
+
+ Returns:
+ Tensor: (B, C, M) where M is the number of points.
+ """
+ assert features.is_contiguous()
+ assert indices.is_contiguous()
+
+ B, npoint = indices.size()
+ _, C, N = features.size()
+ output = torch.cuda.FloatTensor(B, C, npoint)
+
+ ext_module.gather_points_forward(
+ features, indices, output, b=B, c=C, n=N, npoints=npoint)
+
+ ctx.for_backwards = (indices, C, N)
+ if torch.__version__ != 'parrots':
+ ctx.mark_non_differentiable(indices)
+ return output
+
+ @staticmethod
+ def backward(ctx, grad_out):
+ idx, C, N = ctx.for_backwards
+ B, npoint = idx.size()
+
+ grad_features = torch.cuda.FloatTensor(B, C, N).zero_()
+ grad_out_data = grad_out.data.contiguous()
+ ext_module.gather_points_backward(
+ grad_out_data,
+ idx,
+ grad_features.data,
+ b=B,
+ c=C,
+ n=N,
+ npoints=npoint)
+ return grad_features, None
+
+
+gather_points = GatherPoints.apply
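
A usage sketch for `gather_points` above, which is typically paired with `furthest_point_sample` to pull out the features of the sampled points (CUDA extension required; sizes are illustrative).

import torch

from annotator.uniformer.mmcv.ops.furthest_point_sample import furthest_point_sample
from annotator.uniformer.mmcv.ops.gather_points import gather_points

points_xyz = torch.rand(2, 1024, 3, device='cuda')    # (B, N, 3)
features = torch.randn(2, 32, 1024, device='cuda')    # (B, C, N)

idx = furthest_point_sample(points_xyz, 128)          # (B, 128) int32 indices
sampled_features = gather_points(features, idx)       # (B, 32, 128)
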
diff --git a/annotator/uniformer/mmcv/ops/group_points.py b/annotator/uniformer/mmcv/ops/group_points.py
new file mode 100644
index 0000000000000000000000000000000000000000..6c3ec9d758ebe4e1c2205882af4be154008253a5
--- /dev/null
+++ b/annotator/uniformer/mmcv/ops/group_points.py
@@ -0,0 +1,224 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from typing import Tuple
+
+import torch
+from torch import nn as nn
+from torch.autograd import Function
+
+from ..utils import ext_loader
+from .ball_query import ball_query
+from .knn import knn
+
+ext_module = ext_loader.load_ext(
+ '_ext', ['group_points_forward', 'group_points_backward'])
+
+
+class QueryAndGroup(nn.Module):
+ """Groups points with a ball query of radius.
+
+ Args:
+ max_radius (float): The maximum radius of the balls.
+ If None is given, we will use kNN sampling instead of ball query.
+ sample_num (int): Maximum number of features to gather in the ball.
+ min_radius (float, optional): The minimum radius of the balls.
+ Default: 0.
+ use_xyz (bool, optional): Whether to use xyz.
+ Default: True.
+ return_grouped_xyz (bool, optional): Whether to return grouped xyz.
+ Default: False.
+ normalize_xyz (bool, optional): Whether to normalize xyz.
+ Default: False.
+ uniform_sample (bool, optional): Whether to sample uniformly.
+ Default: False
+ return_unique_cnt (bool, optional): Whether to return the count of
+ unique samples. Default: False.
+ return_grouped_idx (bool, optional): Whether to return grouped idx.
+ Default: False.
+ """
+
+ def __init__(self,
+ max_radius,
+ sample_num,
+ min_radius=0,
+ use_xyz=True,
+ return_grouped_xyz=False,
+ normalize_xyz=False,
+ uniform_sample=False,
+ return_unique_cnt=False,
+ return_grouped_idx=False):
+ super().__init__()
+ self.max_radius = max_radius
+ self.min_radius = min_radius
+ self.sample_num = sample_num
+ self.use_xyz = use_xyz
+ self.return_grouped_xyz = return_grouped_xyz
+ self.normalize_xyz = normalize_xyz
+ self.uniform_sample = uniform_sample
+ self.return_unique_cnt = return_unique_cnt
+ self.return_grouped_idx = return_grouped_idx
+ if self.return_unique_cnt:
+ assert self.uniform_sample, \
+ 'uniform_sample should be True when ' \
+ 'returning the count of unique samples'
+ if self.max_radius is None:
+ assert not self.normalize_xyz, \
+ 'can not normalize grouped xyz when max_radius is None'
+
+ def forward(self, points_xyz, center_xyz, features=None):
+ """
+ Args:
+ points_xyz (Tensor): (B, N, 3) xyz coordinates of the features.
+ center_xyz (Tensor): (B, npoint, 3) coordinates of the centroids.
+ features (Tensor): (B, C, N) Descriptors of the features.
+
+ Returns:
+ Tensor: (B, 3 + C, npoint, sample_num) Grouped feature.
+ """
+ # if self.max_radius is None, we will perform kNN instead of ball query
+ # idx is of shape [B, npoint, sample_num]
+ if self.max_radius is None:
+ idx = knn(self.sample_num, points_xyz, center_xyz, False)
+ idx = idx.transpose(1, 2).contiguous()
+ else:
+ idx = ball_query(self.min_radius, self.max_radius, self.sample_num,
+ points_xyz, center_xyz)
+
+ if self.uniform_sample:
+ unique_cnt = torch.zeros((idx.shape[0], idx.shape[1]))
+ for i_batch in range(idx.shape[0]):
+ for i_region in range(idx.shape[1]):
+ unique_ind = torch.unique(idx[i_batch, i_region, :])
+ num_unique = unique_ind.shape[0]
+ unique_cnt[i_batch, i_region] = num_unique
+ sample_ind = torch.randint(
+ 0,
+ num_unique, (self.sample_num - num_unique, ),
+ dtype=torch.long)
+ all_ind = torch.cat((unique_ind, unique_ind[sample_ind]))
+ idx[i_batch, i_region, :] = all_ind
+
+ xyz_trans = points_xyz.transpose(1, 2).contiguous()
+ # (B, 3, npoint, sample_num)
+ grouped_xyz = grouping_operation(xyz_trans, idx)
+ grouped_xyz_diff = grouped_xyz - \
+ center_xyz.transpose(1, 2).unsqueeze(-1) # relative offsets
+ if self.normalize_xyz:
+ grouped_xyz_diff /= self.max_radius
+
+ if features is not None:
+ grouped_features = grouping_operation(features, idx)
+ if self.use_xyz:
+ # (B, C + 3, npoint, sample_num)
+ new_features = torch.cat([grouped_xyz_diff, grouped_features],
+ dim=1)
+ else:
+ new_features = grouped_features
+ else:
+ assert self.use_xyz, \
+ 'use_xyz must be True when no features are provided.'
+ new_features = grouped_xyz_diff
+
+ ret = [new_features]
+ if self.return_grouped_xyz:
+ ret.append(grouped_xyz)
+ if self.return_unique_cnt:
+ ret.append(unique_cnt)
+ if self.return_grouped_idx:
+ ret.append(idx)
+ if len(ret) == 1:
+ return ret[0]
+ else:
+ return tuple(ret)
+
+
+class GroupAll(nn.Module):
+ """Group xyz with feature.
+
+ Args:
+ use_xyz (bool): Whether to use xyz.
+ """
+
+ def __init__(self, use_xyz: bool = True):
+ super().__init__()
+ self.use_xyz = use_xyz
+
+ def forward(self,
+ xyz: torch.Tensor,
+ new_xyz: torch.Tensor,
+ features: torch.Tensor = None):
+ """
+ Args:
+ xyz (Tensor): (B, N, 3) xyz coordinates of the features.
+ new_xyz (Tensor): new xyz coordinates of the features.
+ features (Tensor): (B, C, N) features to group.
+
+ Returns:
+ Tensor: (B, C + 3, 1, N) Grouped feature.
+ """
+ grouped_xyz = xyz.transpose(1, 2).unsqueeze(2)
+ if features is not None:
+ grouped_features = features.unsqueeze(2)
+ if self.use_xyz:
+ # (B, 3 + C, 1, N)
+ new_features = torch.cat([grouped_xyz, grouped_features],
+ dim=1)
+ else:
+ new_features = grouped_features
+ else:
+ new_features = grouped_xyz
+
+ return new_features
+
+
+class GroupingOperation(Function):
+ """Group feature with given index."""
+
+ @staticmethod
+ def forward(ctx, features: torch.Tensor,
+ indices: torch.Tensor) -> torch.Tensor:
+ """
+ Args:
+ features (Tensor): (B, C, N) tensor of features to group.
+ indices (Tensor): (B, npoint, nsample) the indices of
+ features to group with.
+
+ Returns:
+ Tensor: (B, C, npoint, nsample) Grouped features.
+ """
+ features = features.contiguous()
+ indices = indices.contiguous()
+
+ B, nfeatures, nsample = indices.size()
+ _, C, N = features.size()
+ output = torch.cuda.FloatTensor(B, C, nfeatures, nsample)
+
+ ext_module.group_points_forward(B, C, N, nfeatures, nsample, features,
+ indices, output)
+
+ ctx.for_backwards = (indices, N)
+ return output
+
+ @staticmethod
+ def backward(ctx,
+ grad_out: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
+ """
+ Args:
+ grad_out (Tensor): (B, C, npoint, nsample) tensor of the gradients
+ of the output from forward.
+
+ Returns:
+ Tensor: (B, C, N) gradient of the features.
+ """
+ idx, N = ctx.for_backwards
+
+ B, C, npoint, nsample = grad_out.size()
+ grad_features = torch.cuda.FloatTensor(B, C, N).zero_()
+
+ grad_out_data = grad_out.data.contiguous()
+ ext_module.group_points_backward(B, C, N, npoint, nsample,
+ grad_out_data, idx,
+ grad_features.data)
+ return grad_features, None
+
+
+grouping_operation = GroupingOperation.apply
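
A usage sketch for `QueryAndGroup` above (CUDA extension required, since it relies on `ball_query`); the radius and sizes are illustrative.

import torch

from annotator.uniformer.mmcv.ops.group_points import QueryAndGroup

points_xyz = torch.rand(2, 1024, 3, device='cuda')        # all points
center_xyz = points_xyz[:, :128, :].contiguous()          # (B, npoint, 3) centers
features = torch.randn(2, 32, 1024, device='cuda')        # (B, C, N) descriptors

grouper = QueryAndGroup(max_radius=0.2, sample_num=16, use_xyz=True)
grouped = grouper(points_xyz, center_xyz, features)       # (B, 3 + 32, 128, 16)
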
diff --git a/annotator/uniformer/mmcv/ops/info.py b/annotator/uniformer/mmcv/ops/info.py
new file mode 100644
index 0000000000000000000000000000000000000000..29f2e5598ae2bb5866ccd15a7d3b4de33c0cd14d
--- /dev/null
+++ b/annotator/uniformer/mmcv/ops/info.py
@@ -0,0 +1,36 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import glob
+import os
+
+import torch
+
+if torch.__version__ == 'parrots':
+ import parrots
+
+ def get_compiler_version():
+ return 'GCC ' + parrots.version.compiler
+
+ def get_compiling_cuda_version():
+ return parrots.version.cuda
+else:
+ from ..utils import ext_loader
+ ext_module = ext_loader.load_ext(
+ '_ext', ['get_compiler_version', 'get_compiling_cuda_version'])
+
+ def get_compiler_version():
+ return ext_module.get_compiler_version()
+
+ def get_compiling_cuda_version():
+ return ext_module.get_compiling_cuda_version()
+
+
+def get_onnxruntime_op_path():
+ wildcard = os.path.join(
+ os.path.abspath(os.path.dirname(os.path.dirname(__file__))),
+ '_ext_ort.*.so')
+
+ paths = glob.glob(wildcard)
+ if len(paths) > 0:
+ return paths[0]
+ else:
+ return ''
diff --git a/annotator/uniformer/mmcv/ops/iou3d.py b/annotator/uniformer/mmcv/ops/iou3d.py
new file mode 100644
index 0000000000000000000000000000000000000000..6fc71979190323f44c09f8b7e1761cf49cd2d76b
--- /dev/null
+++ b/annotator/uniformer/mmcv/ops/iou3d.py
@@ -0,0 +1,85 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import torch
+
+from ..utils import ext_loader
+
+ext_module = ext_loader.load_ext('_ext', [
+ 'iou3d_boxes_iou_bev_forward', 'iou3d_nms_forward',
+ 'iou3d_nms_normal_forward'
+])
+
+
+def boxes_iou_bev(boxes_a, boxes_b):
+ """Calculate boxes IoU in the Bird's Eye View.
+
+ Args:
+ boxes_a (torch.Tensor): Input boxes a with shape (M, 5).
+ boxes_b (torch.Tensor): Input boxes b with shape (N, 5).
+
+ Returns:
+ ans_iou (torch.Tensor): IoU result with shape (M, N).
+ """
+ ans_iou = boxes_a.new_zeros(
+ torch.Size((boxes_a.shape[0], boxes_b.shape[0])))
+
+ ext_module.iou3d_boxes_iou_bev_forward(boxes_a.contiguous(),
+ boxes_b.contiguous(), ans_iou)
+
+ return ans_iou
+
+
+def nms_bev(boxes, scores, thresh, pre_max_size=None, post_max_size=None):
+ """NMS function GPU implementation (for BEV boxes). The overlap of two
+ boxes for IoU calculation is defined as the exact overlapping area of the
+ two boxes. In this function, one can also set ``pre_max_size`` and
+ ``post_max_size``.
+
+ Args:
+ boxes (torch.Tensor): Input boxes with the shape of [N, 5]
+ ([x1, y1, x2, y2, ry]).
+ scores (torch.Tensor): Scores of boxes with the shape of [N].
+ thresh (float): Overlap threshold of NMS.
+ pre_max_size (int, optional): Max size of boxes before NMS.
+ Default: None.
+ post_max_size (int, optional): Max size of boxes after NMS.
+ Default: None.
+
+ Returns:
+ torch.Tensor: Indexes after NMS.
+ """
+ assert boxes.size(1) == 5, 'Input boxes shape should be [N, 5]'
+ order = scores.sort(0, descending=True)[1]
+
+ if pre_max_size is not None:
+ order = order[:pre_max_size]
+ boxes = boxes[order].contiguous()
+
+ keep = torch.zeros(boxes.size(0), dtype=torch.long)
+ num_out = ext_module.iou3d_nms_forward(boxes, keep, thresh)
+ keep = order[keep[:num_out].cuda(boxes.device)].contiguous()
+ if post_max_size is not None:
+ keep = keep[:post_max_size]
+ return keep
+
+
+def nms_normal_bev(boxes, scores, thresh):
+ """Normal NMS function GPU implementation (for BEV boxes). The overlap of
+ two boxes for IoU calculation is defined as the exact overlapping area of
+ the two boxes WITH their yaw angle set to 0.
+
+ Args:
+ boxes (torch.Tensor): Input boxes with shape (N, 5).
+ scores (torch.Tensor): Scores of predicted boxes with shape (N).
+ thresh (float): Overlap threshold of NMS.
+
+ Returns:
+ torch.Tensor: Remaining indices with scores in descending order.
+ """
+ assert boxes.shape[1] == 5, 'Input boxes shape should be [N, 5]'
+ order = scores.sort(0, descending=True)[1]
+
+ boxes = boxes[order].contiguous()
+
+ keep = torch.zeros(boxes.size(0), dtype=torch.long)
+ num_out = ext_module.iou3d_nms_normal_forward(boxes, keep, thresh)
+ return order[keep[:num_out].cuda(boxes.device)].contiguous()
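+
+
+# Illustrative usage sketch (assumes the compiled `_ext` CUDA extension and a
+# GPU); boxes follow the [x1, y1, x2, y2, ry] layout described above.
+# >>> boxes = torch.rand(100, 5).cuda()
+# >>> scores = torch.rand(100).cuda()
+# >>> keep = nms_bev(boxes, scores, thresh=0.5, pre_max_size=50)
+# >>> kept_boxes = boxes[keep]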
diff --git a/annotator/uniformer/mmcv/ops/knn.py b/annotator/uniformer/mmcv/ops/knn.py
new file mode 100644
index 0000000000000000000000000000000000000000..f335785036669fc19239825b0aae6dde3f73bf92
--- /dev/null
+++ b/annotator/uniformer/mmcv/ops/knn.py
@@ -0,0 +1,77 @@
+import torch
+from torch.autograd import Function
+
+from ..utils import ext_loader
+
+ext_module = ext_loader.load_ext('_ext', ['knn_forward'])
+
+
+class KNN(Function):
+ r"""KNN (CUDA) based on heap data structure.
+    Modified from PAConv.
+
+ Find k-nearest points.
+ """
+
+ @staticmethod
+ def forward(ctx,
+ k: int,
+ xyz: torch.Tensor,
+ center_xyz: torch.Tensor = None,
+ transposed: bool = False) -> torch.Tensor:
+ """
+ Args:
+ k (int): number of nearest neighbors.
+ xyz (Tensor): (B, N, 3) if transposed == False, else (B, 3, N).
+ xyz coordinates of the features.
+ center_xyz (Tensor, optional): (B, npoint, 3) if transposed ==
+ False, else (B, 3, npoint). centers of the knn query.
+ Default: None.
+ transposed (bool, optional): whether the input tensors are
+ transposed. Should not explicitly use this keyword when
+ calling knn (=KNN.apply), just add the fourth param.
+ Default: False.
+
+ Returns:
+ Tensor: (B, k, npoint) tensor with the indices of
+ the features that form k-nearest neighbours.
+ """
+ assert (k > 0) & (k < 100), 'k should be in range(0, 100)'
+
+ if center_xyz is None:
+ center_xyz = xyz
+
+ if transposed:
+ xyz = xyz.transpose(2, 1).contiguous()
+ center_xyz = center_xyz.transpose(2, 1).contiguous()
+
+ assert xyz.is_contiguous() # [B, N, 3]
+ assert center_xyz.is_contiguous() # [B, npoint, 3]
+
+ center_xyz_device = center_xyz.get_device()
+ assert center_xyz_device == xyz.get_device(), \
+ 'center_xyz and xyz should be put on the same device'
+ if torch.cuda.current_device() != center_xyz_device:
+ torch.cuda.set_device(center_xyz_device)
+
+ B, npoint, _ = center_xyz.shape
+ N = xyz.shape[1]
+
+ idx = center_xyz.new_zeros((B, npoint, k)).int()
+ dist2 = center_xyz.new_zeros((B, npoint, k)).float()
+
+ ext_module.knn_forward(
+ xyz, center_xyz, idx, dist2, b=B, n=N, m=npoint, nsample=k)
+ # idx shape to [B, k, npoint]
+ idx = idx.transpose(2, 1).contiguous()
+ if torch.__version__ != 'parrots':
+ ctx.mark_non_differentiable(idx)
+ return idx
+
+ @staticmethod
+ def backward(ctx, a=None):
+ return None, None, None
+
+
+knn = KNN.apply
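+
+
+# Illustrative usage sketch (assumes the compiled `_ext` CUDA extension and a GPU):
+# >>> xyz = torch.randn(2, 1024, 3).cuda()     # (B, N, 3) points to search
+# >>> centers = torch.randn(2, 256, 3).cuda()  # (B, npoint, 3) query centers
+# >>> idx = knn(16, xyz, centers)              # (B, 16, 256) neighbour indices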
diff --git a/annotator/uniformer/mmcv/ops/masked_conv.py b/annotator/uniformer/mmcv/ops/masked_conv.py
new file mode 100644
index 0000000000000000000000000000000000000000..cd514cc204c1d571ea5dc7e74b038c0f477a008b
--- /dev/null
+++ b/annotator/uniformer/mmcv/ops/masked_conv.py
@@ -0,0 +1,111 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import math
+
+import torch
+import torch.nn as nn
+from torch.autograd import Function
+from torch.autograd.function import once_differentiable
+from torch.nn.modules.utils import _pair
+
+from ..utils import ext_loader
+
+ext_module = ext_loader.load_ext(
+ '_ext', ['masked_im2col_forward', 'masked_col2im_forward'])
+
+
+class MaskedConv2dFunction(Function):
+
+ @staticmethod
+ def symbolic(g, features, mask, weight, bias, padding, stride):
+ return g.op(
+ 'mmcv::MMCVMaskedConv2d',
+ features,
+ mask,
+ weight,
+ bias,
+ padding_i=padding,
+ stride_i=stride)
+
+ @staticmethod
+ def forward(ctx, features, mask, weight, bias, padding=0, stride=1):
+ assert mask.dim() == 3 and mask.size(0) == 1
+ assert features.dim() == 4 and features.size(0) == 1
+ assert features.size()[2:] == mask.size()[1:]
+ pad_h, pad_w = _pair(padding)
+ stride_h, stride_w = _pair(stride)
+ if stride_h != 1 or stride_w != 1:
+            raise ValueError(
+                'masked_conv2d currently only supports stride 1.')
+ out_channel, in_channel, kernel_h, kernel_w = weight.size()
+
+ batch_size = features.size(0)
+ out_h = int(
+ math.floor((features.size(2) + 2 * pad_h -
+ (kernel_h - 1) - 1) / stride_h + 1))
+        out_w = int(
+            math.floor((features.size(3) + 2 * pad_w -
+                        (kernel_w - 1) - 1) / stride_w + 1))
+ mask_inds = torch.nonzero(mask[0] > 0, as_tuple=False)
+ output = features.new_zeros(batch_size, out_channel, out_h, out_w)
+ if mask_inds.numel() > 0:
+ mask_h_idx = mask_inds[:, 0].contiguous()
+ mask_w_idx = mask_inds[:, 1].contiguous()
+ data_col = features.new_zeros(in_channel * kernel_h * kernel_w,
+ mask_inds.size(0))
+ ext_module.masked_im2col_forward(
+ features,
+ mask_h_idx,
+ mask_w_idx,
+ data_col,
+ kernel_h=kernel_h,
+ kernel_w=kernel_w,
+ pad_h=pad_h,
+ pad_w=pad_w)
+
+            masked_output = torch.addmm(
+                bias[:, None], weight.view(out_channel, -1), data_col)
+ ext_module.masked_col2im_forward(
+ masked_output,
+ mask_h_idx,
+ mask_w_idx,
+ output,
+ height=out_h,
+ width=out_w,
+ channels=out_channel)
+ return output
+
+ @staticmethod
+ @once_differentiable
+ def backward(ctx, grad_output):
+ return (None, ) * 5
+
+
+masked_conv2d = MaskedConv2dFunction.apply
+
+
+class MaskedConv2d(nn.Conv2d):
+ """A MaskedConv2d which inherits the official Conv2d.
+
+    The masked forward doesn't implement the backward function and only
+    supports a stride of 1 currently.
+ """
+
+ def __init__(self,
+ in_channels,
+ out_channels,
+ kernel_size,
+ stride=1,
+ padding=0,
+ dilation=1,
+ groups=1,
+ bias=True):
+ super(MaskedConv2d,
+ self).__init__(in_channels, out_channels, kernel_size, stride,
+ padding, dilation, groups, bias)
+
+ def forward(self, input, mask=None):
+ if mask is None: # fallback to the normal Conv2d
+ return super(MaskedConv2d, self).forward(input)
+ else:
+ return masked_conv2d(input, mask, self.weight, self.bias,
+ self.padding)
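+
+
+# Illustrative usage sketch: with mask=None the layer behaves like a plain
+# nn.Conv2d; passing a (1, H, W) mask computes outputs only at masked
+# positions and assumes the compiled `_ext` CUDA extension (batch size 1 only).
+# >>> conv = MaskedConv2d(3, 8, kernel_size=3, padding=1).cuda()
+# >>> x = torch.randn(1, 3, 32, 32).cuda()
+# >>> mask = (torch.rand(1, 32, 32) > 0.5).float().cuda()
+# >>> out = conv(x, mask)                      # (1, 8, 32, 32)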
diff --git a/annotator/uniformer/mmcv/ops/merge_cells.py b/annotator/uniformer/mmcv/ops/merge_cells.py
new file mode 100644
index 0000000000000000000000000000000000000000..48ca8cc0a8aca8432835bd760c0403a3c35b34cf
--- /dev/null
+++ b/annotator/uniformer/mmcv/ops/merge_cells.py
@@ -0,0 +1,149 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from abc import abstractmethod
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+from ..cnn import ConvModule
+
+
+class BaseMergeCell(nn.Module):
+ """The basic class for cells used in NAS-FPN and NAS-FCOS.
+
+ BaseMergeCell takes 2 inputs. After applying convolution
+ on them, they are resized to the target size. Then,
+ they go through binary_op, which depends on the type of cell.
+    If with_out_conv is True, the result will go through
+ another convolution layer.
+
+ Args:
+        fused_channels (int): number of input channels of the out_conv layer.
+ out_channels (int): number of output channels in out_conv layer.
+ with_out_conv (bool): Whether to use out_conv layer
+ out_conv_cfg (dict): Config dict for convolution layer, which should
+ contain "groups", "kernel_size", "padding", "bias" to build
+ out_conv layer.
+ out_norm_cfg (dict): Config dict for normalization layer in out_conv.
+ out_conv_order (tuple): The order of conv/norm/activation layers in
+ out_conv.
+ with_input1_conv (bool): Whether to use convolution on input1.
+ with_input2_conv (bool): Whether to use convolution on input2.
+ input_conv_cfg (dict): Config dict for building input1_conv layer and
+ input2_conv layer, which is expected to contain the type of
+ convolution.
+ Default: None, which means using conv2d.
+ input_norm_cfg (dict): Config dict for normalization layer in
+ input1_conv and input2_conv layer. Default: None.
+ upsample_mode (str): Interpolation method used to resize the output
+ of input1_conv and input2_conv to target size. Currently, we
+ support ['nearest', 'bilinear']. Default: 'nearest'.
+ """
+
+ def __init__(self,
+ fused_channels=256,
+ out_channels=256,
+ with_out_conv=True,
+ out_conv_cfg=dict(
+ groups=1, kernel_size=3, padding=1, bias=True),
+ out_norm_cfg=None,
+ out_conv_order=('act', 'conv', 'norm'),
+ with_input1_conv=False,
+ with_input2_conv=False,
+ input_conv_cfg=None,
+ input_norm_cfg=None,
+ upsample_mode='nearest'):
+ super(BaseMergeCell, self).__init__()
+ assert upsample_mode in ['nearest', 'bilinear']
+ self.with_out_conv = with_out_conv
+ self.with_input1_conv = with_input1_conv
+ self.with_input2_conv = with_input2_conv
+ self.upsample_mode = upsample_mode
+
+ if self.with_out_conv:
+ self.out_conv = ConvModule(
+ fused_channels,
+ out_channels,
+ **out_conv_cfg,
+ norm_cfg=out_norm_cfg,
+ order=out_conv_order)
+
+ self.input1_conv = self._build_input_conv(
+ out_channels, input_conv_cfg,
+ input_norm_cfg) if with_input1_conv else nn.Sequential()
+ self.input2_conv = self._build_input_conv(
+ out_channels, input_conv_cfg,
+ input_norm_cfg) if with_input2_conv else nn.Sequential()
+
+ def _build_input_conv(self, channel, conv_cfg, norm_cfg):
+ return ConvModule(
+ channel,
+ channel,
+ 3,
+ padding=1,
+ conv_cfg=conv_cfg,
+ norm_cfg=norm_cfg,
+ bias=True)
+
+ @abstractmethod
+ def _binary_op(self, x1, x2):
+ pass
+
+ def _resize(self, x, size):
+ if x.shape[-2:] == size:
+ return x
+ elif x.shape[-2:] < size:
+ return F.interpolate(x, size=size, mode=self.upsample_mode)
+ else:
+ assert x.shape[-2] % size[-2] == 0 and x.shape[-1] % size[-1] == 0
+ kernel_size = x.shape[-1] // size[-1]
+ x = F.max_pool2d(x, kernel_size=kernel_size, stride=kernel_size)
+ return x
+
+ def forward(self, x1, x2, out_size=None):
+ assert x1.shape[:2] == x2.shape[:2]
+ assert out_size is None or len(out_size) == 2
+ if out_size is None: # resize to larger one
+ out_size = max(x1.size()[2:], x2.size()[2:])
+
+ x1 = self.input1_conv(x1)
+ x2 = self.input2_conv(x2)
+
+ x1 = self._resize(x1, out_size)
+ x2 = self._resize(x2, out_size)
+
+ x = self._binary_op(x1, x2)
+ if self.with_out_conv:
+ x = self.out_conv(x)
+ return x
+
+
+class SumCell(BaseMergeCell):
+
+ def __init__(self, in_channels, out_channels, **kwargs):
+ super(SumCell, self).__init__(in_channels, out_channels, **kwargs)
+
+ def _binary_op(self, x1, x2):
+ return x1 + x2
+
+
+class ConcatCell(BaseMergeCell):
+
+ def __init__(self, in_channels, out_channels, **kwargs):
+ super(ConcatCell, self).__init__(in_channels * 2, out_channels,
+ **kwargs)
+
+ def _binary_op(self, x1, x2):
+ ret = torch.cat([x1, x2], dim=1)
+ return ret
+
+
+class GlobalPoolingCell(BaseMergeCell):
+
+ def __init__(self, in_channels=None, out_channels=None, **kwargs):
+ super().__init__(in_channels, out_channels, **kwargs)
+ self.global_pool = nn.AdaptiveAvgPool2d((1, 1))
+
+ def _binary_op(self, x1, x2):
+ x2_att = self.global_pool(x2).sigmoid()
+ return x2 + x2_att * x1
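+
+
+# Illustrative usage sketch (a minimal example): SumCell resizes both inputs
+# to a common size before adding them and applying the output convolution.
+# >>> cell = SumCell(in_channels=256, out_channels=256)
+# >>> x1 = torch.randn(1, 256, 16, 16)
+# >>> x2 = torch.randn(1, 256, 32, 32)
+# >>> out = cell(x1, x2)                       # (1, 256, 32, 32)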
diff --git a/annotator/uniformer/mmcv/ops/modulated_deform_conv.py b/annotator/uniformer/mmcv/ops/modulated_deform_conv.py
new file mode 100644
index 0000000000000000000000000000000000000000..75559579cf053abcc99538606cbb88c723faf783
--- /dev/null
+++ b/annotator/uniformer/mmcv/ops/modulated_deform_conv.py
@@ -0,0 +1,282 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import math
+
+import torch
+import torch.nn as nn
+from torch.autograd import Function
+from torch.autograd.function import once_differentiable
+from torch.nn.modules.utils import _pair, _single
+
+from annotator.uniformer.mmcv.utils import deprecated_api_warning
+from ..cnn import CONV_LAYERS
+from ..utils import ext_loader, print_log
+
+ext_module = ext_loader.load_ext(
+ '_ext',
+ ['modulated_deform_conv_forward', 'modulated_deform_conv_backward'])
+
+
+class ModulatedDeformConv2dFunction(Function):
+
+ @staticmethod
+ def symbolic(g, input, offset, mask, weight, bias, stride, padding,
+ dilation, groups, deform_groups):
+ input_tensors = [input, offset, mask, weight]
+ if bias is not None:
+ input_tensors.append(bias)
+ return g.op(
+ 'mmcv::MMCVModulatedDeformConv2d',
+ *input_tensors,
+ stride_i=stride,
+ padding_i=padding,
+ dilation_i=dilation,
+ groups_i=groups,
+ deform_groups_i=deform_groups)
+
+ @staticmethod
+ def forward(ctx,
+ input,
+ offset,
+ mask,
+ weight,
+ bias=None,
+ stride=1,
+ padding=0,
+ dilation=1,
+ groups=1,
+ deform_groups=1):
+ if input is not None and input.dim() != 4:
+ raise ValueError(
+                f'Expected 4D tensor as input, got {input.dim()}D tensor '
+                'instead.')
+ ctx.stride = _pair(stride)
+ ctx.padding = _pair(padding)
+ ctx.dilation = _pair(dilation)
+ ctx.groups = groups
+ ctx.deform_groups = deform_groups
+ ctx.with_bias = bias is not None
+ if not ctx.with_bias:
+ bias = input.new_empty(0) # fake tensor
+ # When pytorch version >= 1.6.0, amp is adopted for fp16 mode;
+ # amp won't cast the type of model (float32), but "offset" is cast
+ # to float16 by nn.Conv2d automatically, leading to the type
+ # mismatch with input (when it is float32) or weight.
+ # The flag for whether to use fp16 or amp is the type of "offset",
+ # we cast weight and input to temporarily support fp16 and amp
+ # whatever the pytorch version is.
+ input = input.type_as(offset)
+ weight = weight.type_as(input)
+ ctx.save_for_backward(input, offset, mask, weight, bias)
+ output = input.new_empty(
+ ModulatedDeformConv2dFunction._output_size(ctx, input, weight))
+ ctx._bufs = [input.new_empty(0), input.new_empty(0)]
+ ext_module.modulated_deform_conv_forward(
+ input,
+ weight,
+ bias,
+ ctx._bufs[0],
+ offset,
+ mask,
+ output,
+ ctx._bufs[1],
+ kernel_h=weight.size(2),
+ kernel_w=weight.size(3),
+ stride_h=ctx.stride[0],
+ stride_w=ctx.stride[1],
+ pad_h=ctx.padding[0],
+ pad_w=ctx.padding[1],
+ dilation_h=ctx.dilation[0],
+ dilation_w=ctx.dilation[1],
+ group=ctx.groups,
+ deformable_group=ctx.deform_groups,
+ with_bias=ctx.with_bias)
+ return output
+
+ @staticmethod
+ @once_differentiable
+ def backward(ctx, grad_output):
+ input, offset, mask, weight, bias = ctx.saved_tensors
+ grad_input = torch.zeros_like(input)
+ grad_offset = torch.zeros_like(offset)
+ grad_mask = torch.zeros_like(mask)
+ grad_weight = torch.zeros_like(weight)
+ grad_bias = torch.zeros_like(bias)
+ grad_output = grad_output.contiguous()
+ ext_module.modulated_deform_conv_backward(
+ input,
+ weight,
+ bias,
+ ctx._bufs[0],
+ offset,
+ mask,
+ ctx._bufs[1],
+ grad_input,
+ grad_weight,
+ grad_bias,
+ grad_offset,
+ grad_mask,
+ grad_output,
+ kernel_h=weight.size(2),
+ kernel_w=weight.size(3),
+ stride_h=ctx.stride[0],
+ stride_w=ctx.stride[1],
+ pad_h=ctx.padding[0],
+ pad_w=ctx.padding[1],
+ dilation_h=ctx.dilation[0],
+ dilation_w=ctx.dilation[1],
+ group=ctx.groups,
+ deformable_group=ctx.deform_groups,
+ with_bias=ctx.with_bias)
+ if not ctx.with_bias:
+ grad_bias = None
+
+ return (grad_input, grad_offset, grad_mask, grad_weight, grad_bias,
+ None, None, None, None, None)
+
+ @staticmethod
+ def _output_size(ctx, input, weight):
+ channels = weight.size(0)
+ output_size = (input.size(0), channels)
+ for d in range(input.dim() - 2):
+ in_size = input.size(d + 2)
+ pad = ctx.padding[d]
+ kernel = ctx.dilation[d] * (weight.size(d + 2) - 1) + 1
+ stride_ = ctx.stride[d]
+ output_size += ((in_size + (2 * pad) - kernel) // stride_ + 1, )
+ if not all(map(lambda s: s > 0, output_size)):
+ raise ValueError(
+ 'convolution input is too small (output would be ' +
+ 'x'.join(map(str, output_size)) + ')')
+ return output_size
+
+
+modulated_deform_conv2d = ModulatedDeformConv2dFunction.apply
+
+
+class ModulatedDeformConv2d(nn.Module):
+
+ @deprecated_api_warning({'deformable_groups': 'deform_groups'},
+ cls_name='ModulatedDeformConv2d')
+ def __init__(self,
+ in_channels,
+ out_channels,
+ kernel_size,
+ stride=1,
+ padding=0,
+ dilation=1,
+ groups=1,
+ deform_groups=1,
+ bias=True):
+ super(ModulatedDeformConv2d, self).__init__()
+ self.in_channels = in_channels
+ self.out_channels = out_channels
+ self.kernel_size = _pair(kernel_size)
+ self.stride = _pair(stride)
+ self.padding = _pair(padding)
+ self.dilation = _pair(dilation)
+ self.groups = groups
+ self.deform_groups = deform_groups
+ # enable compatibility with nn.Conv2d
+ self.transposed = False
+ self.output_padding = _single(0)
+
+ self.weight = nn.Parameter(
+ torch.Tensor(out_channels, in_channels // groups,
+ *self.kernel_size))
+ if bias:
+ self.bias = nn.Parameter(torch.Tensor(out_channels))
+ else:
+ self.register_parameter('bias', None)
+ self.init_weights()
+
+ def init_weights(self):
+ n = self.in_channels
+ for k in self.kernel_size:
+ n *= k
+ stdv = 1. / math.sqrt(n)
+ self.weight.data.uniform_(-stdv, stdv)
+ if self.bias is not None:
+ self.bias.data.zero_()
+
+ def forward(self, x, offset, mask):
+ return modulated_deform_conv2d(x, offset, mask, self.weight, self.bias,
+ self.stride, self.padding,
+ self.dilation, self.groups,
+ self.deform_groups)
+
+
+@CONV_LAYERS.register_module('DCNv2')
+class ModulatedDeformConv2dPack(ModulatedDeformConv2d):
+ """A ModulatedDeformable Conv Encapsulation that acts as normal Conv
+ layers.
+
+ Args:
+ in_channels (int): Same as nn.Conv2d.
+ out_channels (int): Same as nn.Conv2d.
+ kernel_size (int or tuple[int]): Same as nn.Conv2d.
+        stride (int): Same as nn.Conv2d; tuple is not supported.
+        padding (int): Same as nn.Conv2d; tuple is not supported.
+        dilation (int): Same as nn.Conv2d; tuple is not supported.
+ groups (int): Same as nn.Conv2d.
+ bias (bool or str): If specified as `auto`, it will be decided by the
+ norm_cfg. Bias will be set as True if norm_cfg is None, otherwise
+ False.
+ """
+
+ _version = 2
+
+ def __init__(self, *args, **kwargs):
+ super(ModulatedDeformConv2dPack, self).__init__(*args, **kwargs)
+ self.conv_offset = nn.Conv2d(
+ self.in_channels,
+ self.deform_groups * 3 * self.kernel_size[0] * self.kernel_size[1],
+ kernel_size=self.kernel_size,
+ stride=self.stride,
+ padding=self.padding,
+ dilation=self.dilation,
+ bias=True)
+ self.init_weights()
+
+ def init_weights(self):
+ super(ModulatedDeformConv2dPack, self).init_weights()
+ if hasattr(self, 'conv_offset'):
+ self.conv_offset.weight.data.zero_()
+ self.conv_offset.bias.data.zero_()
+
+ def forward(self, x):
+ out = self.conv_offset(x)
+ o1, o2, mask = torch.chunk(out, 3, dim=1)
+ offset = torch.cat((o1, o2), dim=1)
+ mask = torch.sigmoid(mask)
+ return modulated_deform_conv2d(x, offset, mask, self.weight, self.bias,
+ self.stride, self.padding,
+ self.dilation, self.groups,
+ self.deform_groups)
+
+ def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
+ missing_keys, unexpected_keys, error_msgs):
+ version = local_metadata.get('version', None)
+
+ if version is None or version < 2:
+ # the key is different in early versions
+ # In version < 2, ModulatedDeformConvPack
+ # loads previous benchmark models.
+ if (prefix + 'conv_offset.weight' not in state_dict
+ and prefix[:-1] + '_offset.weight' in state_dict):
+ state_dict[prefix + 'conv_offset.weight'] = state_dict.pop(
+ prefix[:-1] + '_offset.weight')
+ if (prefix + 'conv_offset.bias' not in state_dict
+ and prefix[:-1] + '_offset.bias' in state_dict):
+ state_dict[prefix +
+ 'conv_offset.bias'] = state_dict.pop(prefix[:-1] +
+ '_offset.bias')
+
+ if version is not None and version > 1:
+ print_log(
+ f'ModulatedDeformConvPack {prefix.rstrip(".")} is upgraded to '
+ 'version 2.',
+ logger='root')
+
+ super()._load_from_state_dict(state_dict, prefix, local_metadata,
+ strict, missing_keys, unexpected_keys,
+ error_msgs)
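+
+
+# Illustrative usage sketch of the packed DCNv2 layer, which predicts offsets
+# and modulation masks from its own input (assumes the compiled `_ext`
+# extension and a GPU):
+# >>> dcn = ModulatedDeformConv2dPack(16, 32, kernel_size=3, padding=1).cuda()
+# >>> x = torch.randn(2, 16, 64, 64).cuda()
+# >>> out = dcn(x)                             # (2, 32, 64, 64)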
diff --git a/annotator/uniformer/mmcv/ops/multi_scale_deform_attn.py b/annotator/uniformer/mmcv/ops/multi_scale_deform_attn.py
new file mode 100644
index 0000000000000000000000000000000000000000..c52dda18b41705705b47dd0e995b124048c16fba
--- /dev/null
+++ b/annotator/uniformer/mmcv/ops/multi_scale_deform_attn.py
@@ -0,0 +1,358 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import math
+import warnings
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from torch.autograd.function import Function, once_differentiable
+
+from annotator.uniformer.mmcv import deprecated_api_warning
+from annotator.uniformer.mmcv.cnn import constant_init, xavier_init
+from annotator.uniformer.mmcv.cnn.bricks.registry import ATTENTION
+from annotator.uniformer.mmcv.runner import BaseModule
+from ..utils import ext_loader
+
+ext_module = ext_loader.load_ext(
+ '_ext', ['ms_deform_attn_backward', 'ms_deform_attn_forward'])
+
+
+class MultiScaleDeformableAttnFunction(Function):
+
+ @staticmethod
+ def forward(ctx, value, value_spatial_shapes, value_level_start_index,
+ sampling_locations, attention_weights, im2col_step):
+ """GPU version of multi-scale deformable attention.
+
+ Args:
+            value (Tensor): The value has shape
+                (bs, num_keys, num_heads, embed_dims//num_heads).
+            value_spatial_shapes (Tensor): Spatial shape of
+                each feature map, has shape (num_levels, 2),
+                the last dimension 2 represents (h, w).
+            value_level_start_index (Tensor): The start index of each level
+                in the flattened value, has shape (num_levels, ).
+            sampling_locations (Tensor): The location of sampling points,
+                has shape
+                (bs, num_queries, num_heads, num_levels, num_points, 2),
+                the last dimension 2 represents (x, y).
+            attention_weights (Tensor): The weight of sampling points used
+                when calculating the attention, has shape
+                (bs, num_queries, num_heads, num_levels, num_points).
+            im2col_step (int): The step used in image to column.
+
+ Returns:
+ Tensor: has shape (bs, num_queries, embed_dims)
+ """
+
+ ctx.im2col_step = im2col_step
+ output = ext_module.ms_deform_attn_forward(
+ value,
+ value_spatial_shapes,
+ value_level_start_index,
+ sampling_locations,
+ attention_weights,
+ im2col_step=ctx.im2col_step)
+ ctx.save_for_backward(value, value_spatial_shapes,
+ value_level_start_index, sampling_locations,
+ attention_weights)
+ return output
+
+ @staticmethod
+ @once_differentiable
+ def backward(ctx, grad_output):
+ """GPU version of backward function.
+
+ Args:
+ grad_output (Tensor): Gradient
+ of output tensor of forward.
+
+ Returns:
+ Tuple[Tensor]: Gradient
+ of input tensors in forward.
+ """
+ value, value_spatial_shapes, value_level_start_index,\
+ sampling_locations, attention_weights = ctx.saved_tensors
+ grad_value = torch.zeros_like(value)
+ grad_sampling_loc = torch.zeros_like(sampling_locations)
+ grad_attn_weight = torch.zeros_like(attention_weights)
+
+ ext_module.ms_deform_attn_backward(
+ value,
+ value_spatial_shapes,
+ value_level_start_index,
+ sampling_locations,
+ attention_weights,
+ grad_output.contiguous(),
+ grad_value,
+ grad_sampling_loc,
+ grad_attn_weight,
+ im2col_step=ctx.im2col_step)
+
+ return grad_value, None, None, \
+ grad_sampling_loc, grad_attn_weight, None
+
+
+def multi_scale_deformable_attn_pytorch(value, value_spatial_shapes,
+ sampling_locations, attention_weights):
+ """CPU version of multi-scale deformable attention.
+
+ Args:
+        value (Tensor): The value has shape
+            (bs, num_keys, num_heads, embed_dims//num_heads).
+        value_spatial_shapes (Tensor): Spatial shape of
+            each feature map, has shape (num_levels, 2),
+            the last dimension 2 represents (h, w).
+        sampling_locations (Tensor): The location of sampling points,
+            has shape
+            (bs, num_queries, num_heads, num_levels, num_points, 2),
+            the last dimension 2 represents (x, y).
+        attention_weights (Tensor): The weight of sampling points used
+            when calculating the attention, has shape
+            (bs, num_queries, num_heads, num_levels, num_points).
+
+ Returns:
+ Tensor: has shape (bs, num_queries, embed_dims)
+ """
+
+ bs, _, num_heads, embed_dims = value.shape
+ _, num_queries, num_heads, num_levels, num_points, _ =\
+ sampling_locations.shape
+ value_list = value.split([H_ * W_ for H_, W_ in value_spatial_shapes],
+ dim=1)
+ sampling_grids = 2 * sampling_locations - 1
+ sampling_value_list = []
+ for level, (H_, W_) in enumerate(value_spatial_shapes):
+ # bs, H_*W_, num_heads, embed_dims ->
+ # bs, H_*W_, num_heads*embed_dims ->
+ # bs, num_heads*embed_dims, H_*W_ ->
+ # bs*num_heads, embed_dims, H_, W_
+ value_l_ = value_list[level].flatten(2).transpose(1, 2).reshape(
+ bs * num_heads, embed_dims, H_, W_)
+ # bs, num_queries, num_heads, num_points, 2 ->
+ # bs, num_heads, num_queries, num_points, 2 ->
+ # bs*num_heads, num_queries, num_points, 2
+ sampling_grid_l_ = sampling_grids[:, :, :,
+ level].transpose(1, 2).flatten(0, 1)
+ # bs*num_heads, embed_dims, num_queries, num_points
+ sampling_value_l_ = F.grid_sample(
+ value_l_,
+ sampling_grid_l_,
+ mode='bilinear',
+ padding_mode='zeros',
+ align_corners=False)
+ sampling_value_list.append(sampling_value_l_)
+ # (bs, num_queries, num_heads, num_levels, num_points) ->
+ # (bs, num_heads, num_queries, num_levels, num_points) ->
+ # (bs, num_heads, 1, num_queries, num_levels*num_points)
+ attention_weights = attention_weights.transpose(1, 2).reshape(
+ bs * num_heads, 1, num_queries, num_levels * num_points)
+ output = (torch.stack(sampling_value_list, dim=-2).flatten(-2) *
+ attention_weights).sum(-1).view(bs, num_heads * embed_dims,
+ num_queries)
+ return output.transpose(1, 2).contiguous()
+
+
+@ATTENTION.register_module()
+class MultiScaleDeformableAttention(BaseModule):
+ """An attention module used in Deformable-Detr.
+
+    Deformable DETR: Deformable Transformers for End-to-End Object Detection.
+
+ Args:
+ embed_dims (int): The embedding dimension of Attention.
+ Default: 256.
+        num_heads (int): Parallel attention heads. Default: 8.
+ num_levels (int): The number of feature map used in
+ Attention. Default: 4.
+ num_points (int): The number of sampling points for
+ each query in each head. Default: 4.
+ im2col_step (int): The step used in image_to_column.
+ Default: 64.
+ dropout (float): A Dropout layer on `inp_identity`.
+ Default: 0.1.
+        batch_first (bool): Whether Key, Query and Value are of shape
+            (batch, n, embed_dim)
+            or (n, batch, embed_dim). Default: False.
+ norm_cfg (dict): Config dict for normalization layer.
+ Default: None.
+ init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization.
+ Default: None.
+ """
+
+ def __init__(self,
+ embed_dims=256,
+ num_heads=8,
+ num_levels=4,
+ num_points=4,
+ im2col_step=64,
+ dropout=0.1,
+ batch_first=False,
+ norm_cfg=None,
+ init_cfg=None):
+ super().__init__(init_cfg)
+ if embed_dims % num_heads != 0:
+ raise ValueError(f'embed_dims must be divisible by num_heads, '
+ f'but got {embed_dims} and {num_heads}')
+ dim_per_head = embed_dims // num_heads
+ self.norm_cfg = norm_cfg
+ self.dropout = nn.Dropout(dropout)
+ self.batch_first = batch_first
+
+ # you'd better set dim_per_head to a power of 2
+ # which is more efficient in the CUDA implementation
+ def _is_power_of_2(n):
+ if (not isinstance(n, int)) or (n < 0):
+ raise ValueError(
+ 'invalid input for _is_power_of_2: {} (type: {})'.format(
+ n, type(n)))
+ return (n & (n - 1) == 0) and n != 0
+
+ if not _is_power_of_2(dim_per_head):
+ warnings.warn(
+ "You'd better set embed_dims in "
+ 'MultiScaleDeformAttention to make '
+ 'the dimension of each attention head a power of 2 '
+ 'which is more efficient in our CUDA implementation.')
+
+ self.im2col_step = im2col_step
+ self.embed_dims = embed_dims
+ self.num_levels = num_levels
+ self.num_heads = num_heads
+ self.num_points = num_points
+ self.sampling_offsets = nn.Linear(
+ embed_dims, num_heads * num_levels * num_points * 2)
+ self.attention_weights = nn.Linear(embed_dims,
+ num_heads * num_levels * num_points)
+ self.value_proj = nn.Linear(embed_dims, embed_dims)
+ self.output_proj = nn.Linear(embed_dims, embed_dims)
+ self.init_weights()
+
+ def init_weights(self):
+ """Default initialization for Parameters of Module."""
+ constant_init(self.sampling_offsets, 0.)
+ thetas = torch.arange(
+ self.num_heads,
+ dtype=torch.float32) * (2.0 * math.pi / self.num_heads)
+ grid_init = torch.stack([thetas.cos(), thetas.sin()], -1)
+ grid_init = (grid_init /
+ grid_init.abs().max(-1, keepdim=True)[0]).view(
+ self.num_heads, 1, 1,
+ 2).repeat(1, self.num_levels, self.num_points, 1)
+ for i in range(self.num_points):
+ grid_init[:, :, i, :] *= i + 1
+
+ self.sampling_offsets.bias.data = grid_init.view(-1)
+ constant_init(self.attention_weights, val=0., bias=0.)
+ xavier_init(self.value_proj, distribution='uniform', bias=0.)
+ xavier_init(self.output_proj, distribution='uniform', bias=0.)
+ self._is_init = True
+
+ @deprecated_api_warning({'residual': 'identity'},
+ cls_name='MultiScaleDeformableAttention')
+ def forward(self,
+ query,
+ key=None,
+ value=None,
+ identity=None,
+ query_pos=None,
+ key_padding_mask=None,
+ reference_points=None,
+ spatial_shapes=None,
+ level_start_index=None,
+ **kwargs):
+ """Forward Function of MultiScaleDeformAttention.
+
+ Args:
+ query (Tensor): Query of Transformer with shape
+ (num_query, bs, embed_dims).
+ key (Tensor): The key tensor with shape
+ `(num_key, bs, embed_dims)`.
+ value (Tensor): The value tensor with shape
+ `(num_key, bs, embed_dims)`.
+ identity (Tensor): The tensor used for addition, with the
+ same shape as `query`. Default None. If None,
+ `query` will be used.
+ query_pos (Tensor): The positional encoding for `query`.
+ Default: None.
+ key_pos (Tensor): The positional encoding for `key`. Default
+ None.
+            reference_points (Tensor): The normalized reference
+                points with shape (bs, num_query, num_levels, 2),
+                all elements in the range [0, 1], top-left (0, 0),
+                bottom-right (1, 1), including the padding area;
+                or (N, Length_{query}, num_levels, 4), where the
+                additional two dimensions (w, h) form reference boxes.
+ key_padding_mask (Tensor): ByteTensor for `query`, with
+ shape [bs, num_key].
+ spatial_shapes (Tensor): Spatial shape of features in
+ different levels. With shape (num_levels, 2),
+ last dimension represents (h, w).
+ level_start_index (Tensor): The start index of each level.
+ A tensor has shape ``(num_levels, )`` and can be represented
+ as [0, h_0*w_0, h_0*w_0+h_1*w_1, ...].
+
+ Returns:
+ Tensor: forwarded results with shape [num_query, bs, embed_dims].
+ """
+
+ if value is None:
+ value = query
+
+ if identity is None:
+ identity = query
+ if query_pos is not None:
+ query = query + query_pos
+ if not self.batch_first:
+ # change to (bs, num_query ,embed_dims)
+ query = query.permute(1, 0, 2)
+ value = value.permute(1, 0, 2)
+
+ bs, num_query, _ = query.shape
+ bs, num_value, _ = value.shape
+ assert (spatial_shapes[:, 0] * spatial_shapes[:, 1]).sum() == num_value
+
+ value = self.value_proj(value)
+ if key_padding_mask is not None:
+ value = value.masked_fill(key_padding_mask[..., None], 0.0)
+ value = value.view(bs, num_value, self.num_heads, -1)
+ sampling_offsets = self.sampling_offsets(query).view(
+ bs, num_query, self.num_heads, self.num_levels, self.num_points, 2)
+ attention_weights = self.attention_weights(query).view(
+ bs, num_query, self.num_heads, self.num_levels * self.num_points)
+ attention_weights = attention_weights.softmax(-1)
+
+ attention_weights = attention_weights.view(bs, num_query,
+ self.num_heads,
+ self.num_levels,
+ self.num_points)
+ if reference_points.shape[-1] == 2:
+ offset_normalizer = torch.stack(
+ [spatial_shapes[..., 1], spatial_shapes[..., 0]], -1)
+ sampling_locations = reference_points[:, :, None, :, None, :] \
+ + sampling_offsets \
+ / offset_normalizer[None, None, None, :, None, :]
+ elif reference_points.shape[-1] == 4:
+ sampling_locations = reference_points[:, :, None, :, None, :2] \
+ + sampling_offsets / self.num_points \
+ * reference_points[:, :, None, :, None, 2:] \
+ * 0.5
+ else:
+ raise ValueError(
+ f'Last dim of reference_points must be'
+ f' 2 or 4, but get {reference_points.shape[-1]} instead.')
+ if torch.cuda.is_available() and value.is_cuda:
+ output = MultiScaleDeformableAttnFunction.apply(
+ value, spatial_shapes, level_start_index, sampling_locations,
+ attention_weights, self.im2col_step)
+ else:
+ output = multi_scale_deformable_attn_pytorch(
+ value, spatial_shapes, sampling_locations, attention_weights)
+
+ output = self.output_proj(output)
+
+ if not self.batch_first:
+ # (num_query, bs ,embed_dims)
+ output = output.permute(1, 0, 2)
+
+ return self.dropout(output) + identity
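+
+
+# Illustrative usage sketch of the pure-PyTorch fallback (no CUDA extension
+# needed); shapes follow the docstrings above, with made-up sizes:
+# >>> bs, heads, head_dim, nq = 2, 8, 32, 100
+# >>> shapes = torch.tensor([[32, 32], [16, 16]])        # (num_levels, 2)
+# >>> num_keys = int((shapes[:, 0] * shapes[:, 1]).sum())
+# >>> value = torch.randn(bs, num_keys, heads, head_dim)
+# >>> loc = torch.rand(bs, nq, heads, 2, 4, 2)           # sampling locations
+# >>> attn = torch.rand(bs, nq, heads, 2, 4)             # unnormalized weights
+# >>> out = multi_scale_deformable_attn_pytorch(value, shapes, loc, attn)
+# >>> out.shape                                          # (2, 100, 256)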
diff --git a/annotator/uniformer/mmcv/ops/nms.py b/annotator/uniformer/mmcv/ops/nms.py
new file mode 100644
index 0000000000000000000000000000000000000000..6d9634281f486ab284091786886854c451368052
--- /dev/null
+++ b/annotator/uniformer/mmcv/ops/nms.py
@@ -0,0 +1,417 @@
+import os
+
+import numpy as np
+import torch
+
+from annotator.uniformer.mmcv.utils import deprecated_api_warning
+from ..utils import ext_loader
+
+ext_module = ext_loader.load_ext(
+ '_ext', ['nms', 'softnms', 'nms_match', 'nms_rotated'])
+
+
+# This function is modified from: https://github.com/pytorch/vision/
+class NMSop(torch.autograd.Function):
+
+ @staticmethod
+ def forward(ctx, bboxes, scores, iou_threshold, offset, score_threshold,
+ max_num):
+ is_filtering_by_score = score_threshold > 0
+ if is_filtering_by_score:
+ valid_mask = scores > score_threshold
+ bboxes, scores = bboxes[valid_mask], scores[valid_mask]
+ valid_inds = torch.nonzero(
+ valid_mask, as_tuple=False).squeeze(dim=1)
+
+ inds = ext_module.nms(
+ bboxes, scores, iou_threshold=float(iou_threshold), offset=offset)
+
+ if max_num > 0:
+ inds = inds[:max_num]
+ if is_filtering_by_score:
+ inds = valid_inds[inds]
+ return inds
+
+ @staticmethod
+ def symbolic(g, bboxes, scores, iou_threshold, offset, score_threshold,
+ max_num):
+ from ..onnx import is_custom_op_loaded
+ has_custom_op = is_custom_op_loaded()
+ # TensorRT nms plugin is aligned with original nms in ONNXRuntime
+ is_trt_backend = os.environ.get('ONNX_BACKEND') == 'MMCVTensorRT'
+ if has_custom_op and (not is_trt_backend):
+ return g.op(
+ 'mmcv::NonMaxSuppression',
+ bboxes,
+ scores,
+ iou_threshold_f=float(iou_threshold),
+ offset_i=int(offset))
+ else:
+ from torch.onnx.symbolic_opset9 import select, squeeze, unsqueeze
+ from ..onnx.onnx_utils.symbolic_helper import _size_helper
+
+ boxes = unsqueeze(g, bboxes, 0)
+ scores = unsqueeze(g, unsqueeze(g, scores, 0), 0)
+
+ if max_num > 0:
+ max_num = g.op(
+ 'Constant',
+ value_t=torch.tensor(max_num, dtype=torch.long))
+ else:
+ dim = g.op('Constant', value_t=torch.tensor(0))
+ max_num = _size_helper(g, bboxes, dim)
+ max_output_per_class = max_num
+ iou_threshold = g.op(
+ 'Constant',
+ value_t=torch.tensor([iou_threshold], dtype=torch.float))
+ score_threshold = g.op(
+ 'Constant',
+ value_t=torch.tensor([score_threshold], dtype=torch.float))
+ nms_out = g.op('NonMaxSuppression', boxes, scores,
+ max_output_per_class, iou_threshold,
+ score_threshold)
+ return squeeze(
+ g,
+ select(
+ g, nms_out, 1,
+ g.op(
+ 'Constant',
+ value_t=torch.tensor([2], dtype=torch.long))), 1)
+
+
+class SoftNMSop(torch.autograd.Function):
+
+ @staticmethod
+ def forward(ctx, boxes, scores, iou_threshold, sigma, min_score, method,
+ offset):
+ dets = boxes.new_empty((boxes.size(0), 5), device='cpu')
+ inds = ext_module.softnms(
+ boxes.cpu(),
+ scores.cpu(),
+ dets.cpu(),
+ iou_threshold=float(iou_threshold),
+ sigma=float(sigma),
+ min_score=float(min_score),
+ method=int(method),
+ offset=int(offset))
+ return dets, inds
+
+ @staticmethod
+ def symbolic(g, boxes, scores, iou_threshold, sigma, min_score, method,
+ offset):
+ from packaging import version
+ assert version.parse(torch.__version__) >= version.parse('1.7.0')
+ nms_out = g.op(
+ 'mmcv::SoftNonMaxSuppression',
+ boxes,
+ scores,
+ iou_threshold_f=float(iou_threshold),
+ sigma_f=float(sigma),
+ min_score_f=float(min_score),
+ method_i=int(method),
+ offset_i=int(offset),
+ outputs=2)
+ return nms_out
+
+
+@deprecated_api_warning({'iou_thr': 'iou_threshold'})
+def nms(boxes, scores, iou_threshold, offset=0, score_threshold=0, max_num=-1):
+ """Dispatch to either CPU or GPU NMS implementations.
+
+ The input can be either torch tensor or numpy array. GPU NMS will be used
+ if the input is gpu tensor, otherwise CPU NMS
+ will be used. The returned type will always be the same as inputs.
+
+ Arguments:
+ boxes (torch.Tensor or np.ndarray): boxes in shape (N, 4).
+ scores (torch.Tensor or np.ndarray): scores in shape (N, ).
+ iou_threshold (float): IoU threshold for NMS.
+ offset (int, 0 or 1): boxes' width or height is (x2 - x1 + offset).
+ score_threshold (float): score threshold for NMS.
+ max_num (int): maximum number of boxes after NMS.
+
+ Returns:
+        tuple: kept dets (boxes and scores) and indices, which are always \
+            the same data type as the input.
+
+ Example:
+ >>> boxes = np.array([[49.1, 32.4, 51.0, 35.9],
+ >>> [49.3, 32.9, 51.0, 35.3],
+ >>> [49.2, 31.8, 51.0, 35.4],
+ >>> [35.1, 11.5, 39.1, 15.7],
+ >>> [35.6, 11.8, 39.3, 14.2],
+ >>> [35.3, 11.5, 39.9, 14.5],
+ >>> [35.2, 11.7, 39.7, 15.7]], dtype=np.float32)
+ >>> scores = np.array([0.9, 0.9, 0.5, 0.5, 0.5, 0.4, 0.3],\
+ dtype=np.float32)
+ >>> iou_threshold = 0.6
+ >>> dets, inds = nms(boxes, scores, iou_threshold)
+ >>> assert len(inds) == len(dets) == 3
+ """
+ assert isinstance(boxes, (torch.Tensor, np.ndarray))
+ assert isinstance(scores, (torch.Tensor, np.ndarray))
+ is_numpy = False
+ if isinstance(boxes, np.ndarray):
+ is_numpy = True
+ boxes = torch.from_numpy(boxes)
+ if isinstance(scores, np.ndarray):
+ scores = torch.from_numpy(scores)
+ assert boxes.size(1) == 4
+ assert boxes.size(0) == scores.size(0)
+ assert offset in (0, 1)
+
+ if torch.__version__ == 'parrots':
+ indata_list = [boxes, scores]
+ indata_dict = {
+ 'iou_threshold': float(iou_threshold),
+ 'offset': int(offset)
+ }
+ inds = ext_module.nms(*indata_list, **indata_dict)
+ else:
+ inds = NMSop.apply(boxes, scores, iou_threshold, offset,
+ score_threshold, max_num)
+ dets = torch.cat((boxes[inds], scores[inds].reshape(-1, 1)), dim=1)
+ if is_numpy:
+ dets = dets.cpu().numpy()
+ inds = inds.cpu().numpy()
+ return dets, inds
+
+
+@deprecated_api_warning({'iou_thr': 'iou_threshold'})
+def soft_nms(boxes,
+ scores,
+ iou_threshold=0.3,
+ sigma=0.5,
+ min_score=1e-3,
+ method='linear',
+ offset=0):
+ """Dispatch to only CPU Soft NMS implementations.
+
+ The input can be either a torch tensor or numpy array.
+ The returned type will always be the same as inputs.
+
+ Arguments:
+ boxes (torch.Tensor or np.ndarray): boxes in shape (N, 4).
+ scores (torch.Tensor or np.ndarray): scores in shape (N, ).
+ iou_threshold (float): IoU threshold for NMS.
+ sigma (float): hyperparameter for gaussian method
+ min_score (float): score filter threshold
+        method (str): either 'naive', 'linear' or 'gaussian'
+ offset (int, 0 or 1): boxes' width or height is (x2 - x1 + offset).
+
+ Returns:
+        tuple: kept dets (boxes and scores) and indices, which are always \
+            the same data type as the input.
+
+ Example:
+ >>> boxes = np.array([[4., 3., 5., 3.],
+ >>> [4., 3., 5., 4.],
+ >>> [3., 1., 3., 1.],
+ >>> [3., 1., 3., 1.],
+ >>> [3., 1., 3., 1.],
+ >>> [3., 1., 3., 1.]], dtype=np.float32)
+ >>> scores = np.array([0.9, 0.9, 0.5, 0.5, 0.4, 0.0], dtype=np.float32)
+ >>> iou_threshold = 0.6
+ >>> dets, inds = soft_nms(boxes, scores, iou_threshold, sigma=0.5)
+ >>> assert len(inds) == len(dets) == 5
+ """
+
+ assert isinstance(boxes, (torch.Tensor, np.ndarray))
+ assert isinstance(scores, (torch.Tensor, np.ndarray))
+ is_numpy = False
+ if isinstance(boxes, np.ndarray):
+ is_numpy = True
+ boxes = torch.from_numpy(boxes)
+ if isinstance(scores, np.ndarray):
+ scores = torch.from_numpy(scores)
+ assert boxes.size(1) == 4
+ assert boxes.size(0) == scores.size(0)
+ assert offset in (0, 1)
+ method_dict = {'naive': 0, 'linear': 1, 'gaussian': 2}
+ assert method in method_dict.keys()
+
+ if torch.__version__ == 'parrots':
+ dets = boxes.new_empty((boxes.size(0), 5), device='cpu')
+ indata_list = [boxes.cpu(), scores.cpu(), dets.cpu()]
+ indata_dict = {
+ 'iou_threshold': float(iou_threshold),
+ 'sigma': float(sigma),
+ 'min_score': min_score,
+ 'method': method_dict[method],
+ 'offset': int(offset)
+ }
+ inds = ext_module.softnms(*indata_list, **indata_dict)
+ else:
+ dets, inds = SoftNMSop.apply(boxes.cpu(), scores.cpu(),
+ float(iou_threshold), float(sigma),
+ float(min_score), method_dict[method],
+ int(offset))
+
+ dets = dets[:inds.size(0)]
+
+ if is_numpy:
+ dets = dets.cpu().numpy()
+ inds = inds.cpu().numpy()
+ return dets, inds
+ else:
+ return dets.to(device=boxes.device), inds.to(device=boxes.device)
+
+
+def batched_nms(boxes, scores, idxs, nms_cfg, class_agnostic=False):
+ """Performs non-maximum suppression in a batched fashion.
+
+ Modified from https://github.com/pytorch/vision/blob
+ /505cd6957711af790211896d32b40291bea1bc21/torchvision/ops/boxes.py#L39.
+ In order to perform NMS independently per class, we add an offset to all
+ the boxes. The offset is dependent only on the class idx, and is large
+ enough so that boxes from different classes do not overlap.
+
+ Arguments:
+ boxes (torch.Tensor): boxes in shape (N, 4).
+ scores (torch.Tensor): scores in shape (N, ).
+        idxs (torch.Tensor): each index value corresponds to a bbox cluster,
+            and NMS will not be applied between elements of different idxs,
+            shape (N, ).
+ nms_cfg (dict): specify nms type and other parameters like iou_thr.
+ Possible keys includes the following.
+
+ - iou_thr (float): IoU threshold used for NMS.
+ - split_thr (float): threshold number of boxes. In some cases the
+ number of boxes is large (e.g., 200k). To avoid OOM during
+ training, the users could set `split_thr` to a small value.
+ If the number of boxes is greater than the threshold, it will
+ perform NMS on each group of boxes separately and sequentially.
+ Defaults to 10000.
+ class_agnostic (bool): if true, nms is class agnostic,
+ i.e. IoU thresholding happens over all boxes,
+ regardless of the predicted class.
+
+ Returns:
+        tuple: kept dets and indices.
+ """
+ nms_cfg_ = nms_cfg.copy()
+ class_agnostic = nms_cfg_.pop('class_agnostic', class_agnostic)
+ if class_agnostic:
+ boxes_for_nms = boxes
+ else:
+ max_coordinate = boxes.max()
+ offsets = idxs.to(boxes) * (max_coordinate + torch.tensor(1).to(boxes))
+ boxes_for_nms = boxes + offsets[:, None]
+
+ nms_type = nms_cfg_.pop('type', 'nms')
+ nms_op = eval(nms_type)
+
+ split_thr = nms_cfg_.pop('split_thr', 10000)
+ # Won't split to multiple nms nodes when exporting to onnx
+ if boxes_for_nms.shape[0] < split_thr or torch.onnx.is_in_onnx_export():
+ dets, keep = nms_op(boxes_for_nms, scores, **nms_cfg_)
+ boxes = boxes[keep]
+ # -1 indexing works abnormal in TensorRT
+ # This assumes `dets` has 5 dimensions where
+ # the last dimension is score.
+ # TODO: more elegant way to handle the dimension issue.
+ # Some type of nms would reweight the score, such as SoftNMS
+ scores = dets[:, 4]
+ else:
+ max_num = nms_cfg_.pop('max_num', -1)
+ total_mask = scores.new_zeros(scores.size(), dtype=torch.bool)
+ # Some type of nms would reweight the score, such as SoftNMS
+ scores_after_nms = scores.new_zeros(scores.size())
+ for id in torch.unique(idxs):
+ mask = (idxs == id).nonzero(as_tuple=False).view(-1)
+ dets, keep = nms_op(boxes_for_nms[mask], scores[mask], **nms_cfg_)
+ total_mask[mask[keep]] = True
+ scores_after_nms[mask[keep]] = dets[:, -1]
+ keep = total_mask.nonzero(as_tuple=False).view(-1)
+
+ scores, inds = scores_after_nms[keep].sort(descending=True)
+ keep = keep[inds]
+ boxes = boxes[keep]
+
+ if max_num > 0:
+ keep = keep[:max_num]
+ boxes = boxes[:max_num]
+ scores = scores[:max_num]
+
+ return torch.cat([boxes, scores[:, None]], -1), keep
+
+
+def nms_match(dets, iou_threshold):
+ """Matched dets into different groups by NMS.
+
+    NMS match is similar to NMS, but when a bbox is suppressed, nms_match
+    records the indices of the suppressed bboxes and forms a group with the
+    index of the kept bbox. Within each group, indices are sorted by score.
+
+    Arguments:
+        dets (torch.Tensor | np.ndarray): Det boxes with scores, shape (N, 5).
+        iou_threshold (float): IoU threshold for NMS.
+
+    Returns:
+        List[torch.Tensor | np.ndarray]: The outer list corresponds to
+        different matched groups; each inner Tensor contains the indices of
+        one group in score order.
+ """
+ if dets.shape[0] == 0:
+ matched = []
+ else:
+ assert dets.shape[-1] == 5, 'inputs dets.shape should be (N, 5), ' \
+ f'but get {dets.shape}'
+ if isinstance(dets, torch.Tensor):
+ dets_t = dets.detach().cpu()
+ else:
+ dets_t = torch.from_numpy(dets)
+ indata_list = [dets_t]
+ indata_dict = {'iou_threshold': float(iou_threshold)}
+ matched = ext_module.nms_match(*indata_list, **indata_dict)
+ if torch.__version__ == 'parrots':
+ matched = matched.tolist()
+
+ if isinstance(dets, torch.Tensor):
+ return [dets.new_tensor(m, dtype=torch.long) for m in matched]
+ else:
+        return [np.array(m, dtype=np.int64) for m in matched]
+
+
+def nms_rotated(dets, scores, iou_threshold, labels=None):
+ """Performs non-maximum suppression (NMS) on the rotated boxes according to
+ their intersection-over-union (IoU).
+
+ Rotated NMS iteratively removes lower scoring rotated boxes which have an
+ IoU greater than iou_threshold with another (higher scoring) rotated box.
+
+ Args:
+        dets (Tensor): Rotated boxes in shape (N, 5). They are expected to \
+            be in (x_ctr, y_ctr, width, height, angle_radian) format.
+ scores (Tensor): scores in shape (N, ).
+ iou_threshold (float): IoU thresh for NMS.
+ labels (Tensor): boxes' label in shape (N,).
+
+ Returns:
+        tuple: kept dets (boxes and scores) and indices, which are always \
+            the same data type as the input.
+ """
+ if dets.shape[0] == 0:
+ return dets, None
+ multi_label = labels is not None
+ if multi_label:
+ dets_wl = torch.cat((dets, labels.unsqueeze(1)), 1)
+ else:
+ dets_wl = dets
+ _, order = scores.sort(0, descending=True)
+ dets_sorted = dets_wl.index_select(0, order)
+
+ if torch.__version__ == 'parrots':
+ keep_inds = ext_module.nms_rotated(
+ dets_wl,
+ scores,
+ order,
+ dets_sorted,
+ iou_threshold=iou_threshold,
+ multi_label=multi_label)
+ else:
+ keep_inds = ext_module.nms_rotated(dets_wl, scores, order, dets_sorted,
+ iou_threshold, multi_label)
+ dets = torch.cat((dets[keep_inds], scores[keep_inds].reshape(-1, 1)),
+ dim=1)
+ return dets, keep_inds
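+
+
+# Illustrative usage sketch of class-aware batched NMS (the underlying `nms`
+# call assumes the compiled `_ext` extension):
+# >>> boxes = torch.tensor([[0., 0., 10., 10.],
+# >>>                       [1., 1., 11., 11.],
+# >>>                       [50., 50., 60., 60.]])
+# >>> scores = torch.tensor([0.9, 0.8, 0.7])
+# >>> idxs = torch.tensor([0, 0, 1])   # class id of each box
+# >>> dets, keep = batched_nms(boxes, scores, idxs,
+# >>>                          dict(type='nms', iou_threshold=0.5))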
diff --git a/annotator/uniformer/mmcv/ops/pixel_group.py b/annotator/uniformer/mmcv/ops/pixel_group.py
new file mode 100644
index 0000000000000000000000000000000000000000..2143c75f835a467c802fc3c37ecd3ac0f85bcda4
--- /dev/null
+++ b/annotator/uniformer/mmcv/ops/pixel_group.py
@@ -0,0 +1,75 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import numpy as np
+import torch
+
+from ..utils import ext_loader
+
+ext_module = ext_loader.load_ext('_ext', ['pixel_group'])
+
+
+def pixel_group(score, mask, embedding, kernel_label, kernel_contour,
+ kernel_region_num, distance_threshold):
+ """Group pixels into text instances, which is widely used text detection
+ methods.
+
+ Arguments:
+ score (np.array or Tensor): The foreground score with size hxw.
+ mask (np.array or Tensor): The foreground mask with size hxw.
+ embedding (np.array or Tensor): The embedding with size hxwxc to
+ distinguish instances.
+ kernel_label (np.array or Tensor): The instance kernel index with
+ size hxw.
+ kernel_contour (np.array or Tensor): The kernel contour with size hxw.
+ kernel_region_num (int): The instance kernel region number.
+ distance_threshold (float): The embedding distance threshold between
+ kernel and pixel in one instance.
+
+ Returns:
+ pixel_assignment (List[List[float]]): The instance coordinate list.
+ Each element consists of averaged confidence, pixel number, and
+ coordinates (x_i, y_i for all pixels) in order.
+ """
+ assert isinstance(score, (torch.Tensor, np.ndarray))
+ assert isinstance(mask, (torch.Tensor, np.ndarray))
+ assert isinstance(embedding, (torch.Tensor, np.ndarray))
+ assert isinstance(kernel_label, (torch.Tensor, np.ndarray))
+ assert isinstance(kernel_contour, (torch.Tensor, np.ndarray))
+ assert isinstance(kernel_region_num, int)
+ assert isinstance(distance_threshold, float)
+
+ if isinstance(score, np.ndarray):
+ score = torch.from_numpy(score)
+ if isinstance(mask, np.ndarray):
+ mask = torch.from_numpy(mask)
+ if isinstance(embedding, np.ndarray):
+ embedding = torch.from_numpy(embedding)
+ if isinstance(kernel_label, np.ndarray):
+ kernel_label = torch.from_numpy(kernel_label)
+ if isinstance(kernel_contour, np.ndarray):
+ kernel_contour = torch.from_numpy(kernel_contour)
+
+ if torch.__version__ == 'parrots':
+ label = ext_module.pixel_group(
+ score,
+ mask,
+ embedding,
+ kernel_label,
+ kernel_contour,
+ kernel_region_num=kernel_region_num,
+ distance_threshold=distance_threshold)
+ label = label.tolist()
+ label = label[0]
+ list_index = kernel_region_num
+ pixel_assignment = []
+ for x in range(kernel_region_num):
+ pixel_assignment.append(
+ np.array(
+ label[list_index:list_index + int(label[x])],
+                    dtype=np.float64))
+ list_index = list_index + int(label[x])
+ else:
+ pixel_assignment = ext_module.pixel_group(score, mask, embedding,
+ kernel_label, kernel_contour,
+ kernel_region_num,
+ distance_threshold)
+ return pixel_assignment
diff --git a/annotator/uniformer/mmcv/ops/point_sample.py b/annotator/uniformer/mmcv/ops/point_sample.py
new file mode 100644
index 0000000000000000000000000000000000000000..267f4b3c56630acd85f9bdc630b7be09abab0aba
--- /dev/null
+++ b/annotator/uniformer/mmcv/ops/point_sample.py
@@ -0,0 +1,336 @@
+# Modified from https://github.com/facebookresearch/detectron2/tree/master/projects/PointRend # noqa
+
+from os import path as osp
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from torch.nn.modules.utils import _pair
+from torch.onnx.operators import shape_as_tensor
+
+
+def bilinear_grid_sample(im, grid, align_corners=False):
+ """Given an input and a flow-field grid, computes the output using input
+    values and pixel locations from grid. Only the bilinear interpolation
+    method is supported for sampling the input pixels.
+
+ Args:
+ im (torch.Tensor): Input feature map, shape (N, C, H, W)
+ grid (torch.Tensor): Point coordinates, shape (N, Hg, Wg, 2)
+        align_corners (bool): If set to True, the extrema (-1 and 1) are
+ considered as referring to the center points of the input’s
+ corner pixels. If set to False, they are instead considered as
+ referring to the corner points of the input’s corner pixels,
+ making the sampling more resolution agnostic.
+ Returns:
+ torch.Tensor: A tensor with sampled points, shape (N, C, Hg, Wg)
+ """
+ n, c, h, w = im.shape
+ gn, gh, gw, _ = grid.shape
+ assert n == gn
+
+ x = grid[:, :, :, 0]
+ y = grid[:, :, :, 1]
+
+ if align_corners:
+ x = ((x + 1) / 2) * (w - 1)
+ y = ((y + 1) / 2) * (h - 1)
+ else:
+ x = ((x + 1) * w - 1) / 2
+ y = ((y + 1) * h - 1) / 2
+
+ x = x.view(n, -1)
+ y = y.view(n, -1)
+
+ x0 = torch.floor(x).long()
+ y0 = torch.floor(y).long()
+ x1 = x0 + 1
+ y1 = y0 + 1
+
+ wa = ((x1 - x) * (y1 - y)).unsqueeze(1)
+ wb = ((x1 - x) * (y - y0)).unsqueeze(1)
+ wc = ((x - x0) * (y1 - y)).unsqueeze(1)
+ wd = ((x - x0) * (y - y0)).unsqueeze(1)
+
+ # Apply default for grid_sample function zero padding
+ im_padded = F.pad(im, pad=[1, 1, 1, 1], mode='constant', value=0)
+ padded_h = h + 2
+ padded_w = w + 2
+ # save points positions after padding
+ x0, x1, y0, y1 = x0 + 1, x1 + 1, y0 + 1, y1 + 1
+
+ # Clip coordinates to padded image size
+ x0 = torch.where(x0 < 0, torch.tensor(0), x0)
+ x0 = torch.where(x0 > padded_w - 1, torch.tensor(padded_w - 1), x0)
+ x1 = torch.where(x1 < 0, torch.tensor(0), x1)
+ x1 = torch.where(x1 > padded_w - 1, torch.tensor(padded_w - 1), x1)
+ y0 = torch.where(y0 < 0, torch.tensor(0), y0)
+ y0 = torch.where(y0 > padded_h - 1, torch.tensor(padded_h - 1), y0)
+ y1 = torch.where(y1 < 0, torch.tensor(0), y1)
+ y1 = torch.where(y1 > padded_h - 1, torch.tensor(padded_h - 1), y1)
+
+ im_padded = im_padded.view(n, c, -1)
+
+ x0_y0 = (x0 + y0 * padded_w).unsqueeze(1).expand(-1, c, -1)
+ x0_y1 = (x0 + y1 * padded_w).unsqueeze(1).expand(-1, c, -1)
+ x1_y0 = (x1 + y0 * padded_w).unsqueeze(1).expand(-1, c, -1)
+ x1_y1 = (x1 + y1 * padded_w).unsqueeze(1).expand(-1, c, -1)
+
+ Ia = torch.gather(im_padded, 2, x0_y0)
+ Ib = torch.gather(im_padded, 2, x0_y1)
+ Ic = torch.gather(im_padded, 2, x1_y0)
+ Id = torch.gather(im_padded, 2, x1_y1)
+
+ return (Ia * wa + Ib * wb + Ic * wc + Id * wd).reshape(n, c, gh, gw)
+
+
+def is_in_onnx_export_without_custom_ops():
+ from annotator.uniformer.mmcv.ops import get_onnxruntime_op_path
+ ort_custom_op_path = get_onnxruntime_op_path()
+ return torch.onnx.is_in_onnx_export(
+ ) and not osp.exists(ort_custom_op_path)
+
+
+def normalize(grid):
+ """Normalize input grid from [-1, 1] to [0, 1]
+ Args:
+        grid (Tensor): The grid to be normalized, range [-1, 1].
+ Returns:
+ Tensor: Normalized grid, range [0, 1].
+ """
+
+ return (grid + 1.0) / 2.0
+
+
+def denormalize(grid):
+ """Denormalize input grid from range [0, 1] to [-1, 1]
+ Args:
+        grid (Tensor): The grid to be denormalized, range [0, 1].
+ Returns:
+ Tensor: Denormalized grid, range [-1, 1].
+ """
+
+ return grid * 2.0 - 1.0
+
+
+def generate_grid(num_grid, size, device):
+ """Generate regular square grid of points in [0, 1] x [0, 1] coordinate
+ space.
+
+ Args:
+ num_grid (int): The number of grids to sample, one for each region.
+ size (tuple(int, int)): The side size of the regular grid.
+ device (torch.device): Desired device of returned tensor.
+
+ Returns:
+ (torch.Tensor): A tensor of shape (num_grid, size[0]*size[1], 2) that
+ contains coordinates for the regular grids.
+ """
+
+ affine_trans = torch.tensor([[[1., 0., 0.], [0., 1., 0.]]], device=device)
+ grid = F.affine_grid(
+ affine_trans, torch.Size((1, 1, *size)), align_corners=False)
+ grid = normalize(grid)
+ return grid.view(1, -1, 2).expand(num_grid, -1, -1)
+
+
+def rel_roi_point_to_abs_img_point(rois, rel_roi_points):
+ """Convert roi based relative point coordinates to image based absolute
+ point coordinates.
+
+ Args:
+ rois (Tensor): RoIs or BBoxes, shape (N, 4) or (N, 5)
+        rel_roi_points (Tensor): Point coordinates inside RoI, relative to
+            the RoI location, range (0, 1), shape (N, P, 2)
+ Returns:
+ Tensor: Image based absolute point coordinates, shape (N, P, 2)
+ """
+
+ with torch.no_grad():
+ assert rel_roi_points.size(0) == rois.size(0)
+ assert rois.dim() == 2
+ assert rel_roi_points.dim() == 3
+ assert rel_roi_points.size(2) == 2
+ # remove batch idx
+ if rois.size(1) == 5:
+ rois = rois[:, 1:]
+ abs_img_points = rel_roi_points.clone()
+        # To avoid an error when exporting to ONNX, use independent
+        # variables instead of in-place computation.
+ xs = abs_img_points[:, :, 0] * (rois[:, None, 2] - rois[:, None, 0])
+ ys = abs_img_points[:, :, 1] * (rois[:, None, 3] - rois[:, None, 1])
+ xs += rois[:, None, 0]
+ ys += rois[:, None, 1]
+ abs_img_points = torch.stack([xs, ys], dim=2)
+ return abs_img_points
+
+
+def get_shape_from_feature_map(x):
+ """Get spatial resolution of input feature map considering exporting to
+ onnx mode.
+
+ Args:
+ x (torch.Tensor): Input tensor, shape (N, C, H, W)
+ Returns:
+ torch.Tensor: Spatial resolution (width, height), shape (1, 1, 2)
+ """
+ if torch.onnx.is_in_onnx_export():
+ img_shape = shape_as_tensor(x)[2:].flip(0).view(1, 1, 2).to(
+ x.device).float()
+ else:
+ img_shape = torch.tensor(x.shape[2:]).flip(0).view(1, 1, 2).to(
+ x.device).float()
+ return img_shape
+
+
+def abs_img_point_to_rel_img_point(abs_img_points, img, spatial_scale=1.):
+ """Convert image based absolute point coordinates to image based relative
+ coordinates for sampling.
+
+ Args:
+ abs_img_points (Tensor): Image based absolute point coordinates,
+ shape (N, P, 2)
+ img (tuple/Tensor): (height, width) of image or feature map.
+ spatial_scale (float): Scale points by this factor. Default: 1.
+
+ Returns:
+ Tensor: Image based relative point coordinates for sampling,
+ shape (N, P, 2)
+ """
+
+ assert (isinstance(img, tuple) and len(img) == 2) or \
+ (isinstance(img, torch.Tensor) and len(img.shape) == 4)
+
+ if isinstance(img, tuple):
+ h, w = img
+ scale = torch.tensor([w, h],
+ dtype=torch.float,
+ device=abs_img_points.device)
+ scale = scale.view(1, 1, 2)
+ else:
+ scale = get_shape_from_feature_map(img)
+
+ return abs_img_points / scale * spatial_scale
+
+
+def rel_roi_point_to_rel_img_point(rois,
+ rel_roi_points,
+ img,
+ spatial_scale=1.):
+ """Convert roi based relative point coordinates to image based absolute
+ point coordinates.
+
+ Args:
+ rois (Tensor): RoIs or BBoxes, shape (N, 4) or (N, 5)
+ rel_roi_points (Tensor): Point coordinates inside RoI, relative to
+            the RoI location, range (0, 1), shape (N, P, 2)
+ img (tuple/Tensor): (height, width) of image or feature map.
+ spatial_scale (float): Scale points by this factor. Default: 1.
+
+ Returns:
+ Tensor: Image based relative point coordinates for sampling,
+ shape (N, P, 2)
+ """
+
+ abs_img_point = rel_roi_point_to_abs_img_point(rois, rel_roi_points)
+ rel_img_point = abs_img_point_to_rel_img_point(abs_img_point, img,
+ spatial_scale)
+
+ return rel_img_point
+
+
+def point_sample(input, points, align_corners=False, **kwargs):
+ """A wrapper around :func:`grid_sample` to support 3D point_coords tensors
+ Unlike :func:`torch.nn.functional.grid_sample` it assumes point_coords to
+ lie inside ``[0, 1] x [0, 1]`` square.
+
+ Args:
+ input (Tensor): Feature map, shape (N, C, H, W).
+ points (Tensor): Image based absolute point coordinates (normalized),
+ range [0, 1] x [0, 1], shape (N, P, 2) or (N, Hgrid, Wgrid, 2).
+ align_corners (bool): Whether align_corners. Default: False
+
+ Returns:
+ Tensor: Features of `point` on `input`, shape (N, C, P) or
+ (N, C, Hgrid, Wgrid).
+ """
+
+ add_dim = False
+ if points.dim() == 3:
+ add_dim = True
+ points = points.unsqueeze(2)
+ if is_in_onnx_export_without_custom_ops():
+ # If custom ops for onnx runtime not compiled use python
+ # implementation of grid_sample function to make onnx graph
+ # with supported nodes
+ output = bilinear_grid_sample(
+ input, denormalize(points), align_corners=align_corners)
+ else:
+ output = F.grid_sample(
+ input, denormalize(points), align_corners=align_corners, **kwargs)
+ if add_dim:
+ output = output.squeeze(3)
+ return output
+
+
+class SimpleRoIAlign(nn.Module):
+
+ def __init__(self, output_size, spatial_scale, aligned=True):
+ """Simple RoI align in PointRend, faster than standard RoIAlign.
+
+ Args:
+ output_size (tuple[int]): h, w
+ spatial_scale (float): scale the input boxes by this number
+ aligned (bool): if False, use the legacy implementation in
+ MMDetection, align_corners=True will be used in F.grid_sample.
+ If True, align the results more perfectly.
+ """
+
+ super(SimpleRoIAlign, self).__init__()
+ self.output_size = _pair(output_size)
+ self.spatial_scale = float(spatial_scale)
+ # to be consistent with other RoI ops
+ self.use_torchvision = False
+ self.aligned = aligned
+
+ def forward(self, features, rois):
+ num_imgs = features.size(0)
+ num_rois = rois.size(0)
+ rel_roi_points = generate_grid(
+ num_rois, self.output_size, device=rois.device)
+
+ if torch.onnx.is_in_onnx_export():
+ rel_img_points = rel_roi_point_to_rel_img_point(
+ rois, rel_roi_points, features, self.spatial_scale)
+ rel_img_points = rel_img_points.reshape(num_imgs, -1,
+ *rel_img_points.shape[1:])
+ point_feats = point_sample(
+ features, rel_img_points, align_corners=not self.aligned)
+ point_feats = point_feats.transpose(1, 2)
+ else:
+ point_feats = []
+ for batch_ind in range(num_imgs):
+ # unravel batch dim
+ feat = features[batch_ind].unsqueeze(0)
+ inds = (rois[:, 0].long() == batch_ind)
+ if inds.any():
+ rel_img_points = rel_roi_point_to_rel_img_point(
+ rois[inds], rel_roi_points[inds], feat,
+ self.spatial_scale).unsqueeze(0)
+ point_feat = point_sample(
+ feat, rel_img_points, align_corners=not self.aligned)
+ point_feat = point_feat.squeeze(0).transpose(0, 1)
+ point_feats.append(point_feat)
+
+ point_feats = torch.cat(point_feats, dim=0)
+
+ channels = features.size(1)
+ roi_feats = point_feats.reshape(num_rois, channels, *self.output_size)
+
+ return roi_feats
+
+ def __repr__(self):
+ format_str = self.__class__.__name__
+ format_str += '(output_size={}, spatial_scale={}'.format(
+ self.output_size, self.spatial_scale)
+ return format_str
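+
+
+# Hedged usage sketch (not part of the original module): assuming the
+# surrounding mmcv package imports resolve, `point_sample` only exercises the
+# plain `F.grid_sample` path on CPU here, so no compiled extension is needed.
+if __name__ == '__main__':
+    feats = torch.rand(1, 3, 8, 8)      # (N, C, H, W) feature map
+    pts = torch.rand(1, 5, 2)           # (N, P, 2) points in [0, 1] x [0, 1]
+    sampled = point_sample(feats, pts)  # -> (N, C, P)
+    assert sampled.shape == (1, 3, 5)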
diff --git a/annotator/uniformer/mmcv/ops/points_in_boxes.py b/annotator/uniformer/mmcv/ops/points_in_boxes.py
new file mode 100644
index 0000000000000000000000000000000000000000..4003173a53052161dbcd687a2fa1d755642fdab8
--- /dev/null
+++ b/annotator/uniformer/mmcv/ops/points_in_boxes.py
@@ -0,0 +1,133 @@
+import torch
+
+from ..utils import ext_loader
+
+ext_module = ext_loader.load_ext('_ext', [
+ 'points_in_boxes_part_forward', 'points_in_boxes_cpu_forward',
+ 'points_in_boxes_all_forward'
+])
+
+
+def points_in_boxes_part(points, boxes):
+ """Find the box in which each point is (CUDA).
+
+ Args:
+ points (torch.Tensor): [B, M, 3], [x, y, z] in LiDAR/DEPTH coordinate
+ boxes (torch.Tensor): [B, T, 7],
+ num_valid_boxes <= T, [x, y, z, x_size, y_size, z_size, rz] in
+ LiDAR/DEPTH coordinate, (x, y, z) is the bottom center
+
+ Returns:
+ box_idxs_of_pts (torch.Tensor): (B, M), default background = -1
+ """
+ assert points.shape[0] == boxes.shape[0], \
+ 'Points and boxes should have the same batch size, ' \
+ f'but got {points.shape[0]} and {boxes.shape[0]}'
+ assert boxes.shape[2] == 7, \
+ 'boxes dimension should be 7, ' \
+ f'but got unexpected shape {boxes.shape[2]}'
+ assert points.shape[2] == 3, \
+ 'points dimension should be 3, ' \
+ f'but got unexpected shape {points.shape[2]}'
+ batch_size, num_points, _ = points.shape
+
+ box_idxs_of_pts = points.new_zeros((batch_size, num_points),
+ dtype=torch.int).fill_(-1)
+
+ # If manually put the tensor 'points' or 'boxes' on a device
+ # which is not the current device, some temporary variables
+ # will be created on the current device in the cuda op,
+ # and the output will be incorrect.
+ # Therefore, we force the current device to be the same
+ # as the device of the tensors if it was not.
+ # Please refer to https://github.com/open-mmlab/mmdetection3d/issues/305
+ # for the incorrect output before the fix.
+ points_device = points.get_device()
+ assert points_device == boxes.get_device(), \
+ 'Points and boxes should be put on the same device'
+ if torch.cuda.current_device() != points_device:
+ torch.cuda.set_device(points_device)
+
+ ext_module.points_in_boxes_part_forward(boxes.contiguous(),
+ points.contiguous(),
+ box_idxs_of_pts)
+
+ return box_idxs_of_pts
+
+
+def points_in_boxes_cpu(points, boxes):
+ """Find all boxes in which each point is (CPU). The CPU version of
+ :meth:`points_in_boxes_all`.
+
+ Args:
+ points (torch.Tensor): [B, M, 3], [x, y, z] in
+ LiDAR/DEPTH coordinate
+ boxes (torch.Tensor): [B, T, 7],
+ num_valid_boxes <= T, [x, y, z, x_size, y_size, z_size, rz],
+ (x, y, z) is the bottom center.
+
+ Returns:
+ box_idxs_of_pts (torch.Tensor): (B, M, T), default background = 0.
+ """
+ assert points.shape[0] == boxes.shape[0], \
+ 'Points and boxes should have the same batch size, ' \
+ f'but got {points.shape[0]} and {boxes.shape[0]}'
+ assert boxes.shape[2] == 7, \
+ 'boxes dimension should be 7, ' \
+ f'but got unexpected shape {boxes.shape[2]}'
+ assert points.shape[2] == 3, \
+ 'points dimension should be 3, ' \
+ f'but got unexpected shape {points.shape[2]}'
+ batch_size, num_points, _ = points.shape
+ num_boxes = boxes.shape[1]
+
+ point_indices = points.new_zeros((batch_size, num_boxes, num_points),
+ dtype=torch.int)
+ for b in range(batch_size):
+ ext_module.points_in_boxes_cpu_forward(boxes[b].float().contiguous(),
+ points[b].float().contiguous(),
+ point_indices[b])
+ point_indices = point_indices.transpose(1, 2)
+
+ return point_indices
+
+
+def points_in_boxes_all(points, boxes):
+ """Find all boxes in which each point is (CUDA).
+
+ Args:
+ points (torch.Tensor): [B, M, 3], [x, y, z] in LiDAR/DEPTH coordinate
+ boxes (torch.Tensor): [B, T, 7],
+ num_valid_boxes <= T, [x, y, z, x_size, y_size, z_size, rz],
+ (x, y, z) is the bottom center.
+
+ Returns:
+ box_idxs_of_pts (torch.Tensor): (B, M, T), default background = 0.
+ """
+ assert boxes.shape[0] == points.shape[0], \
+ 'Points and boxes should have the same batch size, ' \
+        f'but got {points.shape[0]} and {boxes.shape[0]}'
+ assert boxes.shape[2] == 7, \
+ 'boxes dimension should be 7, ' \
+ f'but got unexpected shape {boxes.shape[2]}'
+ assert points.shape[2] == 3, \
+ 'points dimension should be 3, ' \
+ f'but got unexpected shape {points.shape[2]}'
+ batch_size, num_points, _ = points.shape
+ num_boxes = boxes.shape[1]
+
+ box_idxs_of_pts = points.new_zeros((batch_size, num_points, num_boxes),
+ dtype=torch.int).fill_(0)
+
+ # Same reason as line 25-32
+ points_device = points.get_device()
+ assert points_device == boxes.get_device(), \
+ 'Points and boxes should be put on the same device'
+ if torch.cuda.current_device() != points_device:
+ torch.cuda.set_device(points_device)
+
+ ext_module.points_in_boxes_all_forward(boxes.contiguous(),
+ points.contiguous(),
+ box_idxs_of_pts)
+
+ return box_idxs_of_pts
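+
+
+# Hedged usage sketch (not part of the original module): requires the compiled
+# `_ext` extension. One axis-aligned box with bottom center (0, 0, 0) and size
+# 2 x 2 x 2; the first point lies inside it, the second one does not.
+if __name__ == '__main__':
+    points = torch.tensor([[[0.0, 0.0, 1.0], [5.0, 5.0, 5.0]]])    # (1, 2, 3)
+    boxes = torch.tensor([[[0.0, 0.0, 0.0, 2.0, 2.0, 2.0, 0.0]]])  # (1, 1, 7)
+    print(points_in_boxes_cpu(points, boxes))  # point-in-box indicator, (1, 2, 1)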
diff --git a/annotator/uniformer/mmcv/ops/points_sampler.py b/annotator/uniformer/mmcv/ops/points_sampler.py
new file mode 100644
index 0000000000000000000000000000000000000000..a802a74fd6c3610d9ae178e6201f47423eca7ad1
--- /dev/null
+++ b/annotator/uniformer/mmcv/ops/points_sampler.py
@@ -0,0 +1,177 @@
+from typing import List
+
+import torch
+from torch import nn as nn
+
+from annotator.uniformer.mmcv.runner import force_fp32
+from .furthest_point_sample import (furthest_point_sample,
+ furthest_point_sample_with_dist)
+
+
+def calc_square_dist(point_feat_a, point_feat_b, norm=True):
+ """Calculating square distance between a and b.
+
+ Args:
+ point_feat_a (Tensor): (B, N, C) Feature vector of each point.
+ point_feat_b (Tensor): (B, M, C) Feature vector of each point.
+ norm (Bool, optional): Whether to normalize the distance.
+ Default: True.
+
+ Returns:
+ Tensor: (B, N, M) Distance between each pair points.
+ """
+ num_channel = point_feat_a.shape[-1]
+ # [bs, n, 1]
+ a_square = torch.sum(point_feat_a.unsqueeze(dim=2).pow(2), dim=-1)
+ # [bs, 1, m]
+ b_square = torch.sum(point_feat_b.unsqueeze(dim=1).pow(2), dim=-1)
+
+ corr_matrix = torch.matmul(point_feat_a, point_feat_b.transpose(1, 2))
+
+ dist = a_square + b_square - 2 * corr_matrix
+ if norm:
+ dist = torch.sqrt(dist) / num_channel
+ return dist
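+
+# Worked check (illustrative comment, not part of the original code): for
+# a = (1, 0) and b = (0, 1), ||a - b||^2 = ||a||^2 + ||b||^2 - 2 * a.b
+# = 1 + 1 - 0 = 2, which is what calc_square_dist returns with norm=False.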
+
+
+def get_sampler_cls(sampler_type):
+ """Get the type and mode of points sampler.
+
+ Args:
+ sampler_type (str): The type of points sampler.
+ The valid value are "D-FPS", "F-FPS", or "FS".
+
+ Returns:
+ class: Points sampler type.
+ """
+ sampler_mappings = {
+ 'D-FPS': DFPSSampler,
+ 'F-FPS': FFPSSampler,
+ 'FS': FSSampler,
+ }
+ try:
+ return sampler_mappings[sampler_type]
+ except KeyError:
+ raise KeyError(
+            f'Supported `sampler_type` are {sampler_mappings.keys()}, '
+            f'but got {sampler_type}')
+
+
+class PointsSampler(nn.Module):
+ """Points sampling.
+
+ Args:
+ num_point (list[int]): Number of sample points.
+ fps_mod_list (list[str], optional): Type of FPS method, valid mod
+ ['F-FPS', 'D-FPS', 'FS'], Default: ['D-FPS'].
+ F-FPS: using feature distances for FPS.
+ D-FPS: using Euclidean distances of points for FPS.
+ FS: using F-FPS and D-FPS simultaneously.
+ fps_sample_range_list (list[int], optional):
+ Range of points to apply FPS. Default: [-1].
+ """
+
+ def __init__(self,
+ num_point: List[int],
+ fps_mod_list: List[str] = ['D-FPS'],
+ fps_sample_range_list: List[int] = [-1]):
+ super().__init__()
+        # FPS is applied to each fps_mod in the list, so the length of
+        # num_point should equal the lengths of fps_mod_list and
+        # fps_sample_range_list.
+ assert len(num_point) == len(fps_mod_list) == len(
+ fps_sample_range_list)
+ self.num_point = num_point
+ self.fps_sample_range_list = fps_sample_range_list
+ self.samplers = nn.ModuleList()
+ for fps_mod in fps_mod_list:
+ self.samplers.append(get_sampler_cls(fps_mod)())
+ self.fp16_enabled = False
+
+ @force_fp32()
+ def forward(self, points_xyz, features):
+ """
+ Args:
+ points_xyz (Tensor): (B, N, 3) xyz coordinates of the features.
+ features (Tensor): (B, C, N) Descriptors of the features.
+
+ Returns:
+ Tensor: (B, npoint, sample_num) Indices of sampled points.
+ """
+ indices = []
+ last_fps_end_index = 0
+
+ for fps_sample_range, sampler, npoint in zip(
+ self.fps_sample_range_list, self.samplers, self.num_point):
+ assert fps_sample_range < points_xyz.shape[1]
+
+ if fps_sample_range == -1:
+ sample_points_xyz = points_xyz[:, last_fps_end_index:]
+ if features is not None:
+ sample_features = features[:, :, last_fps_end_index:]
+ else:
+ sample_features = None
+ else:
+ sample_points_xyz = \
+ points_xyz[:, last_fps_end_index:fps_sample_range]
+ if features is not None:
+ sample_features = features[:, :, last_fps_end_index:
+ fps_sample_range]
+ else:
+ sample_features = None
+
+ fps_idx = sampler(sample_points_xyz.contiguous(), sample_features,
+ npoint)
+
+ indices.append(fps_idx + last_fps_end_index)
+ last_fps_end_index += fps_sample_range
+ indices = torch.cat(indices, dim=1)
+
+ return indices
+
+
+class DFPSSampler(nn.Module):
+ """Using Euclidean distances of points for FPS."""
+
+ def __init__(self):
+ super().__init__()
+
+ def forward(self, points, features, npoint):
+ """Sampling points with D-FPS."""
+ fps_idx = furthest_point_sample(points.contiguous(), npoint)
+ return fps_idx
+
+
+class FFPSSampler(nn.Module):
+ """Using feature distances for FPS."""
+
+ def __init__(self):
+ super().__init__()
+
+ def forward(self, points, features, npoint):
+ """Sampling points with F-FPS."""
+ assert features is not None, \
+ 'feature input to FFPS_Sampler should not be None'
+ features_for_fps = torch.cat([points, features.transpose(1, 2)], dim=2)
+ features_dist = calc_square_dist(
+ features_for_fps, features_for_fps, norm=False)
+ fps_idx = furthest_point_sample_with_dist(features_dist, npoint)
+ return fps_idx
+
+
+class FSSampler(nn.Module):
+ """Using F-FPS and D-FPS simultaneously."""
+
+ def __init__(self):
+ super().__init__()
+
+ def forward(self, points, features, npoint):
+ """Sampling points with FS_Sampling."""
+ assert features is not None, \
+ 'feature input to FS_Sampler should not be None'
+ ffps_sampler = FFPSSampler()
+ dfps_sampler = DFPSSampler()
+ fps_idx_ffps = ffps_sampler(points, features, npoint)
+ fps_idx_dfps = dfps_sampler(points, features, npoint)
+ fps_idx = torch.cat([fps_idx_ffps, fps_idx_dfps], dim=1)
+ return fps_idx
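+
+
+# Hedged usage sketch (not part of the original module): `get_sampler_cls` and
+# constructing `PointsSampler` are pure Python; actually running the sampler
+# additionally needs the CUDA furthest-point-sampling ops.
+if __name__ == '__main__':
+    assert get_sampler_cls('D-FPS') is DFPSSampler
+    sampler = PointsSampler(num_point=[64], fps_mod_list=['D-FPS'])
+    print(sampler)  # an nn.Module holding a single DFPSSampler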
diff --git a/annotator/uniformer/mmcv/ops/psa_mask.py b/annotator/uniformer/mmcv/ops/psa_mask.py
new file mode 100644
index 0000000000000000000000000000000000000000..cdf14e62b50e8d4dd6856c94333c703bcc4c9ab6
--- /dev/null
+++ b/annotator/uniformer/mmcv/ops/psa_mask.py
@@ -0,0 +1,92 @@
+# Modified from https://github.com/hszhao/semseg/blob/master/lib/psa
+from torch import nn
+from torch.autograd import Function
+from torch.nn.modules.utils import _pair
+
+from ..utils import ext_loader
+
+ext_module = ext_loader.load_ext('_ext',
+ ['psamask_forward', 'psamask_backward'])
+
+
+class PSAMaskFunction(Function):
+
+ @staticmethod
+ def symbolic(g, input, psa_type, mask_size):
+ return g.op(
+ 'mmcv::MMCVPSAMask',
+ input,
+ psa_type_i=psa_type,
+ mask_size_i=mask_size)
+
+ @staticmethod
+ def forward(ctx, input, psa_type, mask_size):
+ ctx.psa_type = psa_type
+ ctx.mask_size = _pair(mask_size)
+ ctx.save_for_backward(input)
+
+ h_mask, w_mask = ctx.mask_size
+ batch_size, channels, h_feature, w_feature = input.size()
+ assert channels == h_mask * w_mask
+ output = input.new_zeros(
+ (batch_size, h_feature * w_feature, h_feature, w_feature))
+
+ ext_module.psamask_forward(
+ input,
+ output,
+ psa_type=psa_type,
+ num_=batch_size,
+ h_feature=h_feature,
+ w_feature=w_feature,
+ h_mask=h_mask,
+ w_mask=w_mask,
+ half_h_mask=(h_mask - 1) // 2,
+ half_w_mask=(w_mask - 1) // 2)
+ return output
+
+ @staticmethod
+ def backward(ctx, grad_output):
+ input = ctx.saved_tensors[0]
+ psa_type = ctx.psa_type
+ h_mask, w_mask = ctx.mask_size
+ batch_size, channels, h_feature, w_feature = input.size()
+ grad_input = grad_output.new_zeros(
+ (batch_size, channels, h_feature, w_feature))
+ ext_module.psamask_backward(
+ grad_output,
+ grad_input,
+ psa_type=psa_type,
+ num_=batch_size,
+ h_feature=h_feature,
+ w_feature=w_feature,
+ h_mask=h_mask,
+ w_mask=w_mask,
+ half_h_mask=(h_mask - 1) // 2,
+ half_w_mask=(w_mask - 1) // 2)
+ return grad_input, None, None, None
+
+
+psa_mask = PSAMaskFunction.apply
+
+
+class PSAMask(nn.Module):
+
+ def __init__(self, psa_type, mask_size=None):
+ super(PSAMask, self).__init__()
+ assert psa_type in ['collect', 'distribute']
+ if psa_type == 'collect':
+ psa_type_enum = 0
+ else:
+ psa_type_enum = 1
+ self.psa_type_enum = psa_type_enum
+ self.mask_size = mask_size
+ self.psa_type = psa_type
+
+ def forward(self, input):
+ return psa_mask(input, self.psa_type_enum, self.mask_size)
+
+ def __repr__(self):
+ s = self.__class__.__name__
+ s += f'(psa_type={self.psa_type}, '
+ s += f'mask_size={self.mask_size})'
+ return s
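+
+
+# Hedged usage sketch (not part of the original module): the forward pass needs
+# the compiled `_ext` op. For mask_size=(h, w) the input must have exactly
+# h * w channels and the output has shape (N, H * W, H, W).
+if __name__ == '__main__':
+    psa = PSAMask('collect', mask_size=(3, 3))
+    print(psa)  # PSAMask(psa_type=collect, mask_size=(3, 3))
+    # With the op built and a GPU available (hypothetical call):
+    # out = psa(torch.rand(2, 9, 16, 16).cuda())  # -> (2, 256, 16, 16)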
diff --git a/annotator/uniformer/mmcv/ops/roi_align.py b/annotator/uniformer/mmcv/ops/roi_align.py
new file mode 100644
index 0000000000000000000000000000000000000000..0755aefc66e67233ceae0f4b77948301c443e9fb
--- /dev/null
+++ b/annotator/uniformer/mmcv/ops/roi_align.py
@@ -0,0 +1,223 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import torch
+import torch.nn as nn
+from torch.autograd import Function
+from torch.autograd.function import once_differentiable
+from torch.nn.modules.utils import _pair
+
+from ..utils import deprecated_api_warning, ext_loader
+
+ext_module = ext_loader.load_ext('_ext',
+ ['roi_align_forward', 'roi_align_backward'])
+
+
+class RoIAlignFunction(Function):
+
+ @staticmethod
+ def symbolic(g, input, rois, output_size, spatial_scale, sampling_ratio,
+ pool_mode, aligned):
+ from ..onnx import is_custom_op_loaded
+ has_custom_op = is_custom_op_loaded()
+ if has_custom_op:
+ return g.op(
+ 'mmcv::MMCVRoiAlign',
+ input,
+ rois,
+ output_height_i=output_size[0],
+ output_width_i=output_size[1],
+ spatial_scale_f=spatial_scale,
+ sampling_ratio_i=sampling_ratio,
+ mode_s=pool_mode,
+ aligned_i=aligned)
+ else:
+ from torch.onnx.symbolic_opset9 import sub, squeeze
+ from torch.onnx.symbolic_helper import _slice_helper
+ from torch.onnx import TensorProtoDataType
+ # batch_indices = rois[:, 0].long()
+ batch_indices = _slice_helper(
+ g, rois, axes=[1], starts=[0], ends=[1])
+ batch_indices = squeeze(g, batch_indices, 1)
+ batch_indices = g.op(
+ 'Cast', batch_indices, to_i=TensorProtoDataType.INT64)
+ # rois = rois[:, 1:]
+ rois = _slice_helper(g, rois, axes=[1], starts=[1], ends=[5])
+ if aligned:
+ # rois -= 0.5/spatial_scale
+ aligned_offset = g.op(
+ 'Constant',
+ value_t=torch.tensor([0.5 / spatial_scale],
+ dtype=torch.float32))
+ rois = sub(g, rois, aligned_offset)
+ # roi align
+ return g.op(
+ 'RoiAlign',
+ input,
+ rois,
+ batch_indices,
+ output_height_i=output_size[0],
+ output_width_i=output_size[1],
+ spatial_scale_f=spatial_scale,
+ sampling_ratio_i=max(0, sampling_ratio),
+ mode_s=pool_mode)
+
+ @staticmethod
+ def forward(ctx,
+ input,
+ rois,
+ output_size,
+ spatial_scale=1.0,
+ sampling_ratio=0,
+ pool_mode='avg',
+ aligned=True):
+ ctx.output_size = _pair(output_size)
+ ctx.spatial_scale = spatial_scale
+ ctx.sampling_ratio = sampling_ratio
+ assert pool_mode in ('max', 'avg')
+ ctx.pool_mode = 0 if pool_mode == 'max' else 1
+ ctx.aligned = aligned
+ ctx.input_shape = input.size()
+
+ assert rois.size(1) == 5, 'RoI must be (idx, x1, y1, x2, y2)!'
+
+ output_shape = (rois.size(0), input.size(1), ctx.output_size[0],
+ ctx.output_size[1])
+ output = input.new_zeros(output_shape)
+ if ctx.pool_mode == 0:
+ argmax_y = input.new_zeros(output_shape)
+ argmax_x = input.new_zeros(output_shape)
+ else:
+ argmax_y = input.new_zeros(0)
+ argmax_x = input.new_zeros(0)
+
+ ext_module.roi_align_forward(
+ input,
+ rois,
+ output,
+ argmax_y,
+ argmax_x,
+ aligned_height=ctx.output_size[0],
+ aligned_width=ctx.output_size[1],
+ spatial_scale=ctx.spatial_scale,
+ sampling_ratio=ctx.sampling_ratio,
+ pool_mode=ctx.pool_mode,
+ aligned=ctx.aligned)
+
+ ctx.save_for_backward(rois, argmax_y, argmax_x)
+ return output
+
+ @staticmethod
+ @once_differentiable
+ def backward(ctx, grad_output):
+ rois, argmax_y, argmax_x = ctx.saved_tensors
+ grad_input = grad_output.new_zeros(ctx.input_shape)
+ # complex head architecture may cause grad_output uncontiguous.
+ grad_output = grad_output.contiguous()
+ ext_module.roi_align_backward(
+ grad_output,
+ rois,
+ argmax_y,
+ argmax_x,
+ grad_input,
+ aligned_height=ctx.output_size[0],
+ aligned_width=ctx.output_size[1],
+ spatial_scale=ctx.spatial_scale,
+ sampling_ratio=ctx.sampling_ratio,
+ pool_mode=ctx.pool_mode,
+ aligned=ctx.aligned)
+ return grad_input, None, None, None, None, None, None
+
+
+roi_align = RoIAlignFunction.apply
+
+
+class RoIAlign(nn.Module):
+ """RoI align pooling layer.
+
+ Args:
+ output_size (tuple): h, w
+ spatial_scale (float): scale the input boxes by this number
+        sampling_ratio (int): number of input samples to take for each
+ output sample. 0 to take samples densely for current models.
+ pool_mode (str, 'avg' or 'max'): pooling mode in each bin.
+ aligned (bool): if False, use the legacy implementation in
+ MMDetection. If True, align the results more perfectly.
+ use_torchvision (bool): whether to use roi_align from torchvision.
+
+ Note:
+ The implementation of RoIAlign when aligned=True is modified from
+ https://github.com/facebookresearch/detectron2/
+
+ The meaning of aligned=True:
+
+ Given a continuous coordinate c, its two neighboring pixel
+ indices (in our pixel model) are computed by floor(c - 0.5) and
+ ceil(c - 0.5). For example, c=1.3 has pixel neighbors with discrete
+ indices [0] and [1] (which are sampled from the underlying signal
+ at continuous coordinates 0.5 and 1.5). But the original roi_align
+ (aligned=False) does not subtract the 0.5 when computing
+ neighboring pixel indices and therefore it uses pixels with a
+ slightly incorrect alignment (relative to our pixel model) when
+ performing bilinear interpolation.
+
+ With `aligned=True`,
+ we first appropriately scale the ROI and then shift it by -0.5
+ prior to calling roi_align. This produces the correct neighbors;
+
+        This difference does not affect the model's performance when
+        RoIAlign is used together with conv layers.
+ """
+
+ @deprecated_api_warning(
+ {
+ 'out_size': 'output_size',
+ 'sample_num': 'sampling_ratio'
+ },
+ cls_name='RoIAlign')
+ def __init__(self,
+ output_size,
+ spatial_scale=1.0,
+ sampling_ratio=0,
+ pool_mode='avg',
+ aligned=True,
+ use_torchvision=False):
+ super(RoIAlign, self).__init__()
+
+ self.output_size = _pair(output_size)
+ self.spatial_scale = float(spatial_scale)
+ self.sampling_ratio = int(sampling_ratio)
+ self.pool_mode = pool_mode
+ self.aligned = aligned
+ self.use_torchvision = use_torchvision
+
+ def forward(self, input, rois):
+ """
+ Args:
+ input: NCHW images
+ rois: Bx5 boxes. First column is the index into N.\
+ The other 4 columns are xyxy.
+ """
+ if self.use_torchvision:
+ from torchvision.ops import roi_align as tv_roi_align
+ if 'aligned' in tv_roi_align.__code__.co_varnames:
+ return tv_roi_align(input, rois, self.output_size,
+ self.spatial_scale, self.sampling_ratio,
+ self.aligned)
+ else:
+ if self.aligned:
+ rois -= rois.new_tensor([0.] +
+ [0.5 / self.spatial_scale] * 4)
+ return tv_roi_align(input, rois, self.output_size,
+ self.spatial_scale, self.sampling_ratio)
+ else:
+ return roi_align(input, rois, self.output_size, self.spatial_scale,
+ self.sampling_ratio, self.pool_mode, self.aligned)
+
+ def __repr__(self):
+ s = self.__class__.__name__
+ s += f'(output_size={self.output_size}, '
+ s += f'spatial_scale={self.spatial_scale}, '
+ s += f'sampling_ratio={self.sampling_ratio}, '
+ s += f'pool_mode={self.pool_mode}, '
+ s += f'aligned={self.aligned}, '
+ s += f'use_torchvision={self.use_torchvision})'
+ return s
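+
+
+# Hedged usage sketch (not part of the original module): rois follow the
+# (batch_idx, x1, y1, x2, y2) layout. With use_torchvision=True the layer
+# dispatches to torchvision.ops.roi_align, so the mmcv `_ext` op is not used
+# (torchvision itself is assumed to be installed).
+if __name__ == '__main__':
+    layer = RoIAlign(output_size=(7, 7), spatial_scale=1.0, use_torchvision=True)
+    feats = torch.rand(1, 16, 32, 32)
+    rois = torch.tensor([[0.0, 4.0, 4.0, 20.0, 20.0]])
+    print(layer(feats, rois).shape)  # torch.Size([1, 16, 7, 7])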
diff --git a/annotator/uniformer/mmcv/ops/roi_align_rotated.py b/annotator/uniformer/mmcv/ops/roi_align_rotated.py
new file mode 100644
index 0000000000000000000000000000000000000000..0ce4961a3555d4da8bc3e32f1f7d5ad50036587d
--- /dev/null
+++ b/annotator/uniformer/mmcv/ops/roi_align_rotated.py
@@ -0,0 +1,177 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import torch.nn as nn
+from torch.autograd import Function
+
+from ..utils import ext_loader
+
+ext_module = ext_loader.load_ext(
+ '_ext', ['roi_align_rotated_forward', 'roi_align_rotated_backward'])
+
+
+class RoIAlignRotatedFunction(Function):
+
+ @staticmethod
+ def symbolic(g, features, rois, out_size, spatial_scale, sample_num,
+ aligned, clockwise):
+ if isinstance(out_size, int):
+ out_h = out_size
+ out_w = out_size
+ elif isinstance(out_size, tuple):
+ assert len(out_size) == 2
+ assert isinstance(out_size[0], int)
+ assert isinstance(out_size[1], int)
+ out_h, out_w = out_size
+ else:
+ raise TypeError(
+ '"out_size" must be an integer or tuple of integers')
+ return g.op(
+ 'mmcv::MMCVRoIAlignRotated',
+ features,
+ rois,
+ output_height_i=out_h,
+            output_width_i=out_w,
+ spatial_scale_f=spatial_scale,
+ sampling_ratio_i=sample_num,
+ aligned_i=aligned,
+ clockwise_i=clockwise)
+
+ @staticmethod
+ def forward(ctx,
+ features,
+ rois,
+ out_size,
+ spatial_scale,
+ sample_num=0,
+ aligned=True,
+ clockwise=False):
+ if isinstance(out_size, int):
+ out_h = out_size
+ out_w = out_size
+ elif isinstance(out_size, tuple):
+ assert len(out_size) == 2
+ assert isinstance(out_size[0], int)
+ assert isinstance(out_size[1], int)
+ out_h, out_w = out_size
+ else:
+ raise TypeError(
+ '"out_size" must be an integer or tuple of integers')
+ ctx.spatial_scale = spatial_scale
+ ctx.sample_num = sample_num
+ ctx.aligned = aligned
+ ctx.clockwise = clockwise
+ ctx.save_for_backward(rois)
+ ctx.feature_size = features.size()
+
+ batch_size, num_channels, data_height, data_width = features.size()
+ num_rois = rois.size(0)
+
+ output = features.new_zeros(num_rois, num_channels, out_h, out_w)
+ ext_module.roi_align_rotated_forward(
+ features,
+ rois,
+ output,
+ pooled_height=out_h,
+ pooled_width=out_w,
+ spatial_scale=spatial_scale,
+ sample_num=sample_num,
+ aligned=aligned,
+ clockwise=clockwise)
+ return output
+
+ @staticmethod
+ def backward(ctx, grad_output):
+ feature_size = ctx.feature_size
+ spatial_scale = ctx.spatial_scale
+ aligned = ctx.aligned
+ clockwise = ctx.clockwise
+ sample_num = ctx.sample_num
+ rois = ctx.saved_tensors[0]
+ assert feature_size is not None
+ batch_size, num_channels, data_height, data_width = feature_size
+
+ out_w = grad_output.size(3)
+ out_h = grad_output.size(2)
+
+ grad_input = grad_rois = None
+
+ if ctx.needs_input_grad[0]:
+ grad_input = rois.new_zeros(batch_size, num_channels, data_height,
+ data_width)
+ ext_module.roi_align_rotated_backward(
+ grad_output.contiguous(),
+ rois,
+ grad_input,
+ pooled_height=out_h,
+ pooled_width=out_w,
+ spatial_scale=spatial_scale,
+ sample_num=sample_num,
+ aligned=aligned,
+ clockwise=clockwise)
+ return grad_input, grad_rois, None, None, None, None, None
+
+
+roi_align_rotated = RoIAlignRotatedFunction.apply
+
+
+class RoIAlignRotated(nn.Module):
+ """RoI align pooling layer for rotated proposals.
+
+ It accepts a feature map of shape (N, C, H, W) and rois with shape
+ (n, 6) with each roi decoded as (batch_index, center_x, center_y,
+    w, h, angle). The angle is in radians.
+
+ Args:
+ out_size (tuple): h, w
+ spatial_scale (float): scale the input boxes by this number
+        sample_num (int): number of input samples to take for each
+ output sample. 0 to take samples densely for current models.
+ aligned (bool): if False, use the legacy implementation in
+ MMDetection. If True, align the results more perfectly.
+ Default: True.
+ clockwise (bool): If True, the angle in each proposal follows a
+ clockwise fashion in image space, otherwise, the angle is
+ counterclockwise. Default: False.
+
+ Note:
+ The implementation of RoIAlign when aligned=True is modified from
+ https://github.com/facebookresearch/detectron2/
+
+ The meaning of aligned=True:
+
+ Given a continuous coordinate c, its two neighboring pixel
+ indices (in our pixel model) are computed by floor(c - 0.5) and
+ ceil(c - 0.5). For example, c=1.3 has pixel neighbors with discrete
+ indices [0] and [1] (which are sampled from the underlying signal
+ at continuous coordinates 0.5 and 1.5). But the original roi_align
+ (aligned=False) does not subtract the 0.5 when computing
+ neighboring pixel indices and therefore it uses pixels with a
+ slightly incorrect alignment (relative to our pixel model) when
+ performing bilinear interpolation.
+
+ With `aligned=True`,
+ we first appropriately scale the ROI and then shift it by -0.5
+ prior to calling roi_align. This produces the correct neighbors;
+
+        This difference does not affect the model's performance when
+        RoIAlign is used together with conv layers.
+ """
+
+ def __init__(self,
+ out_size,
+ spatial_scale,
+ sample_num=0,
+ aligned=True,
+ clockwise=False):
+ super(RoIAlignRotated, self).__init__()
+
+ self.out_size = out_size
+ self.spatial_scale = float(spatial_scale)
+ self.sample_num = int(sample_num)
+ self.aligned = aligned
+ self.clockwise = clockwise
+
+ def forward(self, features, rois):
+ return RoIAlignRotatedFunction.apply(features, rois, self.out_size,
+ self.spatial_scale,
+ self.sample_num, self.aligned,
+ self.clockwise)
diff --git a/annotator/uniformer/mmcv/ops/roi_pool.py b/annotator/uniformer/mmcv/ops/roi_pool.py
new file mode 100644
index 0000000000000000000000000000000000000000..d339d8f2941eabc1cbe181a9c6c5ab5ff4ff4e5f
--- /dev/null
+++ b/annotator/uniformer/mmcv/ops/roi_pool.py
@@ -0,0 +1,86 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import torch
+import torch.nn as nn
+from torch.autograd import Function
+from torch.autograd.function import once_differentiable
+from torch.nn.modules.utils import _pair
+
+from ..utils import ext_loader
+
+ext_module = ext_loader.load_ext('_ext',
+ ['roi_pool_forward', 'roi_pool_backward'])
+
+
+class RoIPoolFunction(Function):
+
+ @staticmethod
+ def symbolic(g, input, rois, output_size, spatial_scale):
+ return g.op(
+ 'MaxRoiPool',
+ input,
+ rois,
+ pooled_shape_i=output_size,
+ spatial_scale_f=spatial_scale)
+
+ @staticmethod
+ def forward(ctx, input, rois, output_size, spatial_scale=1.0):
+ ctx.output_size = _pair(output_size)
+ ctx.spatial_scale = spatial_scale
+ ctx.input_shape = input.size()
+
+ assert rois.size(1) == 5, 'RoI must be (idx, x1, y1, x2, y2)!'
+
+ output_shape = (rois.size(0), input.size(1), ctx.output_size[0],
+ ctx.output_size[1])
+ output = input.new_zeros(output_shape)
+ argmax = input.new_zeros(output_shape, dtype=torch.int)
+
+ ext_module.roi_pool_forward(
+ input,
+ rois,
+ output,
+ argmax,
+ pooled_height=ctx.output_size[0],
+ pooled_width=ctx.output_size[1],
+ spatial_scale=ctx.spatial_scale)
+
+ ctx.save_for_backward(rois, argmax)
+ return output
+
+ @staticmethod
+ @once_differentiable
+ def backward(ctx, grad_output):
+ rois, argmax = ctx.saved_tensors
+ grad_input = grad_output.new_zeros(ctx.input_shape)
+
+ ext_module.roi_pool_backward(
+ grad_output,
+ rois,
+ argmax,
+ grad_input,
+ pooled_height=ctx.output_size[0],
+ pooled_width=ctx.output_size[1],
+ spatial_scale=ctx.spatial_scale)
+
+ return grad_input, None, None, None
+
+
+roi_pool = RoIPoolFunction.apply
+
+
+class RoIPool(nn.Module):
+
+ def __init__(self, output_size, spatial_scale=1.0):
+ super(RoIPool, self).__init__()
+
+ self.output_size = _pair(output_size)
+ self.spatial_scale = float(spatial_scale)
+
+ def forward(self, input, rois):
+ return roi_pool(input, rois, self.output_size, self.spatial_scale)
+
+ def __repr__(self):
+ s = self.__class__.__name__
+ s += f'(output_size={self.output_size}, '
+ s += f'spatial_scale={self.spatial_scale})'
+ return s
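+
+
+# Hedged usage sketch (not part of the original module): requires the compiled
+# `_ext` op. Rois use the same (batch_idx, x1, y1, x2, y2) layout as RoIAlign.
+# pool = RoIPool(output_size=(7, 7), spatial_scale=0.25)
+# out = pool(feats, rois)  # -> (num_rois, C, 7, 7)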
diff --git a/annotator/uniformer/mmcv/ops/roiaware_pool3d.py b/annotator/uniformer/mmcv/ops/roiaware_pool3d.py
new file mode 100644
index 0000000000000000000000000000000000000000..291b0e5a9b692492c7d7e495ea639c46042e2f18
--- /dev/null
+++ b/annotator/uniformer/mmcv/ops/roiaware_pool3d.py
@@ -0,0 +1,114 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import torch
+from torch import nn as nn
+from torch.autograd import Function
+
+import annotator.uniformer.mmcv as mmcv
+from ..utils import ext_loader
+
+ext_module = ext_loader.load_ext(
+ '_ext', ['roiaware_pool3d_forward', 'roiaware_pool3d_backward'])
+
+
+class RoIAwarePool3d(nn.Module):
+ """Encode the geometry-specific features of each 3D proposal.
+
+    Please refer to `PartA2 <https://arxiv.org/abs/1907.03670>`_ for more
+ details.
+
+ Args:
+ out_size (int or tuple): The size of output features. n or
+ [n1, n2, n3].
+ max_pts_per_voxel (int, optional): The maximum number of points per
+ voxel. Default: 128.
+ mode (str, optional): Pooling method of RoIAware, 'max' or 'avg'.
+ Default: 'max'.
+ """
+
+ def __init__(self, out_size, max_pts_per_voxel=128, mode='max'):
+ super().__init__()
+
+ self.out_size = out_size
+ self.max_pts_per_voxel = max_pts_per_voxel
+ assert mode in ['max', 'avg']
+ pool_mapping = {'max': 0, 'avg': 1}
+ self.mode = pool_mapping[mode]
+
+ def forward(self, rois, pts, pts_feature):
+ """
+ Args:
+ rois (torch.Tensor): [N, 7], in LiDAR coordinate,
+ (x, y, z) is the bottom center of rois.
+ pts (torch.Tensor): [npoints, 3], coordinates of input points.
+ pts_feature (torch.Tensor): [npoints, C], features of input points.
+
+ Returns:
+ pooled_features (torch.Tensor): [N, out_x, out_y, out_z, C]
+ """
+
+ return RoIAwarePool3dFunction.apply(rois, pts, pts_feature,
+ self.out_size,
+ self.max_pts_per_voxel, self.mode)
+
+
+class RoIAwarePool3dFunction(Function):
+
+ @staticmethod
+ def forward(ctx, rois, pts, pts_feature, out_size, max_pts_per_voxel,
+ mode):
+ """
+ Args:
+ rois (torch.Tensor): [N, 7], in LiDAR coordinate,
+ (x, y, z) is the bottom center of rois.
+ pts (torch.Tensor): [npoints, 3], coordinates of input points.
+ pts_feature (torch.Tensor): [npoints, C], features of input points.
+ out_size (int or tuple): The size of output features. n or
+ [n1, n2, n3].
+ max_pts_per_voxel (int): The maximum number of points per voxel.
+ Default: 128.
+ mode (int): Pooling method of RoIAware, 0 (max pool) or 1 (average
+ pool).
+
+ Returns:
+ pooled_features (torch.Tensor): [N, out_x, out_y, out_z, C], output
+ pooled features.
+ """
+
+ if isinstance(out_size, int):
+ out_x = out_y = out_z = out_size
+ else:
+ assert len(out_size) == 3
+ assert mmcv.is_tuple_of(out_size, int)
+ out_x, out_y, out_z = out_size
+
+ num_rois = rois.shape[0]
+ num_channels = pts_feature.shape[-1]
+ num_pts = pts.shape[0]
+
+ pooled_features = pts_feature.new_zeros(
+ (num_rois, out_x, out_y, out_z, num_channels))
+ argmax = pts_feature.new_zeros(
+ (num_rois, out_x, out_y, out_z, num_channels), dtype=torch.int)
+ pts_idx_of_voxels = pts_feature.new_zeros(
+ (num_rois, out_x, out_y, out_z, max_pts_per_voxel),
+ dtype=torch.int)
+
+ ext_module.roiaware_pool3d_forward(rois, pts, pts_feature, argmax,
+ pts_idx_of_voxels, pooled_features,
+ mode)
+
+ ctx.roiaware_pool3d_for_backward = (pts_idx_of_voxels, argmax, mode,
+ num_pts, num_channels)
+ return pooled_features
+
+ @staticmethod
+ def backward(ctx, grad_out):
+ ret = ctx.roiaware_pool3d_for_backward
+ pts_idx_of_voxels, argmax, mode, num_pts, num_channels = ret
+
+ grad_in = grad_out.new_zeros((num_pts, num_channels))
+ ext_module.roiaware_pool3d_backward(pts_idx_of_voxels, argmax,
+ grad_out.contiguous(), grad_in,
+ mode)
+
+ return None, None, grad_in, None, None, None
diff --git a/annotator/uniformer/mmcv/ops/roipoint_pool3d.py b/annotator/uniformer/mmcv/ops/roipoint_pool3d.py
new file mode 100644
index 0000000000000000000000000000000000000000..0a21412c0728431c04b84245bc2e3109eea9aefc
--- /dev/null
+++ b/annotator/uniformer/mmcv/ops/roipoint_pool3d.py
@@ -0,0 +1,77 @@
+from torch import nn as nn
+from torch.autograd import Function
+
+from ..utils import ext_loader
+
+ext_module = ext_loader.load_ext('_ext', ['roipoint_pool3d_forward'])
+
+
+class RoIPointPool3d(nn.Module):
+ """Encode the geometry-specific features of each 3D proposal.
+
+    Please refer to `Paper of PartA2 <https://arxiv.org/abs/1907.03670>`_
+ for more details.
+
+ Args:
+ num_sampled_points (int, optional): Number of samples in each roi.
+ Default: 512.
+ """
+
+ def __init__(self, num_sampled_points=512):
+ super().__init__()
+ self.num_sampled_points = num_sampled_points
+
+ def forward(self, points, point_features, boxes3d):
+ """
+ Args:
+ points (torch.Tensor): Input points whose shape is (B, N, C).
+ point_features (torch.Tensor): Features of input points whose shape
+ is (B, N, C).
+            boxes3d (torch.Tensor): Input bounding boxes whose shape is (B, M, 7).
+
+ Returns:
+ pooled_features (torch.Tensor): The output pooled features whose
+ shape is (B, M, 512, 3 + C).
+ pooled_empty_flag (torch.Tensor): Empty flag whose shape is (B, M).
+ """
+ return RoIPointPool3dFunction.apply(points, point_features, boxes3d,
+ self.num_sampled_points)
+
+
+class RoIPointPool3dFunction(Function):
+
+ @staticmethod
+ def forward(ctx, points, point_features, boxes3d, num_sampled_points=512):
+ """
+ Args:
+ points (torch.Tensor): Input points whose shape is (B, N, C).
+ point_features (torch.Tensor): Features of input points whose shape
+ is (B, N, C).
+            boxes3d (torch.Tensor): Input bounding boxes whose shape is (B, M, 7).
+ num_sampled_points (int, optional): The num of sampled points.
+ Default: 512.
+
+ Returns:
+ pooled_features (torch.Tensor): The output pooled features whose
+ shape is (B, M, 512, 3 + C).
+ pooled_empty_flag (torch.Tensor): Empty flag whose shape is (B, M).
+ """
+ assert len(points.shape) == 3 and points.shape[2] == 3
+ batch_size, boxes_num, feature_len = points.shape[0], boxes3d.shape[
+ 1], point_features.shape[2]
+ pooled_boxes3d = boxes3d.view(batch_size, -1, 7)
+ pooled_features = point_features.new_zeros(
+ (batch_size, boxes_num, num_sampled_points, 3 + feature_len))
+ pooled_empty_flag = point_features.new_zeros(
+ (batch_size, boxes_num)).int()
+
+ ext_module.roipoint_pool3d_forward(points.contiguous(),
+ pooled_boxes3d.contiguous(),
+ point_features.contiguous(),
+ pooled_features, pooled_empty_flag)
+
+ return pooled_features, pooled_empty_flag
+
+ @staticmethod
+ def backward(ctx, grad_out):
+ raise NotImplementedError
diff --git a/annotator/uniformer/mmcv/ops/saconv.py b/annotator/uniformer/mmcv/ops/saconv.py
new file mode 100644
index 0000000000000000000000000000000000000000..b4ee3978e097fca422805db4e31ae481006d7971
--- /dev/null
+++ b/annotator/uniformer/mmcv/ops/saconv.py
@@ -0,0 +1,145 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+from annotator.uniformer.mmcv.cnn import CONV_LAYERS, ConvAWS2d, constant_init
+from annotator.uniformer.mmcv.ops.deform_conv import deform_conv2d
+from annotator.uniformer.mmcv.utils import TORCH_VERSION, digit_version
+
+
+@CONV_LAYERS.register_module(name='SAC')
+class SAConv2d(ConvAWS2d):
+ """SAC (Switchable Atrous Convolution)
+
+ This is an implementation of SAC in DetectoRS
+ (https://arxiv.org/pdf/2006.02334.pdf).
+
+ Args:
+ in_channels (int): Number of channels in the input image
+ out_channels (int): Number of channels produced by the convolution
+ kernel_size (int or tuple): Size of the convolving kernel
+ stride (int or tuple, optional): Stride of the convolution. Default: 1
+ padding (int or tuple, optional): Zero-padding added to both sides of
+ the input. Default: 0
+ padding_mode (string, optional): ``'zeros'``, ``'reflect'``,
+ ``'replicate'`` or ``'circular'``. Default: ``'zeros'``
+ dilation (int or tuple, optional): Spacing between kernel elements.
+ Default: 1
+ groups (int, optional): Number of blocked connections from input
+ channels to output channels. Default: 1
+ bias (bool, optional): If ``True``, adds a learnable bias to the
+ output. Default: ``True``
+ use_deform: If ``True``, replace convolution with deformable
+ convolution. Default: ``False``.
+ """
+
+ def __init__(self,
+ in_channels,
+ out_channels,
+ kernel_size,
+ stride=1,
+ padding=0,
+ dilation=1,
+ groups=1,
+ bias=True,
+ use_deform=False):
+ super().__init__(
+ in_channels,
+ out_channels,
+ kernel_size,
+ stride=stride,
+ padding=padding,
+ dilation=dilation,
+ groups=groups,
+ bias=bias)
+ self.use_deform = use_deform
+ self.switch = nn.Conv2d(
+ self.in_channels, 1, kernel_size=1, stride=stride, bias=True)
+ self.weight_diff = nn.Parameter(torch.Tensor(self.weight.size()))
+ self.pre_context = nn.Conv2d(
+ self.in_channels, self.in_channels, kernel_size=1, bias=True)
+ self.post_context = nn.Conv2d(
+ self.out_channels, self.out_channels, kernel_size=1, bias=True)
+ if self.use_deform:
+ self.offset_s = nn.Conv2d(
+ self.in_channels,
+ 18,
+ kernel_size=3,
+ padding=1,
+ stride=stride,
+ bias=True)
+ self.offset_l = nn.Conv2d(
+ self.in_channels,
+ 18,
+ kernel_size=3,
+ padding=1,
+ stride=stride,
+ bias=True)
+ self.init_weights()
+
+ def init_weights(self):
+ constant_init(self.switch, 0, bias=1)
+ self.weight_diff.data.zero_()
+ constant_init(self.pre_context, 0)
+ constant_init(self.post_context, 0)
+ if self.use_deform:
+ constant_init(self.offset_s, 0)
+ constant_init(self.offset_l, 0)
+
+ def forward(self, x):
+ # pre-context
+ avg_x = F.adaptive_avg_pool2d(x, output_size=1)
+ avg_x = self.pre_context(avg_x)
+ avg_x = avg_x.expand_as(x)
+ x = x + avg_x
+ # switch
+ avg_x = F.pad(x, pad=(2, 2, 2, 2), mode='reflect')
+ avg_x = F.avg_pool2d(avg_x, kernel_size=5, stride=1, padding=0)
+ switch = self.switch(avg_x)
+ # sac
+ weight = self._get_weight(self.weight)
+ zero_bias = torch.zeros(
+ self.out_channels, device=weight.device, dtype=weight.dtype)
+
+ if self.use_deform:
+ offset = self.offset_s(avg_x)
+ out_s = deform_conv2d(x, offset, weight, self.stride, self.padding,
+ self.dilation, self.groups, 1)
+ else:
+ if (TORCH_VERSION == 'parrots'
+ or digit_version(TORCH_VERSION) < digit_version('1.5.0')):
+ out_s = super().conv2d_forward(x, weight)
+ elif digit_version(TORCH_VERSION) >= digit_version('1.8.0'):
+ # bias is a required argument of _conv_forward in torch 1.8.0
+ out_s = super()._conv_forward(x, weight, zero_bias)
+ else:
+ out_s = super()._conv_forward(x, weight)
+ ori_p = self.padding
+ ori_d = self.dilation
+ self.padding = tuple(3 * p for p in self.padding)
+ self.dilation = tuple(3 * d for d in self.dilation)
+ weight = weight + self.weight_diff
+ if self.use_deform:
+ offset = self.offset_l(avg_x)
+ out_l = deform_conv2d(x, offset, weight, self.stride, self.padding,
+ self.dilation, self.groups, 1)
+ else:
+ if (TORCH_VERSION == 'parrots'
+ or digit_version(TORCH_VERSION) < digit_version('1.5.0')):
+ out_l = super().conv2d_forward(x, weight)
+ elif digit_version(TORCH_VERSION) >= digit_version('1.8.0'):
+ # bias is a required argument of _conv_forward in torch 1.8.0
+ out_l = super()._conv_forward(x, weight, zero_bias)
+ else:
+ out_l = super()._conv_forward(x, weight)
+
+ out = switch * out_s + (1 - switch) * out_l
+ self.padding = ori_p
+ self.dilation = ori_d
+ # post-context
+ avg_x = F.adaptive_avg_pool2d(out, output_size=1)
+ avg_x = self.post_context(avg_x)
+ avg_x = avg_x.expand_as(out)
+ out = out + avg_x
+ return out
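+
+
+# Hedged usage sketch (not part of the original module): with use_deform=False
+# the forward pass only relies on plain PyTorch (>= 1.8 assumed here), so no
+# compiled ops are needed.
+if __name__ == '__main__':
+    conv = SAConv2d(in_channels=8, out_channels=16, kernel_size=3, padding=1)
+    x = torch.rand(2, 8, 32, 32)
+    print(conv(x).shape)  # torch.Size([2, 16, 32, 32])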
diff --git a/annotator/uniformer/mmcv/ops/scatter_points.py b/annotator/uniformer/mmcv/ops/scatter_points.py
new file mode 100644
index 0000000000000000000000000000000000000000..2b8aa4169e9f6ca4a6f845ce17d6d1e4db416bb8
--- /dev/null
+++ b/annotator/uniformer/mmcv/ops/scatter_points.py
@@ -0,0 +1,135 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import torch
+from torch import nn
+from torch.autograd import Function
+
+from ..utils import ext_loader
+
+ext_module = ext_loader.load_ext(
+ '_ext',
+ ['dynamic_point_to_voxel_forward', 'dynamic_point_to_voxel_backward'])
+
+
+class _DynamicScatter(Function):
+
+ @staticmethod
+ def forward(ctx, feats, coors, reduce_type='max'):
+ """convert kitti points(N, >=3) to voxels.
+
+ Args:
+ feats (torch.Tensor): [N, C]. Points features to be reduced
+ into voxels.
+ coors (torch.Tensor): [N, ndim]. Corresponding voxel coordinates
+                (specifically multi-dim voxel index) of each point.
+ reduce_type (str, optional): Reduce op. support 'max', 'sum' and
+ 'mean'. Default: 'max'.
+
+ Returns:
+ voxel_feats (torch.Tensor): [M, C]. Reduced features, input
+                features that share the same voxel coordinates are reduced to
+ one row.
+ voxel_coors (torch.Tensor): [M, ndim]. Voxel coordinates.
+ """
+ results = ext_module.dynamic_point_to_voxel_forward(
+ feats, coors, reduce_type)
+ (voxel_feats, voxel_coors, point2voxel_map,
+ voxel_points_count) = results
+ ctx.reduce_type = reduce_type
+ ctx.save_for_backward(feats, voxel_feats, point2voxel_map,
+ voxel_points_count)
+ ctx.mark_non_differentiable(voxel_coors)
+ return voxel_feats, voxel_coors
+
+ @staticmethod
+ def backward(ctx, grad_voxel_feats, grad_voxel_coors=None):
+ (feats, voxel_feats, point2voxel_map,
+ voxel_points_count) = ctx.saved_tensors
+ grad_feats = torch.zeros_like(feats)
+ # TODO: whether to use index put or use cuda_backward
+ # To use index put, need point to voxel index
+ ext_module.dynamic_point_to_voxel_backward(
+ grad_feats, grad_voxel_feats.contiguous(), feats, voxel_feats,
+ point2voxel_map, voxel_points_count, ctx.reduce_type)
+ return grad_feats, None, None
+
+
+dynamic_scatter = _DynamicScatter.apply
+
+
+class DynamicScatter(nn.Module):
+ """Scatters points into voxels, used in the voxel encoder with dynamic
+ voxelization.
+
+ Note:
+ The CPU and GPU implementation get the same output, but have numerical
+ difference after summation and division (e.g., 5e-7).
+
+ Args:
+ voxel_size (list): list [x, y, z] size of three dimension.
+ point_cloud_range (list): The coordinate range of points, [x_min,
+ y_min, z_min, x_max, y_max, z_max].
+ average_points (bool): whether to use avg pooling to scatter points
+ into voxel.
+ """
+
+ def __init__(self, voxel_size, point_cloud_range, average_points: bool):
+ super().__init__()
+
+ self.voxel_size = voxel_size
+ self.point_cloud_range = point_cloud_range
+ self.average_points = average_points
+
+ def forward_single(self, points, coors):
+ """Scatters points into voxels.
+
+ Args:
+ points (torch.Tensor): Points to be reduced into voxels.
+ coors (torch.Tensor): Corresponding voxel coordinates (specifically
+                multi-dim voxel index) of each point.
+
+ Returns:
+ voxel_feats (torch.Tensor): Reduced features, input features that
+                share the same voxel coordinates are reduced to one row.
+ voxel_coors (torch.Tensor): Voxel coordinates.
+ """
+ reduce = 'mean' if self.average_points else 'max'
+ return dynamic_scatter(points.contiguous(), coors.contiguous(), reduce)
+
+ def forward(self, points, coors):
+ """Scatters points/features into voxels.
+
+ Args:
+ points (torch.Tensor): Points to be reduced into voxels.
+ coors (torch.Tensor): Corresponding voxel coordinates (specifically
+                multi-dim voxel index) of each point.
+
+ Returns:
+ voxel_feats (torch.Tensor): Reduced features, input features that
+                share the same voxel coordinates are reduced to one row.
+ voxel_coors (torch.Tensor): Voxel coordinates.
+ """
+ if coors.size(-1) == 3:
+ return self.forward_single(points, coors)
+ else:
+ batch_size = coors[-1, 0] + 1
+ voxels, voxel_coors = [], []
+ for i in range(batch_size):
+ inds = torch.where(coors[:, 0] == i)
+ voxel, voxel_coor = self.forward_single(
+ points[inds], coors[inds][:, 1:])
+ coor_pad = nn.functional.pad(
+ voxel_coor, (1, 0), mode='constant', value=i)
+ voxel_coors.append(coor_pad)
+ voxels.append(voxel)
+ features = torch.cat(voxels, dim=0)
+ feature_coors = torch.cat(voxel_coors, dim=0)
+
+ return features, feature_coors
+
+ def __repr__(self):
+ s = self.__class__.__name__ + '('
+ s += 'voxel_size=' + str(self.voxel_size)
+ s += ', point_cloud_range=' + str(self.point_cloud_range)
+ s += ', average_points=' + str(self.average_points)
+ s += ')'
+ return s
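+
+
+# Hedged usage sketch (not part of the original module): requires the compiled
+# `_ext` op. `coors` with 3 columns is treated as a single sample; a leading
+# batch-index column triggers the per-sample loop in `forward`.
+# scatter = DynamicScatter([0.1, 0.1, 0.1], [-50, -50, -3, 50, 50, 1], True)
+# voxel_feats, voxel_coors = scatter(points, coors)  # one row per unique voxel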
diff --git a/annotator/uniformer/mmcv/ops/sync_bn.py b/annotator/uniformer/mmcv/ops/sync_bn.py
new file mode 100644
index 0000000000000000000000000000000000000000..c9b016fcbe860989c56cd1040034bcfa60e146d2
--- /dev/null
+++ b/annotator/uniformer/mmcv/ops/sync_bn.py
@@ -0,0 +1,279 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import torch
+import torch.distributed as dist
+import torch.nn.functional as F
+from torch.autograd import Function
+from torch.autograd.function import once_differentiable
+from torch.nn.modules.module import Module
+from torch.nn.parameter import Parameter
+
+from annotator.uniformer.mmcv.cnn import NORM_LAYERS
+from ..utils import ext_loader
+
+ext_module = ext_loader.load_ext('_ext', [
+ 'sync_bn_forward_mean', 'sync_bn_forward_var', 'sync_bn_forward_output',
+ 'sync_bn_backward_param', 'sync_bn_backward_data'
+])
+
+
+class SyncBatchNormFunction(Function):
+
+ @staticmethod
+ def symbolic(g, input, running_mean, running_var, weight, bias, momentum,
+ eps, group, group_size, stats_mode):
+ return g.op(
+ 'mmcv::MMCVSyncBatchNorm',
+ input,
+ running_mean,
+ running_var,
+ weight,
+ bias,
+ momentum_f=momentum,
+ eps_f=eps,
+ group_i=group,
+ group_size_i=group_size,
+ stats_mode=stats_mode)
+
+ @staticmethod
+ def forward(self, input, running_mean, running_var, weight, bias, momentum,
+ eps, group, group_size, stats_mode):
+ self.momentum = momentum
+ self.eps = eps
+ self.group = group
+ self.group_size = group_size
+ self.stats_mode = stats_mode
+
+ assert isinstance(
+ input, (torch.HalfTensor, torch.FloatTensor,
+ torch.cuda.HalfTensor, torch.cuda.FloatTensor)), \
+            f'only support Half or Float Tensor, but got {input.type()}'
+ output = torch.zeros_like(input)
+ input3d = input.flatten(start_dim=2)
+ output3d = output.view_as(input3d)
+ num_channels = input3d.size(1)
+
+ # ensure mean/var/norm/std are initialized as zeros
+ # ``torch.empty()`` does not guarantee that
+ mean = torch.zeros(
+ num_channels, dtype=torch.float, device=input3d.device)
+ var = torch.zeros(
+ num_channels, dtype=torch.float, device=input3d.device)
+ norm = torch.zeros_like(
+ input3d, dtype=torch.float, device=input3d.device)
+ std = torch.zeros(
+ num_channels, dtype=torch.float, device=input3d.device)
+
+ batch_size = input3d.size(0)
+ if batch_size > 0:
+ ext_module.sync_bn_forward_mean(input3d, mean)
+ batch_flag = torch.ones([1], device=mean.device, dtype=mean.dtype)
+ else:
+ # skip updating mean and leave it as zeros when the input is empty
+ batch_flag = torch.zeros([1], device=mean.device, dtype=mean.dtype)
+
+ # synchronize mean and the batch flag
+ vec = torch.cat([mean, batch_flag])
+ if self.stats_mode == 'N':
+ vec *= batch_size
+ if self.group_size > 1:
+ dist.all_reduce(vec, group=self.group)
+ total_batch = vec[-1].detach()
+ mean = vec[:num_channels]
+
+ if self.stats_mode == 'default':
+ mean = mean / self.group_size
+ elif self.stats_mode == 'N':
+ mean = mean / total_batch.clamp(min=1)
+ else:
+ raise NotImplementedError
+
+ # leave var as zeros when the input is empty
+ if batch_size > 0:
+ ext_module.sync_bn_forward_var(input3d, mean, var)
+
+ if self.stats_mode == 'N':
+ var *= batch_size
+ if self.group_size > 1:
+ dist.all_reduce(var, group=self.group)
+
+ if self.stats_mode == 'default':
+ var /= self.group_size
+ elif self.stats_mode == 'N':
+ var /= total_batch.clamp(min=1)
+ else:
+ raise NotImplementedError
+
+ # if the total batch size over all the ranks is zero,
+ # we should not update the statistics in the current batch
+ update_flag = total_batch.clamp(max=1)
+ momentum = update_flag * self.momentum
+ ext_module.sync_bn_forward_output(
+ input3d,
+ mean,
+ var,
+ weight,
+ bias,
+ running_mean,
+ running_var,
+ norm,
+ std,
+ output3d,
+ eps=self.eps,
+ momentum=momentum,
+ group_size=self.group_size)
+ self.save_for_backward(norm, std, weight)
+ return output
+
+ @staticmethod
+ @once_differentiable
+ def backward(self, grad_output):
+ norm, std, weight = self.saved_tensors
+ grad_weight = torch.zeros_like(weight)
+ grad_bias = torch.zeros_like(weight)
+ grad_input = torch.zeros_like(grad_output)
+ grad_output3d = grad_output.flatten(start_dim=2)
+ grad_input3d = grad_input.view_as(grad_output3d)
+
+ batch_size = grad_input3d.size(0)
+ if batch_size > 0:
+ ext_module.sync_bn_backward_param(grad_output3d, norm, grad_weight,
+ grad_bias)
+
+ # all reduce
+ if self.group_size > 1:
+ dist.all_reduce(grad_weight, group=self.group)
+ dist.all_reduce(grad_bias, group=self.group)
+ grad_weight /= self.group_size
+ grad_bias /= self.group_size
+
+ if batch_size > 0:
+ ext_module.sync_bn_backward_data(grad_output3d, weight,
+ grad_weight, grad_bias, norm, std,
+ grad_input3d)
+
+ return grad_input, None, None, grad_weight, grad_bias, \
+ None, None, None, None, None
+
+
+@NORM_LAYERS.register_module(name='MMSyncBN')
+class SyncBatchNorm(Module):
+ """Synchronized Batch Normalization.
+
+ Args:
+        num_features (int): number of features/channels in the input tensor
+ eps (float, optional): a value added to the denominator for numerical
+ stability. Defaults to 1e-5.
+ momentum (float, optional): the value used for the running_mean and
+ running_var computation. Defaults to 0.1.
+ affine (bool, optional): whether to use learnable affine parameters.
+ Defaults to True.
+ track_running_stats (bool, optional): whether to track the running
+ mean and variance during training. When set to False, this
+ module does not track such statistics, and initializes statistics
+ buffers ``running_mean`` and ``running_var`` as ``None``. When
+ these buffers are ``None``, this module always uses batch
+ statistics in both training and eval modes. Defaults to True.
+        group (int, optional): synchronization of stats happens within
+ each process group individually. By default it is synchronization
+ across the whole world. Defaults to None.
+ stats_mode (str, optional): The statistical mode. Available options
+ includes ``'default'`` and ``'N'``. Defaults to 'default'.
+            When ``stats_mode=='default'``, it computes the overall statistics
+            using those from each worker with equal weight, i.e., the
+            statistics are synchronized and simply divided by ``group``. This
+            mode will produce inaccurate statistics when empty tensors occur.
+            When ``stats_mode=='N'``, it computes the overall statistics using
+            the total number of batches in each worker, ignoring the number of
+            workers in the group, i.e., the statistics are synchronized and
+            then divided by the total batch size ``N``. This mode is
+            beneficial when empty tensors occur during training, as it
+            averages the total mean over the real number of batches.
+ """
+
+ def __init__(self,
+ num_features,
+ eps=1e-5,
+ momentum=0.1,
+ affine=True,
+ track_running_stats=True,
+ group=None,
+ stats_mode='default'):
+ super(SyncBatchNorm, self).__init__()
+ self.num_features = num_features
+ self.eps = eps
+ self.momentum = momentum
+ self.affine = affine
+ self.track_running_stats = track_running_stats
+ group = dist.group.WORLD if group is None else group
+ self.group = group
+ self.group_size = dist.get_world_size(group)
+ assert stats_mode in ['default', 'N'], \
+ f'"stats_mode" only accepts "default" and "N", got "{stats_mode}"'
+ self.stats_mode = stats_mode
+ if self.affine:
+ self.weight = Parameter(torch.Tensor(num_features))
+ self.bias = Parameter(torch.Tensor(num_features))
+ else:
+ self.register_parameter('weight', None)
+ self.register_parameter('bias', None)
+ if self.track_running_stats:
+ self.register_buffer('running_mean', torch.zeros(num_features))
+ self.register_buffer('running_var', torch.ones(num_features))
+ self.register_buffer('num_batches_tracked',
+ torch.tensor(0, dtype=torch.long))
+ else:
+ self.register_buffer('running_mean', None)
+ self.register_buffer('running_var', None)
+ self.register_buffer('num_batches_tracked', None)
+ self.reset_parameters()
+
+ def reset_running_stats(self):
+ if self.track_running_stats:
+ self.running_mean.zero_()
+ self.running_var.fill_(1)
+ self.num_batches_tracked.zero_()
+
+ def reset_parameters(self):
+ self.reset_running_stats()
+ if self.affine:
+            self.weight.data.uniform_()  # pytorch uses ones_()
+ self.bias.data.zero_()
+
+ def forward(self, input):
+ if input.dim() < 2:
+ raise ValueError(
+ f'expected at least 2D input, got {input.dim()}D input')
+ if self.momentum is None:
+ exponential_average_factor = 0.0
+ else:
+ exponential_average_factor = self.momentum
+
+ if self.training and self.track_running_stats:
+ if self.num_batches_tracked is not None:
+ self.num_batches_tracked += 1
+ if self.momentum is None: # use cumulative moving average
+ exponential_average_factor = 1.0 / float(
+ self.num_batches_tracked)
+ else: # use exponential moving average
+ exponential_average_factor = self.momentum
+
+ if self.training or not self.track_running_stats:
+ return SyncBatchNormFunction.apply(
+ input, self.running_mean, self.running_var, self.weight,
+ self.bias, exponential_average_factor, self.eps, self.group,
+ self.group_size, self.stats_mode)
+ else:
+ return F.batch_norm(input, self.running_mean, self.running_var,
+ self.weight, self.bias, False,
+ exponential_average_factor, self.eps)
+
+ def __repr__(self):
+ s = self.__class__.__name__
+ s += f'({self.num_features}, '
+ s += f'eps={self.eps}, '
+ s += f'momentum={self.momentum}, '
+ s += f'affine={self.affine}, '
+ s += f'track_running_stats={self.track_running_stats}, '
+        s += f'group_size={self.group_size}, '
+ s += f'stats_mode={self.stats_mode})'
+ return s
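+
+
+def _sync_bn_usage_example():  # pragma: no cover
+    # Hypothetical helper kept only as a minimal usage sketch: it assumes
+    # the compiled `_ext` extension, a CUDA device and an already
+    # initialized torch.distributed process group.
+    if dist.is_available() and dist.is_initialized() and \
+            torch.cuda.is_available():
+        sync_bn = SyncBatchNorm(64, stats_mode='N').cuda()
+        x = torch.rand(2, 64, 32, 32, device='cuda')
+        y = sync_bn(x)  # batch statistics are synchronized across the group
+        print(y.shape, sync_bn)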
diff --git a/annotator/uniformer/mmcv/ops/three_interpolate.py b/annotator/uniformer/mmcv/ops/three_interpolate.py
new file mode 100644
index 0000000000000000000000000000000000000000..203f47f05d58087e034fb3cd8cd6a09233947b4a
--- /dev/null
+++ b/annotator/uniformer/mmcv/ops/three_interpolate.py
@@ -0,0 +1,68 @@
+from typing import Tuple
+
+import torch
+from torch.autograd import Function
+
+from ..utils import ext_loader
+
+ext_module = ext_loader.load_ext(
+ '_ext', ['three_interpolate_forward', 'three_interpolate_backward'])
+
+
+class ThreeInterpolate(Function):
+ """Performs weighted linear interpolation on 3 features.
+
+    Please refer to `Paper of PointNet++ <https://arxiv.org/abs/1706.02413>`_
+ for more details.
+ """
+
+ @staticmethod
+ def forward(ctx, features: torch.Tensor, indices: torch.Tensor,
+ weight: torch.Tensor) -> torch.Tensor:
+ """
+ Args:
+            features (Tensor): (B, C, M) feature descriptors to be
+                interpolated from.
+            indices (Tensor): (B, n, 3) indices of the three nearest
+                neighbors of the target features in ``features``.
+            weight (Tensor): (B, n, 3) interpolation weights.
+
+ Returns:
+ Tensor: (B, C, N) tensor of the interpolated features
+ """
+ assert features.is_contiguous()
+ assert indices.is_contiguous()
+ assert weight.is_contiguous()
+
+ B, c, m = features.size()
+ n = indices.size(1)
+ ctx.three_interpolate_for_backward = (indices, weight, m)
+ output = torch.cuda.FloatTensor(B, c, n)
+
+ ext_module.three_interpolate_forward(
+ features, indices, weight, output, b=B, c=c, m=m, n=n)
+ return output
+
+ @staticmethod
+ def backward(
+ ctx, grad_out: torch.Tensor
+ ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
+ """
+ Args:
+ grad_out (Tensor): (B, C, N) tensor with gradients of outputs
+
+ Returns:
+ Tensor: (B, C, M) tensor with gradients of features
+ """
+ idx, weight, m = ctx.three_interpolate_for_backward
+ B, c, n = grad_out.size()
+
+ grad_features = torch.cuda.FloatTensor(B, c, m).zero_()
+ grad_out_data = grad_out.data.contiguous()
+
+ ext_module.three_interpolate_backward(
+ grad_out_data, idx, weight, grad_features.data, b=B, c=c, n=n, m=m)
+ return grad_features, None, None
+
+
+three_interpolate = ThreeInterpolate.apply
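+
+
+def _three_interpolate_usage_example():  # pragma: no cover
+    # Hypothetical helper kept only as a minimal usage sketch: it assumes
+    # the compiled `_ext` CUDA extension and a CUDA device. Shapes follow
+    # the docstring above; real indices/weights would come from `three_nn`,
+    # random stand-ins are used here purely to exercise the op.
+    B, C, M, N = 2, 16, 64, 128
+    features = torch.rand(B, C, M, device='cuda')
+    indices = torch.randint(0, M, (B, N, 3), device='cuda', dtype=torch.int32)
+    weight = torch.softmax(torch.rand(B, N, 3, device='cuda'), dim=-1)
+    out = three_interpolate(features, indices, weight)
+    print(out.shape)  # torch.Size([2, 16, 128])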
diff --git a/annotator/uniformer/mmcv/ops/three_nn.py b/annotator/uniformer/mmcv/ops/three_nn.py
new file mode 100644
index 0000000000000000000000000000000000000000..2b01047a129989cd5545a0a86f23a487f4a13ce1
--- /dev/null
+++ b/annotator/uniformer/mmcv/ops/three_nn.py
@@ -0,0 +1,51 @@
+from typing import Tuple
+
+import torch
+from torch.autograd import Function
+
+from ..utils import ext_loader
+
+ext_module = ext_loader.load_ext('_ext', ['three_nn_forward'])
+
+
+class ThreeNN(Function):
+ """Find the top-3 nearest neighbors of the target set from the source set.
+
+    Please refer to `Paper of PointNet++ <https://arxiv.org/abs/1706.02413>`_
+ for more details.
+ """
+
+ @staticmethod
+ def forward(ctx, target: torch.Tensor,
+ source: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
+ """
+ Args:
+            target (Tensor): shape (B, N, 3), the set of points for which
+                to find the nearest neighbors.
+            source (Tensor): shape (B, M, 3), the set of points searched
+                for the nearest neighbors of ``target``.
+
+        Returns:
+            Tuple[Tensor, Tensor]: (B, N, 3) L2 distances from each point
+                in ``target`` to its three nearest neighbors in ``source``,
+                and (B, N, 3) integer indices of those neighbors.
+ """
+ target = target.contiguous()
+ source = source.contiguous()
+
+ B, N, _ = target.size()
+ m = source.size(1)
+ dist2 = torch.cuda.FloatTensor(B, N, 3)
+ idx = torch.cuda.IntTensor(B, N, 3)
+
+ ext_module.three_nn_forward(target, source, dist2, idx, b=B, n=N, m=m)
+ if torch.__version__ != 'parrots':
+ ctx.mark_non_differentiable(idx)
+
+ return torch.sqrt(dist2), idx
+
+ @staticmethod
+ def backward(ctx, a=None, b=None):
+ return None, None
+
+
+three_nn = ThreeNN.apply
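+
+
+def _three_nn_usage_example():  # pragma: no cover
+    # Hypothetical helper kept only as a minimal usage sketch: it assumes
+    # the compiled `_ext` CUDA extension and a CUDA device.
+    target = torch.rand(2, 128, 3, device='cuda')
+    source = torch.rand(2, 64, 3, device='cuda')
+    dists, idx = three_nn(target, source)
+    # dists: (2, 128, 3) distances to the three nearest source points,
+    # idx: (2, 128, 3) their integer indices in `source`.
+    print(dists.shape, idx.shape, idx.dtype)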
diff --git a/annotator/uniformer/mmcv/ops/tin_shift.py b/annotator/uniformer/mmcv/ops/tin_shift.py
new file mode 100644
index 0000000000000000000000000000000000000000..472c9fcfe45a124e819b7ed5653e585f94a8811e
--- /dev/null
+++ b/annotator/uniformer/mmcv/ops/tin_shift.py
@@ -0,0 +1,68 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+# Code reference from "Temporal Interlacing Network"
+# https://github.com/deepcs233/TIN/blob/master/cuda_shift/rtc_wrap.py
+# Hao Shao, Shengju Qian, Yu Liu
+# shaoh19@mails.tsinghua.edu.cn, sjqian@cse.cuhk.edu.hk, yuliu@ee.cuhk.edu.hk
+
+import torch
+import torch.nn as nn
+from torch.autograd import Function
+
+from ..utils import ext_loader
+
+ext_module = ext_loader.load_ext('_ext',
+ ['tin_shift_forward', 'tin_shift_backward'])
+
+
+class TINShiftFunction(Function):
+
+ @staticmethod
+ def forward(ctx, input, shift):
+ C = input.size(2)
+ num_segments = shift.size(1)
+ if C // num_segments <= 0 or C % num_segments != 0:
+ raise ValueError('C should be a multiple of num_segments, '
+ f'but got C={C} and num_segments={num_segments}.')
+
+ ctx.save_for_backward(shift)
+
+ out = torch.zeros_like(input)
+ ext_module.tin_shift_forward(input, shift, out)
+
+ return out
+
+ @staticmethod
+ def backward(ctx, grad_output):
+
+ shift = ctx.saved_tensors[0]
+ data_grad_input = grad_output.new(*grad_output.size()).zero_()
+ shift_grad_input = shift.new(*shift.size()).zero_()
+ ext_module.tin_shift_backward(grad_output, shift, data_grad_input)
+
+ return data_grad_input, shift_grad_input
+
+
+tin_shift = TINShiftFunction.apply
+
+
+class TINShift(nn.Module):
+ """Temporal Interlace Shift.
+
+ Temporal Interlace shift is a differentiable temporal-wise frame shifting
+ which is proposed in "Temporal Interlacing Network"
+
+ Please refer to https://arxiv.org/abs/2001.06499 for more details.
+ Code is modified from https://github.com/mit-han-lab/temporal-shift-module
+ """
+
+ def forward(self, input, shift):
+ """Perform temporal interlace shift.
+
+ Args:
+ input (Tensor): Feature map with shape [N, num_segments, C, H * W].
+ shift (Tensor): Shift tensor with shape [N, num_segments].
+
+ Returns:
+ Feature map after temporal interlace shift.
+ """
+ return tin_shift(input, shift)
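+
+
+def _tin_shift_usage_example():  # pragma: no cover
+    # Hypothetical helper kept only as a minimal usage sketch: it assumes
+    # the compiled `_ext` CUDA extension and a CUDA device, and that the
+    # per-segment shift offsets are integer-valued.
+    x = torch.rand(1, 4, 8, 16, device='cuda')  # [N, num_segments, C, H*W]
+    shift = torch.tensor([[-1, 0, 1, 2]], device='cuda', dtype=torch.int)
+    out = TINShift()(x, shift)
+    print(out.shape)  # same shape as the input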
diff --git a/annotator/uniformer/mmcv/ops/upfirdn2d.py b/annotator/uniformer/mmcv/ops/upfirdn2d.py
new file mode 100644
index 0000000000000000000000000000000000000000..c8bb2c3c949eed38a6465ed369fa881538dca010
--- /dev/null
+++ b/annotator/uniformer/mmcv/ops/upfirdn2d.py
@@ -0,0 +1,330 @@
+# modified from https://github.com/rosinality/stylegan2-pytorch/blob/master/op/upfirdn2d.py # noqa:E501
+
+# Copyright (c) 2021, NVIDIA Corporation. All rights reserved.
+# NVIDIA Source Code License for StyleGAN2 with Adaptive Discriminator
+# Augmentation (ADA)
+# =======================================================================
+
+# 1. Definitions
+
+# "Licensor" means any person or entity that distributes its Work.
+
+# "Software" means the original work of authorship made available under
+# this License.
+
+# "Work" means the Software and any additions to or derivative works of
+# the Software that are made available under this License.
+
+# The terms "reproduce," "reproduction," "derivative works," and
+# "distribution" have the meaning as provided under U.S. copyright law;
+# provided, however, that for the purposes of this License, derivative
+# works shall not include works that remain separable from, or merely
+# link (or bind by name) to the interfaces of, the Work.
+
+# Works, including the Software, are "made available" under this License
+# by including in or with the Work either (a) a copyright notice
+# referencing the applicability of this License to the Work, or (b) a
+# copy of this License.
+
+# 2. License Grants
+
+# 2.1 Copyright Grant. Subject to the terms and conditions of this
+# License, each Licensor grants to you a perpetual, worldwide,
+# non-exclusive, royalty-free, copyright license to reproduce,
+# prepare derivative works of, publicly display, publicly perform,
+# sublicense and distribute its Work and any resulting derivative
+# works in any form.
+
+# 3. Limitations
+
+# 3.1 Redistribution. You may reproduce or distribute the Work only
+# if (a) you do so under this License, (b) you include a complete
+# copy of this License with your distribution, and (c) you retain
+# without modification any copyright, patent, trademark, or
+# attribution notices that are present in the Work.
+
+# 3.2 Derivative Works. You may specify that additional or different
+# terms apply to the use, reproduction, and distribution of your
+# derivative works of the Work ("Your Terms") only if (a) Your Terms
+# provide that the use limitation in Section 3.3 applies to your
+# derivative works, and (b) you identify the specific derivative
+# works that are subject to Your Terms. Notwithstanding Your Terms,
+# this License (including the redistribution requirements in Section
+# 3.1) will continue to apply to the Work itself.
+
+# 3.3 Use Limitation. The Work and any derivative works thereof only
+# may be used or intended for use non-commercially. Notwithstanding
+# the foregoing, NVIDIA and its affiliates may use the Work and any
+# derivative works commercially. As used herein, "non-commercially"
+# means for research or evaluation purposes only.
+
+# 3.4 Patent Claims. If you bring or threaten to bring a patent claim
+# against any Licensor (including any claim, cross-claim or
+# counterclaim in a lawsuit) to enforce any patents that you allege
+# are infringed by any Work, then your rights under this License from
+# such Licensor (including the grant in Section 2.1) will terminate
+# immediately.
+
+# 3.5 Trademarks. This License does not grant any rights to use any
+# Licensor’s or its affiliates’ names, logos, or trademarks, except
+# as necessary to reproduce the notices described in this License.
+
+# 3.6 Termination. If you violate any term of this License, then your
+# rights under this License (including the grant in Section 2.1) will
+# terminate immediately.
+
+# 4. Disclaimer of Warranty.
+
+# THE WORK IS PROVIDED "AS IS" WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WARRANTIES OR CONDITIONS OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE OR
+# NON-INFRINGEMENT. YOU BEAR THE RISK OF UNDERTAKING ANY ACTIVITIES UNDER
+# THIS LICENSE.
+
+# 5. Limitation of Liability.
+
+# EXCEPT AS PROHIBITED BY APPLICABLE LAW, IN NO EVENT AND UNDER NO LEGAL
+# THEORY, WHETHER IN TORT (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE
+# SHALL ANY LICENSOR BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY DIRECT,
+# INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF
+# OR RELATED TO THIS LICENSE, THE USE OR INABILITY TO USE THE WORK
+# (INCLUDING BUT NOT LIMITED TO LOSS OF GOODWILL, BUSINESS INTERRUPTION,
+# LOST PROFITS OR DATA, COMPUTER FAILURE OR MALFUNCTION, OR ANY OTHER
+# COMMERCIAL DAMAGES OR LOSSES), EVEN IF THE LICENSOR HAS BEEN ADVISED OF
+# THE POSSIBILITY OF SUCH DAMAGES.
+
+# =======================================================================
+
+import torch
+from torch.autograd import Function
+from torch.nn import functional as F
+
+from annotator.uniformer.mmcv.utils import to_2tuple
+from ..utils import ext_loader
+
+upfirdn2d_ext = ext_loader.load_ext('_ext', ['upfirdn2d'])
+
+
+class UpFirDn2dBackward(Function):
+
+ @staticmethod
+ def forward(ctx, grad_output, kernel, grad_kernel, up, down, pad, g_pad,
+ in_size, out_size):
+
+ up_x, up_y = up
+ down_x, down_y = down
+ g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 = g_pad
+
+ grad_output = grad_output.reshape(-1, out_size[0], out_size[1], 1)
+
+ grad_input = upfirdn2d_ext.upfirdn2d(
+ grad_output,
+ grad_kernel,
+ up_x=down_x,
+ up_y=down_y,
+ down_x=up_x,
+ down_y=up_y,
+ pad_x0=g_pad_x0,
+ pad_x1=g_pad_x1,
+ pad_y0=g_pad_y0,
+ pad_y1=g_pad_y1)
+ grad_input = grad_input.view(in_size[0], in_size[1], in_size[2],
+ in_size[3])
+
+ ctx.save_for_backward(kernel)
+
+ pad_x0, pad_x1, pad_y0, pad_y1 = pad
+
+ ctx.up_x = up_x
+ ctx.up_y = up_y
+ ctx.down_x = down_x
+ ctx.down_y = down_y
+ ctx.pad_x0 = pad_x0
+ ctx.pad_x1 = pad_x1
+ ctx.pad_y0 = pad_y0
+ ctx.pad_y1 = pad_y1
+ ctx.in_size = in_size
+ ctx.out_size = out_size
+
+ return grad_input
+
+ @staticmethod
+ def backward(ctx, gradgrad_input):
+ kernel, = ctx.saved_tensors
+
+ gradgrad_input = gradgrad_input.reshape(-1, ctx.in_size[2],
+ ctx.in_size[3], 1)
+
+ gradgrad_out = upfirdn2d_ext.upfirdn2d(
+ gradgrad_input,
+ kernel,
+ up_x=ctx.up_x,
+ up_y=ctx.up_y,
+ down_x=ctx.down_x,
+ down_y=ctx.down_y,
+ pad_x0=ctx.pad_x0,
+ pad_x1=ctx.pad_x1,
+ pad_y0=ctx.pad_y0,
+ pad_y1=ctx.pad_y1)
+ # gradgrad_out = gradgrad_out.view(ctx.in_size[0], ctx.out_size[0],
+ # ctx.out_size[1], ctx.in_size[3])
+ gradgrad_out = gradgrad_out.view(ctx.in_size[0], ctx.in_size[1],
+ ctx.out_size[0], ctx.out_size[1])
+
+ return gradgrad_out, None, None, None, None, None, None, None, None
+
+
+class UpFirDn2d(Function):
+
+ @staticmethod
+ def forward(ctx, input, kernel, up, down, pad):
+ up_x, up_y = up
+ down_x, down_y = down
+ pad_x0, pad_x1, pad_y0, pad_y1 = pad
+
+ kernel_h, kernel_w = kernel.shape
+ batch, channel, in_h, in_w = input.shape
+ ctx.in_size = input.shape
+
+ input = input.reshape(-1, in_h, in_w, 1)
+
+ ctx.save_for_backward(kernel, torch.flip(kernel, [0, 1]))
+
+ out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1
+ out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1
+ ctx.out_size = (out_h, out_w)
+
+ ctx.up = (up_x, up_y)
+ ctx.down = (down_x, down_y)
+ ctx.pad = (pad_x0, pad_x1, pad_y0, pad_y1)
+
+ g_pad_x0 = kernel_w - pad_x0 - 1
+ g_pad_y0 = kernel_h - pad_y0 - 1
+ g_pad_x1 = in_w * up_x - out_w * down_x + pad_x0 - up_x + 1
+ g_pad_y1 = in_h * up_y - out_h * down_y + pad_y0 - up_y + 1
+
+ ctx.g_pad = (g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1)
+
+ out = upfirdn2d_ext.upfirdn2d(
+ input,
+ kernel,
+ up_x=up_x,
+ up_y=up_y,
+ down_x=down_x,
+ down_y=down_y,
+ pad_x0=pad_x0,
+ pad_x1=pad_x1,
+ pad_y0=pad_y0,
+ pad_y1=pad_y1)
+ # out = out.view(major, out_h, out_w, minor)
+ out = out.view(-1, channel, out_h, out_w)
+
+ return out
+
+ @staticmethod
+ def backward(ctx, grad_output):
+ kernel, grad_kernel = ctx.saved_tensors
+
+ grad_input = UpFirDn2dBackward.apply(
+ grad_output,
+ kernel,
+ grad_kernel,
+ ctx.up,
+ ctx.down,
+ ctx.pad,
+ ctx.g_pad,
+ ctx.in_size,
+ ctx.out_size,
+ )
+
+ return grad_input, None, None, None, None
+
+
+def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)):
+ """UpFRIDn for 2d features.
+
+ UpFIRDn is short for upsample, apply FIR filter and downsample. More
+ details can be found in:
+ https://www.mathworks.com/help/signal/ref/upfirdn.html
+
+ Args:
+ input (Tensor): Tensor with shape of (n, c, h, w).
+ kernel (Tensor): Filter kernel.
+        up (int | tuple[int], optional): Upsampling factor. If given a
+            number, this factor is used for both the height and width
+            sides. Defaults to 1.
+        down (int | tuple[int], optional): Downsampling factor. If given a
+            number, this factor is used for both the height and width
+            sides. Defaults to 1.
+ pad (tuple[int], optional): Padding for tensors, (x_pad, y_pad) or
+ (x_pad_0, x_pad_1, y_pad_0, y_pad_1). Defaults to (0, 0).
+
+ Returns:
+ Tensor: Tensor after UpFIRDn.
+ """
+ if input.device.type == 'cpu':
+ if len(pad) == 2:
+ pad = (pad[0], pad[1], pad[0], pad[1])
+
+ up = to_2tuple(up)
+
+ down = to_2tuple(down)
+
+ out = upfirdn2d_native(input, kernel, up[0], up[1], down[0], down[1],
+ pad[0], pad[1], pad[2], pad[3])
+ else:
+ _up = to_2tuple(up)
+
+ _down = to_2tuple(down)
+
+ if len(pad) == 4:
+ _pad = pad
+ elif len(pad) == 2:
+ _pad = (pad[0], pad[1], pad[0], pad[1])
+
+ out = UpFirDn2d.apply(input, kernel, _up, _down, _pad)
+
+ return out
+
+
+def upfirdn2d_native(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1,
+ pad_y0, pad_y1):
+ _, channel, in_h, in_w = input.shape
+ input = input.reshape(-1, in_h, in_w, 1)
+
+ _, in_h, in_w, minor = input.shape
+ kernel_h, kernel_w = kernel.shape
+
+ out = input.view(-1, in_h, 1, in_w, 1, minor)
+ out = F.pad(out, [0, 0, 0, up_x - 1, 0, 0, 0, up_y - 1])
+ out = out.view(-1, in_h * up_y, in_w * up_x, minor)
+
+ out = F.pad(
+ out,
+ [0, 0,
+ max(pad_x0, 0),
+ max(pad_x1, 0),
+ max(pad_y0, 0),
+ max(pad_y1, 0)])
+ out = out[:,
+ max(-pad_y0, 0):out.shape[1] - max(-pad_y1, 0),
+ max(-pad_x0, 0):out.shape[2] - max(-pad_x1, 0), :, ]
+
+ out = out.permute(0, 3, 1, 2)
+ out = out.reshape(
+ [-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x + pad_x0 + pad_x1])
+ w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w)
+ out = F.conv2d(out, w)
+ out = out.reshape(
+ -1,
+ minor,
+ in_h * up_y + pad_y0 + pad_y1 - kernel_h + 1,
+ in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1,
+ )
+ out = out.permute(0, 2, 3, 1)
+ out = out[:, ::down_y, ::down_x, :]
+
+ out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1
+ out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1
+
+ return out.view(-1, channel, out_h, out_w)
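+
+
+def _upfirdn2d_usage_example():  # pragma: no cover
+    # Hypothetical helper kept only as a minimal usage sketch. On CPU
+    # tensors `upfirdn2d` falls back to the pure-PyTorch
+    # `upfirdn2d_native` path, so no compiled extension is required.
+    img = torch.rand(1, 3, 8, 8)
+    kernel = torch.ones(3, 3) / 9.0  # simple 3x3 box-blur FIR kernel
+    out = upfirdn2d(img, kernel, up=1, down=1, pad=(1, 1))
+    print(out.shape)  # padding of 1 keeps the 8x8 spatial size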
diff --git a/annotator/uniformer/mmcv/ops/voxelize.py b/annotator/uniformer/mmcv/ops/voxelize.py
new file mode 100644
index 0000000000000000000000000000000000000000..ca3226a4fbcbfe58490fa2ea8e1c16b531214121
--- /dev/null
+++ b/annotator/uniformer/mmcv/ops/voxelize.py
@@ -0,0 +1,132 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import torch
+from torch import nn
+from torch.autograd import Function
+from torch.nn.modules.utils import _pair
+
+from ..utils import ext_loader
+
+ext_module = ext_loader.load_ext(
+ '_ext', ['dynamic_voxelize_forward', 'hard_voxelize_forward'])
+
+
+class _Voxelization(Function):
+
+ @staticmethod
+ def forward(ctx,
+ points,
+ voxel_size,
+ coors_range,
+ max_points=35,
+ max_voxels=20000):
+ """Convert kitti points(N, >=3) to voxels.
+
+ Args:
+ points (torch.Tensor): [N, ndim]. Points[:, :3] contain xyz points
+ and points[:, 3:] contain other information like reflectivity.
+ voxel_size (tuple or float): The size of voxel with the shape of
+ [3].
+ coors_range (tuple or float): The coordinate range of voxel with
+ the shape of [6].
+            max_points (int, optional): maximum number of points contained
+                in a voxel. If max_points=-1, dynamic_voxelize is used.
+                Default: 35.
+            max_voxels (int, optional): maximum number of voxels this
+                function creates. For SECOND, 20000 is a good choice.
+                Users should shuffle points before calling this function
+                because max_voxels may drop points. Default: 20000.
+
+ Returns:
+ voxels_out (torch.Tensor): Output voxels with the shape of [M,
+ max_points, ndim]. Only contain points and returned when
+ max_points != -1.
+ coors_out (torch.Tensor): Output coordinates with the shape of
+ [M, 3].
+ num_points_per_voxel_out (torch.Tensor): Num points per voxel with
+ the shape of [M]. Only returned when max_points != -1.
+ """
+ if max_points == -1 or max_voxels == -1:
+ coors = points.new_zeros(size=(points.size(0), 3), dtype=torch.int)
+ ext_module.dynamic_voxelize_forward(points, coors, voxel_size,
+ coors_range, 3)
+ return coors
+ else:
+ voxels = points.new_zeros(
+ size=(max_voxels, max_points, points.size(1)))
+ coors = points.new_zeros(size=(max_voxels, 3), dtype=torch.int)
+ num_points_per_voxel = points.new_zeros(
+ size=(max_voxels, ), dtype=torch.int)
+ voxel_num = ext_module.hard_voxelize_forward(
+ points, voxels, coors, num_points_per_voxel, voxel_size,
+ coors_range, max_points, max_voxels, 3)
+ # select the valid voxels
+ voxels_out = voxels[:voxel_num]
+ coors_out = coors[:voxel_num]
+ num_points_per_voxel_out = num_points_per_voxel[:voxel_num]
+ return voxels_out, coors_out, num_points_per_voxel_out
+
+
+voxelization = _Voxelization.apply
+
+
+class Voxelization(nn.Module):
+ """Convert kitti points(N, >=3) to voxels.
+
+    Please refer to `PVCNN <https://arxiv.org/abs/1907.03739>`_ for more
+ details.
+
+ Args:
+ voxel_size (tuple or float): The size of voxel with the shape of [3].
+ point_cloud_range (tuple or float): The coordinate range of voxel with
+ the shape of [6].
+        max_num_points (int): maximum number of points contained in a
+            voxel. If max_num_points=-1, dynamic_voxelize is used.
+        max_voxels (int, optional): maximum number of voxels this function
+            creates. For SECOND, 20000 is a good choice. Users should
+            shuffle points before calling this function because max_voxels
+            may drop points. Default: 20000.
+ """
+
+ def __init__(self,
+ voxel_size,
+ point_cloud_range,
+ max_num_points,
+ max_voxels=20000):
+ super().__init__()
+
+ self.voxel_size = voxel_size
+ self.point_cloud_range = point_cloud_range
+ self.max_num_points = max_num_points
+ if isinstance(max_voxels, tuple):
+ self.max_voxels = max_voxels
+ else:
+ self.max_voxels = _pair(max_voxels)
+
+ point_cloud_range = torch.tensor(
+ point_cloud_range, dtype=torch.float32)
+ voxel_size = torch.tensor(voxel_size, dtype=torch.float32)
+ grid_size = (point_cloud_range[3:] -
+ point_cloud_range[:3]) / voxel_size
+ grid_size = torch.round(grid_size).long()
+ input_feat_shape = grid_size[:2]
+ self.grid_size = grid_size
+ # the origin shape is as [x-len, y-len, z-len]
+ # [w, h, d] -> [d, h, w]
+ self.pcd_shape = [*input_feat_shape, 1][::-1]
+
+ def forward(self, input):
+ if self.training:
+ max_voxels = self.max_voxels[0]
+ else:
+ max_voxels = self.max_voxels[1]
+
+ return voxelization(input, self.voxel_size, self.point_cloud_range,
+ self.max_num_points, max_voxels)
+
+ def __repr__(self):
+ s = self.__class__.__name__ + '('
+ s += 'voxel_size=' + str(self.voxel_size)
+ s += ', point_cloud_range=' + str(self.point_cloud_range)
+ s += ', max_num_points=' + str(self.max_num_points)
+ s += ', max_voxels=' + str(self.max_voxels)
+ s += ')'
+ return s
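+
+
+def _voxelization_usage_example():  # pragma: no cover
+    # Hypothetical helper kept only as a minimal usage sketch: it assumes
+    # the compiled `_ext` CUDA extension and a CUDA device. The ranges are
+    # arbitrary illustrative values, not recommended settings.
+    voxelizer = Voxelization(
+        voxel_size=[0.5, 0.5, 0.5],
+        point_cloud_range=[0, -40, -3, 70.4, 40, 1],
+        max_num_points=32,
+        max_voxels=20000)
+    points = torch.rand(1000, 4, device='cuda')  # (x, y, z, reflectivity)
+    points[:, 0] *= 70.4                     # x in [0, 70.4)
+    points[:, 1] = points[:, 1] * 80 - 40    # y in [-40, 40)
+    points[:, 2] = points[:, 2] * 4 - 3      # z in [-3, 1)
+    voxels, coors, num_points = voxelizer(points)
+    print(voxels.shape, coors.shape, num_points.shape)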
diff --git a/annotator/uniformer/mmcv/parallel/__init__.py b/annotator/uniformer/mmcv/parallel/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..2ed2c17ad357742e423beeaf4d35db03fe9af469
--- /dev/null
+++ b/annotator/uniformer/mmcv/parallel/__init__.py
@@ -0,0 +1,13 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from .collate import collate
+from .data_container import DataContainer
+from .data_parallel import MMDataParallel
+from .distributed import MMDistributedDataParallel
+from .registry import MODULE_WRAPPERS
+from .scatter_gather import scatter, scatter_kwargs
+from .utils import is_module_wrapper
+
+__all__ = [
+ 'collate', 'DataContainer', 'MMDataParallel', 'MMDistributedDataParallel',
+ 'scatter', 'scatter_kwargs', 'is_module_wrapper', 'MODULE_WRAPPERS'
+]
diff --git a/annotator/uniformer/mmcv/parallel/__pycache__/__init__.cpython-310.pyc b/annotator/uniformer/mmcv/parallel/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..62e9d7d5ec73a354b16fbbdb8bfd8cfa0cffb056
Binary files /dev/null and b/annotator/uniformer/mmcv/parallel/__pycache__/__init__.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/parallel/__pycache__/__init__.cpython-38.pyc b/annotator/uniformer/mmcv/parallel/__pycache__/__init__.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a1886e4220d2ee8aeff6610bc64ef573f1de9f25
Binary files /dev/null and b/annotator/uniformer/mmcv/parallel/__pycache__/__init__.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/parallel/__pycache__/_functions.cpython-310.pyc b/annotator/uniformer/mmcv/parallel/__pycache__/_functions.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..923c5b08c19dc4fe51e3d93e4a7acea3ddf72895
Binary files /dev/null and b/annotator/uniformer/mmcv/parallel/__pycache__/_functions.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/parallel/__pycache__/_functions.cpython-38.pyc b/annotator/uniformer/mmcv/parallel/__pycache__/_functions.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..182af3f187f39a220495fa84339b16cb1dda8229
Binary files /dev/null and b/annotator/uniformer/mmcv/parallel/__pycache__/_functions.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/parallel/__pycache__/collate.cpython-310.pyc b/annotator/uniformer/mmcv/parallel/__pycache__/collate.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1a0043034e05b914fd0ce3c433893190873ac245
Binary files /dev/null and b/annotator/uniformer/mmcv/parallel/__pycache__/collate.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/parallel/__pycache__/collate.cpython-38.pyc b/annotator/uniformer/mmcv/parallel/__pycache__/collate.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..44a37c9b715ef49f3acfef60678eb226cc174210
Binary files /dev/null and b/annotator/uniformer/mmcv/parallel/__pycache__/collate.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/parallel/__pycache__/data_container.cpython-310.pyc b/annotator/uniformer/mmcv/parallel/__pycache__/data_container.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bfc041ec2c21ee9291f251b7a6991c5e4bcd7ce6
Binary files /dev/null and b/annotator/uniformer/mmcv/parallel/__pycache__/data_container.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/parallel/__pycache__/data_container.cpython-38.pyc b/annotator/uniformer/mmcv/parallel/__pycache__/data_container.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7c4dcc124876cc854f3440f0b13a7ae9b9a4c49e
Binary files /dev/null and b/annotator/uniformer/mmcv/parallel/__pycache__/data_container.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/parallel/__pycache__/data_parallel.cpython-310.pyc b/annotator/uniformer/mmcv/parallel/__pycache__/data_parallel.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..427e6f15970ad49f95eda82a72917062b9e79ca8
Binary files /dev/null and b/annotator/uniformer/mmcv/parallel/__pycache__/data_parallel.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/parallel/__pycache__/data_parallel.cpython-38.pyc b/annotator/uniformer/mmcv/parallel/__pycache__/data_parallel.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5d5a402452c6a64f77c5aaeb2ce57d739765918d
Binary files /dev/null and b/annotator/uniformer/mmcv/parallel/__pycache__/data_parallel.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/parallel/__pycache__/distributed.cpython-310.pyc b/annotator/uniformer/mmcv/parallel/__pycache__/distributed.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..99c3474481fe229ce6609b32f15e12b79c231c9a
Binary files /dev/null and b/annotator/uniformer/mmcv/parallel/__pycache__/distributed.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/parallel/__pycache__/distributed.cpython-38.pyc b/annotator/uniformer/mmcv/parallel/__pycache__/distributed.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3b7b7a57c3f302099c937e224d227f6e6cd0c7df
Binary files /dev/null and b/annotator/uniformer/mmcv/parallel/__pycache__/distributed.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/parallel/__pycache__/registry.cpython-310.pyc b/annotator/uniformer/mmcv/parallel/__pycache__/registry.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a31093aeef696f6a6fed73def6b51a0eee25679f
Binary files /dev/null and b/annotator/uniformer/mmcv/parallel/__pycache__/registry.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/parallel/__pycache__/registry.cpython-38.pyc b/annotator/uniformer/mmcv/parallel/__pycache__/registry.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..be5142d9a376f9fbb13cfea76a375b6e86197368
Binary files /dev/null and b/annotator/uniformer/mmcv/parallel/__pycache__/registry.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/parallel/__pycache__/scatter_gather.cpython-310.pyc b/annotator/uniformer/mmcv/parallel/__pycache__/scatter_gather.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6914912f403fea34c05bde58ef5d532a84f10d62
Binary files /dev/null and b/annotator/uniformer/mmcv/parallel/__pycache__/scatter_gather.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/parallel/__pycache__/scatter_gather.cpython-38.pyc b/annotator/uniformer/mmcv/parallel/__pycache__/scatter_gather.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e683c7c76f9557b587af52317e1330d868fa19aa
Binary files /dev/null and b/annotator/uniformer/mmcv/parallel/__pycache__/scatter_gather.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/parallel/__pycache__/utils.cpython-310.pyc b/annotator/uniformer/mmcv/parallel/__pycache__/utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c96ccbfa4f992f6135efe4961e873cab74db93e2
Binary files /dev/null and b/annotator/uniformer/mmcv/parallel/__pycache__/utils.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/parallel/__pycache__/utils.cpython-38.pyc b/annotator/uniformer/mmcv/parallel/__pycache__/utils.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9eb759d455bc18004df2804ffcedab11e7c1cfd5
Binary files /dev/null and b/annotator/uniformer/mmcv/parallel/__pycache__/utils.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/parallel/_functions.py b/annotator/uniformer/mmcv/parallel/_functions.py
new file mode 100644
index 0000000000000000000000000000000000000000..9b5a8a44483ab991411d07122b22a1d027e4be8e
--- /dev/null
+++ b/annotator/uniformer/mmcv/parallel/_functions.py
@@ -0,0 +1,79 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import torch
+from torch.nn.parallel._functions import _get_stream
+
+
+def scatter(input, devices, streams=None):
+ """Scatters tensor across multiple GPUs."""
+ if streams is None:
+ streams = [None] * len(devices)
+
+ if isinstance(input, list):
+ chunk_size = (len(input) - 1) // len(devices) + 1
+ outputs = [
+ scatter(input[i], [devices[i // chunk_size]],
+ [streams[i // chunk_size]]) for i in range(len(input))
+ ]
+ return outputs
+ elif isinstance(input, torch.Tensor):
+ output = input.contiguous()
+ # TODO: copy to a pinned buffer first (if copying from CPU)
+ stream = streams[0] if output.numel() > 0 else None
+ if devices != [-1]:
+ with torch.cuda.device(devices[0]), torch.cuda.stream(stream):
+ output = output.cuda(devices[0], non_blocking=True)
+ else:
+            # unsqueeze the first dimension so that the tensor's shape is
+            # the same as those scattered to GPU.
+ output = output.unsqueeze(0)
+ return output
+ else:
+ raise Exception(f'Unknown type {type(input)}.')
+
+
+def synchronize_stream(output, devices, streams):
+ if isinstance(output, list):
+ chunk_size = len(output) // len(devices)
+ for i in range(len(devices)):
+ for j in range(chunk_size):
+ synchronize_stream(output[i * chunk_size + j], [devices[i]],
+ [streams[i]])
+ elif isinstance(output, torch.Tensor):
+ if output.numel() != 0:
+ with torch.cuda.device(devices[0]):
+ main_stream = torch.cuda.current_stream()
+ main_stream.wait_stream(streams[0])
+ output.record_stream(main_stream)
+ else:
+ raise Exception(f'Unknown type {type(output)}.')
+
+
+def get_input_device(input):
+ if isinstance(input, list):
+ for item in input:
+ input_device = get_input_device(item)
+ if input_device != -1:
+ return input_device
+ return -1
+ elif isinstance(input, torch.Tensor):
+ return input.get_device() if input.is_cuda else -1
+ else:
+ raise Exception(f'Unknown type {type(input)}.')
+
+
+class Scatter:
+
+ @staticmethod
+ def forward(target_gpus, input):
+ input_device = get_input_device(input)
+ streams = None
+ if input_device == -1 and target_gpus != [-1]:
+ # Perform CPU to GPU copies in a background stream
+ streams = [_get_stream(device) for device in target_gpus]
+
+ outputs = scatter(input, target_gpus, streams)
+ # Synchronize with the copy stream
+ if streams is not None:
+ synchronize_stream(outputs, target_gpus, streams)
+
+ return tuple(outputs)
diff --git a/annotator/uniformer/mmcv/parallel/collate.py b/annotator/uniformer/mmcv/parallel/collate.py
new file mode 100644
index 0000000000000000000000000000000000000000..ad749197df21b0d74297548be5f66a696adebf7f
--- /dev/null
+++ b/annotator/uniformer/mmcv/parallel/collate.py
@@ -0,0 +1,84 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from collections.abc import Mapping, Sequence
+
+import torch
+import torch.nn.functional as F
+from torch.utils.data.dataloader import default_collate
+
+from .data_container import DataContainer
+
+
+def collate(batch, samples_per_gpu=1):
+ """Puts each data field into a tensor/DataContainer with outer dimension
+ batch size.
+
+ Extend default_collate to add support for
+ :type:`~mmcv.parallel.DataContainer`. There are 3 cases.
+
+ 1. cpu_only = True, e.g., meta data
+    2. cpu_only = False, stack = True, e.g., image tensors
+ 3. cpu_only = False, stack = False, e.g., gt bboxes
+ """
+
+ if not isinstance(batch, Sequence):
+ raise TypeError(f'{batch.dtype} is not supported.')
+
+ if isinstance(batch[0], DataContainer):
+ stacked = []
+ if batch[0].cpu_only:
+ for i in range(0, len(batch), samples_per_gpu):
+ stacked.append(
+ [sample.data for sample in batch[i:i + samples_per_gpu]])
+ return DataContainer(
+ stacked, batch[0].stack, batch[0].padding_value, cpu_only=True)
+ elif batch[0].stack:
+ for i in range(0, len(batch), samples_per_gpu):
+ assert isinstance(batch[i].data, torch.Tensor)
+
+ if batch[i].pad_dims is not None:
+ ndim = batch[i].dim()
+ assert ndim > batch[i].pad_dims
+ max_shape = [0 for _ in range(batch[i].pad_dims)]
+ for dim in range(1, batch[i].pad_dims + 1):
+ max_shape[dim - 1] = batch[i].size(-dim)
+ for sample in batch[i:i + samples_per_gpu]:
+ for dim in range(0, ndim - batch[i].pad_dims):
+ assert batch[i].size(dim) == sample.size(dim)
+ for dim in range(1, batch[i].pad_dims + 1):
+ max_shape[dim - 1] = max(max_shape[dim - 1],
+ sample.size(-dim))
+ padded_samples = []
+ for sample in batch[i:i + samples_per_gpu]:
+ pad = [0 for _ in range(batch[i].pad_dims * 2)]
+ for dim in range(1, batch[i].pad_dims + 1):
+ pad[2 * dim -
+ 1] = max_shape[dim - 1] - sample.size(-dim)
+ padded_samples.append(
+ F.pad(
+ sample.data, pad, value=sample.padding_value))
+ stacked.append(default_collate(padded_samples))
+ elif batch[i].pad_dims is None:
+ stacked.append(
+ default_collate([
+ sample.data
+ for sample in batch[i:i + samples_per_gpu]
+ ]))
+ else:
+ raise ValueError(
+ 'pad_dims should be either None or integers (1-3)')
+
+ else:
+ for i in range(0, len(batch), samples_per_gpu):
+ stacked.append(
+ [sample.data for sample in batch[i:i + samples_per_gpu]])
+ return DataContainer(stacked, batch[0].stack, batch[0].padding_value)
+ elif isinstance(batch[0], Sequence):
+ transposed = zip(*batch)
+ return [collate(samples, samples_per_gpu) for samples in transposed]
+ elif isinstance(batch[0], Mapping):
+ return {
+ key: collate([d[key] for d in batch], samples_per_gpu)
+ for key in batch[0]
+ }
+ else:
+ return default_collate(batch)
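+
+
+def _collate_usage_example():  # pragma: no cover
+    # Hypothetical helper kept only as a minimal usage sketch: two samples
+    # whose image tensors differ in spatial size are padded and stacked
+    # (case 2 above), while the meta dicts stay on CPU (case 1 above).
+    batch = [
+        dict(
+            img=DataContainer(torch.rand(3, 10, 12), stack=True),
+            img_metas=DataContainer(dict(filename='a.jpg'), cpu_only=True)),
+        dict(
+            img=DataContainer(torch.rand(3, 9, 14), stack=True),
+            img_metas=DataContainer(dict(filename='b.jpg'), cpu_only=True)),
+    ]
+    out = collate(batch, samples_per_gpu=2)
+    print(out['img'].data[0].shape)  # torch.Size([2, 3, 10, 14])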
diff --git a/annotator/uniformer/mmcv/parallel/data_container.py b/annotator/uniformer/mmcv/parallel/data_container.py
new file mode 100644
index 0000000000000000000000000000000000000000..cedb0d32a51a1f575a622b38de2cee3ab4757821
--- /dev/null
+++ b/annotator/uniformer/mmcv/parallel/data_container.py
@@ -0,0 +1,89 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import functools
+
+import torch
+
+
+def assert_tensor_type(func):
+
+ @functools.wraps(func)
+ def wrapper(*args, **kwargs):
+ if not isinstance(args[0].data, torch.Tensor):
+ raise AttributeError(
+ f'{args[0].__class__.__name__} has no attribute '
+ f'{func.__name__} for type {args[0].datatype}')
+ return func(*args, **kwargs)
+
+ return wrapper
+
+
+class DataContainer:
+ """A container for any type of objects.
+
+ Typically tensors will be stacked in the collate function and sliced along
+ some dimension in the scatter function. This behavior has some limitations.
+ 1. All tensors have to be the same size.
+ 2. Types are limited (numpy array or Tensor).
+
+ We design `DataContainer` and `MMDataParallel` to overcome these
+    limitations. The behavior can be one of the following.
+
+ - copy to GPU, pad all tensors to the same size and stack them
+ - copy to GPU without stacking
+    - leave the objects as they are and pass them to the model
+ - pad_dims specifies the number of last few dimensions to do padding
+ """
+
+ def __init__(self,
+ data,
+ stack=False,
+ padding_value=0,
+ cpu_only=False,
+ pad_dims=2):
+ self._data = data
+ self._cpu_only = cpu_only
+ self._stack = stack
+ self._padding_value = padding_value
+ assert pad_dims in [None, 1, 2, 3]
+ self._pad_dims = pad_dims
+
+ def __repr__(self):
+ return f'{self.__class__.__name__}({repr(self.data)})'
+
+ def __len__(self):
+ return len(self._data)
+
+ @property
+ def data(self):
+ return self._data
+
+ @property
+ def datatype(self):
+ if isinstance(self.data, torch.Tensor):
+ return self.data.type()
+ else:
+ return type(self.data)
+
+ @property
+ def cpu_only(self):
+ return self._cpu_only
+
+ @property
+ def stack(self):
+ return self._stack
+
+ @property
+ def padding_value(self):
+ return self._padding_value
+
+ @property
+ def pad_dims(self):
+ return self._pad_dims
+
+ @assert_tensor_type
+ def size(self, *args, **kwargs):
+ return self.data.size(*args, **kwargs)
+
+ @assert_tensor_type
+ def dim(self):
+ return self.data.dim()
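+
+
+def _data_container_usage_example():  # pragma: no cover
+    # Hypothetical helper kept only as a minimal usage sketch: DataContainer
+    # simply wraps an object together with the collate/scatter flags.
+    img = DataContainer(torch.rand(3, 224, 224), stack=True)
+    meta = DataContainer(dict(flip=False), cpu_only=True)
+    print(img.stack, img.pad_dims, img.size(0))  # True 2 3
+    print(meta.cpu_only, meta.datatype)          # True <class 'dict'>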
diff --git a/annotator/uniformer/mmcv/parallel/data_parallel.py b/annotator/uniformer/mmcv/parallel/data_parallel.py
new file mode 100644
index 0000000000000000000000000000000000000000..79b5f69b654cf647dc7ae9174223781ab5c607d2
--- /dev/null
+++ b/annotator/uniformer/mmcv/parallel/data_parallel.py
@@ -0,0 +1,89 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from itertools import chain
+
+from torch.nn.parallel import DataParallel
+
+from .scatter_gather import scatter_kwargs
+
+
+class MMDataParallel(DataParallel):
+ """The DataParallel module that supports DataContainer.
+
+ MMDataParallel has two main differences with PyTorch DataParallel:
+
+ - It supports a custom type :class:`DataContainer` which allows more
+ flexible control of input data during both GPU and CPU inference.
+    - It implements two more APIs ``train_step()`` and ``val_step()``.
+
+ Args:
+ module (:class:`nn.Module`): Module to be encapsulated.
+        device_ids (list[int]): Device IDs of modules to be scattered to.
+ Defaults to None when GPU is not available.
+ output_device (str | int): Device ID for output. Defaults to None.
+ dim (int): Dimension used to scatter the data. Defaults to 0.
+ """
+
+ def __init__(self, *args, dim=0, **kwargs):
+ super(MMDataParallel, self).__init__(*args, dim=dim, **kwargs)
+ self.dim = dim
+
+ def forward(self, *inputs, **kwargs):
+ """Override the original forward function.
+
+ The main difference lies in the CPU inference where the data in
+ :class:`DataContainers` will still be gathered.
+ """
+ if not self.device_ids:
+ # We add the following line thus the module could gather and
+ # convert data containers as those in GPU inference
+ inputs, kwargs = self.scatter(inputs, kwargs, [-1])
+ return self.module(*inputs[0], **kwargs[0])
+ else:
+ return super().forward(*inputs, **kwargs)
+
+ def scatter(self, inputs, kwargs, device_ids):
+ return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim)
+
+ def train_step(self, *inputs, **kwargs):
+ if not self.device_ids:
+ # We add the following line thus the module could gather and
+ # convert data containers as those in GPU inference
+ inputs, kwargs = self.scatter(inputs, kwargs, [-1])
+ return self.module.train_step(*inputs[0], **kwargs[0])
+
+ assert len(self.device_ids) == 1, \
+ ('MMDataParallel only supports single GPU training, if you need to'
+ ' train with multiple GPUs, please use MMDistributedDataParallel'
+             ' instead.')
+
+ for t in chain(self.module.parameters(), self.module.buffers()):
+ if t.device != self.src_device_obj:
+ raise RuntimeError(
+ 'module must have its parameters and buffers '
+ f'on device {self.src_device_obj} (device_ids[0]) but '
+ f'found one of them on device: {t.device}')
+
+ inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids)
+ return self.module.train_step(*inputs[0], **kwargs[0])
+
+ def val_step(self, *inputs, **kwargs):
+ if not self.device_ids:
+ # We add the following line thus the module could gather and
+ # convert data containers as those in GPU inference
+ inputs, kwargs = self.scatter(inputs, kwargs, [-1])
+ return self.module.val_step(*inputs[0], **kwargs[0])
+
+ assert len(self.device_ids) == 1, \
+ ('MMDataParallel only supports single GPU training, if you need to'
+ ' train with multiple GPUs, please use MMDistributedDataParallel'
+ ' instead.')
+
+ for t in chain(self.module.parameters(), self.module.buffers()):
+ if t.device != self.src_device_obj:
+ raise RuntimeError(
+ 'module must have its parameters and buffers '
+ f'on device {self.src_device_obj} (device_ids[0]) but '
+ f'found one of them on device: {t.device}')
+
+ inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids)
+ return self.module.val_step(*inputs[0], **kwargs[0])
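+
+
+def _mmdataparallel_usage_example():  # pragma: no cover
+    # Hypothetical helper kept only as a minimal usage sketch: on a
+    # CPU-only machine MMDataParallel still scatters the inputs (device id
+    # -1) so that DataContainer arguments are unwrapped exactly as in GPU
+    # inference.
+    import torch
+    import torch.nn as nn
+    if not torch.cuda.is_available():
+        model = MMDataParallel(nn.Conv2d(3, 8, 3, padding=1))
+        out = model(torch.rand(1, 3, 16, 16))
+        print(out.shape)  # torch.Size([1, 8, 16, 16])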
diff --git a/annotator/uniformer/mmcv/parallel/distributed.py b/annotator/uniformer/mmcv/parallel/distributed.py
new file mode 100644
index 0000000000000000000000000000000000000000..1e4c27903db58a54d37ea1ed9ec0104098b486f2
--- /dev/null
+++ b/annotator/uniformer/mmcv/parallel/distributed.py
@@ -0,0 +1,112 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import torch
+from torch.nn.parallel.distributed import (DistributedDataParallel,
+ _find_tensors)
+
+from annotator.uniformer.mmcv import print_log
+from annotator.uniformer.mmcv.utils import TORCH_VERSION, digit_version
+from .scatter_gather import scatter_kwargs
+
+
+class MMDistributedDataParallel(DistributedDataParallel):
+ """The DDP module that supports DataContainer.
+
+ MMDDP has two main differences with PyTorch DDP:
+
+ - It supports a custom type :class:`DataContainer` which allows more
+ flexible control of input data.
+    - It implements two APIs ``train_step()`` and ``val_step()``.
+ """
+
+ def to_kwargs(self, inputs, kwargs, device_id):
+ # Use `self.to_kwargs` instead of `self.scatter` in pytorch1.8
+ # to move all tensors to device_id
+ return scatter_kwargs(inputs, kwargs, [device_id], dim=self.dim)
+
+ def scatter(self, inputs, kwargs, device_ids):
+ return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim)
+
+ def train_step(self, *inputs, **kwargs):
+ """train_step() API for module wrapped by DistributedDataParallel.
+
+ This method is basically the same as
+ ``DistributedDataParallel.forward()``, while replacing
+ ``self.module.forward()`` with ``self.module.train_step()``.
+ It is compatible with PyTorch 1.1 - 1.5.
+ """
+
+ # In PyTorch >= 1.7, ``reducer._rebuild_buckets()`` is moved from the
+ # end of backward to the beginning of forward.
+ if ('parrots' not in TORCH_VERSION
+ and digit_version(TORCH_VERSION) >= digit_version('1.7')
+ and self.reducer._rebuild_buckets()):
+ print_log(
+ 'Reducer buckets have been rebuilt in this iteration.',
+ logger='mmcv')
+
+ if getattr(self, 'require_forward_param_sync', True):
+ self._sync_params()
+ if self.device_ids:
+ inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids)
+ if len(self.device_ids) == 1:
+ output = self.module.train_step(*inputs[0], **kwargs[0])
+ else:
+ outputs = self.parallel_apply(
+ self._module_copies[:len(inputs)], inputs, kwargs)
+ output = self.gather(outputs, self.output_device)
+ else:
+ output = self.module.train_step(*inputs, **kwargs)
+
+ if torch.is_grad_enabled() and getattr(
+ self, 'require_backward_grad_sync', True):
+ if self.find_unused_parameters:
+ self.reducer.prepare_for_backward(list(_find_tensors(output)))
+ else:
+ self.reducer.prepare_for_backward([])
+ else:
+ if ('parrots' not in TORCH_VERSION
+ and digit_version(TORCH_VERSION) > digit_version('1.2')):
+ self.require_forward_param_sync = False
+ return output
+
+ def val_step(self, *inputs, **kwargs):
+ """val_step() API for module wrapped by DistributedDataParallel.
+
+ This method is basically the same as
+ ``DistributedDataParallel.forward()``, while replacing
+ ``self.module.forward()`` with ``self.module.val_step()``.
+ It is compatible with PyTorch 1.1 - 1.5.
+ """
+ # In PyTorch >= 1.7, ``reducer._rebuild_buckets()`` is moved from the
+ # end of backward to the beginning of forward.
+ if ('parrots' not in TORCH_VERSION
+ and digit_version(TORCH_VERSION) >= digit_version('1.7')
+ and self.reducer._rebuild_buckets()):
+ print_log(
+ 'Reducer buckets have been rebuilt in this iteration.',
+ logger='mmcv')
+
+ if getattr(self, 'require_forward_param_sync', True):
+ self._sync_params()
+ if self.device_ids:
+ inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids)
+ if len(self.device_ids) == 1:
+ output = self.module.val_step(*inputs[0], **kwargs[0])
+ else:
+ outputs = self.parallel_apply(
+ self._module_copies[:len(inputs)], inputs, kwargs)
+ output = self.gather(outputs, self.output_device)
+ else:
+ output = self.module.val_step(*inputs, **kwargs)
+
+ if torch.is_grad_enabled() and getattr(
+ self, 'require_backward_grad_sync', True):
+ if self.find_unused_parameters:
+ self.reducer.prepare_for_backward(list(_find_tensors(output)))
+ else:
+ self.reducer.prepare_for_backward([])
+ else:
+ if ('parrots' not in TORCH_VERSION
+ and digit_version(TORCH_VERSION) > digit_version('1.2')):
+ self.require_forward_param_sync = False
+ return output
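+
+
+def _mmddp_usage_example():  # pragma: no cover
+    # Hypothetical helper kept only as a minimal usage sketch: it assumes
+    # torch.distributed has already been initialized (e.g. via
+    # init_process_group) with one GPU per rank.
+    import torch.nn as nn
+    rank = torch.cuda.current_device()
+    model = nn.Conv2d(3, 8, 3, padding=1).cuda(rank)
+    ddp = MMDistributedDataParallel(
+        model, device_ids=[rank], broadcast_buffers=False)
+    x = torch.rand(2, 3, 16, 16, device=f'cuda:{rank}')
+    # Regular forward; `train_step`/`val_step` additionally require the
+    # wrapped module to define those methods (e.g. mmdet/mmseg models).
+    print(ddp(x).shape)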
diff --git a/annotator/uniformer/mmcv/parallel/distributed_deprecated.py b/annotator/uniformer/mmcv/parallel/distributed_deprecated.py
new file mode 100644
index 0000000000000000000000000000000000000000..676937a2085d4da20fa87923041a200fca6214eb
--- /dev/null
+++ b/annotator/uniformer/mmcv/parallel/distributed_deprecated.py
@@ -0,0 +1,70 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import torch
+import torch.distributed as dist
+import torch.nn as nn
+from torch._utils import (_flatten_dense_tensors, _take_tensors,
+ _unflatten_dense_tensors)
+
+from annotator.uniformer.mmcv.utils import TORCH_VERSION, digit_version
+from .registry import MODULE_WRAPPERS
+from .scatter_gather import scatter_kwargs
+
+
+@MODULE_WRAPPERS.register_module()
+class MMDistributedDataParallel(nn.Module):
+
+ def __init__(self,
+ module,
+ dim=0,
+ broadcast_buffers=True,
+ bucket_cap_mb=25):
+ super(MMDistributedDataParallel, self).__init__()
+ self.module = module
+ self.dim = dim
+ self.broadcast_buffers = broadcast_buffers
+
+ self.broadcast_bucket_size = bucket_cap_mb * 1024 * 1024
+ self._sync_params()
+
+ def _dist_broadcast_coalesced(self, tensors, buffer_size):
+ for tensors in _take_tensors(tensors, buffer_size):
+ flat_tensors = _flatten_dense_tensors(tensors)
+ dist.broadcast(flat_tensors, 0)
+ for tensor, synced in zip(
+ tensors, _unflatten_dense_tensors(flat_tensors, tensors)):
+ tensor.copy_(synced)
+
+ def _sync_params(self):
+ module_states = list(self.module.state_dict().values())
+ if len(module_states) > 0:
+ self._dist_broadcast_coalesced(module_states,
+ self.broadcast_bucket_size)
+ if self.broadcast_buffers:
+ if (TORCH_VERSION != 'parrots'
+ and digit_version(TORCH_VERSION) < digit_version('1.0')):
+ buffers = [b.data for b in self.module._all_buffers()]
+ else:
+ buffers = [b.data for b in self.module.buffers()]
+ if len(buffers) > 0:
+ self._dist_broadcast_coalesced(buffers,
+ self.broadcast_bucket_size)
+
+ def scatter(self, inputs, kwargs, device_ids):
+ return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim)
+
+ def forward(self, *inputs, **kwargs):
+ inputs, kwargs = self.scatter(inputs, kwargs,
+ [torch.cuda.current_device()])
+ return self.module(*inputs[0], **kwargs[0])
+
+ def train_step(self, *inputs, **kwargs):
+ inputs, kwargs = self.scatter(inputs, kwargs,
+ [torch.cuda.current_device()])
+ output = self.module.train_step(*inputs[0], **kwargs[0])
+ return output
+
+ def val_step(self, *inputs, **kwargs):
+ inputs, kwargs = self.scatter(inputs, kwargs,
+ [torch.cuda.current_device()])
+ output = self.module.val_step(*inputs[0], **kwargs[0])
+ return output
diff --git a/annotator/uniformer/mmcv/parallel/registry.py b/annotator/uniformer/mmcv/parallel/registry.py
new file mode 100644
index 0000000000000000000000000000000000000000..a204a07fba10e614223f090d1a57cf9c4d74d4a1
--- /dev/null
+++ b/annotator/uniformer/mmcv/parallel/registry.py
@@ -0,0 +1,8 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from torch.nn.parallel import DataParallel, DistributedDataParallel
+
+from annotator.uniformer.mmcv.utils import Registry
+
+MODULE_WRAPPERS = Registry('module wrapper')
+MODULE_WRAPPERS.register_module(module=DataParallel)
+MODULE_WRAPPERS.register_module(module=DistributedDataParallel)
diff --git a/annotator/uniformer/mmcv/parallel/scatter_gather.py b/annotator/uniformer/mmcv/parallel/scatter_gather.py
new file mode 100644
index 0000000000000000000000000000000000000000..900ff88566f8f14830590459dc4fd16d4b382e47
--- /dev/null
+++ b/annotator/uniformer/mmcv/parallel/scatter_gather.py
@@ -0,0 +1,59 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import torch
+from torch.nn.parallel._functions import Scatter as OrigScatter
+
+from ._functions import Scatter
+from .data_container import DataContainer
+
+
+def scatter(inputs, target_gpus, dim=0):
+ """Scatter inputs to target gpus.
+
+ The only difference from original :func:`scatter` is to add support for
+ :type:`~mmcv.parallel.DataContainer`.
+ """
+
+ def scatter_map(obj):
+ if isinstance(obj, torch.Tensor):
+ if target_gpus != [-1]:
+ return OrigScatter.apply(target_gpus, None, dim, obj)
+ else:
+ # for CPU inference we use self-implemented scatter
+ return Scatter.forward(target_gpus, obj)
+ if isinstance(obj, DataContainer):
+ if obj.cpu_only:
+ return obj.data
+ else:
+ return Scatter.forward(target_gpus, obj.data)
+ if isinstance(obj, tuple) and len(obj) > 0:
+ return list(zip(*map(scatter_map, obj)))
+ if isinstance(obj, list) and len(obj) > 0:
+ out = list(map(list, zip(*map(scatter_map, obj))))
+ return out
+ if isinstance(obj, dict) and len(obj) > 0:
+ out = list(map(type(obj), zip(*map(scatter_map, obj.items()))))
+ return out
+ return [obj for targets in target_gpus]
+
+ # After scatter_map is called, a scatter_map cell will exist. This cell
+ # has a reference to the actual function scatter_map, which has references
+ # to a closure that has a reference to the scatter_map cell (because the
+ # fn is recursive). To avoid this reference cycle, we set the function to
+ # None, clearing the cell
+ try:
+ return scatter_map(inputs)
+ finally:
+ scatter_map = None
+
+
+def scatter_kwargs(inputs, kwargs, target_gpus, dim=0):
+ """Scatter with support for kwargs dictionary."""
+ inputs = scatter(inputs, target_gpus, dim) if inputs else []
+ kwargs = scatter(kwargs, target_gpus, dim) if kwargs else []
+ if len(inputs) < len(kwargs):
+ inputs.extend([() for _ in range(len(kwargs) - len(inputs))])
+ elif len(kwargs) < len(inputs):
+ kwargs.extend([{} for _ in range(len(inputs) - len(kwargs))])
+ inputs = tuple(inputs)
+ kwargs = tuple(kwargs)
+ return inputs, kwargs
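+
+
+def _scatter_kwargs_usage_example():  # pragma: no cover
+    # Hypothetical helper kept only as a minimal usage sketch: on CPU
+    # (target gpu id -1) the DataContainer payload is unwrapped and each
+    # positional/keyword argument is grouped into a one-element tuple,
+    # mirroring the per-GPU grouping used for real devices.
+    img = DataContainer(torch.rand(2, 3, 4, 4), stack=True)
+    inputs, kwargs = scatter_kwargs((img,), dict(return_loss=False), [-1])
+    print(type(inputs[0][0]), kwargs[0]['return_loss'])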
diff --git a/annotator/uniformer/mmcv/parallel/utils.py b/annotator/uniformer/mmcv/parallel/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..0f5712cb42c38a2e8563bf563efb6681383cab9b
--- /dev/null
+++ b/annotator/uniformer/mmcv/parallel/utils.py
@@ -0,0 +1,20 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from .registry import MODULE_WRAPPERS
+
+
+def is_module_wrapper(module):
+ """Check if a module is a module wrapper.
+
+ The following 3 modules in MMCV (and their subclasses) are regarded as
+ module wrappers: DataParallel, DistributedDataParallel,
+    MMDistributedDataParallel (the deprecated version). You may add your own
+ module wrapper by registering it to mmcv.parallel.MODULE_WRAPPERS.
+
+ Args:
+ module (nn.Module): The module to be checked.
+
+ Returns:
+ bool: True if the input module is a module wrapper.
+ """
+ module_wrappers = tuple(MODULE_WRAPPERS.module_dict.values())
+ return isinstance(module, module_wrappers)
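+
+
+def _is_module_wrapper_usage_example():  # pragma: no cover
+    # Hypothetical helper kept only as a minimal usage sketch: any module
+    # that is (a subclass of) a registered wrapper is detected, plain
+    # modules are not.
+    import torch.nn as nn
+    from .data_parallel import MMDataParallel
+    layer = nn.Linear(2, 2)
+    print(is_module_wrapper(layer))                  # False
+    print(is_module_wrapper(MMDataParallel(layer)))  # True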
diff --git a/annotator/uniformer/mmcv/runner/__init__.py b/annotator/uniformer/mmcv/runner/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..52e4b48d383a84a055dcd7f6236f6e8e58eab924
--- /dev/null
+++ b/annotator/uniformer/mmcv/runner/__init__.py
@@ -0,0 +1,47 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from .base_module import BaseModule, ModuleList, Sequential
+from .base_runner import BaseRunner
+from .builder import RUNNERS, build_runner
+from .checkpoint import (CheckpointLoader, _load_checkpoint,
+ _load_checkpoint_with_prefix, load_checkpoint,
+ load_state_dict, save_checkpoint, weights_to_cpu)
+from .default_constructor import DefaultRunnerConstructor
+from .dist_utils import (allreduce_grads, allreduce_params, get_dist_info,
+ init_dist, master_only)
+from .epoch_based_runner import EpochBasedRunner, Runner
+from .fp16_utils import LossScaler, auto_fp16, force_fp32, wrap_fp16_model
+from .hooks import (HOOKS, CheckpointHook, ClosureHook, DistEvalHook,
+ DistSamplerSeedHook, DvcliveLoggerHook, EMAHook, EvalHook,
+ Fp16OptimizerHook, GradientCumulativeFp16OptimizerHook,
+ GradientCumulativeOptimizerHook, Hook, IterTimerHook,
+ LoggerHook, LrUpdaterHook, MlflowLoggerHook,
+ NeptuneLoggerHook, OptimizerHook, PaviLoggerHook,
+ SyncBuffersHook, TensorboardLoggerHook, TextLoggerHook,
+ WandbLoggerHook)
+from .iter_based_runner import IterBasedRunner, IterLoader
+from .log_buffer import LogBuffer
+from .optimizer import (OPTIMIZER_BUILDERS, OPTIMIZERS,
+ DefaultOptimizerConstructor, build_optimizer,
+ build_optimizer_constructor)
+from .priority import Priority, get_priority
+from .utils import get_host_info, get_time_str, obj_from_dict, set_random_seed
+
+__all__ = [
+ 'BaseRunner', 'Runner', 'EpochBasedRunner', 'IterBasedRunner', 'LogBuffer',
+ 'HOOKS', 'Hook', 'CheckpointHook', 'ClosureHook', 'LrUpdaterHook',
+ 'OptimizerHook', 'IterTimerHook', 'DistSamplerSeedHook', 'LoggerHook',
+ 'PaviLoggerHook', 'TextLoggerHook', 'TensorboardLoggerHook',
+ 'NeptuneLoggerHook', 'WandbLoggerHook', 'MlflowLoggerHook',
+ 'DvcliveLoggerHook', '_load_checkpoint', 'load_state_dict',
+ 'load_checkpoint', 'weights_to_cpu', 'save_checkpoint', 'Priority',
+ 'get_priority', 'get_host_info', 'get_time_str', 'obj_from_dict',
+ 'init_dist', 'get_dist_info', 'master_only', 'OPTIMIZER_BUILDERS',
+ 'OPTIMIZERS', 'DefaultOptimizerConstructor', 'build_optimizer',
+ 'build_optimizer_constructor', 'IterLoader', 'set_random_seed',
+ 'auto_fp16', 'force_fp32', 'wrap_fp16_model', 'Fp16OptimizerHook',
+ 'SyncBuffersHook', 'EMAHook', 'build_runner', 'RUNNERS', 'allreduce_grads',
+ 'allreduce_params', 'LossScaler', 'CheckpointLoader', 'BaseModule',
+ '_load_checkpoint_with_prefix', 'EvalHook', 'DistEvalHook', 'Sequential',
+ 'ModuleList', 'GradientCumulativeOptimizerHook',
+ 'GradientCumulativeFp16OptimizerHook', 'DefaultRunnerConstructor'
+]
diff --git a/annotator/uniformer/mmcv/runner/__pycache__/__init__.cpython-310.pyc b/annotator/uniformer/mmcv/runner/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c623a505e7c5b7d3b6b216bf561a1e8fb5f6f00a
Binary files /dev/null and b/annotator/uniformer/mmcv/runner/__pycache__/__init__.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/runner/__pycache__/__init__.cpython-38.pyc b/annotator/uniformer/mmcv/runner/__pycache__/__init__.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..27c2a5cc451c2f0ab392de9e5546599b0a8fd73c
Binary files /dev/null and b/annotator/uniformer/mmcv/runner/__pycache__/__init__.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/runner/__pycache__/base_module.cpython-310.pyc b/annotator/uniformer/mmcv/runner/__pycache__/base_module.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9f71e7c5b05048e825102fe0df56d0e1f0480fa2
Binary files /dev/null and b/annotator/uniformer/mmcv/runner/__pycache__/base_module.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/runner/__pycache__/base_module.cpython-38.pyc b/annotator/uniformer/mmcv/runner/__pycache__/base_module.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9166501ed1a23944e3ab3c4620d4366010d8e28f
Binary files /dev/null and b/annotator/uniformer/mmcv/runner/__pycache__/base_module.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/runner/__pycache__/base_runner.cpython-310.pyc b/annotator/uniformer/mmcv/runner/__pycache__/base_runner.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f2cefb92134305d69831e206409f90bb11afd09f
Binary files /dev/null and b/annotator/uniformer/mmcv/runner/__pycache__/base_runner.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/runner/__pycache__/base_runner.cpython-38.pyc b/annotator/uniformer/mmcv/runner/__pycache__/base_runner.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0f8d024cd765e0795a324ece8a6c110afde7313b
Binary files /dev/null and b/annotator/uniformer/mmcv/runner/__pycache__/base_runner.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/runner/__pycache__/builder.cpython-310.pyc b/annotator/uniformer/mmcv/runner/__pycache__/builder.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3bf7845627e79532838eaf713151611795d64815
Binary files /dev/null and b/annotator/uniformer/mmcv/runner/__pycache__/builder.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/runner/__pycache__/builder.cpython-38.pyc b/annotator/uniformer/mmcv/runner/__pycache__/builder.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4dc7d3c90b47a6cae4887f55f3ae0d3aeb3210d2
Binary files /dev/null and b/annotator/uniformer/mmcv/runner/__pycache__/builder.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/runner/__pycache__/checkpoint.cpython-310.pyc b/annotator/uniformer/mmcv/runner/__pycache__/checkpoint.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..81668e472b36f686bb2f6df2e2d73ef1a3cc1fcf
Binary files /dev/null and b/annotator/uniformer/mmcv/runner/__pycache__/checkpoint.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/runner/__pycache__/checkpoint.cpython-38.pyc b/annotator/uniformer/mmcv/runner/__pycache__/checkpoint.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..71ce15fc623ba5bd522c65b4129c6f4ece123780
Binary files /dev/null and b/annotator/uniformer/mmcv/runner/__pycache__/checkpoint.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/runner/__pycache__/default_constructor.cpython-310.pyc b/annotator/uniformer/mmcv/runner/__pycache__/default_constructor.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5ba54fa6d900b2b7f7efc42702602b540b2c83b4
Binary files /dev/null and b/annotator/uniformer/mmcv/runner/__pycache__/default_constructor.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/runner/__pycache__/default_constructor.cpython-38.pyc b/annotator/uniformer/mmcv/runner/__pycache__/default_constructor.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cc045d3027688e7f453ac2e1b973ddd8b940e56b
Binary files /dev/null and b/annotator/uniformer/mmcv/runner/__pycache__/default_constructor.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/runner/__pycache__/dist_utils.cpython-310.pyc b/annotator/uniformer/mmcv/runner/__pycache__/dist_utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..595cd84ebb4777755294ab3da51ae3789868e1d3
Binary files /dev/null and b/annotator/uniformer/mmcv/runner/__pycache__/dist_utils.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/runner/__pycache__/dist_utils.cpython-38.pyc b/annotator/uniformer/mmcv/runner/__pycache__/dist_utils.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a4ca118174831c163bfb662f3b6ef35c305aa427
Binary files /dev/null and b/annotator/uniformer/mmcv/runner/__pycache__/dist_utils.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/runner/__pycache__/epoch_based_runner.cpython-310.pyc b/annotator/uniformer/mmcv/runner/__pycache__/epoch_based_runner.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b5eec7c7c1ba5d7eb2877f381d194ea2e8e9bbc9
Binary files /dev/null and b/annotator/uniformer/mmcv/runner/__pycache__/epoch_based_runner.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/runner/__pycache__/epoch_based_runner.cpython-38.pyc b/annotator/uniformer/mmcv/runner/__pycache__/epoch_based_runner.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..26fea9e84f3cc2fafaa2e8339c801803a5350953
Binary files /dev/null and b/annotator/uniformer/mmcv/runner/__pycache__/epoch_based_runner.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/runner/__pycache__/fp16_utils.cpython-310.pyc b/annotator/uniformer/mmcv/runner/__pycache__/fp16_utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e8e8fa301b77c5a3bc89af97fcc23785c9f573b2
Binary files /dev/null and b/annotator/uniformer/mmcv/runner/__pycache__/fp16_utils.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/runner/__pycache__/fp16_utils.cpython-38.pyc b/annotator/uniformer/mmcv/runner/__pycache__/fp16_utils.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..570ac323a73baf9c9f76b436c11d7be3b71e4a50
Binary files /dev/null and b/annotator/uniformer/mmcv/runner/__pycache__/fp16_utils.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/runner/__pycache__/iter_based_runner.cpython-310.pyc b/annotator/uniformer/mmcv/runner/__pycache__/iter_based_runner.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b594c4907967a77cabb0c578f5090110c7cd9858
Binary files /dev/null and b/annotator/uniformer/mmcv/runner/__pycache__/iter_based_runner.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/runner/__pycache__/iter_based_runner.cpython-38.pyc b/annotator/uniformer/mmcv/runner/__pycache__/iter_based_runner.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1f7b38adf77423df2f324a178a3b0baed92af4fc
Binary files /dev/null and b/annotator/uniformer/mmcv/runner/__pycache__/iter_based_runner.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/runner/__pycache__/log_buffer.cpython-310.pyc b/annotator/uniformer/mmcv/runner/__pycache__/log_buffer.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9e12e69f53e096fbb897c1c11886c8a59d5a900d
Binary files /dev/null and b/annotator/uniformer/mmcv/runner/__pycache__/log_buffer.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/runner/__pycache__/log_buffer.cpython-38.pyc b/annotator/uniformer/mmcv/runner/__pycache__/log_buffer.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..841df61f4c84568a03b804710f50414d88fef798
Binary files /dev/null and b/annotator/uniformer/mmcv/runner/__pycache__/log_buffer.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/runner/__pycache__/priority.cpython-310.pyc b/annotator/uniformer/mmcv/runner/__pycache__/priority.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1161218381f96b5eb947632500f7d424f3548f82
Binary files /dev/null and b/annotator/uniformer/mmcv/runner/__pycache__/priority.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/runner/__pycache__/priority.cpython-38.pyc b/annotator/uniformer/mmcv/runner/__pycache__/priority.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..36eb0ca09a6430556413cd84b083722c283a011f
Binary files /dev/null and b/annotator/uniformer/mmcv/runner/__pycache__/priority.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/runner/__pycache__/utils.cpython-310.pyc b/annotator/uniformer/mmcv/runner/__pycache__/utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a2822f8a8fefcc45f75122201077925eceeb8e42
Binary files /dev/null and b/annotator/uniformer/mmcv/runner/__pycache__/utils.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/runner/__pycache__/utils.cpython-38.pyc b/annotator/uniformer/mmcv/runner/__pycache__/utils.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1a9ac6ee2099e0156a9248e4ff2a49e9105d251a
Binary files /dev/null and b/annotator/uniformer/mmcv/runner/__pycache__/utils.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/runner/base_module.py b/annotator/uniformer/mmcv/runner/base_module.py
new file mode 100644
index 0000000000000000000000000000000000000000..617fad9bb89f10a9a0911d962dfb3bc8f3a3628c
--- /dev/null
+++ b/annotator/uniformer/mmcv/runner/base_module.py
@@ -0,0 +1,195 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import copy
+import warnings
+from abc import ABCMeta
+from collections import defaultdict
+from logging import FileHandler
+
+import torch.nn as nn
+
+from annotator.uniformer.mmcv.runner.dist_utils import master_only
+from annotator.uniformer.mmcv.utils.logging import get_logger, logger_initialized, print_log
+
+
+class BaseModule(nn.Module, metaclass=ABCMeta):
+ """Base module for all modules in openmmlab.
+
+ ``BaseModule`` is a wrapper of ``torch.nn.Module`` with additional
+ functionality of parameter initialization. Compared with
+ ``torch.nn.Module``, ``BaseModule`` mainly adds three attributes.
+
+ - ``init_cfg``: the config to control the initialization.
+ - ``init_weights``: The function of parameter
+ initialization and recording initialization
+ information.
+ - ``_params_init_info``: Used to track the parameter
+ initialization information. This attribute only
+ exists during executing the ``init_weights``.
+
+ Args:
+ init_cfg (dict, optional): Initialization config dict.
+ """
+
+ def __init__(self, init_cfg=None):
+ """Initialize BaseModule, inherited from `torch.nn.Module`"""
+
+ # NOTE init_cfg can be defined in different levels, but init_cfg
+ # in low levels has a higher priority.
+
+ super(BaseModule, self).__init__()
+ # define default value of init_cfg instead of hard code
+ # in init_weights() function
+ self._is_init = False
+
+ self.init_cfg = copy.deepcopy(init_cfg)
+
+ # Backward compatibility in derived classes
+ # if pretrained is not None:
+ # warnings.warn('DeprecationWarning: pretrained is a deprecated \
+ # key, please consider using init_cfg')
+ # self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)
+
+ @property
+ def is_init(self):
+ return self._is_init
+
+ def init_weights(self):
+ """Initialize the weights."""
+
+ is_top_level_module = False
+ # check if it is top-level module
+ if not hasattr(self, '_params_init_info'):
+ # The `_params_init_info` is used to record the initialization
+ # information of the parameters
+ # the key should be the obj:`nn.Parameter` of model and the value
+ # should be a dict containing
+ # - init_info (str): The string that describes the initialization.
+ # - tmp_mean_value (FloatTensor): The mean of the parameter,
+ # which indicates whether the parameter has been modified.
+            # this attribute would be deleted after all parameters
+            # are initialized.
+ self._params_init_info = defaultdict(dict)
+ is_top_level_module = True
+
+            # Initialize `_params_init_info`.
+            # When the `tmp_mean_value` of a parameter is detected
+            # to have changed, the related initialization
+            # information is updated accordingly.
+ for name, param in self.named_parameters():
+ self._params_init_info[param][
+ 'init_info'] = f'The value is the same before and ' \
+ f'after calling `init_weights` ' \
+ f'of {self.__class__.__name__} '
+ self._params_init_info[param][
+ 'tmp_mean_value'] = param.data.mean()
+
+ # pass `params_init_info` to all submodules
+ # All submodules share the same `params_init_info`,
+ # so it will be updated when parameters are
+ # modified at any level of the model.
+ for sub_module in self.modules():
+ sub_module._params_init_info = self._params_init_info
+
+ # Get the initialized logger, if not exist,
+ # create a logger named `mmcv`
+ logger_names = list(logger_initialized.keys())
+ logger_name = logger_names[0] if logger_names else 'mmcv'
+
+ from ..cnn import initialize
+ from ..cnn.utils.weight_init import update_init_info
+ module_name = self.__class__.__name__
+ if not self._is_init:
+ if self.init_cfg:
+ print_log(
+ f'initialize {module_name} with init_cfg {self.init_cfg}',
+ logger=logger_name)
+ initialize(self, self.init_cfg)
+ if isinstance(self.init_cfg, dict):
+ # prevent the parameters of
+ # the pre-trained model
+ # from being overwritten by
+ # the `init_weights`
+ if self.init_cfg['type'] == 'Pretrained':
+ return
+
+ for m in self.children():
+ if hasattr(m, 'init_weights'):
+ m.init_weights()
+ # users may overload the `init_weights`
+ update_init_info(
+ m,
+ init_info=f'Initialized by '
+ f'user-defined `init_weights`'
+ f' in {m.__class__.__name__} ')
+
+ self._is_init = True
+ else:
+ warnings.warn(f'init_weights of {self.__class__.__name__} has '
+ f'been called more than once.')
+
+ if is_top_level_module:
+ self._dump_init_info(logger_name)
+
+ for sub_module in self.modules():
+ del sub_module._params_init_info
+
+ @master_only
+ def _dump_init_info(self, logger_name):
+ """Dump the initialization information to a file named
+ `initialization.log.json` in workdir.
+
+ Args:
+ logger_name (str): The name of logger.
+ """
+
+ logger = get_logger(logger_name)
+
+ with_file_handler = False
+ # dump the information to the logger file if there is a `FileHandler`
+ for handler in logger.handlers:
+ if isinstance(handler, FileHandler):
+ handler.stream.write(
+ 'Name of parameter - Initialization information\n')
+ for name, param in self.named_parameters():
+ handler.stream.write(
+ f'\n{name} - {param.shape}: '
+ f"\n{self._params_init_info[param]['init_info']} \n")
+ handler.stream.flush()
+ with_file_handler = True
+ if not with_file_handler:
+ for name, param in self.named_parameters():
+ print_log(
+ f'\n{name} - {param.shape}: '
+ f"\n{self._params_init_info[param]['init_info']} \n ",
+ logger=logger_name)
+
+ def __repr__(self):
+ s = super().__repr__()
+ if self.init_cfg:
+ s += f'\ninit_cfg={self.init_cfg}'
+ return s
+
+
+class Sequential(BaseModule, nn.Sequential):
+ """Sequential module in openmmlab.
+
+ Args:
+ init_cfg (dict, optional): Initialization config dict.
+ """
+
+ def __init__(self, *args, init_cfg=None):
+ BaseModule.__init__(self, init_cfg)
+ nn.Sequential.__init__(self, *args)
+
+
+class ModuleList(BaseModule, nn.ModuleList):
+ """ModuleList in openmmlab.
+
+ Args:
+ modules (iterable, optional): an iterable of modules to add.
+ init_cfg (dict, optional): Initialization config dict.
+ """
+
+ def __init__(self, modules=None, init_cfg=None):
+ BaseModule.__init__(self, init_cfg)
+ nn.ModuleList.__init__(self, modules)
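As a usage illustration for `BaseModule` and `init_cfg`, the hedged sketch below defines a tiny subclass and lets `init_weights()` apply the config. The `Constant` initializer type comes from the weight-init registry in the vendored `mmcv.cnn` package; `TinyHead` and the chosen values are made up for the example.

```python
# Hedged sketch: a BaseModule subclass whose parameters are initialized from
# init_cfg when init_weights() is called (assumes the vendored
# annotator.uniformer.mmcv package and its 'Constant' initializer exist).
import torch.nn as nn

from annotator.uniformer.mmcv.runner import BaseModule


class TinyHead(BaseModule):

    def __init__(self, init_cfg=None):
        super().__init__(init_cfg=init_cfg)
        self.conv = nn.Conv2d(3, 8, kernel_size=3, padding=1)


head = TinyHead(init_cfg=dict(type='Constant', layer='Conv2d', val=0.5))
head.init_weights()                    # applies init_cfg, logs per-parameter info
print(head.conv.weight.data.unique())  # tensor([0.5000])
```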
diff --git a/annotator/uniformer/mmcv/runner/base_runner.py b/annotator/uniformer/mmcv/runner/base_runner.py
new file mode 100644
index 0000000000000000000000000000000000000000..4928db0a73b56fe0218a4bf66ec4ffa082d31ccc
--- /dev/null
+++ b/annotator/uniformer/mmcv/runner/base_runner.py
@@ -0,0 +1,542 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import copy
+import logging
+import os.path as osp
+import warnings
+from abc import ABCMeta, abstractmethod
+
+import torch
+from torch.optim import Optimizer
+
+import annotator.uniformer.mmcv as mmcv
+from ..parallel import is_module_wrapper
+from .checkpoint import load_checkpoint
+from .dist_utils import get_dist_info
+from .hooks import HOOKS, Hook
+from .log_buffer import LogBuffer
+from .priority import Priority, get_priority
+from .utils import get_time_str
+
+
+class BaseRunner(metaclass=ABCMeta):
+ """The base class of Runner, a training helper for PyTorch.
+
+ All subclasses should implement the following APIs:
+
+ - ``run()``
+ - ``train()``
+ - ``val()``
+ - ``save_checkpoint()``
+
+ Args:
+ model (:obj:`torch.nn.Module`): The model to be run.
+        batch_processor (callable): A callable method that processes a data
+ batch. The interface of this method should be
+ `batch_processor(model, data, train_mode) -> dict`
+ optimizer (dict or :obj:`torch.optim.Optimizer`): It can be either an
+ optimizer (in most cases) or a dict of optimizers (in models that
+            require more than one optimizer, e.g., GAN).
+ work_dir (str, optional): The working directory to save checkpoints
+ and logs. Defaults to None.
+ logger (:obj:`logging.Logger`): Logger used during training.
+ Defaults to None. (The default value is just for backward
+ compatibility)
+        meta (dict | None): A dict that records some important information
+            such as environment info and seed, which will be logged in
+            logger hook. Defaults to None.
+ max_epochs (int, optional): Total training epochs.
+ max_iters (int, optional): Total training iterations.
+ """
+
+ def __init__(self,
+ model,
+ batch_processor=None,
+ optimizer=None,
+ work_dir=None,
+ logger=None,
+ meta=None,
+ max_iters=None,
+ max_epochs=None):
+ if batch_processor is not None:
+ if not callable(batch_processor):
+ raise TypeError('batch_processor must be callable, '
+ f'but got {type(batch_processor)}')
+ warnings.warn('batch_processor is deprecated, please implement '
+ 'train_step() and val_step() in the model instead.')
+            # raise an error if `batch_processor` is not None and
+ # `model.train_step()` exists.
+ if is_module_wrapper(model):
+ _model = model.module
+ else:
+ _model = model
+ if hasattr(_model, 'train_step') or hasattr(_model, 'val_step'):
+ raise RuntimeError(
+ 'batch_processor and model.train_step()/model.val_step() '
+ 'cannot be both available.')
+ else:
+ assert hasattr(model, 'train_step')
+
+ # check the type of `optimizer`
+ if isinstance(optimizer, dict):
+ for name, optim in optimizer.items():
+ if not isinstance(optim, Optimizer):
+ raise TypeError(
+ f'optimizer must be a dict of torch.optim.Optimizers, '
+ f'but optimizer["{name}"] is a {type(optim)}')
+ elif not isinstance(optimizer, Optimizer) and optimizer is not None:
+ raise TypeError(
+ f'optimizer must be a torch.optim.Optimizer object '
+ f'or dict or None, but got {type(optimizer)}')
+
+ # check the type of `logger`
+ if not isinstance(logger, logging.Logger):
+ raise TypeError(f'logger must be a logging.Logger object, '
+ f'but got {type(logger)}')
+
+ # check the type of `meta`
+ if meta is not None and not isinstance(meta, dict):
+ raise TypeError(
+ f'meta must be a dict or None, but got {type(meta)}')
+
+ self.model = model
+ self.batch_processor = batch_processor
+ self.optimizer = optimizer
+ self.logger = logger
+ self.meta = meta
+ # create work_dir
+ if mmcv.is_str(work_dir):
+ self.work_dir = osp.abspath(work_dir)
+ mmcv.mkdir_or_exist(self.work_dir)
+ elif work_dir is None:
+ self.work_dir = None
+ else:
+ raise TypeError('"work_dir" must be a str or None')
+
+ # get model name from the model class
+ if hasattr(self.model, 'module'):
+ self._model_name = self.model.module.__class__.__name__
+ else:
+ self._model_name = self.model.__class__.__name__
+
+ self._rank, self._world_size = get_dist_info()
+ self.timestamp = get_time_str()
+ self.mode = None
+ self._hooks = []
+ self._epoch = 0
+ self._iter = 0
+ self._inner_iter = 0
+
+ if max_epochs is not None and max_iters is not None:
+ raise ValueError(
+ 'Only one of `max_epochs` or `max_iters` can be set.')
+
+ self._max_epochs = max_epochs
+ self._max_iters = max_iters
+ # TODO: Redesign LogBuffer, it is not flexible and elegant enough
+ self.log_buffer = LogBuffer()
+
+ @property
+ def model_name(self):
+ """str: Name of the model, usually the module class name."""
+ return self._model_name
+
+ @property
+ def rank(self):
+ """int: Rank of current process. (distributed training)"""
+ return self._rank
+
+ @property
+ def world_size(self):
+ """int: Number of processes participating in the job.
+ (distributed training)"""
+ return self._world_size
+
+ @property
+ def hooks(self):
+ """list[:obj:`Hook`]: A list of registered hooks."""
+ return self._hooks
+
+ @property
+ def epoch(self):
+ """int: Current epoch."""
+ return self._epoch
+
+ @property
+ def iter(self):
+ """int: Current iteration."""
+ return self._iter
+
+ @property
+ def inner_iter(self):
+ """int: Iteration in an epoch."""
+ return self._inner_iter
+
+ @property
+ def max_epochs(self):
+ """int: Maximum training epochs."""
+ return self._max_epochs
+
+ @property
+ def max_iters(self):
+ """int: Maximum training iterations."""
+ return self._max_iters
+
+ @abstractmethod
+ def train(self):
+ pass
+
+ @abstractmethod
+ def val(self):
+ pass
+
+ @abstractmethod
+ def run(self, data_loaders, workflow, **kwargs):
+ pass
+
+ @abstractmethod
+ def save_checkpoint(self,
+ out_dir,
+ filename_tmpl,
+ save_optimizer=True,
+ meta=None,
+ create_symlink=True):
+ pass
+
+ def current_lr(self):
+ """Get current learning rates.
+
+ Returns:
+ list[float] | dict[str, list[float]]: Current learning rates of all
+ param groups. If the runner has a dict of optimizers, this
+ method will return a dict.
+ """
+ if isinstance(self.optimizer, torch.optim.Optimizer):
+ lr = [group['lr'] for group in self.optimizer.param_groups]
+ elif isinstance(self.optimizer, dict):
+ lr = dict()
+ for name, optim in self.optimizer.items():
+ lr[name] = [group['lr'] for group in optim.param_groups]
+ else:
+ raise RuntimeError(
+ 'lr is not applicable because optimizer does not exist.')
+ return lr
+
+ def current_momentum(self):
+ """Get current momentums.
+
+ Returns:
+ list[float] | dict[str, list[float]]: Current momentums of all
+ param groups. If the runner has a dict of optimizers, this
+ method will return a dict.
+ """
+
+ def _get_momentum(optimizer):
+ momentums = []
+ for group in optimizer.param_groups:
+ if 'momentum' in group.keys():
+ momentums.append(group['momentum'])
+ elif 'betas' in group.keys():
+ momentums.append(group['betas'][0])
+ else:
+ momentums.append(0)
+ return momentums
+
+ if self.optimizer is None:
+ raise RuntimeError(
+ 'momentum is not applicable because optimizer does not exist.')
+ elif isinstance(self.optimizer, torch.optim.Optimizer):
+ momentums = _get_momentum(self.optimizer)
+ elif isinstance(self.optimizer, dict):
+ momentums = dict()
+ for name, optim in self.optimizer.items():
+ momentums[name] = _get_momentum(optim)
+ return momentums
+
+ def register_hook(self, hook, priority='NORMAL'):
+ """Register a hook into the hook list.
+
+ The hook will be inserted into a priority queue, with the specified
+ priority (See :class:`Priority` for details of priorities).
+ For hooks with the same priority, they will be triggered in the same
+ order as they are registered.
+
+ Args:
+ hook (:obj:`Hook`): The hook to be registered.
+ priority (int or str or :obj:`Priority`): Hook priority.
+ Lower value means higher priority.
+ """
+ assert isinstance(hook, Hook)
+ if hasattr(hook, 'priority'):
+ raise ValueError('"priority" is a reserved attribute for hooks')
+ priority = get_priority(priority)
+ hook.priority = priority
+ # insert the hook to a sorted list
+ inserted = False
+ for i in range(len(self._hooks) - 1, -1, -1):
+ if priority >= self._hooks[i].priority:
+ self._hooks.insert(i + 1, hook)
+ inserted = True
+ break
+ if not inserted:
+ self._hooks.insert(0, hook)
+
+ def register_hook_from_cfg(self, hook_cfg):
+ """Register a hook from its cfg.
+
+ Args:
+            hook_cfg (dict): Hook config. It should at least have the key
+                'type'; an optional 'priority' key specifies the hook priority.
+
+ Notes:
+ The specific hook class to register should not use 'type' and
+ 'priority' arguments during initialization.
+ """
+ hook_cfg = hook_cfg.copy()
+ priority = hook_cfg.pop('priority', 'NORMAL')
+ hook = mmcv.build_from_cfg(hook_cfg, HOOKS)
+ self.register_hook(hook, priority=priority)
+
+ def call_hook(self, fn_name):
+ """Call all hooks.
+
+ Args:
+ fn_name (str): The function name in each hook to be called, such as
+ "before_train_epoch".
+ """
+ for hook in self._hooks:
+ getattr(hook, fn_name)(self)
+
+ def get_hook_info(self):
+ # Get hooks info in each stage
+ stage_hook_map = {stage: [] for stage in Hook.stages}
+ for hook in self.hooks:
+ try:
+ priority = Priority(hook.priority).name
+ except ValueError:
+ priority = hook.priority
+ classname = hook.__class__.__name__
+ hook_info = f'({priority:<12}) {classname:<35}'
+ for trigger_stage in hook.get_triggered_stages():
+ stage_hook_map[trigger_stage].append(hook_info)
+
+ stage_hook_infos = []
+ for stage in Hook.stages:
+ hook_infos = stage_hook_map[stage]
+ if len(hook_infos) > 0:
+ info = f'{stage}:\n'
+ info += '\n'.join(hook_infos)
+ info += '\n -------------------- '
+ stage_hook_infos.append(info)
+ return '\n'.join(stage_hook_infos)
+
+ def load_checkpoint(self,
+ filename,
+ map_location='cpu',
+ strict=False,
+ revise_keys=[(r'^module.', '')]):
+ return load_checkpoint(
+ self.model,
+ filename,
+ map_location,
+ strict,
+ self.logger,
+ revise_keys=revise_keys)
+
+ def resume(self,
+ checkpoint,
+ resume_optimizer=True,
+ map_location='default'):
+ if map_location == 'default':
+ if torch.cuda.is_available():
+ device_id = torch.cuda.current_device()
+ checkpoint = self.load_checkpoint(
+ checkpoint,
+ map_location=lambda storage, loc: storage.cuda(device_id))
+ else:
+ checkpoint = self.load_checkpoint(checkpoint)
+ else:
+ checkpoint = self.load_checkpoint(
+ checkpoint, map_location=map_location)
+
+ self._epoch = checkpoint['meta']['epoch']
+ self._iter = checkpoint['meta']['iter']
+ if self.meta is None:
+ self.meta = {}
+ self.meta.setdefault('hook_msgs', {})
+ # load `last_ckpt`, `best_score`, `best_ckpt`, etc. for hook messages
+ self.meta['hook_msgs'].update(checkpoint['meta'].get('hook_msgs', {}))
+
+ # Re-calculate the number of iterations when resuming
+        # models with a different number of GPUs
+ if 'config' in checkpoint['meta']:
+ config = mmcv.Config.fromstring(
+ checkpoint['meta']['config'], file_format='.py')
+ previous_gpu_ids = config.get('gpu_ids', None)
+ if previous_gpu_ids and len(previous_gpu_ids) > 0 and len(
+ previous_gpu_ids) != self.world_size:
+ self._iter = int(self._iter * len(previous_gpu_ids) /
+ self.world_size)
+ self.logger.info('the iteration number is changed due to '
+ 'change of GPU number')
+
+        # resume meta information
+ self.meta = checkpoint['meta']
+
+ if 'optimizer' in checkpoint and resume_optimizer:
+ if isinstance(self.optimizer, Optimizer):
+ self.optimizer.load_state_dict(checkpoint['optimizer'])
+ elif isinstance(self.optimizer, dict):
+ for k in self.optimizer.keys():
+ self.optimizer[k].load_state_dict(
+ checkpoint['optimizer'][k])
+ else:
+ raise TypeError(
+ 'Optimizer should be dict or torch.optim.Optimizer '
+ f'but got {type(self.optimizer)}')
+
+ self.logger.info('resumed epoch %d, iter %d', self.epoch, self.iter)
+
+ def register_lr_hook(self, lr_config):
+ if lr_config is None:
+ return
+ elif isinstance(lr_config, dict):
+ assert 'policy' in lr_config
+ policy_type = lr_config.pop('policy')
+ # If the type of policy is all in lower case, e.g., 'cyclic',
+ # then its first letter will be capitalized, e.g., to be 'Cyclic'.
+ # This is for the convenient usage of Lr updater.
+            # Since this is not applicable for
+            # `CosineAnnealingLrUpdater`,
+ # the string will not be changed if it contains capital letters.
+ if policy_type == policy_type.lower():
+ policy_type = policy_type.title()
+ hook_type = policy_type + 'LrUpdaterHook'
+ lr_config['type'] = hook_type
+ hook = mmcv.build_from_cfg(lr_config, HOOKS)
+ else:
+ hook = lr_config
+ self.register_hook(hook, priority='VERY_HIGH')
+
+ def register_momentum_hook(self, momentum_config):
+ if momentum_config is None:
+ return
+ if isinstance(momentum_config, dict):
+ assert 'policy' in momentum_config
+ policy_type = momentum_config.pop('policy')
+ # If the type of policy is all in lower case, e.g., 'cyclic',
+ # then its first letter will be capitalized, e.g., to be 'Cyclic'.
+ # This is for the convenient usage of momentum updater.
+ # Since this is not applicable for
+ # `CosineAnnealingMomentumUpdater`,
+ # the string will not be changed if it contains capital letters.
+ if policy_type == policy_type.lower():
+ policy_type = policy_type.title()
+ hook_type = policy_type + 'MomentumUpdaterHook'
+ momentum_config['type'] = hook_type
+ hook = mmcv.build_from_cfg(momentum_config, HOOKS)
+ else:
+ hook = momentum_config
+ self.register_hook(hook, priority='HIGH')
+
+ def register_optimizer_hook(self, optimizer_config):
+ if optimizer_config is None:
+ return
+ if isinstance(optimizer_config, dict):
+ optimizer_config.setdefault('type', 'OptimizerHook')
+ hook = mmcv.build_from_cfg(optimizer_config, HOOKS)
+ else:
+ hook = optimizer_config
+ self.register_hook(hook, priority='ABOVE_NORMAL')
+
+ def register_checkpoint_hook(self, checkpoint_config):
+ if checkpoint_config is None:
+ return
+ if isinstance(checkpoint_config, dict):
+ checkpoint_config.setdefault('type', 'CheckpointHook')
+ hook = mmcv.build_from_cfg(checkpoint_config, HOOKS)
+ else:
+ hook = checkpoint_config
+ self.register_hook(hook, priority='NORMAL')
+
+ def register_logger_hooks(self, log_config):
+ if log_config is None:
+ return
+ log_interval = log_config['interval']
+ for info in log_config['hooks']:
+ logger_hook = mmcv.build_from_cfg(
+ info, HOOKS, default_args=dict(interval=log_interval))
+ self.register_hook(logger_hook, priority='VERY_LOW')
+
+ def register_timer_hook(self, timer_config):
+ if timer_config is None:
+ return
+ if isinstance(timer_config, dict):
+ timer_config_ = copy.deepcopy(timer_config)
+ hook = mmcv.build_from_cfg(timer_config_, HOOKS)
+ else:
+ hook = timer_config
+ self.register_hook(hook, priority='LOW')
+
+ def register_custom_hooks(self, custom_config):
+ if custom_config is None:
+ return
+
+ if not isinstance(custom_config, list):
+ custom_config = [custom_config]
+
+ for item in custom_config:
+ if isinstance(item, dict):
+ self.register_hook_from_cfg(item)
+ else:
+ self.register_hook(item, priority='NORMAL')
+
+ def register_profiler_hook(self, profiler_config):
+ if profiler_config is None:
+ return
+ if isinstance(profiler_config, dict):
+ profiler_config.setdefault('type', 'ProfilerHook')
+ hook = mmcv.build_from_cfg(profiler_config, HOOKS)
+ else:
+ hook = profiler_config
+ self.register_hook(hook)
+
+ def register_training_hooks(self,
+ lr_config,
+ optimizer_config=None,
+ checkpoint_config=None,
+ log_config=None,
+ momentum_config=None,
+ timer_config=dict(type='IterTimerHook'),
+ custom_hooks_config=None):
+ """Register default and custom hooks for training.
+
+ Default and custom hooks include:
+
+ +----------------------+-------------------------+
+ | Hooks | Priority |
+ +======================+=========================+
+ | LrUpdaterHook | VERY_HIGH (10) |
+ +----------------------+-------------------------+
+ | MomentumUpdaterHook | HIGH (30) |
+ +----------------------+-------------------------+
+ | OptimizerStepperHook | ABOVE_NORMAL (40) |
+ +----------------------+-------------------------+
+ | CheckpointSaverHook | NORMAL (50) |
+ +----------------------+-------------------------+
+ | IterTimerHook | LOW (70) |
+ +----------------------+-------------------------+
+ | LoggerHook(s) | VERY_LOW (90) |
+ +----------------------+-------------------------+
+ | CustomHook(s) | defaults to NORMAL (50) |
+ +----------------------+-------------------------+
+
+ If custom hooks have same priority with default hooks, custom hooks
+ will be triggered after default hooks.
+ """
+ self.register_lr_hook(lr_config)
+ self.register_momentum_hook(momentum_config)
+ self.register_optimizer_hook(optimizer_config)
+ self.register_checkpoint_hook(checkpoint_config)
+ self.register_timer_hook(timer_config)
+ self.register_logger_hooks(log_config)
+ self.register_custom_hooks(custom_hooks_config)
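To make the priority table above concrete, the following hedged sketch registers two hooks on an `EpochBasedRunner` and shows that `runner.hooks` is kept sorted by numeric priority. `ToyModel` and `PrintHook` are placeholders invented for the example and are not part of mmcv.

```python
# Hedged sketch of hook registration and priority ordering (assumes the
# vendored annotator.uniformer.mmcv package is importable).
import logging

import torch.nn as nn

from annotator.uniformer.mmcv.runner import EpochBasedRunner, Hook


class ToyModel(nn.Module):

    def __init__(self):
        super().__init__()
        self.fc = nn.Linear(2, 2)

    def train_step(self, data, optimizer, **kwargs):
        return dict(loss=self.fc(data).sum())


class PrintHook(Hook):

    def before_run(self, runner):
        print('before_run called')


runner = EpochBasedRunner(
    model=ToyModel(), logger=logging.getLogger('toy'), max_epochs=1)

runner.register_hook(PrintHook(), priority='VERY_LOW')   # numeric value 90
runner.register_hook(PrintHook(), priority='VERY_HIGH')  # numeric value 10
print([h.priority for h in runner.hooks])                # [10, 90]
```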
diff --git a/annotator/uniformer/mmcv/runner/builder.py b/annotator/uniformer/mmcv/runner/builder.py
new file mode 100644
index 0000000000000000000000000000000000000000..77c96ba0b2f30ead9da23f293c5dc84dd3e4a74f
--- /dev/null
+++ b/annotator/uniformer/mmcv/runner/builder.py
@@ -0,0 +1,24 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import copy
+
+from ..utils import Registry
+
+RUNNERS = Registry('runner')
+RUNNER_BUILDERS = Registry('runner builder')
+
+
+def build_runner_constructor(cfg):
+ return RUNNER_BUILDERS.build(cfg)
+
+
+def build_runner(cfg, default_args=None):
+ runner_cfg = copy.deepcopy(cfg)
+ constructor_type = runner_cfg.pop('constructor',
+ 'DefaultRunnerConstructor')
+ runner_constructor = build_runner_constructor(
+ dict(
+ type=constructor_type,
+ runner_cfg=runner_cfg,
+ default_args=default_args))
+ runner = runner_constructor()
+ return runner
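A hedged usage sketch for `build_runner`: the `type` key is resolved against the `RUNNERS` registry through `DefaultRunnerConstructor`, and `default_args` supplies the remaining constructor arguments. The toy model and logger below are placeholders for the example.

```python
# Hedged sketch (assumes the vendored annotator.uniformer.mmcv package is
# importable). ToyModel is a made-up stand-in with the required train_step().
import logging

import torch.nn as nn

from annotator.uniformer.mmcv.runner import build_runner


class ToyModel(nn.Module):

    def train_step(self, data, optimizer, **kwargs):
        return dict(loss=0.0)


runner = build_runner(
    dict(type='EpochBasedRunner', max_epochs=4),
    default_args=dict(model=ToyModel(), logger=logging.getLogger('toy')))
print(type(runner).__name__, runner.max_epochs)  # EpochBasedRunner 4
```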
diff --git a/annotator/uniformer/mmcv/runner/checkpoint.py b/annotator/uniformer/mmcv/runner/checkpoint.py
new file mode 100644
index 0000000000000000000000000000000000000000..b29ca320679164432f446adad893e33fb2b4b29e
--- /dev/null
+++ b/annotator/uniformer/mmcv/runner/checkpoint.py
@@ -0,0 +1,707 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import io
+import os
+import os.path as osp
+import pkgutil
+import re
+import time
+import warnings
+from collections import OrderedDict
+from importlib import import_module
+from tempfile import TemporaryDirectory
+
+import torch
+import torchvision
+from torch.optim import Optimizer
+from torch.utils import model_zoo
+
+import annotator.uniformer.mmcv as mmcv
+from ..fileio import FileClient
+from ..fileio import load as load_file
+from ..parallel import is_module_wrapper
+from ..utils import mkdir_or_exist
+from .dist_utils import get_dist_info
+
+ENV_MMCV_HOME = 'MMCV_HOME'
+ENV_XDG_CACHE_HOME = 'XDG_CACHE_HOME'
+DEFAULT_CACHE_DIR = '~/.cache'
+
+
+def _get_mmcv_home():
+ mmcv_home = os.path.expanduser(
+ os.getenv(
+ ENV_MMCV_HOME,
+ os.path.join(
+ os.getenv(ENV_XDG_CACHE_HOME, DEFAULT_CACHE_DIR), 'mmcv')))
+
+ mkdir_or_exist(mmcv_home)
+ return mmcv_home
+
+
+def load_state_dict(module, state_dict, strict=False, logger=None):
+ """Load state_dict to a module.
+
+ This method is modified from :meth:`torch.nn.Module.load_state_dict`.
+ Default value for ``strict`` is set to ``False`` and the message for
+ param mismatch will be shown even if strict is False.
+
+ Args:
+ module (Module): Module that receives the state_dict.
+ state_dict (OrderedDict): Weights.
+ strict (bool): whether to strictly enforce that the keys
+ in :attr:`state_dict` match the keys returned by this module's
+ :meth:`~torch.nn.Module.state_dict` function. Default: ``False``.
+ logger (:obj:`logging.Logger`, optional): Logger to log the error
+ message. If not specified, print function will be used.
+ """
+ unexpected_keys = []
+ all_missing_keys = []
+ err_msg = []
+
+ metadata = getattr(state_dict, '_metadata', None)
+ state_dict = state_dict.copy()
+ if metadata is not None:
+ state_dict._metadata = metadata
+
+ # use _load_from_state_dict to enable checkpoint version control
+ def load(module, prefix=''):
+ # recursively check parallel module in case that the model has a
+ # complicated structure, e.g., nn.Module(nn.Module(DDP))
+ if is_module_wrapper(module):
+ module = module.module
+ local_metadata = {} if metadata is None else metadata.get(
+ prefix[:-1], {})
+ module._load_from_state_dict(state_dict, prefix, local_metadata, True,
+ all_missing_keys, unexpected_keys,
+ err_msg)
+ for name, child in module._modules.items():
+ if child is not None:
+ load(child, prefix + name + '.')
+
+ load(module)
+ load = None # break load->load reference cycle
+
+ # ignore "num_batches_tracked" of BN layers
+ missing_keys = [
+ key for key in all_missing_keys if 'num_batches_tracked' not in key
+ ]
+
+ if unexpected_keys:
+ err_msg.append('unexpected key in source '
+ f'state_dict: {", ".join(unexpected_keys)}\n')
+ if missing_keys:
+ err_msg.append(
+ f'missing keys in source state_dict: {", ".join(missing_keys)}\n')
+
+ rank, _ = get_dist_info()
+ if len(err_msg) > 0 and rank == 0:
+ err_msg.insert(
+ 0, 'The model and loaded state dict do not match exactly\n')
+ err_msg = '\n'.join(err_msg)
+ if strict:
+ raise RuntimeError(err_msg)
+ elif logger is not None:
+ logger.warning(err_msg)
+ else:
+ print(err_msg)
+
+
+def get_torchvision_models():
+ model_urls = dict()
+ for _, name, ispkg in pkgutil.walk_packages(torchvision.models.__path__):
+ if ispkg:
+ continue
+ _zoo = import_module(f'torchvision.models.{name}')
+ if hasattr(_zoo, 'model_urls'):
+ _urls = getattr(_zoo, 'model_urls')
+ model_urls.update(_urls)
+ return model_urls
+
+
+def get_external_models():
+ mmcv_home = _get_mmcv_home()
+ default_json_path = osp.join(mmcv.__path__[0], 'model_zoo/open_mmlab.json')
+ default_urls = load_file(default_json_path)
+ assert isinstance(default_urls, dict)
+ external_json_path = osp.join(mmcv_home, 'open_mmlab.json')
+ if osp.exists(external_json_path):
+ external_urls = load_file(external_json_path)
+ assert isinstance(external_urls, dict)
+ default_urls.update(external_urls)
+
+ return default_urls
+
+
+def get_mmcls_models():
+ mmcls_json_path = osp.join(mmcv.__path__[0], 'model_zoo/mmcls.json')
+ mmcls_urls = load_file(mmcls_json_path)
+
+ return mmcls_urls
+
+
+def get_deprecated_model_names():
+ deprecate_json_path = osp.join(mmcv.__path__[0],
+ 'model_zoo/deprecated.json')
+ deprecate_urls = load_file(deprecate_json_path)
+ assert isinstance(deprecate_urls, dict)
+
+ return deprecate_urls
+
+
+def _process_mmcls_checkpoint(checkpoint):
+ state_dict = checkpoint['state_dict']
+ new_state_dict = OrderedDict()
+ for k, v in state_dict.items():
+ if k.startswith('backbone.'):
+ new_state_dict[k[9:]] = v
+ new_checkpoint = dict(state_dict=new_state_dict)
+
+ return new_checkpoint
+
+
+class CheckpointLoader:
+ """A general checkpoint loader to manage all schemes."""
+
+ _schemes = {}
+
+ @classmethod
+ def _register_scheme(cls, prefixes, loader, force=False):
+ if isinstance(prefixes, str):
+ prefixes = [prefixes]
+ else:
+ assert isinstance(prefixes, (list, tuple))
+ for prefix in prefixes:
+ if (prefix not in cls._schemes) or force:
+ cls._schemes[prefix] = loader
+ else:
+ raise KeyError(
+ f'{prefix} is already registered as a loader backend, '
+ 'add "force=True" if you want to override it')
+ # sort, longer prefixes take priority
+ cls._schemes = OrderedDict(
+ sorted(cls._schemes.items(), key=lambda t: t[0], reverse=True))
+
+ @classmethod
+ def register_scheme(cls, prefixes, loader=None, force=False):
+ """Register a loader to CheckpointLoader.
+
+ This method can be used as a normal class method or a decorator.
+
+ Args:
+ prefixes (str or list[str] or tuple[str]):
+ The prefix of the registered loader.
+ loader (function, optional): The loader function to be registered.
+ When this method is used as a decorator, loader is None.
+ Defaults to None.
+ force (bool, optional): Whether to override the loader
+ if the prefix has already been registered. Defaults to False.
+ """
+
+ if loader is not None:
+ cls._register_scheme(prefixes, loader, force=force)
+ return
+
+ def _register(loader_cls):
+ cls._register_scheme(prefixes, loader_cls, force=force)
+ return loader_cls
+
+ return _register
+
+ @classmethod
+ def _get_checkpoint_loader(cls, path):
+ """Finds a loader that supports the given path. Falls back to the local
+ loader if no other loader is found.
+
+ Args:
+ path (str): checkpoint path
+
+ Returns:
+ loader (function): checkpoint loader
+ """
+
+ for p in cls._schemes:
+ if path.startswith(p):
+ return cls._schemes[p]
+
+ @classmethod
+ def load_checkpoint(cls, filename, map_location=None, logger=None):
+ """load checkpoint through URL scheme path.
+
+ Args:
+ filename (str): checkpoint file name with given prefix
+ map_location (str, optional): Same as :func:`torch.load`.
+ Default: None
+ logger (:mod:`logging.Logger`, optional): The logger for message.
+ Default: None
+
+ Returns:
+ dict or OrderedDict: The loaded checkpoint.
+ """
+
+ checkpoint_loader = cls._get_checkpoint_loader(filename)
+ class_name = checkpoint_loader.__name__
+ mmcv.print_log(
+ f'load checkpoint from {class_name[10:]} path: {filename}', logger)
+ return checkpoint_loader(filename, map_location)
+
+
+@CheckpointLoader.register_scheme(prefixes='')
+def load_from_local(filename, map_location):
+ """load checkpoint by local file path.
+
+ Args:
+ filename (str): local checkpoint file path
+ map_location (str, optional): Same as :func:`torch.load`.
+
+ Returns:
+ dict or OrderedDict: The loaded checkpoint.
+ """
+
+ if not osp.isfile(filename):
+ raise IOError(f'{filename} is not a checkpoint file')
+ checkpoint = torch.load(filename, map_location=map_location)
+ return checkpoint
+
+
+@CheckpointLoader.register_scheme(prefixes=('http://', 'https://'))
+def load_from_http(filename, map_location=None, model_dir=None):
+    """load checkpoint through HTTP or HTTPS scheme path. In a distributed
+    setting, this function only downloads the checkpoint at local rank 0.
+
+ Args:
+ filename (str): checkpoint file path with modelzoo or
+ torchvision prefix
+ map_location (str, optional): Same as :func:`torch.load`.
+ model_dir (string, optional): directory in which to save the object,
+ Default: None
+
+ Returns:
+ dict or OrderedDict: The loaded checkpoint.
+ """
+ rank, world_size = get_dist_info()
+ rank = int(os.environ.get('LOCAL_RANK', rank))
+ if rank == 0:
+ checkpoint = model_zoo.load_url(
+ filename, model_dir=model_dir, map_location=map_location)
+ if world_size > 1:
+ torch.distributed.barrier()
+ if rank > 0:
+ checkpoint = model_zoo.load_url(
+ filename, model_dir=model_dir, map_location=map_location)
+ return checkpoint
+
+
+@CheckpointLoader.register_scheme(prefixes='pavi://')
+def load_from_pavi(filename, map_location=None):
+    """load checkpoint through the file path prefixed with pavi. In a
+    distributed setting, this function downloads the checkpoint at all ranks
+    to different temporary directories.
+
+ Args:
+ filename (str): checkpoint file path with pavi prefix
+ map_location (str, optional): Same as :func:`torch.load`.
+ Default: None
+
+ Returns:
+ dict or OrderedDict: The loaded checkpoint.
+ """
+ assert filename.startswith('pavi://'), \
+ f'Expected filename startswith `pavi://`, but get {filename}'
+ model_path = filename[7:]
+
+ try:
+ from pavi import modelcloud
+ except ImportError:
+ raise ImportError(
+ 'Please install pavi to load checkpoint from modelcloud.')
+
+ model = modelcloud.get(model_path)
+ with TemporaryDirectory() as tmp_dir:
+ downloaded_file = osp.join(tmp_dir, model.name)
+ model.download(downloaded_file)
+ checkpoint = torch.load(downloaded_file, map_location=map_location)
+ return checkpoint
+
+
+@CheckpointLoader.register_scheme(prefixes='s3://')
+def load_from_ceph(filename, map_location=None, backend='petrel'):
+    """load checkpoint through the file path prefixed with s3. In a
+    distributed setting, this function downloads the checkpoint at all ranks
+    to different temporary directories.
+
+ Args:
+ filename (str): checkpoint file path with s3 prefix
+ map_location (str, optional): Same as :func:`torch.load`.
+ backend (str, optional): The storage backend type. Options are 'ceph',
+ 'petrel'. Default: 'petrel'.
+
+ .. warning::
+ :class:`mmcv.fileio.file_client.CephBackend` will be deprecated,
+ please use :class:`mmcv.fileio.file_client.PetrelBackend` instead.
+
+ Returns:
+ dict or OrderedDict: The loaded checkpoint.
+ """
+ allowed_backends = ['ceph', 'petrel']
+ if backend not in allowed_backends:
+ raise ValueError(f'Load from Backend {backend} is not supported.')
+
+ if backend == 'ceph':
+ warnings.warn(
+ 'CephBackend will be deprecated, please use PetrelBackend instead')
+
+ # CephClient and PetrelBackend have the same prefix 's3://' and the latter
+ # will be chosen as default. If PetrelBackend can not be instantiated
+ # successfully, the CephClient will be chosen.
+ try:
+ file_client = FileClient(backend=backend)
+ except ImportError:
+ allowed_backends.remove(backend)
+ file_client = FileClient(backend=allowed_backends[0])
+
+ with io.BytesIO(file_client.get(filename)) as buffer:
+ checkpoint = torch.load(buffer, map_location=map_location)
+ return checkpoint
+
+
+@CheckpointLoader.register_scheme(prefixes=('modelzoo://', 'torchvision://'))
+def load_from_torchvision(filename, map_location=None):
+ """load checkpoint through the file path prefixed with modelzoo or
+ torchvision.
+
+ Args:
+ filename (str): checkpoint file path with modelzoo or
+ torchvision prefix
+ map_location (str, optional): Same as :func:`torch.load`.
+
+ Returns:
+ dict or OrderedDict: The loaded checkpoint.
+ """
+ model_urls = get_torchvision_models()
+ if filename.startswith('modelzoo://'):
+ warnings.warn('The URL scheme of "modelzoo://" is deprecated, please '
+ 'use "torchvision://" instead')
+ model_name = filename[11:]
+ else:
+ model_name = filename[14:]
+ return load_from_http(model_urls[model_name], map_location=map_location)
+
+
+@CheckpointLoader.register_scheme(prefixes=('open-mmlab://', 'openmmlab://'))
+def load_from_openmmlab(filename, map_location=None):
+ """load checkpoint through the file path prefixed with open-mmlab or
+ openmmlab.
+
+ Args:
+ filename (str): checkpoint file path with open-mmlab or
+ openmmlab prefix
+ map_location (str, optional): Same as :func:`torch.load`.
+ Default: None
+
+ Returns:
+ dict or OrderedDict: The loaded checkpoint.
+ """
+
+ model_urls = get_external_models()
+ prefix_str = 'open-mmlab://'
+ if filename.startswith(prefix_str):
+ model_name = filename[13:]
+ else:
+ model_name = filename[12:]
+ prefix_str = 'openmmlab://'
+
+ deprecated_urls = get_deprecated_model_names()
+ if model_name in deprecated_urls:
+ warnings.warn(f'{prefix_str}{model_name} is deprecated in favor '
+ f'of {prefix_str}{deprecated_urls[model_name]}')
+ model_name = deprecated_urls[model_name]
+ model_url = model_urls[model_name]
+ # check if is url
+ if model_url.startswith(('http://', 'https://')):
+ checkpoint = load_from_http(model_url, map_location=map_location)
+ else:
+ filename = osp.join(_get_mmcv_home(), model_url)
+ if not osp.isfile(filename):
+ raise IOError(f'{filename} is not a checkpoint file')
+ checkpoint = torch.load(filename, map_location=map_location)
+ return checkpoint
+
+
+@CheckpointLoader.register_scheme(prefixes='mmcls://')
+def load_from_mmcls(filename, map_location=None):
+ """load checkpoint through the file path prefixed with mmcls.
+
+ Args:
+ filename (str): checkpoint file path with mmcls prefix
+ map_location (str, optional): Same as :func:`torch.load`.
+
+ Returns:
+ dict or OrderedDict: The loaded checkpoint.
+ """
+
+ model_urls = get_mmcls_models()
+ model_name = filename[8:]
+ checkpoint = load_from_http(
+ model_urls[model_name], map_location=map_location)
+ checkpoint = _process_mmcls_checkpoint(checkpoint)
+ return checkpoint
+
+
+def _load_checkpoint(filename, map_location=None, logger=None):
+ """Load checkpoint from somewhere (modelzoo, file, url).
+
+ Args:
+ filename (str): Accept local filepath, URL, ``torchvision://xxx``,
+ ``open-mmlab://xxx``. Please refer to ``docs/model_zoo.md`` for
+ details.
+ map_location (str, optional): Same as :func:`torch.load`.
+ Default: None.
+ logger (:mod:`logging.Logger`, optional): The logger for error message.
+ Default: None
+
+ Returns:
+ dict or OrderedDict: The loaded checkpoint. It can be either an
+ OrderedDict storing model weights or a dict containing other
+ information, which depends on the checkpoint.
+ """
+ return CheckpointLoader.load_checkpoint(filename, map_location, logger)
+
+
+def _load_checkpoint_with_prefix(prefix, filename, map_location=None):
+ """Load partial pretrained model with specific prefix.
+
+ Args:
+ prefix (str): The prefix of sub-module.
+ filename (str): Accept local filepath, URL, ``torchvision://xxx``,
+ ``open-mmlab://xxx``. Please refer to ``docs/model_zoo.md`` for
+ details.
+ map_location (str | None): Same as :func:`torch.load`. Default: None.
+
+ Returns:
+        dict or OrderedDict: The loaded state dict of the given prefix.
+ """
+
+ checkpoint = _load_checkpoint(filename, map_location=map_location)
+
+ if 'state_dict' in checkpoint:
+ state_dict = checkpoint['state_dict']
+ else:
+ state_dict = checkpoint
+ if not prefix.endswith('.'):
+ prefix += '.'
+ prefix_len = len(prefix)
+
+ state_dict = {
+ k[prefix_len:]: v
+ for k, v in state_dict.items() if k.startswith(prefix)
+ }
+
+ assert state_dict, f'{prefix} is not in the pretrained model'
+ return state_dict
+
+
+def load_checkpoint(model,
+ filename,
+ map_location=None,
+ strict=False,
+ logger=None,
+ revise_keys=[(r'^module\.', '')]):
+ """Load checkpoint from a file or URI.
+
+ Args:
+ model (Module): Module to load checkpoint.
+ filename (str): Accept local filepath, URL, ``torchvision://xxx``,
+ ``open-mmlab://xxx``. Please refer to ``docs/model_zoo.md`` for
+ details.
+ map_location (str): Same as :func:`torch.load`.
+        strict (bool): Whether to strictly enforce that the keys in the
+            checkpoint's state_dict match the keys of the model.
+ logger (:mod:`logging.Logger` or None): The logger for error message.
+ revise_keys (list): A list of customized keywords to modify the
+ state_dict in checkpoint. Each item is a (pattern, replacement)
+ pair of the regular expression operations. Default: strip
+ the prefix 'module.' by [(r'^module\\.', '')].
+
+ Returns:
+ dict or OrderedDict: The loaded checkpoint.
+ """
+ checkpoint = _load_checkpoint(filename, map_location, logger)
+ # OrderedDict is a subclass of dict
+ if not isinstance(checkpoint, dict):
+ raise RuntimeError(
+ f'No state_dict found in checkpoint file {filename}')
+ # get state_dict from checkpoint
+ if 'state_dict' in checkpoint:
+ state_dict = checkpoint['state_dict']
+ else:
+ state_dict = checkpoint
+
+ # strip prefix of state_dict
+ metadata = getattr(state_dict, '_metadata', OrderedDict())
+ for p, r in revise_keys:
+ state_dict = OrderedDict(
+ {re.sub(p, r, k): v
+ for k, v in state_dict.items()})
+ # Keep metadata in state_dict
+ state_dict._metadata = metadata
+
+ # load state_dict
+ load_state_dict(model, state_dict, strict, logger)
+ return checkpoint
+
+
+def weights_to_cpu(state_dict):
+ """Copy a model state_dict to cpu.
+
+ Args:
+ state_dict (OrderedDict): Model weights on GPU.
+
+ Returns:
+        OrderedDict: Model weights on CPU.
+ """
+ state_dict_cpu = OrderedDict()
+ for key, val in state_dict.items():
+ state_dict_cpu[key] = val.cpu()
+ # Keep metadata in state_dict
+ state_dict_cpu._metadata = getattr(state_dict, '_metadata', OrderedDict())
+ return state_dict_cpu
+
+
+def _save_to_state_dict(module, destination, prefix, keep_vars):
+ """Saves module state to `destination` dictionary.
+
+ This method is modified from :meth:`torch.nn.Module._save_to_state_dict`.
+
+ Args:
+ module (nn.Module): The module to generate state_dict.
+ destination (dict): A dict where state will be stored.
+ prefix (str): The prefix for parameters and buffers used in this
+ module.
+ """
+ for name, param in module._parameters.items():
+ if param is not None:
+ destination[prefix + name] = param if keep_vars else param.detach()
+ for name, buf in module._buffers.items():
+ # remove check of _non_persistent_buffers_set to allow nn.BatchNorm2d
+ if buf is not None:
+ destination[prefix + name] = buf if keep_vars else buf.detach()
+
+
+def get_state_dict(module, destination=None, prefix='', keep_vars=False):
+ """Returns a dictionary containing a whole state of the module.
+
+ Both parameters and persistent buffers (e.g. running averages) are
+ included. Keys are corresponding parameter and buffer names.
+
+ This method is modified from :meth:`torch.nn.Module.state_dict` to
+ recursively check parallel module in case that the model has a complicated
+ structure, e.g., nn.Module(nn.Module(DDP)).
+
+ Args:
+ module (nn.Module): The module to generate state_dict.
+ destination (OrderedDict): Returned dict for the state of the
+ module.
+ prefix (str): Prefix of the key.
+ keep_vars (bool): Whether to keep the variable property of the
+ parameters. Default: False.
+
+ Returns:
+ dict: A dictionary containing a whole state of the module.
+ """
+ # recursively check parallel module in case that the model has a
+ # complicated structure, e.g., nn.Module(nn.Module(DDP))
+ if is_module_wrapper(module):
+ module = module.module
+
+ # below is the same as torch.nn.Module.state_dict()
+ if destination is None:
+ destination = OrderedDict()
+ destination._metadata = OrderedDict()
+ destination._metadata[prefix[:-1]] = local_metadata = dict(
+ version=module._version)
+ _save_to_state_dict(module, destination, prefix, keep_vars)
+ for name, child in module._modules.items():
+ if child is not None:
+ get_state_dict(
+ child, destination, prefix + name + '.', keep_vars=keep_vars)
+ for hook in module._state_dict_hooks.values():
+ hook_result = hook(module, destination, prefix, local_metadata)
+ if hook_result is not None:
+ destination = hook_result
+ return destination
+
+
+def save_checkpoint(model,
+ filename,
+ optimizer=None,
+ meta=None,
+ file_client_args=None):
+ """Save checkpoint to file.
+
+ The checkpoint will have 3 fields: ``meta``, ``state_dict`` and
+ ``optimizer``. By default ``meta`` will contain version and time info.
+
+ Args:
+ model (Module): Module whose params are to be saved.
+ filename (str): Checkpoint filename.
+ optimizer (:obj:`Optimizer`, optional): Optimizer to be saved.
+ meta (dict, optional): Metadata to be saved in checkpoint.
+ file_client_args (dict, optional): Arguments to instantiate a
+ FileClient. See :class:`mmcv.fileio.FileClient` for details.
+ Default: None.
+ `New in version 1.3.16.`
+ """
+ if meta is None:
+ meta = {}
+ elif not isinstance(meta, dict):
+ raise TypeError(f'meta must be a dict or None, but got {type(meta)}')
+ meta.update(mmcv_version=mmcv.__version__, time=time.asctime())
+
+ if is_module_wrapper(model):
+ model = model.module
+
+ if hasattr(model, 'CLASSES') and model.CLASSES is not None:
+ # save class name to the meta
+ meta.update(CLASSES=model.CLASSES)
+
+ checkpoint = {
+ 'meta': meta,
+ 'state_dict': weights_to_cpu(get_state_dict(model))
+ }
+ # save optimizer state dict in the checkpoint
+ if isinstance(optimizer, Optimizer):
+ checkpoint['optimizer'] = optimizer.state_dict()
+ elif isinstance(optimizer, dict):
+ checkpoint['optimizer'] = {}
+ for name, optim in optimizer.items():
+ checkpoint['optimizer'][name] = optim.state_dict()
+
+ if filename.startswith('pavi://'):
+ if file_client_args is not None:
+ raise ValueError(
+ 'file_client_args should be "None" if filename starts with'
+ f'"pavi://", but got {file_client_args}')
+ try:
+ from pavi import modelcloud
+ from pavi import exception
+ except ImportError:
+ raise ImportError(
+ 'Please install pavi to load checkpoint from modelcloud.')
+ model_path = filename[7:]
+ root = modelcloud.Folder()
+ model_dir, model_name = osp.split(model_path)
+ try:
+ model = modelcloud.get(model_dir)
+ except exception.NodeNotFoundError:
+ model = root.create_training_model(model_dir)
+ with TemporaryDirectory() as tmp_dir:
+ checkpoint_file = osp.join(tmp_dir, model_name)
+ with open(checkpoint_file, 'wb') as f:
+ torch.save(checkpoint, f)
+ f.flush()
+ model.create_file(checkpoint_file, name=model_name)
+ else:
+ file_client = FileClient.infer_client(file_client_args, filename)
+ with io.BytesIO() as f:
+ torch.save(checkpoint, f)
+ file_client.put(f.getvalue(), filename)
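The two halves of this module, `save_checkpoint` and the `CheckpointLoader`-based `load_checkpoint`, compose into the usual round trip shown in the hedged sketch below; the file name and meta contents are illustrative only, and only the default local-path scheme is exercised.

```python
# Hedged sketch: local save/load round trip (assumes the vendored
# annotator.uniformer.mmcv package is importable).
import os.path as osp
import tempfile

import torch.nn as nn

from annotator.uniformer.mmcv.runner import load_checkpoint, save_checkpoint

model = nn.Sequential(nn.Linear(4, 4), nn.ReLU(), nn.Linear(4, 2))

with tempfile.TemporaryDirectory() as tmp_dir:
    ckpt_path = osp.join(tmp_dir, 'toy.pth')
    save_checkpoint(model, ckpt_path, meta=dict(epoch=3))

    # Restore the weights into a fresh model; the returned dict keeps the
    # 'meta' and 'state_dict' fields described above.
    model2 = nn.Sequential(nn.Linear(4, 4), nn.ReLU(), nn.Linear(4, 2))
    checkpoint = load_checkpoint(model2, ckpt_path, map_location='cpu')
    print(checkpoint['meta']['epoch'])  # 3
```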
diff --git a/annotator/uniformer/mmcv/runner/default_constructor.py b/annotator/uniformer/mmcv/runner/default_constructor.py
new file mode 100644
index 0000000000000000000000000000000000000000..3f1f5b44168768dfda3947393a63a6cf9cf50b41
--- /dev/null
+++ b/annotator/uniformer/mmcv/runner/default_constructor.py
@@ -0,0 +1,44 @@
+from .builder import RUNNER_BUILDERS, RUNNERS
+
+
+@RUNNER_BUILDERS.register_module()
+class DefaultRunnerConstructor:
+ """Default constructor for runners.
+
+    Customize an existing `Runner` such as `EpochBasedRunner` through a
+    `RunnerConstructor`. For example, we can inject new properties and
+    functions into the `Runner`.
+
+ Example:
+ >>> from annotator.uniformer.mmcv.runner import RUNNER_BUILDERS, build_runner
+ >>> # Define a new RunnerReconstructor
+ >>> @RUNNER_BUILDERS.register_module()
+ >>> class MyRunnerConstructor:
+ ... def __init__(self, runner_cfg, default_args=None):
+ ... if not isinstance(runner_cfg, dict):
+ ... raise TypeError('runner_cfg should be a dict',
+ ... f'but got {type(runner_cfg)}')
+ ... self.runner_cfg = runner_cfg
+ ... self.default_args = default_args
+ ...
+ ... def __call__(self):
+ ... runner = RUNNERS.build(self.runner_cfg,
+ ... default_args=self.default_args)
+ ... # Add new properties for existing runner
+ ... runner.my_name = 'my_runner'
+ ... runner.my_function = lambda self: print(self.my_name)
+ ... ...
+ >>> # build your runner
+ >>> runner_cfg = dict(type='EpochBasedRunner', max_epochs=40,
+ ... constructor='MyRunnerConstructor')
+ >>> runner = build_runner(runner_cfg)
+ """
+
+ def __init__(self, runner_cfg, default_args=None):
+ if not isinstance(runner_cfg, dict):
+ raise TypeError('runner_cfg should be a dict',
+ f'but got {type(runner_cfg)}')
+ self.runner_cfg = runner_cfg
+ self.default_args = default_args
+
+ def __call__(self):
+ return RUNNERS.build(self.runner_cfg, default_args=self.default_args)
diff --git a/annotator/uniformer/mmcv/runner/dist_utils.py b/annotator/uniformer/mmcv/runner/dist_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..d3a1ef3fda5ceeb31bf15a73779da1b1903ab0fe
--- /dev/null
+++ b/annotator/uniformer/mmcv/runner/dist_utils.py
@@ -0,0 +1,164 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import functools
+import os
+import subprocess
+from collections import OrderedDict
+
+import torch
+import torch.multiprocessing as mp
+from torch import distributed as dist
+from torch._utils import (_flatten_dense_tensors, _take_tensors,
+ _unflatten_dense_tensors)
+
+
+def init_dist(launcher, backend='nccl', **kwargs):
+ if mp.get_start_method(allow_none=True) is None:
+ mp.set_start_method('spawn')
+ if launcher == 'pytorch':
+ _init_dist_pytorch(backend, **kwargs)
+ elif launcher == 'mpi':
+ _init_dist_mpi(backend, **kwargs)
+ elif launcher == 'slurm':
+ _init_dist_slurm(backend, **kwargs)
+ else:
+ raise ValueError(f'Invalid launcher type: {launcher}')
+
+
+def _init_dist_pytorch(backend, **kwargs):
+ # TODO: use local_rank instead of rank % num_gpus
+ rank = int(os.environ['RANK'])
+ num_gpus = torch.cuda.device_count()
+ torch.cuda.set_device(rank % num_gpus)
+ dist.init_process_group(backend=backend, **kwargs)
+
+
+def _init_dist_mpi(backend, **kwargs):
+ # TODO: use local_rank instead of rank % num_gpus
+ rank = int(os.environ['OMPI_COMM_WORLD_RANK'])
+ num_gpus = torch.cuda.device_count()
+ torch.cuda.set_device(rank % num_gpus)
+ dist.init_process_group(backend=backend, **kwargs)
+
+
+def _init_dist_slurm(backend, port=None):
+ """Initialize slurm distributed training environment.
+
+    If argument ``port`` is not specified, the master port will be read from
+    the environment variable ``MASTER_PORT``. If ``MASTER_PORT`` is not set,
+    the default port ``29500`` will be used.
+
+ Args:
+ backend (str): Backend of torch.distributed.
+ port (int, optional): Master port. Defaults to None.
+ """
+ proc_id = int(os.environ['SLURM_PROCID'])
+ ntasks = int(os.environ['SLURM_NTASKS'])
+ node_list = os.environ['SLURM_NODELIST']
+ num_gpus = torch.cuda.device_count()
+ torch.cuda.set_device(proc_id % num_gpus)
+ addr = subprocess.getoutput(
+ f'scontrol show hostname {node_list} | head -n1')
+ # specify master port
+ if port is not None:
+ os.environ['MASTER_PORT'] = str(port)
+ elif 'MASTER_PORT' in os.environ:
+ pass # use MASTER_PORT in the environment variable
+ else:
+ # 29500 is torch.distributed default port
+ os.environ['MASTER_PORT'] = '29500'
+ # use MASTER_ADDR in the environment variable if it already exists
+ if 'MASTER_ADDR' not in os.environ:
+ os.environ['MASTER_ADDR'] = addr
+ os.environ['WORLD_SIZE'] = str(ntasks)
+ os.environ['LOCAL_RANK'] = str(proc_id % num_gpus)
+ os.environ['RANK'] = str(proc_id)
+ dist.init_process_group(backend=backend)
+
+
+def get_dist_info():
+ if dist.is_available() and dist.is_initialized():
+ rank = dist.get_rank()
+ world_size = dist.get_world_size()
+ else:
+ rank = 0
+ world_size = 1
+ return rank, world_size
+
+
+def master_only(func):
+
+ @functools.wraps(func)
+ def wrapper(*args, **kwargs):
+ rank, _ = get_dist_info()
+ if rank == 0:
+ return func(*args, **kwargs)
+
+ return wrapper
+
+
+def allreduce_params(params, coalesce=True, bucket_size_mb=-1):
+ """Allreduce parameters.
+
+ Args:
+        params (list[torch.nn.Parameter]): List of parameters or buffers of a
+            model.
+        coalesce (bool, optional): Whether to allreduce parameters as a whole.
+            Defaults to True.
+        bucket_size_mb (int, optional): Size of the bucket, in MB.
+            Defaults to -1.
+ """
+ _, world_size = get_dist_info()
+ if world_size == 1:
+ return
+ params = [param.data for param in params]
+ if coalesce:
+ _allreduce_coalesced(params, world_size, bucket_size_mb)
+ else:
+ for tensor in params:
+ dist.all_reduce(tensor.div_(world_size))
+
+
+def allreduce_grads(params, coalesce=True, bucket_size_mb=-1):
+ """Allreduce gradients.
+
+ Args:
+        params (list[torch.nn.Parameter]): List of parameters of a model.
+        coalesce (bool, optional): Whether to allreduce gradients as a whole.
+            Defaults to True.
+        bucket_size_mb (int, optional): Size of the bucket, in MB.
+            Defaults to -1.
+ """
+ grads = [
+ param.grad.data for param in params
+ if param.requires_grad and param.grad is not None
+ ]
+ _, world_size = get_dist_info()
+ if world_size == 1:
+ return
+ if coalesce:
+ _allreduce_coalesced(grads, world_size, bucket_size_mb)
+ else:
+ for tensor in grads:
+ dist.all_reduce(tensor.div_(world_size))
+
+
+def _allreduce_coalesced(tensors, world_size, bucket_size_mb=-1):
+ if bucket_size_mb > 0:
+ bucket_size_bytes = bucket_size_mb * 1024 * 1024
+ buckets = _take_tensors(tensors, bucket_size_bytes)
+ else:
+ buckets = OrderedDict()
+ for tensor in tensors:
+ tp = tensor.type()
+ if tp not in buckets:
+ buckets[tp] = []
+ buckets[tp].append(tensor)
+ buckets = buckets.values()
+
+ for bucket in buckets:
+ flat_tensors = _flatten_dense_tensors(bucket)
+ dist.all_reduce(flat_tensors)
+ flat_tensors.div_(world_size)
+ for tensor, synced in zip(
+ bucket, _unflatten_dense_tensors(flat_tensors, bucket)):
+ tensor.copy_(synced)
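+
+
+# A minimal usage sketch of the helpers above (illustrative addition, not from
+# the upstream mmcv source). It assumes the process was started by a launcher
+# such as ``torchrun``/``torch.distributed.launch`` that sets RANK etc.
+#
+#   >>> init_dist('pytorch', backend='nccl')
+#   >>> rank, world_size = get_dist_info()
+#   >>> @master_only
+#   ... def save_results():
+#   ...     print(f'rank 0 of {world_size} writes the results')
+#   >>> save_results()  # runs on rank 0 only; a no-op on all other ranks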
diff --git a/annotator/uniformer/mmcv/runner/epoch_based_runner.py b/annotator/uniformer/mmcv/runner/epoch_based_runner.py
new file mode 100644
index 0000000000000000000000000000000000000000..766a9ce6afdf09cd11b1b15005f5132583011348
--- /dev/null
+++ b/annotator/uniformer/mmcv/runner/epoch_based_runner.py
@@ -0,0 +1,187 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import os.path as osp
+import platform
+import shutil
+import time
+import warnings
+
+import torch
+
+import annotator.uniformer.mmcv as mmcv
+from .base_runner import BaseRunner
+from .builder import RUNNERS
+from .checkpoint import save_checkpoint
+from .utils import get_host_info
+
+
+@RUNNERS.register_module()
+class EpochBasedRunner(BaseRunner):
+ """Epoch-based Runner.
+
+    This runner trains models epoch by epoch.
+ """
+
+ def run_iter(self, data_batch, train_mode, **kwargs):
+ if self.batch_processor is not None:
+ outputs = self.batch_processor(
+ self.model, data_batch, train_mode=train_mode, **kwargs)
+ elif train_mode:
+ outputs = self.model.train_step(data_batch, self.optimizer,
+ **kwargs)
+ else:
+ outputs = self.model.val_step(data_batch, self.optimizer, **kwargs)
+ if not isinstance(outputs, dict):
+            raise TypeError('"batch_processor()" or "model.train_step()" '
+                            'and "model.val_step()" must return a dict')
+ if 'log_vars' in outputs:
+ self.log_buffer.update(outputs['log_vars'], outputs['num_samples'])
+ self.outputs = outputs
+
+ def train(self, data_loader, **kwargs):
+ self.model.train()
+ self.mode = 'train'
+ self.data_loader = data_loader
+ self._max_iters = self._max_epochs * len(self.data_loader)
+ self.call_hook('before_train_epoch')
+ time.sleep(2) # Prevent possible deadlock during epoch transition
+ for i, data_batch in enumerate(self.data_loader):
+ self._inner_iter = i
+ self.call_hook('before_train_iter')
+ self.run_iter(data_batch, train_mode=True, **kwargs)
+ self.call_hook('after_train_iter')
+ self._iter += 1
+
+ self.call_hook('after_train_epoch')
+ self._epoch += 1
+
+ @torch.no_grad()
+ def val(self, data_loader, **kwargs):
+ self.model.eval()
+ self.mode = 'val'
+ self.data_loader = data_loader
+ self.call_hook('before_val_epoch')
+ time.sleep(2) # Prevent possible deadlock during epoch transition
+ for i, data_batch in enumerate(self.data_loader):
+ self._inner_iter = i
+ self.call_hook('before_val_iter')
+ self.run_iter(data_batch, train_mode=False)
+ self.call_hook('after_val_iter')
+
+ self.call_hook('after_val_epoch')
+
+ def run(self, data_loaders, workflow, max_epochs=None, **kwargs):
+ """Start running.
+
+ Args:
+ data_loaders (list[:obj:`DataLoader`]): Dataloaders for training
+ and validation.
+ workflow (list[tuple]): A list of (phase, epochs) to specify the
+                running order and epochs. E.g., [('train', 2), ('val', 1)] means
+ running 2 epochs for training and 1 epoch for validation,
+ iteratively.
+ """
+ assert isinstance(data_loaders, list)
+ assert mmcv.is_list_of(workflow, tuple)
+ assert len(data_loaders) == len(workflow)
+ if max_epochs is not None:
+ warnings.warn(
+ 'setting max_epochs in run is deprecated, '
+ 'please set max_epochs in runner_config', DeprecationWarning)
+ self._max_epochs = max_epochs
+
+ assert self._max_epochs is not None, (
+ 'max_epochs must be specified during instantiation')
+
+ for i, flow in enumerate(workflow):
+ mode, epochs = flow
+ if mode == 'train':
+ self._max_iters = self._max_epochs * len(data_loaders[i])
+ break
+
+ work_dir = self.work_dir if self.work_dir is not None else 'NONE'
+ self.logger.info('Start running, host: %s, work_dir: %s',
+ get_host_info(), work_dir)
+ self.logger.info('Hooks will be executed in the following order:\n%s',
+ self.get_hook_info())
+ self.logger.info('workflow: %s, max: %d epochs', workflow,
+ self._max_epochs)
+ self.call_hook('before_run')
+
+ while self.epoch < self._max_epochs:
+ for i, flow in enumerate(workflow):
+ mode, epochs = flow
+ if isinstance(mode, str): # self.train()
+ if not hasattr(self, mode):
+ raise ValueError(
+ f'runner has no method named "{mode}" to run an '
+ 'epoch')
+ epoch_runner = getattr(self, mode)
+ else:
+ raise TypeError(
+ 'mode in workflow must be a str, but got {}'.format(
+ type(mode)))
+
+ for _ in range(epochs):
+ if mode == 'train' and self.epoch >= self._max_epochs:
+ break
+ epoch_runner(data_loaders[i], **kwargs)
+
+ time.sleep(1) # wait for some hooks like loggers to finish
+ self.call_hook('after_run')
+
+ def save_checkpoint(self,
+ out_dir,
+ filename_tmpl='epoch_{}.pth',
+ save_optimizer=True,
+ meta=None,
+ create_symlink=True):
+ """Save the checkpoint.
+
+ Args:
+ out_dir (str): The directory that checkpoints are saved.
+ filename_tmpl (str, optional): The checkpoint filename template,
+ which contains a placeholder for the epoch number.
+ Defaults to 'epoch_{}.pth'.
+ save_optimizer (bool, optional): Whether to save the optimizer to
+ the checkpoint. Defaults to True.
+ meta (dict, optional): The meta information to be saved in the
+ checkpoint. Defaults to None.
+ create_symlink (bool, optional): Whether to create a symlink
+ "latest.pth" to point to the latest checkpoint.
+ Defaults to True.
+ """
+ if meta is None:
+ meta = {}
+ elif not isinstance(meta, dict):
+ raise TypeError(
+ f'meta should be a dict or None, but got {type(meta)}')
+ if self.meta is not None:
+ meta.update(self.meta)
+ # Note: meta.update(self.meta) should be done before
+ # meta.update(epoch=self.epoch + 1, iter=self.iter) otherwise
+ # there will be problems with resumed checkpoints.
+ # More details in https://github.com/open-mmlab/mmcv/pull/1108
+ meta.update(epoch=self.epoch + 1, iter=self.iter)
+
+ filename = filename_tmpl.format(self.epoch + 1)
+ filepath = osp.join(out_dir, filename)
+ optimizer = self.optimizer if save_optimizer else None
+ save_checkpoint(self.model, filepath, optimizer=optimizer, meta=meta)
+ # in some environments, `os.symlink` is not supported, you may need to
+ # set `create_symlink` to False
+ if create_symlink:
+ dst_file = osp.join(out_dir, 'latest.pth')
+ if platform.system() != 'Windows':
+ mmcv.symlink(filename, dst_file)
+ else:
+ shutil.copy(filepath, dst_file)
+
+
+@RUNNERS.register_module()
+class Runner(EpochBasedRunner):
+ """Deprecated name of EpochBasedRunner."""
+
+ def __init__(self, *args, **kwargs):
+ warnings.warn(
+ 'Runner was deprecated, please use EpochBasedRunner instead')
+ super().__init__(*args, **kwargs)
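+
+
+# A minimal sketch of driving ``EpochBasedRunner`` with a workflow
+# (illustrative addition, not from the upstream mmcv source); ``model``,
+# ``optimizer``, ``logger``, ``train_loader`` and ``val_loader`` are
+# placeholders supplied by the caller.
+#
+#   >>> runner = EpochBasedRunner(
+#   ...     model, optimizer=optimizer, work_dir='./work_dirs/demo',
+#   ...     logger=logger, max_epochs=4)
+#   >>> # run 2 training epochs, then 1 validation epoch, repeatedly until
+#   >>> # 4 training epochs have been completed
+#   >>> runner.run([train_loader, val_loader], [('train', 2), ('val', 1)])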
diff --git a/annotator/uniformer/mmcv/runner/fp16_utils.py b/annotator/uniformer/mmcv/runner/fp16_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..1981011d6859192e3e663e29d13500d56ba47f6c
--- /dev/null
+++ b/annotator/uniformer/mmcv/runner/fp16_utils.py
@@ -0,0 +1,410 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import functools
+import warnings
+from collections import abc
+from inspect import getfullargspec
+
+import numpy as np
+import torch
+import torch.nn as nn
+
+from annotator.uniformer.mmcv.utils import TORCH_VERSION, digit_version
+from .dist_utils import allreduce_grads as _allreduce_grads
+
+try:
+ # If PyTorch version >= 1.6.0, torch.cuda.amp.autocast would be imported
+ # and used; otherwise, auto fp16 will adopt mmcv's implementation.
+ # Note that when PyTorch >= 1.6.0, we still cast tensor types to fp16
+ # manually, so the behavior may not be consistent with real amp.
+ from torch.cuda.amp import autocast
+except ImportError:
+ pass
+
+
+def cast_tensor_type(inputs, src_type, dst_type):
+ """Recursively convert Tensor in inputs from src_type to dst_type.
+
+ Args:
+        inputs: Inputs to be cast.
+        src_type (torch.dtype): Source type.
+ dst_type (torch.dtype): Destination type.
+
+ Returns:
+        The same type as inputs, but all contained Tensors have been cast.
+ """
+ if isinstance(inputs, nn.Module):
+ return inputs
+ elif isinstance(inputs, torch.Tensor):
+ return inputs.to(dst_type)
+ elif isinstance(inputs, str):
+ return inputs
+ elif isinstance(inputs, np.ndarray):
+ return inputs
+ elif isinstance(inputs, abc.Mapping):
+ return type(inputs)({
+ k: cast_tensor_type(v, src_type, dst_type)
+ for k, v in inputs.items()
+ })
+ elif isinstance(inputs, abc.Iterable):
+ return type(inputs)(
+ cast_tensor_type(item, src_type, dst_type) for item in inputs)
+ else:
+ return inputs
+
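+# A small sketch of how ``cast_tensor_type`` recurses through containers
+# (illustrative addition, not from the upstream mmcv source).
+#
+#   >>> import torch
+#   >>> data = dict(img=torch.zeros(1, 3, 8, 8), label='cat', score=0.9)
+#   >>> fp16_data = cast_tensor_type(data, torch.float, torch.half)
+#   >>> fp16_data['img'].dtype   # tensors are converted to dst_type
+#   torch.float16
+#   >>> fp16_data['label']       # non-tensor values pass through unchanged
+#   'cat'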
+
+def auto_fp16(apply_to=None, out_fp32=False):
+ """Decorator to enable fp16 training automatically.
+
+ This decorator is useful when you write custom modules and want to support
+    mixed precision training. If input arguments are fp32 tensors, they will
+    be converted to fp16 automatically. Arguments other than fp32 tensors are
+    ignored. If you are using PyTorch >= 1.6, torch.cuda.amp is used as the
+    backend; otherwise, the original mmcv implementation is adopted.
+
+ Args:
+ apply_to (Iterable, optional): The argument names to be converted.
+ `None` indicates all arguments.
+ out_fp32 (bool): Whether to convert the output back to fp32.
+
+ Example:
+
+ >>> import torch.nn as nn
+ >>> class MyModule1(nn.Module):
+ >>>
+ >>> # Convert x and y to fp16
+ >>> @auto_fp16()
+ >>> def forward(self, x, y):
+ >>> pass
+
+ >>> import torch.nn as nn
+ >>> class MyModule2(nn.Module):
+ >>>
+ >>> # convert pred to fp16
+ >>> @auto_fp16(apply_to=('pred', ))
+ >>> def do_something(self, pred, others):
+ >>> pass
+ """
+
+ def auto_fp16_wrapper(old_func):
+
+ @functools.wraps(old_func)
+ def new_func(*args, **kwargs):
+ # check if the module has set the attribute `fp16_enabled`, if not,
+ # just fallback to the original method.
+ if not isinstance(args[0], torch.nn.Module):
+ raise TypeError('@auto_fp16 can only be used to decorate the '
+ 'method of nn.Module')
+ if not (hasattr(args[0], 'fp16_enabled') and args[0].fp16_enabled):
+ return old_func(*args, **kwargs)
+
+ # get the arg spec of the decorated method
+ args_info = getfullargspec(old_func)
+ # get the argument names to be casted
+ args_to_cast = args_info.args if apply_to is None else apply_to
+ # convert the args that need to be processed
+ new_args = []
+ # NOTE: default args are not taken into consideration
+ if args:
+ arg_names = args_info.args[:len(args)]
+ for i, arg_name in enumerate(arg_names):
+ if arg_name in args_to_cast:
+ new_args.append(
+ cast_tensor_type(args[i], torch.float, torch.half))
+ else:
+ new_args.append(args[i])
+ # convert the kwargs that need to be processed
+ new_kwargs = {}
+ if kwargs:
+ for arg_name, arg_value in kwargs.items():
+ if arg_name in args_to_cast:
+ new_kwargs[arg_name] = cast_tensor_type(
+ arg_value, torch.float, torch.half)
+ else:
+ new_kwargs[arg_name] = arg_value
+ # apply converted arguments to the decorated method
+ if (TORCH_VERSION != 'parrots' and
+ digit_version(TORCH_VERSION) >= digit_version('1.6.0')):
+ with autocast(enabled=True):
+ output = old_func(*new_args, **new_kwargs)
+ else:
+ output = old_func(*new_args, **new_kwargs)
+ # cast the results back to fp32 if necessary
+ if out_fp32:
+ output = cast_tensor_type(output, torch.half, torch.float)
+ return output
+
+ return new_func
+
+ return auto_fp16_wrapper
+
+
+def force_fp32(apply_to=None, out_fp16=False):
+ """Decorator to convert input arguments to fp32 in force.
+
+ This decorator is useful when you write custom modules and want to support
+ mixed precision training. If there are some inputs that must be processed
+    in fp32 mode, then this decorator can handle it. If input arguments are
+    fp16 tensors, they will be converted to fp32 automatically. Arguments other
+    than fp16 tensors are ignored. If you are using PyTorch >= 1.6,
+    torch.cuda.amp is used as the backend; otherwise, the original mmcv
+    implementation is adopted.
+
+ Args:
+ apply_to (Iterable, optional): The argument names to be converted.
+ `None` indicates all arguments.
+ out_fp16 (bool): Whether to convert the output back to fp16.
+
+ Example:
+
+ >>> import torch.nn as nn
+ >>> class MyModule1(nn.Module):
+ >>>
+ >>> # Convert x and y to fp32
+ >>> @force_fp32()
+ >>> def loss(self, x, y):
+ >>> pass
+
+ >>> import torch.nn as nn
+ >>> class MyModule2(nn.Module):
+ >>>
+ >>> # convert pred to fp32
+ >>> @force_fp32(apply_to=('pred', ))
+ >>> def post_process(self, pred, others):
+ >>> pass
+ """
+
+ def force_fp32_wrapper(old_func):
+
+ @functools.wraps(old_func)
+ def new_func(*args, **kwargs):
+ # check if the module has set the attribute `fp16_enabled`, if not,
+ # just fallback to the original method.
+ if not isinstance(args[0], torch.nn.Module):
+ raise TypeError('@force_fp32 can only be used to decorate the '
+ 'method of nn.Module')
+ if not (hasattr(args[0], 'fp16_enabled') and args[0].fp16_enabled):
+ return old_func(*args, **kwargs)
+ # get the arg spec of the decorated method
+ args_info = getfullargspec(old_func)
+ # get the argument names to be casted
+ args_to_cast = args_info.args if apply_to is None else apply_to
+ # convert the args that need to be processed
+ new_args = []
+ if args:
+ arg_names = args_info.args[:len(args)]
+ for i, arg_name in enumerate(arg_names):
+ if arg_name in args_to_cast:
+ new_args.append(
+ cast_tensor_type(args[i], torch.half, torch.float))
+ else:
+ new_args.append(args[i])
+ # convert the kwargs that need to be processed
+ new_kwargs = dict()
+ if kwargs:
+ for arg_name, arg_value in kwargs.items():
+ if arg_name in args_to_cast:
+ new_kwargs[arg_name] = cast_tensor_type(
+ arg_value, torch.half, torch.float)
+ else:
+ new_kwargs[arg_name] = arg_value
+ # apply converted arguments to the decorated method
+ if (TORCH_VERSION != 'parrots' and
+ digit_version(TORCH_VERSION) >= digit_version('1.6.0')):
+ with autocast(enabled=False):
+ output = old_func(*new_args, **new_kwargs)
+ else:
+ output = old_func(*new_args, **new_kwargs)
+ # cast the results back to fp32 if necessary
+ if out_fp16:
+ output = cast_tensor_type(output, torch.float, torch.half)
+ return output
+
+ return new_func
+
+ return force_fp32_wrapper
+
+
+def allreduce_grads(params, coalesce=True, bucket_size_mb=-1):
+    warnings.warn(
+        '"mmcv.runner.fp16_utils.allreduce_grads" is deprecated, and will be '
+        'removed in v2.8. Please switch to "mmcv.runner.allreduce_grads".')
+ _allreduce_grads(params, coalesce=coalesce, bucket_size_mb=bucket_size_mb)
+
+
+def wrap_fp16_model(model):
+ """Wrap the FP32 model to FP16.
+
+ If you are using PyTorch >= 1.6, torch.cuda.amp is used as the
+ backend, otherwise, original mmcv implementation will be adopted.
+
+ For PyTorch >= 1.6, this function will
+ 1. Set fp16 flag inside the model to True.
+
+ Otherwise:
+ 1. Convert FP32 model to FP16.
+    2. Keep some necessary layers in FP32, e.g., normalization layers.
+ 3. Set `fp16_enabled` flag inside the model to True.
+
+ Args:
+ model (nn.Module): Model in FP32.
+ """
+ if (TORCH_VERSION == 'parrots'
+ or digit_version(TORCH_VERSION) < digit_version('1.6.0')):
+ # convert model to fp16
+ model.half()
+ # patch the normalization layers to make it work in fp32 mode
+ patch_norm_fp32(model)
+ # set `fp16_enabled` flag
+ for m in model.modules():
+ if hasattr(m, 'fp16_enabled'):
+ m.fp16_enabled = True
+
+
+def patch_norm_fp32(module):
+ """Recursively convert normalization layers from FP16 to FP32.
+
+ Args:
+        module (nn.Module): The FP16 module whose normalization layers are to
+            be converted to FP32.
+
+    Returns:
+        nn.Module: The converted module, whose normalization layers have been
+            converted to FP32.
+ """
+ if isinstance(module, (nn.modules.batchnorm._BatchNorm, nn.GroupNorm)):
+ module.float()
+ if isinstance(module, nn.GroupNorm) or torch.__version__ < '1.3':
+ module.forward = patch_forward_method(module.forward, torch.half,
+ torch.float)
+ for child in module.children():
+ patch_norm_fp32(child)
+ return module
+
+
+def patch_forward_method(func, src_type, dst_type, convert_output=True):
+ """Patch the forward method of a module.
+
+ Args:
+ func (callable): The original forward method.
+ src_type (torch.dtype): Type of input arguments to be converted from.
+ dst_type (torch.dtype): Type of input arguments to be converted to.
+ convert_output (bool): Whether to convert the output back to src_type.
+
+ Returns:
+ callable: The patched forward method.
+ """
+
+ def new_forward(*args, **kwargs):
+ output = func(*cast_tensor_type(args, src_type, dst_type),
+ **cast_tensor_type(kwargs, src_type, dst_type))
+ if convert_output:
+ output = cast_tensor_type(output, dst_type, src_type)
+ return output
+
+ return new_forward
+
+
+class LossScaler:
+    """Class that manages loss scaling in mixed precision training which
+    supports both dynamic and static modes.
+
+    The implementation refers to
+    https://github.com/NVIDIA/apex/blob/master/apex/fp16_utils/loss_scaler.py.
+    Dynamic loss scaling is enabled by supplying ``mode='dynamic'``.
+ It's important to understand how :class:`LossScaler` operates.
+ Loss scaling is designed to combat the problem of underflowing
+ gradients encountered at long times when training fp16 networks.
+ Dynamic loss scaling begins by attempting a very high loss
+ scale. Ironically, this may result in OVERflowing gradients.
+ If overflowing gradients are encountered, :class:`FP16_Optimizer` then
+ skips the update step for this particular iteration/minibatch,
+ and :class:`LossScaler` adjusts the loss scale to a lower value.
+    If a certain number of iterations occur without overflowing gradients
+    being detected, :class:`LossScaler` increases the loss scale once more.
+ In this way :class:`LossScaler` attempts to "ride the edge" of always
+ using the highest loss scale possible without incurring overflow.
+
+ Args:
+ init_scale (float): Initial loss scale value, default: 2**32.
+ scale_factor (float): Factor used when adjusting the loss scale.
+ Default: 2.
+ mode (str): Loss scaling mode. 'dynamic' or 'static'
+ scale_window (int): Number of consecutive iterations without an
+ overflow to wait before increasing the loss scale. Default: 1000.
+ """
+
+ def __init__(self,
+ init_scale=2**32,
+ mode='dynamic',
+ scale_factor=2.,
+ scale_window=1000):
+ self.cur_scale = init_scale
+ self.cur_iter = 0
+ assert mode in ('dynamic',
+ 'static'), 'mode can only be dynamic or static'
+ self.mode = mode
+ self.last_overflow_iter = -1
+ self.scale_factor = scale_factor
+ self.scale_window = scale_window
+
+ def has_overflow(self, params):
+ """Check if params contain overflow."""
+ if self.mode != 'dynamic':
+ return False
+ for p in params:
+ if p.grad is not None and LossScaler._has_inf_or_nan(p.grad.data):
+ return True
+ return False
+
+    @staticmethod
+    def _has_inf_or_nan(x):
+        """Check if ``x`` contains inf or NaN values."""
+ try:
+ cpu_sum = float(x.float().sum())
+ except RuntimeError as instance:
+ if 'value cannot be converted' not in instance.args[0]:
+ raise
+ return True
+ else:
+ if cpu_sum == float('inf') or cpu_sum == -float('inf') \
+ or cpu_sum != cpu_sum:
+ return True
+ return False
+
+ def update_scale(self, overflow):
+        """Update the loss scale depending on whether overflow occurred."""
+ if self.mode != 'dynamic':
+ return
+ if overflow:
+ self.cur_scale = max(self.cur_scale / self.scale_factor, 1)
+ self.last_overflow_iter = self.cur_iter
+ else:
+ if (self.cur_iter - self.last_overflow_iter) % \
+ self.scale_window == 0:
+ self.cur_scale *= self.scale_factor
+ self.cur_iter += 1
+
+ def state_dict(self):
+ """Returns the state of the scaler as a :class:`dict`."""
+ return dict(
+ cur_scale=self.cur_scale,
+ cur_iter=self.cur_iter,
+ mode=self.mode,
+ last_overflow_iter=self.last_overflow_iter,
+ scale_factor=self.scale_factor,
+ scale_window=self.scale_window)
+
+ def load_state_dict(self, state_dict):
+ """Loads the loss_scaler state dict.
+
+ Args:
+ state_dict (dict): scaler state.
+ """
+ self.cur_scale = state_dict['cur_scale']
+ self.cur_iter = state_dict['cur_iter']
+ self.mode = state_dict['mode']
+ self.last_overflow_iter = state_dict['last_overflow_iter']
+ self.scale_factor = state_dict['scale_factor']
+ self.scale_window = state_dict['scale_window']
+
+ @property
+ def loss_scale(self):
+ return self.cur_scale
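+
+
+# A minimal sketch of dynamic loss scaling with ``LossScaler`` (illustrative
+# addition, not from the upstream mmcv source); ``model``, ``optimizer`` and
+# ``loss`` are placeholders from an ordinary fp16 training step.
+#
+#   >>> scaler = LossScaler(init_scale=2**16, mode='dynamic')
+#   >>> (loss * scaler.loss_scale).backward()
+#   >>> overflow = scaler.has_overflow(model.parameters())
+#   >>> if not overflow:
+#   ...     for p in model.parameters():
+#   ...         if p.grad is not None:
+#   ...             p.grad.div_(scaler.loss_scale)  # unscale before stepping
+#   ...     optimizer.step()
+#   >>> optimizer.zero_grad()
+#   >>> scaler.update_scale(overflow)  # shrink on overflow, grow periodically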
diff --git a/annotator/uniformer/mmcv/runner/hooks/__init__.py b/annotator/uniformer/mmcv/runner/hooks/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..915af28cefab14a14c1188ed861161080fd138a3
--- /dev/null
+++ b/annotator/uniformer/mmcv/runner/hooks/__init__.py
@@ -0,0 +1,29 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from .checkpoint import CheckpointHook
+from .closure import ClosureHook
+from .ema import EMAHook
+from .evaluation import DistEvalHook, EvalHook
+from .hook import HOOKS, Hook
+from .iter_timer import IterTimerHook
+from .logger import (DvcliveLoggerHook, LoggerHook, MlflowLoggerHook,
+ NeptuneLoggerHook, PaviLoggerHook, TensorboardLoggerHook,
+ TextLoggerHook, WandbLoggerHook)
+from .lr_updater import LrUpdaterHook
+from .memory import EmptyCacheHook
+from .momentum_updater import MomentumUpdaterHook
+from .optimizer import (Fp16OptimizerHook, GradientCumulativeFp16OptimizerHook,
+ GradientCumulativeOptimizerHook, OptimizerHook)
+from .profiler import ProfilerHook
+from .sampler_seed import DistSamplerSeedHook
+from .sync_buffer import SyncBuffersHook
+
+__all__ = [
+ 'HOOKS', 'Hook', 'CheckpointHook', 'ClosureHook', 'LrUpdaterHook',
+ 'OptimizerHook', 'Fp16OptimizerHook', 'IterTimerHook',
+ 'DistSamplerSeedHook', 'EmptyCacheHook', 'LoggerHook', 'MlflowLoggerHook',
+ 'PaviLoggerHook', 'TextLoggerHook', 'TensorboardLoggerHook',
+ 'NeptuneLoggerHook', 'WandbLoggerHook', 'DvcliveLoggerHook',
+ 'MomentumUpdaterHook', 'SyncBuffersHook', 'EMAHook', 'EvalHook',
+ 'DistEvalHook', 'ProfilerHook', 'GradientCumulativeOptimizerHook',
+ 'GradientCumulativeFp16OptimizerHook'
+]
diff --git a/annotator/uniformer/mmcv/runner/hooks/__pycache__/__init__.cpython-310.pyc b/annotator/uniformer/mmcv/runner/hooks/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8ff19e349939145e787ee76be5d47ae9a1924119
Binary files /dev/null and b/annotator/uniformer/mmcv/runner/hooks/__pycache__/__init__.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/runner/hooks/__pycache__/__init__.cpython-38.pyc b/annotator/uniformer/mmcv/runner/hooks/__pycache__/__init__.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5b6f64f5e32088fd911815615ffc64e0f8cf18c2
Binary files /dev/null and b/annotator/uniformer/mmcv/runner/hooks/__pycache__/__init__.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/runner/hooks/__pycache__/checkpoint.cpython-310.pyc b/annotator/uniformer/mmcv/runner/hooks/__pycache__/checkpoint.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d5664efa290b5d3ab500bb7c72d318db93c2e572
Binary files /dev/null and b/annotator/uniformer/mmcv/runner/hooks/__pycache__/checkpoint.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/runner/hooks/__pycache__/checkpoint.cpython-38.pyc b/annotator/uniformer/mmcv/runner/hooks/__pycache__/checkpoint.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3ae741b22d673f52cb39d110ab11ecdef5db2f7c
Binary files /dev/null and b/annotator/uniformer/mmcv/runner/hooks/__pycache__/checkpoint.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/runner/hooks/__pycache__/closure.cpython-310.pyc b/annotator/uniformer/mmcv/runner/hooks/__pycache__/closure.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6b8654ca5e1e684c0e535782c171284e3f487fbe
Binary files /dev/null and b/annotator/uniformer/mmcv/runner/hooks/__pycache__/closure.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/runner/hooks/__pycache__/closure.cpython-38.pyc b/annotator/uniformer/mmcv/runner/hooks/__pycache__/closure.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..879fb92d18f8a84207f199a7f1f3d8a7cce1e466
Binary files /dev/null and b/annotator/uniformer/mmcv/runner/hooks/__pycache__/closure.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/runner/hooks/__pycache__/ema.cpython-310.pyc b/annotator/uniformer/mmcv/runner/hooks/__pycache__/ema.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..72105534cb15c760900dfa7413d208a2c09bd149
Binary files /dev/null and b/annotator/uniformer/mmcv/runner/hooks/__pycache__/ema.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/runner/hooks/__pycache__/ema.cpython-38.pyc b/annotator/uniformer/mmcv/runner/hooks/__pycache__/ema.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4779a896f85d5e925b4f08f04c4408b2a97058f0
Binary files /dev/null and b/annotator/uniformer/mmcv/runner/hooks/__pycache__/ema.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/runner/hooks/__pycache__/evaluation.cpython-310.pyc b/annotator/uniformer/mmcv/runner/hooks/__pycache__/evaluation.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ec4a30f5aafbeae0b48b53c9d5e60c0aaca09b20
Binary files /dev/null and b/annotator/uniformer/mmcv/runner/hooks/__pycache__/evaluation.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/runner/hooks/__pycache__/evaluation.cpython-38.pyc b/annotator/uniformer/mmcv/runner/hooks/__pycache__/evaluation.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..924524f2c2dc7516f00a91d8d9e1d2cdf56ed914
Binary files /dev/null and b/annotator/uniformer/mmcv/runner/hooks/__pycache__/evaluation.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/runner/hooks/__pycache__/hook.cpython-310.pyc b/annotator/uniformer/mmcv/runner/hooks/__pycache__/hook.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..870445ba8f7cd4615b4e0696c2ad7793c3dac209
Binary files /dev/null and b/annotator/uniformer/mmcv/runner/hooks/__pycache__/hook.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/runner/hooks/__pycache__/hook.cpython-38.pyc b/annotator/uniformer/mmcv/runner/hooks/__pycache__/hook.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5157b5b7e37e0c6c4afa8177f8085bcc8e814b32
Binary files /dev/null and b/annotator/uniformer/mmcv/runner/hooks/__pycache__/hook.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/runner/hooks/__pycache__/iter_timer.cpython-310.pyc b/annotator/uniformer/mmcv/runner/hooks/__pycache__/iter_timer.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..85ea51ac06933f679fcd79cb5489f6f7750f523f
Binary files /dev/null and b/annotator/uniformer/mmcv/runner/hooks/__pycache__/iter_timer.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/runner/hooks/__pycache__/iter_timer.cpython-38.pyc b/annotator/uniformer/mmcv/runner/hooks/__pycache__/iter_timer.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..eafd847144ba8327cbee04d497fcd40ccb72a436
Binary files /dev/null and b/annotator/uniformer/mmcv/runner/hooks/__pycache__/iter_timer.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/runner/hooks/__pycache__/lr_updater.cpython-310.pyc b/annotator/uniformer/mmcv/runner/hooks/__pycache__/lr_updater.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f6e74b2451542177819e807b47879ea493af6488
Binary files /dev/null and b/annotator/uniformer/mmcv/runner/hooks/__pycache__/lr_updater.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/runner/hooks/__pycache__/lr_updater.cpython-38.pyc b/annotator/uniformer/mmcv/runner/hooks/__pycache__/lr_updater.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cfd8bba6dd38dce0a23c24b0ac75fb3e75f214fa
Binary files /dev/null and b/annotator/uniformer/mmcv/runner/hooks/__pycache__/lr_updater.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/runner/hooks/__pycache__/memory.cpython-310.pyc b/annotator/uniformer/mmcv/runner/hooks/__pycache__/memory.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2d03267df26612466202c1a5c92ba331c5d96b93
Binary files /dev/null and b/annotator/uniformer/mmcv/runner/hooks/__pycache__/memory.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/runner/hooks/__pycache__/memory.cpython-38.pyc b/annotator/uniformer/mmcv/runner/hooks/__pycache__/memory.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e59653de5f951cae72fa6e4bb92292388d14653c
Binary files /dev/null and b/annotator/uniformer/mmcv/runner/hooks/__pycache__/memory.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/runner/hooks/__pycache__/momentum_updater.cpython-310.pyc b/annotator/uniformer/mmcv/runner/hooks/__pycache__/momentum_updater.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e35032a44fd7bfab5dca97b1dbad2a970f41f033
Binary files /dev/null and b/annotator/uniformer/mmcv/runner/hooks/__pycache__/momentum_updater.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/runner/hooks/__pycache__/momentum_updater.cpython-38.pyc b/annotator/uniformer/mmcv/runner/hooks/__pycache__/momentum_updater.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..029dc981106d050539280065e316d541d9a8c918
Binary files /dev/null and b/annotator/uniformer/mmcv/runner/hooks/__pycache__/momentum_updater.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/runner/hooks/__pycache__/optimizer.cpython-310.pyc b/annotator/uniformer/mmcv/runner/hooks/__pycache__/optimizer.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0b031f8277ce4865cfb219855359f692f9b487ab
Binary files /dev/null and b/annotator/uniformer/mmcv/runner/hooks/__pycache__/optimizer.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/runner/hooks/__pycache__/optimizer.cpython-38.pyc b/annotator/uniformer/mmcv/runner/hooks/__pycache__/optimizer.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..88a79e69a64a3bc9c9605f731bcf4d438dd2f26d
Binary files /dev/null and b/annotator/uniformer/mmcv/runner/hooks/__pycache__/optimizer.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/runner/hooks/__pycache__/profiler.cpython-310.pyc b/annotator/uniformer/mmcv/runner/hooks/__pycache__/profiler.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c0d33a2c21200b0bdf61139dbbde7e3c5f4a7e7e
Binary files /dev/null and b/annotator/uniformer/mmcv/runner/hooks/__pycache__/profiler.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/runner/hooks/__pycache__/profiler.cpython-38.pyc b/annotator/uniformer/mmcv/runner/hooks/__pycache__/profiler.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4bf96a39bf45b6b97918945d1c0aebb6497da9a2
Binary files /dev/null and b/annotator/uniformer/mmcv/runner/hooks/__pycache__/profiler.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/runner/hooks/__pycache__/sampler_seed.cpython-310.pyc b/annotator/uniformer/mmcv/runner/hooks/__pycache__/sampler_seed.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..46c619550af29869b1fb7a76539e78849e4e28b2
Binary files /dev/null and b/annotator/uniformer/mmcv/runner/hooks/__pycache__/sampler_seed.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/runner/hooks/__pycache__/sampler_seed.cpython-38.pyc b/annotator/uniformer/mmcv/runner/hooks/__pycache__/sampler_seed.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d55ec759c5ffafee68b55b99575b746cbb217f6c
Binary files /dev/null and b/annotator/uniformer/mmcv/runner/hooks/__pycache__/sampler_seed.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/runner/hooks/__pycache__/sync_buffer.cpython-310.pyc b/annotator/uniformer/mmcv/runner/hooks/__pycache__/sync_buffer.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0d5b22c3ab53e73416bbbbd675f4a9613af35935
Binary files /dev/null and b/annotator/uniformer/mmcv/runner/hooks/__pycache__/sync_buffer.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/runner/hooks/__pycache__/sync_buffer.cpython-38.pyc b/annotator/uniformer/mmcv/runner/hooks/__pycache__/sync_buffer.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fc4b8fe85fe09cf5122f3b7c75a93af797d8df19
Binary files /dev/null and b/annotator/uniformer/mmcv/runner/hooks/__pycache__/sync_buffer.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/runner/hooks/checkpoint.py b/annotator/uniformer/mmcv/runner/hooks/checkpoint.py
new file mode 100644
index 0000000000000000000000000000000000000000..6af3fae43ac4b35532641a81eb13557edfc7dfba
--- /dev/null
+++ b/annotator/uniformer/mmcv/runner/hooks/checkpoint.py
@@ -0,0 +1,167 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import os.path as osp
+import warnings
+
+from annotator.uniformer.mmcv.fileio import FileClient
+from ..dist_utils import allreduce_params, master_only
+from .hook import HOOKS, Hook
+
+
+@HOOKS.register_module()
+class CheckpointHook(Hook):
+ """Save checkpoints periodically.
+
+ Args:
+ interval (int): The saving period. If ``by_epoch=True``, interval
+ indicates epochs, otherwise it indicates iterations.
+ Default: -1, which means "never".
+ by_epoch (bool): Saving checkpoints by epoch or by iteration.
+ Default: True.
+ save_optimizer (bool): Whether to save optimizer state_dict in the
+ checkpoint. It is usually used for resuming experiments.
+ Default: True.
+ out_dir (str, optional): The root directory to save checkpoints. If not
+ specified, ``runner.work_dir`` will be used by default. If
+ specified, the ``out_dir`` will be the concatenation of ``out_dir``
+ and the last level directory of ``runner.work_dir``.
+ `Changed in version 1.3.16.`
+ max_keep_ckpts (int, optional): The maximum checkpoints to keep.
+ In some cases we want only the latest few checkpoints and would
+ like to delete old ones to save the disk space.
+ Default: -1, which means unlimited.
+ save_last (bool, optional): Whether to force the last checkpoint to be
+ saved regardless of interval. Default: True.
+ sync_buffer (bool, optional): Whether to synchronize buffers in
+ different gpus. Default: False.
+ file_client_args (dict, optional): Arguments to instantiate a
+ FileClient. See :class:`mmcv.fileio.FileClient` for details.
+ Default: None.
+ `New in version 1.3.16.`
+
+ .. warning::
+ Before v1.3.16, the ``out_dir`` argument indicates the path where the
+ checkpoint is stored. However, since v1.3.16, ``out_dir`` indicates the
+ root directory and the final path to save checkpoint is the
+ concatenation of ``out_dir`` and the last level directory of
+ ``runner.work_dir``. Suppose the value of ``out_dir`` is "/path/of/A"
+ and the value of ``runner.work_dir`` is "/path/of/B", then the final
+ path will be "/path/of/A/B".
+ """
+
+ def __init__(self,
+ interval=-1,
+ by_epoch=True,
+ save_optimizer=True,
+ out_dir=None,
+ max_keep_ckpts=-1,
+ save_last=True,
+ sync_buffer=False,
+ file_client_args=None,
+ **kwargs):
+ self.interval = interval
+ self.by_epoch = by_epoch
+ self.save_optimizer = save_optimizer
+ self.out_dir = out_dir
+ self.max_keep_ckpts = max_keep_ckpts
+ self.save_last = save_last
+ self.args = kwargs
+ self.sync_buffer = sync_buffer
+ self.file_client_args = file_client_args
+
+ def before_run(self, runner):
+ if not self.out_dir:
+ self.out_dir = runner.work_dir
+
+ self.file_client = FileClient.infer_client(self.file_client_args,
+ self.out_dir)
+
+ # if `self.out_dir` is not equal to `runner.work_dir`, it means that
+ # `self.out_dir` is set so the final `self.out_dir` is the
+ # concatenation of `self.out_dir` and the last level directory of
+ # `runner.work_dir`
+ if self.out_dir != runner.work_dir:
+ basename = osp.basename(runner.work_dir.rstrip(osp.sep))
+ self.out_dir = self.file_client.join_path(self.out_dir, basename)
+
+ runner.logger.info((f'Checkpoints will be saved to {self.out_dir} by '
+ f'{self.file_client.name}.'))
+
+ # disable the create_symlink option because some file backends do not
+ # allow to create a symlink
+ if 'create_symlink' in self.args:
+ if self.args[
+ 'create_symlink'] and not self.file_client.allow_symlink:
+ self.args['create_symlink'] = False
+ warnings.warn(
+                    ('create_symlink is set as True by the user but is changed '
+                     'to be False because creating symbolic link is not '
+ f'allowed in {self.file_client.name}'))
+ else:
+ self.args['create_symlink'] = self.file_client.allow_symlink
+
+ def after_train_epoch(self, runner):
+ if not self.by_epoch:
+ return
+
+ # save checkpoint for following cases:
+ # 1. every ``self.interval`` epochs
+ # 2. reach the last epoch of training
+ if self.every_n_epochs(
+ runner, self.interval) or (self.save_last
+ and self.is_last_epoch(runner)):
+ runner.logger.info(
+ f'Saving checkpoint at {runner.epoch + 1} epochs')
+ if self.sync_buffer:
+ allreduce_params(runner.model.buffers())
+ self._save_checkpoint(runner)
+
+ @master_only
+ def _save_checkpoint(self, runner):
+ """Save the current checkpoint and delete unwanted checkpoint."""
+ runner.save_checkpoint(
+ self.out_dir, save_optimizer=self.save_optimizer, **self.args)
+ if runner.meta is not None:
+ if self.by_epoch:
+ cur_ckpt_filename = self.args.get(
+ 'filename_tmpl', 'epoch_{}.pth').format(runner.epoch + 1)
+ else:
+ cur_ckpt_filename = self.args.get(
+ 'filename_tmpl', 'iter_{}.pth').format(runner.iter + 1)
+ runner.meta.setdefault('hook_msgs', dict())
+ runner.meta['hook_msgs']['last_ckpt'] = self.file_client.join_path(
+ self.out_dir, cur_ckpt_filename)
+ # remove other checkpoints
+ if self.max_keep_ckpts > 0:
+ if self.by_epoch:
+ name = 'epoch_{}.pth'
+ current_ckpt = runner.epoch + 1
+ else:
+ name = 'iter_{}.pth'
+ current_ckpt = runner.iter + 1
+ redundant_ckpts = range(
+ current_ckpt - self.max_keep_ckpts * self.interval, 0,
+ -self.interval)
+ filename_tmpl = self.args.get('filename_tmpl', name)
+ for _step in redundant_ckpts:
+ ckpt_path = self.file_client.join_path(
+ self.out_dir, filename_tmpl.format(_step))
+ if self.file_client.isfile(ckpt_path):
+ self.file_client.remove(ckpt_path)
+ else:
+ break
+
+ def after_train_iter(self, runner):
+ if self.by_epoch:
+ return
+
+ # save checkpoint for following cases:
+ # 1. every ``self.interval`` iterations
+ # 2. reach the last iteration of training
+ if self.every_n_iters(
+ runner, self.interval) or (self.save_last
+ and self.is_last_iter(runner)):
+ runner.logger.info(
+ f'Saving checkpoint at {runner.iter + 1} iterations')
+ if self.sync_buffer:
+ allreduce_params(runner.model.buffers())
+ self._save_checkpoint(runner)
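+
+
+# A minimal config sketch for ``CheckpointHook`` (illustrative addition, not
+# from the upstream mmcv source). With ``out_dir='/path/of/A'`` and
+# ``runner.work_dir='/path/of/B'``, checkpoints end up in '/path/of/A/B', as
+# described in the class docstring.
+#
+#   >>> checkpoint_config = dict(
+#   ...     interval=1,          # save after every epoch (by_epoch=True)
+#   ...     max_keep_ckpts=3,    # keep only the 3 most recent checkpoints
+#   ...     out_dir='/path/of/A')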
diff --git a/annotator/uniformer/mmcv/runner/hooks/closure.py b/annotator/uniformer/mmcv/runner/hooks/closure.py
new file mode 100644
index 0000000000000000000000000000000000000000..b955f81f425be4ac3e6bb3f4aac653887989e872
--- /dev/null
+++ b/annotator/uniformer/mmcv/runner/hooks/closure.py
@@ -0,0 +1,11 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from .hook import HOOKS, Hook
+
+
+@HOOKS.register_module()
+class ClosureHook(Hook):
+
+ def __init__(self, fn_name, fn):
+ assert hasattr(self, fn_name)
+ assert callable(fn)
+ setattr(self, fn_name, fn)
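+
+
+# A minimal sketch of ``ClosureHook`` (illustrative addition, not from the
+# upstream mmcv source): it binds a user-provided function to one hook stage.
+#
+#   >>> def print_lr(runner):
+#   ...     print(runner.current_lr())
+#   >>> hook = ClosureHook('after_train_iter', print_lr)
+#   >>> # runner.register_hook(hook)  # print_lr now runs after each train iter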
diff --git a/annotator/uniformer/mmcv/runner/hooks/ema.py b/annotator/uniformer/mmcv/runner/hooks/ema.py
new file mode 100644
index 0000000000000000000000000000000000000000..15c7e68088f019802a59e7ae41cc1fe0c7f28f96
--- /dev/null
+++ b/annotator/uniformer/mmcv/runner/hooks/ema.py
@@ -0,0 +1,89 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from ...parallel import is_module_wrapper
+from ..hooks.hook import HOOKS, Hook
+
+
+@HOOKS.register_module()
+class EMAHook(Hook):
+ r"""Exponential Moving Average Hook.
+
+    Use an Exponential Moving Average on all parameters of the model during
+    training. Every parameter has an ema backup, which is updated by the
+    formula below. EMAHook takes priority over EvalHook and CheckpointSaverHook.
+
+ .. math::
+
+        X_{\text{ema},t+1} = (1 - \text{momentum}) \times
+        X_{\text{ema},t} + \text{momentum} \times X_t
+
+ Args:
+ momentum (float): The momentum used for updating ema parameter.
+ Defaults to 0.0002.
+ interval (int): Update ema parameter every interval iteration.
+ Defaults to 1.
+        warm_up (int): During the first warm_up steps, a smaller momentum is
+            used so that the ema parameters update more slowly. Defaults to 100.
+ resume_from (str): The checkpoint path. Defaults to None.
+ """
+
+ def __init__(self,
+ momentum=0.0002,
+ interval=1,
+ warm_up=100,
+ resume_from=None):
+ assert isinstance(interval, int) and interval > 0
+ self.warm_up = warm_up
+ self.interval = interval
+ assert momentum > 0 and momentum < 1
+ self.momentum = momentum**interval
+ self.checkpoint = resume_from
+
+ def before_run(self, runner):
+        """To resume the model with its ema parameters more conveniently.
+
+        Register the ema parameters as ``named_buffer`` on the model.
+ """
+ model = runner.model
+ if is_module_wrapper(model):
+ model = model.module
+ self.param_ema_buffer = {}
+ self.model_parameters = dict(model.named_parameters(recurse=True))
+ for name, value in self.model_parameters.items():
+ # "." is not allowed in module's buffer name
+ buffer_name = f"ema_{name.replace('.', '_')}"
+ self.param_ema_buffer[name] = buffer_name
+ model.register_buffer(buffer_name, value.data.clone())
+ self.model_buffers = dict(model.named_buffers(recurse=True))
+ if self.checkpoint is not None:
+ runner.resume(self.checkpoint)
+
+ def after_train_iter(self, runner):
+ """Update ema parameter every self.interval iterations."""
+ curr_step = runner.iter
+        # We warm up the momentum considering the instability at the beginning
+ momentum = min(self.momentum,
+ (1 + curr_step) / (self.warm_up + curr_step))
+ if curr_step % self.interval != 0:
+ return
+ for name, parameter in self.model_parameters.items():
+ buffer_name = self.param_ema_buffer[name]
+ buffer_parameter = self.model_buffers[buffer_name]
+            buffer_parameter.mul_(1 - momentum).add_(
+                parameter.data, alpha=momentum)
+
+ def after_train_epoch(self, runner):
+ """We load parameter values from ema backup to model before the
+ EvalHook."""
+ self._swap_ema_parameters()
+
+ def before_train_epoch(self, runner):
+ """We recover model's parameter from ema backup after last epoch's
+ EvalHook."""
+ self._swap_ema_parameters()
+
+ def _swap_ema_parameters(self):
+ """Swap the parameter of model with parameter in ema_buffer."""
+ for name, value in self.model_parameters.items():
+ temp = value.data.clone()
+ ema_buffer = self.model_buffers[self.param_ema_buffer[name]]
+ value.data.copy_(ema_buffer.data)
+ ema_buffer.data.copy_(temp)
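+
+
+# A minimal sketch of enabling ``EMAHook`` (illustrative addition, not from
+# the upstream mmcv source). With momentum=0.0002 and interval=1, each update
+# keeps 99.98% of the old ema value and mixes in 0.02% of the current
+# parameter value.
+#
+#   >>> custom_hooks = [dict(type='EMAHook', momentum=0.0002, interval=1)]
+#   >>> # or register directly on a runner:
+#   >>> # runner.register_hook(EMAHook(momentum=0.0002), priority='HIGH')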
diff --git a/annotator/uniformer/mmcv/runner/hooks/evaluation.py b/annotator/uniformer/mmcv/runner/hooks/evaluation.py
new file mode 100644
index 0000000000000000000000000000000000000000..4d00999ce5665c53bded8de9e084943eee2d230d
--- /dev/null
+++ b/annotator/uniformer/mmcv/runner/hooks/evaluation.py
@@ -0,0 +1,509 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import os.path as osp
+import warnings
+from math import inf
+
+import torch.distributed as dist
+from torch.nn.modules.batchnorm import _BatchNorm
+from torch.utils.data import DataLoader
+
+from annotator.uniformer.mmcv.fileio import FileClient
+from annotator.uniformer.mmcv.utils import is_seq_of
+from .hook import Hook
+from .logger import LoggerHook
+
+
+class EvalHook(Hook):
+ """Non-Distributed evaluation hook.
+
+    This hook will regularly perform evaluation at a given interval when
+    running in a non-distributed environment.
+
+ Args:
+ dataloader (DataLoader): A PyTorch dataloader, whose dataset has
+ implemented ``evaluate`` function.
+ start (int | None, optional): Evaluation starting epoch. It enables
+ evaluation before the training starts if ``start`` <= the resuming
+ epoch. If None, whether to evaluate is merely decided by
+ ``interval``. Default: None.
+ interval (int): Evaluation interval. Default: 1.
+        by_epoch (bool): Whether to perform evaluation by epoch or by
+            iteration. If set to True, it will perform by epoch; otherwise,
+            by iteration.
+ Default: True.
+        save_best (str, optional): If a metric is specified, it will measure
+            the best checkpoint during evaluation. Information about the best
+            checkpoint is saved in ``runner.meta['hook_msgs']`` to keep the
+            best score value and best checkpoint path, which will also be
+            loaded when resuming a checkpoint. Options are the evaluation
+            metrics on the test dataset, e.g., ``bbox_mAP``, ``segm_mAP`` for
+            bbox detection and instance segmentation, ``AR@100`` for proposal
+            recall. If ``save_best`` is ``auto``, the first key of the returned
+            ``OrderedDict`` result will be used. Default: None.
+ rule (str | None, optional): Comparison rule for best score. If set to
+            None, it will infer a reasonable rule. Keys such as 'acc', 'top',
+            etc. will be inferred by the 'greater' rule. Keys containing 'loss'
+            will be inferred by the 'less' rule. Options are 'greater', 'less',
+            None.
+ Default: None.
+ test_fn (callable, optional): test a model with samples from a
+ dataloader, and return the test results. If ``None``, the default
+ test function ``mmcv.engine.single_gpu_test`` will be used.
+ (default: ``None``)
+ greater_keys (List[str] | None, optional): Metric keys that will be
+ inferred by 'greater' comparison rule. If ``None``,
+ _default_greater_keys will be used. (default: ``None``)
+ less_keys (List[str] | None, optional): Metric keys that will be
+ inferred by 'less' comparison rule. If ``None``, _default_less_keys
+ will be used. (default: ``None``)
+ out_dir (str, optional): The root directory to save checkpoints. If not
+ specified, `runner.work_dir` will be used by default. If specified,
+ the `out_dir` will be the concatenation of `out_dir` and the last
+ level directory of `runner.work_dir`.
+ `New in version 1.3.16.`
+ file_client_args (dict): Arguments to instantiate a FileClient.
+ See :class:`mmcv.fileio.FileClient` for details. Default: None.
+ `New in version 1.3.16.`
+ **eval_kwargs: Evaluation arguments fed into the evaluate function of
+ the dataset.
+
+ Notes:
+        If new arguments are added to EvalHook, tools/test.py and
+        tools/eval_metric.py may be affected.
+ """
+
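+    # A minimal config sketch (illustrative addition, not from the upstream
+    # mmcv source): 'mIoU' matches ``_default_greater_keys``, so the 'greater'
+    # rule is inferred and the checkpoint with the highest mIoU is kept as the
+    # best one.
+    #
+    #   >>> evaluation = dict(interval=1, by_epoch=True, save_best='mIoU')
+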
+    # Since the keys for determining greater or less are related to the
+    # downstream tasks, downstream repos may need to overwrite the following
+    # inner variables accordingly.
+
+ rule_map = {'greater': lambda x, y: x > y, 'less': lambda x, y: x < y}
+ init_value_map = {'greater': -inf, 'less': inf}
+ _default_greater_keys = [
+ 'acc', 'top', 'AR@', 'auc', 'precision', 'mAP', 'mDice', 'mIoU',
+ 'mAcc', 'aAcc'
+ ]
+ _default_less_keys = ['loss']
+
+ def __init__(self,
+ dataloader,
+ start=None,
+ interval=1,
+ by_epoch=True,
+ save_best=None,
+ rule=None,
+ test_fn=None,
+ greater_keys=None,
+ less_keys=None,
+ out_dir=None,
+ file_client_args=None,
+ **eval_kwargs):
+ if not isinstance(dataloader, DataLoader):
+ raise TypeError(f'dataloader must be a pytorch DataLoader, '
+ f'but got {type(dataloader)}')
+
+ if interval <= 0:
+ raise ValueError(f'interval must be a positive number, '
+ f'but got {interval}')
+
+ assert isinstance(by_epoch, bool), '``by_epoch`` should be a boolean'
+
+ if start is not None and start < 0:
+ raise ValueError(f'The evaluation start epoch {start} is smaller '
+ f'than 0')
+
+ self.dataloader = dataloader
+ self.interval = interval
+ self.start = start
+ self.by_epoch = by_epoch
+
+ assert isinstance(save_best, str) or save_best is None, \
+            '``save_best`` should be a str or None ' \
+ f'rather than {type(save_best)}'
+ self.save_best = save_best
+ self.eval_kwargs = eval_kwargs
+ self.initial_flag = True
+
+ if test_fn is None:
+ from annotator.uniformer.mmcv.engine import single_gpu_test
+ self.test_fn = single_gpu_test
+ else:
+ self.test_fn = test_fn
+
+ if greater_keys is None:
+ self.greater_keys = self._default_greater_keys
+ else:
+ if not isinstance(greater_keys, (list, tuple)):
+ greater_keys = (greater_keys, )
+ assert is_seq_of(greater_keys, str)
+ self.greater_keys = greater_keys
+
+ if less_keys is None:
+ self.less_keys = self._default_less_keys
+ else:
+ if not isinstance(less_keys, (list, tuple)):
+ less_keys = (less_keys, )
+ assert is_seq_of(less_keys, str)
+ self.less_keys = less_keys
+
+ if self.save_best is not None:
+ self.best_ckpt_path = None
+ self._init_rule(rule, self.save_best)
+
+ self.out_dir = out_dir
+ self.file_client_args = file_client_args
+
+ def _init_rule(self, rule, key_indicator):
+ """Initialize rule, key_indicator, comparison_func, and best score.
+
+        Here is the rule used to determine which comparison rule applies to a
+        key indicator when the rule is not specified (note that the key
+        indicator matching is case-insensitive):
+        1. If the key indicator is in ``self.greater_keys``, the rule will be
+           specified as 'greater'.
+        2. Or if the key indicator is in ``self.less_keys``, the rule will be
+           specified as 'less'.
+        3. Or if the key indicator contains any item in ``self.greater_keys``
+           as a substring, the rule will be specified as 'greater'.
+        4. Or if the key indicator contains any item in ``self.less_keys``
+           as a substring, the rule will be specified as 'less'.
+
+ Args:
+ rule (str | None): Comparison rule for best score.
+ key_indicator (str | None): Key indicator to determine the
+ comparison rule.
+ """
+ if rule not in self.rule_map and rule is not None:
+ raise KeyError(f'rule must be greater, less or None, '
+ f'but got {rule}.')
+
+ if rule is None:
+ if key_indicator != 'auto':
+ # `_lc` here means we use the lower case of keys for
+ # case-insensitive matching
+ key_indicator_lc = key_indicator.lower()
+ greater_keys = [key.lower() for key in self.greater_keys]
+ less_keys = [key.lower() for key in self.less_keys]
+
+ if key_indicator_lc in greater_keys:
+ rule = 'greater'
+ elif key_indicator_lc in less_keys:
+ rule = 'less'
+ elif any(key in key_indicator_lc for key in greater_keys):
+ rule = 'greater'
+ elif any(key in key_indicator_lc for key in less_keys):
+ rule = 'less'
+ else:
+ raise ValueError(f'Cannot infer the rule for key '
+ f'{key_indicator}, thus a specific rule '
+ f'must be specified.')
+ self.rule = rule
+ self.key_indicator = key_indicator
+ if self.rule is not None:
+ self.compare_func = self.rule_map[self.rule]
+
+ def before_run(self, runner):
+ if not self.out_dir:
+ self.out_dir = runner.work_dir
+
+ self.file_client = FileClient.infer_client(self.file_client_args,
+ self.out_dir)
+
+ # if `self.out_dir` is not equal to `runner.work_dir`, it means that
+ # `self.out_dir` is set so the final `self.out_dir` is the
+ # concatenation of `self.out_dir` and the last level directory of
+ # `runner.work_dir`
+ if self.out_dir != runner.work_dir:
+ basename = osp.basename(runner.work_dir.rstrip(osp.sep))
+ self.out_dir = self.file_client.join_path(self.out_dir, basename)
+ runner.logger.info(
+ (f'The best checkpoint will be saved to {self.out_dir} by '
+ f'{self.file_client.name}'))
+
+ if self.save_best is not None:
+ if runner.meta is None:
+ warnings.warn('runner.meta is None. Creating an empty one.')
+ runner.meta = dict()
+ runner.meta.setdefault('hook_msgs', dict())
+ self.best_ckpt_path = runner.meta['hook_msgs'].get(
+ 'best_ckpt', None)
+
+ def before_train_iter(self, runner):
+ """Evaluate the model only at the start of training by iteration."""
+ if self.by_epoch or not self.initial_flag:
+ return
+ if self.start is not None and runner.iter >= self.start:
+ self.after_train_iter(runner)
+ self.initial_flag = False
+
+ def before_train_epoch(self, runner):
+ """Evaluate the model only at the start of training by epoch."""
+ if not (self.by_epoch and self.initial_flag):
+ return
+ if self.start is not None and runner.epoch >= self.start:
+ self.after_train_epoch(runner)
+ self.initial_flag = False
+
+ def after_train_iter(self, runner):
+ """Called after every training iter to evaluate the results."""
+ if not self.by_epoch and self._should_evaluate(runner):
+ # Because the priority of EvalHook is higher than LoggerHook, the
+ # training log and the evaluating log are mixed. Therefore,
+ # we need to dump the training log and clear it before evaluating
+ # log is generated. In addition, this problem will only appear in
+ # `IterBasedRunner` whose `self.by_epoch` is False, because
+ # `EpochBasedRunner` whose `self.by_epoch` is True calls
+ # `_do_evaluate` in `after_train_epoch` stage, and at this stage
+ # the training log has been printed, so it will not cause any
+ # problem. more details at
+ # https://github.com/open-mmlab/mmsegmentation/issues/694
+ for hook in runner._hooks:
+ if isinstance(hook, LoggerHook):
+ hook.after_train_iter(runner)
+ runner.log_buffer.clear()
+
+ self._do_evaluate(runner)
+
+ def after_train_epoch(self, runner):
+ """Called after every training epoch to evaluate the results."""
+ if self.by_epoch and self._should_evaluate(runner):
+ self._do_evaluate(runner)
+
+ def _do_evaluate(self, runner):
+ """perform evaluation and save ckpt."""
+ results = self.test_fn(runner.model, self.dataloader)
+ runner.log_buffer.output['eval_iter_num'] = len(self.dataloader)
+ key_score = self.evaluate(runner, results)
+        # ``key_score`` may be ``None``, in which case saving the best
+        # checkpoint is skipped
+ if self.save_best and key_score:
+ self._save_ckpt(runner, key_score)
+
+ def _should_evaluate(self, runner):
+ """Judge whether to perform evaluation.
+
+        Evaluation is skipped in the following cases:
+        1. The current epoch/iteration does not fall on an interval boundary,
+           which is determined by ``self.interval``.
+        2. The start time ``self.start`` is larger than the current
+           epoch/iteration.
+        3. The current epoch/iteration is past ``self.start`` but does not
+           fall on an interval boundary counted from ``self.start``.
+
+ Returns:
+ bool: The flag indicating whether to perform evaluation.
+ """
+ if self.by_epoch:
+ current = runner.epoch
+ check_time = self.every_n_epochs
+ else:
+ current = runner.iter
+ check_time = self.every_n_iters
+
+ if self.start is None:
+ if not check_time(runner, self.interval):
+ # No evaluation during the interval.
+ return False
+ elif (current + 1) < self.start:
+ # No evaluation if start is larger than the current time.
+ return False
+ else:
+ # Evaluation only at epochs/iters 3, 5, 7...
+ # if start==3 and interval==2
+ if (current + 1 - self.start) % self.interval:
+ return False
+ return True
+
+ def _save_ckpt(self, runner, key_score):
+ """Save the best checkpoint.
+
+ It will compare the score according to the compare function, write
+ related information (best score, best checkpoint path) and save the
+ best checkpoint into ``work_dir``.
+ """
+ if self.by_epoch:
+ current = f'epoch_{runner.epoch + 1}'
+ cur_type, cur_time = 'epoch', runner.epoch + 1
+ else:
+ current = f'iter_{runner.iter + 1}'
+ cur_type, cur_time = 'iter', runner.iter + 1
+
+ best_score = runner.meta['hook_msgs'].get(
+ 'best_score', self.init_value_map[self.rule])
+ if self.compare_func(key_score, best_score):
+ best_score = key_score
+ runner.meta['hook_msgs']['best_score'] = best_score
+
+ if self.best_ckpt_path and self.file_client.isfile(
+ self.best_ckpt_path):
+ self.file_client.remove(self.best_ckpt_path)
+ runner.logger.info(
+ (f'The previous best checkpoint {self.best_ckpt_path} was '
+ 'removed'))
+
+ best_ckpt_name = f'best_{self.key_indicator}_{current}.pth'
+ self.best_ckpt_path = self.file_client.join_path(
+ self.out_dir, best_ckpt_name)
+ runner.meta['hook_msgs']['best_ckpt'] = self.best_ckpt_path
+
+ runner.save_checkpoint(
+ self.out_dir, best_ckpt_name, create_symlink=False)
+ runner.logger.info(
+ f'Now best checkpoint is saved as {best_ckpt_name}.')
+ runner.logger.info(
+ f'Best {self.key_indicator} is {best_score:0.4f} '
+ f'at {cur_time} {cur_type}.')
+
+ def evaluate(self, runner, results):
+ """Evaluate the results.
+
+ Args:
+            runner (:obj:`mmcv.Runner`): The underlying training runner.
+ results (list): Output results.
+ """
+ eval_res = self.dataloader.dataset.evaluate(
+ results, logger=runner.logger, **self.eval_kwargs)
+
+ for name, val in eval_res.items():
+ runner.log_buffer.output[name] = val
+ runner.log_buffer.ready = True
+
+ if self.save_best is not None:
+            # If the performance of the model is poor, `eval_res` may be an
+            # empty dict, which would raise an exception when `self.save_best`
+            # is not None. More details at
+            # https://github.com/open-mmlab/mmdetection/issues/6265.
+ if not eval_res:
+ warnings.warn(
+ 'Since `eval_res` is an empty dict, the behavior to save '
+ 'the best checkpoint will be skipped in this evaluation.')
+ return None
+
+ if self.key_indicator == 'auto':
+ # infer from eval_results
+ self._init_rule(self.rule, list(eval_res.keys())[0])
+ return eval_res[self.key_indicator]
+
+ return None
+
+
+class DistEvalHook(EvalHook):
+ """Distributed evaluation hook.
+
+ This hook will regularly perform evaluation in a given interval when
+ performing in distributed environment.
+
+ Args:
+ dataloader (DataLoader): A PyTorch dataloader, whose dataset has
+ implemented ``evaluate`` function.
+ start (int | None, optional): Evaluation starting epoch. It enables
+ evaluation before the training starts if ``start`` <= the resuming
+ epoch. If None, whether to evaluate is merely decided by
+ ``interval``. Default: None.
+ interval (int): Evaluation interval. Default: 1.
+        by_epoch (bool): Whether to perform evaluation by epoch or by
+            iteration. If True, it will be performed after each epoch;
+            otherwise after each iteration. Default: True.
+        save_best (str, optional): If a metric is specified, the best
+            checkpoint during evaluation will be tracked by that metric. The
+            information about the best checkpoint is saved in
+            ``runner.meta['hook_msgs']`` to keep the best score and the best
+            checkpoint path, and is also loaded when resuming from a
+            checkpoint. Options are the evaluation metrics on the test
+            dataset, e.g., ``bbox_mAP``, ``segm_mAP`` for bbox detection and
+            instance segmentation, or ``AR@100`` for proposal recall. If
+            ``save_best`` is ``auto``, the first key of the returned
+            ``OrderedDict`` result will be used. Default: None.
+        rule (str | None, optional): Comparison rule for best score. If set
+            to None, a reasonable rule will be inferred: keys such as 'acc'
+            or 'top' infer the 'greater' rule, while keys containing 'loss'
+            infer the 'less' rule. Options are 'greater', 'less', None.
+            Default: None.
+        test_fn (callable, optional): Tests a model with samples from a
+            dataloader in a multi-gpu manner and returns the test results. If
+            ``None``, the default test function ``mmcv.engine.multi_gpu_test``
+            will be used. Default: ``None``.
+ tmpdir (str | None): Temporary directory to save the results of all
+ processes. Default: None.
+ gpu_collect (bool): Whether to use gpu or cpu to collect results.
+ Default: False.
+        broadcast_bn_buffer (bool): Whether to broadcast the buffers
+            (running_mean and running_var) of rank 0 to the other ranks
+            before evaluation. Default: True.
+ out_dir (str, optional): The root directory to save checkpoints. If not
+ specified, `runner.work_dir` will be used by default. If specified,
+ the `out_dir` will be the concatenation of `out_dir` and the last
+ level directory of `runner.work_dir`.
+ file_client_args (dict): Arguments to instantiate a FileClient.
+ See :class:`mmcv.fileio.FileClient` for details. Default: None.
+ **eval_kwargs: Evaluation arguments fed into the evaluate function of
+ the dataset.
+ """
+
+ def __init__(self,
+ dataloader,
+ start=None,
+ interval=1,
+ by_epoch=True,
+ save_best=None,
+ rule=None,
+ test_fn=None,
+ greater_keys=None,
+ less_keys=None,
+ broadcast_bn_buffer=True,
+ tmpdir=None,
+ gpu_collect=False,
+ out_dir=None,
+ file_client_args=None,
+ **eval_kwargs):
+
+ if test_fn is None:
+ from annotator.uniformer.mmcv.engine import multi_gpu_test
+ test_fn = multi_gpu_test
+
+ super().__init__(
+ dataloader,
+ start=start,
+ interval=interval,
+ by_epoch=by_epoch,
+ save_best=save_best,
+ rule=rule,
+ test_fn=test_fn,
+ greater_keys=greater_keys,
+ less_keys=less_keys,
+ out_dir=out_dir,
+ file_client_args=file_client_args,
+ **eval_kwargs)
+
+ self.broadcast_bn_buffer = broadcast_bn_buffer
+ self.tmpdir = tmpdir
+ self.gpu_collect = gpu_collect
+
+ def _do_evaluate(self, runner):
+ """perform evaluation and save ckpt."""
+ # Synchronization of BatchNorm's buffer (running_mean
+ # and running_var) is not supported in the DDP of pytorch,
+ # which may cause the inconsistent performance of models in
+ # different ranks, so we broadcast BatchNorm's buffers
+ # of rank 0 to other ranks to avoid this.
+ if self.broadcast_bn_buffer:
+ model = runner.model
+ for name, module in model.named_modules():
+ if isinstance(module,
+ _BatchNorm) and module.track_running_stats:
+ dist.broadcast(module.running_var, 0)
+ dist.broadcast(module.running_mean, 0)
+
+ tmpdir = self.tmpdir
+ if tmpdir is None:
+ tmpdir = osp.join(runner.work_dir, '.eval_hook')
+
+ results = self.test_fn(
+ runner.model,
+ self.dataloader,
+ tmpdir=tmpdir,
+ gpu_collect=self.gpu_collect)
+ if runner.rank == 0:
+ print('\n')
+ runner.log_buffer.output['eval_iter_num'] = len(self.dataloader)
+ key_score = self.evaluate(runner, results)
+            # ``key_score`` may be ``None``, in which case saving the best
+            # checkpoint is skipped
+ if self.save_best and key_score:
+ self._save_ckpt(runner, key_score)
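+
+
+# Illustrative sketch (not part of upstream mmcv): the start/interval
+# arithmetic used by ``EvalHook._should_evaluate`` above, written as a
+# standalone helper so the schedule is easy to verify by hand. ``current``
+# is the 0-based epoch or iteration counter kept by the runner.
+def _would_evaluate(current, start, interval):
+    if start is None:
+        # plain interval check, same as ``every_n_epochs`` / ``every_n_iters``
+        return (current + 1) % interval == 0
+    if (current + 1) < start:
+        # never evaluate before ``start``
+        return False
+    # evaluate at ``start`` and then every ``interval`` steps after it
+    return (current + 1 - start) % interval == 0
+
+
+if __name__ == '__main__':
+    # Guarded so nothing runs on a normal import. With start=3 and
+    # interval=2, evaluation runs at epochs/iters 3, 5, 7 and 9.
+    print([c + 1 for c in range(10) if _would_evaluate(c, 3, 2)])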
diff --git a/annotator/uniformer/mmcv/runner/hooks/hook.py b/annotator/uniformer/mmcv/runner/hooks/hook.py
new file mode 100644
index 0000000000000000000000000000000000000000..b8855c107727ecf85b917c890fc8b7f6359238a4
--- /dev/null
+++ b/annotator/uniformer/mmcv/runner/hooks/hook.py
@@ -0,0 +1,92 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from annotator.uniformer.mmcv.utils import Registry, is_method_overridden
+
+HOOKS = Registry('hook')
+
+
+class Hook:
+ stages = ('before_run', 'before_train_epoch', 'before_train_iter',
+ 'after_train_iter', 'after_train_epoch', 'before_val_epoch',
+ 'before_val_iter', 'after_val_iter', 'after_val_epoch',
+ 'after_run')
+
+ def before_run(self, runner):
+ pass
+
+ def after_run(self, runner):
+ pass
+
+ def before_epoch(self, runner):
+ pass
+
+ def after_epoch(self, runner):
+ pass
+
+ def before_iter(self, runner):
+ pass
+
+ def after_iter(self, runner):
+ pass
+
+ def before_train_epoch(self, runner):
+ self.before_epoch(runner)
+
+ def before_val_epoch(self, runner):
+ self.before_epoch(runner)
+
+ def after_train_epoch(self, runner):
+ self.after_epoch(runner)
+
+ def after_val_epoch(self, runner):
+ self.after_epoch(runner)
+
+ def before_train_iter(self, runner):
+ self.before_iter(runner)
+
+ def before_val_iter(self, runner):
+ self.before_iter(runner)
+
+ def after_train_iter(self, runner):
+ self.after_iter(runner)
+
+ def after_val_iter(self, runner):
+ self.after_iter(runner)
+
+ def every_n_epochs(self, runner, n):
+ return (runner.epoch + 1) % n == 0 if n > 0 else False
+
+ def every_n_inner_iters(self, runner, n):
+ return (runner.inner_iter + 1) % n == 0 if n > 0 else False
+
+ def every_n_iters(self, runner, n):
+ return (runner.iter + 1) % n == 0 if n > 0 else False
+
+ def end_of_epoch(self, runner):
+ return runner.inner_iter + 1 == len(runner.data_loader)
+
+ def is_last_epoch(self, runner):
+ return runner.epoch + 1 == runner._max_epochs
+
+ def is_last_iter(self, runner):
+ return runner.iter + 1 == runner._max_iters
+
+ def get_triggered_stages(self):
+ trigger_stages = set()
+ for stage in Hook.stages:
+ if is_method_overridden(stage, Hook, self):
+ trigger_stages.add(stage)
+
+        # some methods are triggered in multiple stages;
+        # use this dict to map each method to its stages.
+ method_stages_map = {
+ 'before_epoch': ['before_train_epoch', 'before_val_epoch'],
+ 'after_epoch': ['after_train_epoch', 'after_val_epoch'],
+ 'before_iter': ['before_train_iter', 'before_val_iter'],
+ 'after_iter': ['after_train_iter', 'after_val_iter'],
+ }
+
+ for method, map_stages in method_stages_map.items():
+ if is_method_overridden(method, Hook, self):
+ trigger_stages.update(map_stages)
+
+ return [stage for stage in Hook.stages if stage in trigger_stages]
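+
+
+if __name__ == '__main__':
+    # Illustrative sketch (not part of upstream mmcv): define a toy hook,
+    # register it in the HOOKS registry and inspect the stages it would be
+    # triggered in. Guarded so nothing here runs on a normal import.
+    @HOOKS.register_module()
+    class ToyIterHook(Hook):
+        """Toy hook that only reacts after each training iteration."""
+
+        def after_train_iter(self, runner):
+            pass
+
+    # Only the overridden stage is reported.
+    print(ToyIterHook().get_triggered_stages())  # ['after_train_iter']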
diff --git a/annotator/uniformer/mmcv/runner/hooks/iter_timer.py b/annotator/uniformer/mmcv/runner/hooks/iter_timer.py
new file mode 100644
index 0000000000000000000000000000000000000000..cfd5002fe85ffc6992155ac01003878064a1d9be
--- /dev/null
+++ b/annotator/uniformer/mmcv/runner/hooks/iter_timer.py
@@ -0,0 +1,18 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import time
+
+from .hook import HOOKS, Hook
+
+
+@HOOKS.register_module()
+class IterTimerHook(Hook):
+
+ def before_epoch(self, runner):
+ self.t = time.time()
+
+ def before_iter(self, runner):
+ runner.log_buffer.update({'data_time': time.time() - self.t})
+
+ def after_iter(self, runner):
+ runner.log_buffer.update({'time': time.time() - self.t})
+ self.t = time.time()
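+
+# Note (illustrative, not part of upstream mmcv): ``data_time`` measures the
+# gap between the end of the previous iteration and the start of the current
+# one (i.e. the time spent loading data), while ``time`` measures the full
+# duration of one iteration, data loading included, because ``self.t`` is
+# only reset in ``after_iter``.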
diff --git a/annotator/uniformer/mmcv/runner/hooks/logger/__init__.py b/annotator/uniformer/mmcv/runner/hooks/logger/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..a0b6b345640a895368ac8a647afef6f24333d90e
--- /dev/null
+++ b/annotator/uniformer/mmcv/runner/hooks/logger/__init__.py
@@ -0,0 +1,15 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from .base import LoggerHook
+from .dvclive import DvcliveLoggerHook
+from .mlflow import MlflowLoggerHook
+from .neptune import NeptuneLoggerHook
+from .pavi import PaviLoggerHook
+from .tensorboard import TensorboardLoggerHook
+from .text import TextLoggerHook
+from .wandb import WandbLoggerHook
+
+__all__ = [
+ 'LoggerHook', 'MlflowLoggerHook', 'PaviLoggerHook',
+ 'TensorboardLoggerHook', 'TextLoggerHook', 'WandbLoggerHook',
+ 'NeptuneLoggerHook', 'DvcliveLoggerHook'
+]
diff --git a/annotator/uniformer/mmcv/runner/hooks/logger/__pycache__/__init__.cpython-310.pyc b/annotator/uniformer/mmcv/runner/hooks/logger/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..020dc18167c3213ec38080dd3b85a55aebec022e
Binary files /dev/null and b/annotator/uniformer/mmcv/runner/hooks/logger/__pycache__/__init__.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/runner/hooks/logger/__pycache__/__init__.cpython-38.pyc b/annotator/uniformer/mmcv/runner/hooks/logger/__pycache__/__init__.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..37dd911dc29b88e9f8b68715158575d97edc8345
Binary files /dev/null and b/annotator/uniformer/mmcv/runner/hooks/logger/__pycache__/__init__.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/runner/hooks/logger/__pycache__/base.cpython-310.pyc b/annotator/uniformer/mmcv/runner/hooks/logger/__pycache__/base.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..33fa934346d1c9237bc9cd212ecfc10db4062df1
Binary files /dev/null and b/annotator/uniformer/mmcv/runner/hooks/logger/__pycache__/base.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/runner/hooks/logger/__pycache__/base.cpython-38.pyc b/annotator/uniformer/mmcv/runner/hooks/logger/__pycache__/base.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bde19eb31528ab0fd48a57958d265a79100fda7b
Binary files /dev/null and b/annotator/uniformer/mmcv/runner/hooks/logger/__pycache__/base.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/runner/hooks/logger/__pycache__/dvclive.cpython-310.pyc b/annotator/uniformer/mmcv/runner/hooks/logger/__pycache__/dvclive.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..df01dd47bd3b43c40b651b67365817c367ec4448
Binary files /dev/null and b/annotator/uniformer/mmcv/runner/hooks/logger/__pycache__/dvclive.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/runner/hooks/logger/__pycache__/dvclive.cpython-38.pyc b/annotator/uniformer/mmcv/runner/hooks/logger/__pycache__/dvclive.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5bb65d46e1c1c18c786a26e31fd48a51c95994b1
Binary files /dev/null and b/annotator/uniformer/mmcv/runner/hooks/logger/__pycache__/dvclive.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/runner/hooks/logger/__pycache__/mlflow.cpython-310.pyc b/annotator/uniformer/mmcv/runner/hooks/logger/__pycache__/mlflow.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4e4f8ba5e35f73a0c9312fc5421042883b2b7dcd
Binary files /dev/null and b/annotator/uniformer/mmcv/runner/hooks/logger/__pycache__/mlflow.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/runner/hooks/logger/__pycache__/mlflow.cpython-38.pyc b/annotator/uniformer/mmcv/runner/hooks/logger/__pycache__/mlflow.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9996ba8ef40b266c5357d00273fe93198ae1aeaf
Binary files /dev/null and b/annotator/uniformer/mmcv/runner/hooks/logger/__pycache__/mlflow.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/runner/hooks/logger/__pycache__/neptune.cpython-310.pyc b/annotator/uniformer/mmcv/runner/hooks/logger/__pycache__/neptune.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1ae4c8596bcecfc4926071c66f8b31cddebf9353
Binary files /dev/null and b/annotator/uniformer/mmcv/runner/hooks/logger/__pycache__/neptune.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/runner/hooks/logger/__pycache__/neptune.cpython-38.pyc b/annotator/uniformer/mmcv/runner/hooks/logger/__pycache__/neptune.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fecb4f347d342c2214ec862419b224754d013ef3
Binary files /dev/null and b/annotator/uniformer/mmcv/runner/hooks/logger/__pycache__/neptune.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/runner/hooks/logger/__pycache__/pavi.cpython-310.pyc b/annotator/uniformer/mmcv/runner/hooks/logger/__pycache__/pavi.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..38a479090816f6b14540bd9fabc0ff20d0cd061d
Binary files /dev/null and b/annotator/uniformer/mmcv/runner/hooks/logger/__pycache__/pavi.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/runner/hooks/logger/__pycache__/pavi.cpython-38.pyc b/annotator/uniformer/mmcv/runner/hooks/logger/__pycache__/pavi.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..27391b3cb49f3cdfdcf8112da4488105f6c73b01
Binary files /dev/null and b/annotator/uniformer/mmcv/runner/hooks/logger/__pycache__/pavi.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/runner/hooks/logger/__pycache__/tensorboard.cpython-310.pyc b/annotator/uniformer/mmcv/runner/hooks/logger/__pycache__/tensorboard.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1dc06c3945636bbd53364e9761c5c89ee5d1d22e
Binary files /dev/null and b/annotator/uniformer/mmcv/runner/hooks/logger/__pycache__/tensorboard.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/runner/hooks/logger/__pycache__/tensorboard.cpython-38.pyc b/annotator/uniformer/mmcv/runner/hooks/logger/__pycache__/tensorboard.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c54f60a1278d0186b70fe0d3bde6417e63397de7
Binary files /dev/null and b/annotator/uniformer/mmcv/runner/hooks/logger/__pycache__/tensorboard.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/runner/hooks/logger/__pycache__/text.cpython-310.pyc b/annotator/uniformer/mmcv/runner/hooks/logger/__pycache__/text.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b009ba35d05faeb7b3dd99b228ed493e910891e7
Binary files /dev/null and b/annotator/uniformer/mmcv/runner/hooks/logger/__pycache__/text.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/runner/hooks/logger/__pycache__/text.cpython-38.pyc b/annotator/uniformer/mmcv/runner/hooks/logger/__pycache__/text.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3a0b0fb2c7b77821ad1ab997ccacd114b93d11ca
Binary files /dev/null and b/annotator/uniformer/mmcv/runner/hooks/logger/__pycache__/text.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/runner/hooks/logger/__pycache__/wandb.cpython-310.pyc b/annotator/uniformer/mmcv/runner/hooks/logger/__pycache__/wandb.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..46a4b3323e4eea0129e8e40b2f83b2d16f63385b
Binary files /dev/null and b/annotator/uniformer/mmcv/runner/hooks/logger/__pycache__/wandb.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/runner/hooks/logger/__pycache__/wandb.cpython-38.pyc b/annotator/uniformer/mmcv/runner/hooks/logger/__pycache__/wandb.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..914c11d03e816a4c770f97376a2415e2cea8ba83
Binary files /dev/null and b/annotator/uniformer/mmcv/runner/hooks/logger/__pycache__/wandb.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/runner/hooks/logger/base.py b/annotator/uniformer/mmcv/runner/hooks/logger/base.py
new file mode 100644
index 0000000000000000000000000000000000000000..f845256729458ced821762a1b8ef881e17ff9955
--- /dev/null
+++ b/annotator/uniformer/mmcv/runner/hooks/logger/base.py
@@ -0,0 +1,166 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import numbers
+from abc import ABCMeta, abstractmethod
+
+import numpy as np
+import torch
+
+from ..hook import Hook
+
+
+class LoggerHook(Hook):
+ """Base class for logger hooks.
+
+ Args:
+ interval (int): Logging interval (every k iterations).
+        ignore_last (bool): Ignore the log of the last iterations in each
+            epoch if their number is less than `interval`.
+ reset_flag (bool): Whether to clear the output buffer after logging.
+ by_epoch (bool): Whether EpochBasedRunner is used.
+ """
+
+ __metaclass__ = ABCMeta
+
+ def __init__(self,
+ interval=10,
+ ignore_last=True,
+ reset_flag=False,
+ by_epoch=True):
+ self.interval = interval
+ self.ignore_last = ignore_last
+ self.reset_flag = reset_flag
+ self.by_epoch = by_epoch
+
+ @abstractmethod
+ def log(self, runner):
+ pass
+
+ @staticmethod
+ def is_scalar(val, include_np=True, include_torch=True):
+ """Tell the input variable is a scalar or not.
+
+ Args:
+ val: Input variable.
+ include_np (bool): Whether include 0-d np.ndarray as a scalar.
+ include_torch (bool): Whether include 0-d torch.Tensor as a scalar.
+
+ Returns:
+ bool: True or False.
+ """
+ if isinstance(val, numbers.Number):
+ return True
+ elif include_np and isinstance(val, np.ndarray) and val.ndim == 0:
+ return True
+ elif include_torch and isinstance(val, torch.Tensor) and len(val) == 1:
+ return True
+ else:
+ return False
+
+ def get_mode(self, runner):
+ if runner.mode == 'train':
+ if 'time' in runner.log_buffer.output:
+ mode = 'train'
+ else:
+ mode = 'val'
+ elif runner.mode == 'val':
+ mode = 'val'
+ else:
+ raise ValueError(f"runner mode should be 'train' or 'val', "
+ f'but got {runner.mode}')
+ return mode
+
+ def get_epoch(self, runner):
+ if runner.mode == 'train':
+ epoch = runner.epoch + 1
+ elif runner.mode == 'val':
+ # normal val mode
+ # runner.epoch += 1 has been done before val workflow
+ epoch = runner.epoch
+ else:
+ raise ValueError(f"runner mode should be 'train' or 'val', "
+ f'but got {runner.mode}')
+ return epoch
+
+ def get_iter(self, runner, inner_iter=False):
+ """Get the current training iteration step."""
+ if self.by_epoch and inner_iter:
+ current_iter = runner.inner_iter + 1
+ else:
+ current_iter = runner.iter + 1
+ return current_iter
+
+ def get_lr_tags(self, runner):
+ tags = {}
+ lrs = runner.current_lr()
+ if isinstance(lrs, dict):
+ for name, value in lrs.items():
+ tags[f'learning_rate/{name}'] = value[0]
+ else:
+ tags['learning_rate'] = lrs[0]
+ return tags
+
+ def get_momentum_tags(self, runner):
+ tags = {}
+ momentums = runner.current_momentum()
+ if isinstance(momentums, dict):
+ for name, value in momentums.items():
+ tags[f'momentum/{name}'] = value[0]
+ else:
+ tags['momentum'] = momentums[0]
+ return tags
+
+ def get_loggable_tags(self,
+ runner,
+ allow_scalar=True,
+ allow_text=False,
+ add_mode=True,
+ tags_to_skip=('time', 'data_time')):
+ tags = {}
+ for var, val in runner.log_buffer.output.items():
+ if var in tags_to_skip:
+ continue
+ if self.is_scalar(val) and not allow_scalar:
+ continue
+ if isinstance(val, str) and not allow_text:
+ continue
+ if add_mode:
+ var = f'{self.get_mode(runner)}/{var}'
+ tags[var] = val
+ tags.update(self.get_lr_tags(runner))
+ tags.update(self.get_momentum_tags(runner))
+ return tags
+
+ def before_run(self, runner):
+ for hook in runner.hooks[::-1]:
+ if isinstance(hook, LoggerHook):
+ hook.reset_flag = True
+ break
+
+ def before_epoch(self, runner):
+ runner.log_buffer.clear() # clear logs of last epoch
+
+ def after_train_iter(self, runner):
+ if self.by_epoch and self.every_n_inner_iters(runner, self.interval):
+ runner.log_buffer.average(self.interval)
+ elif not self.by_epoch and self.every_n_iters(runner, self.interval):
+ runner.log_buffer.average(self.interval)
+ elif self.end_of_epoch(runner) and not self.ignore_last:
+ # not precise but more stable
+ runner.log_buffer.average(self.interval)
+
+ if runner.log_buffer.ready:
+ self.log(runner)
+ if self.reset_flag:
+ runner.log_buffer.clear_output()
+
+ def after_train_epoch(self, runner):
+ if runner.log_buffer.ready:
+ self.log(runner)
+ if self.reset_flag:
+ runner.log_buffer.clear_output()
+
+ def after_val_epoch(self, runner):
+ runner.log_buffer.average()
+ self.log(runner)
+ if self.reset_flag:
+ runner.log_buffer.clear_output()
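+
+
+if __name__ == '__main__':
+    # Illustrative sketch (not part of upstream mmcv): the smallest possible
+    # concrete logger hook. A subclass only needs to implement ``log`` and
+    # typically builds its payload from ``get_loggable_tags(runner)``, which
+    # yields entries such as {'train/loss': 0.42, 'learning_rate': 1e-4}.
+    # Guarded so nothing here runs on a normal import.
+    class PrintLoggerHook(LoggerHook):
+
+        def log(self, runner):
+            for tag, val in self.get_loggable_tags(runner).items():
+                print(f'{tag}: {val}')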
diff --git a/annotator/uniformer/mmcv/runner/hooks/logger/dvclive.py b/annotator/uniformer/mmcv/runner/hooks/logger/dvclive.py
new file mode 100644
index 0000000000000000000000000000000000000000..687cdc58c0336c92b1e4f9a410ba67ebaab2bc7a
--- /dev/null
+++ b/annotator/uniformer/mmcv/runner/hooks/logger/dvclive.py
@@ -0,0 +1,58 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from ...dist_utils import master_only
+from ..hook import HOOKS
+from .base import LoggerHook
+
+
+@HOOKS.register_module()
+class DvcliveLoggerHook(LoggerHook):
+ """Class to log metrics with dvclive.
+
+ It requires `dvclive`_ to be installed.
+
+ Args:
+ path (str): Directory where dvclive will write TSV log files.
+ interval (int): Logging interval (every k iterations).
+ Default 10.
+ ignore_last (bool): Ignore the log of last iterations in each epoch
+ if less than `interval`.
+ Default: True.
+ reset_flag (bool): Whether to clear the output buffer after logging.
+ Default: True.
+ by_epoch (bool): Whether EpochBasedRunner is used.
+ Default: True.
+
+ .. _dvclive:
+ https://dvc.org/doc/dvclive
+ """
+
+ def __init__(self,
+ path,
+ interval=10,
+ ignore_last=True,
+ reset_flag=True,
+ by_epoch=True):
+
+ super(DvcliveLoggerHook, self).__init__(interval, ignore_last,
+ reset_flag, by_epoch)
+ self.path = path
+ self.import_dvclive()
+
+ def import_dvclive(self):
+ try:
+ import dvclive
+ except ImportError:
+ raise ImportError(
+ 'Please run "pip install dvclive" to install dvclive')
+ self.dvclive = dvclive
+
+ @master_only
+ def before_run(self, runner):
+ self.dvclive.init(self.path)
+
+ @master_only
+ def log(self, runner):
+ tags = self.get_loggable_tags(runner)
+ if tags:
+ for k, v in tags.items():
+ self.dvclive.log(k, v, step=self.get_iter(runner))
diff --git a/annotator/uniformer/mmcv/runner/hooks/logger/mlflow.py b/annotator/uniformer/mmcv/runner/hooks/logger/mlflow.py
new file mode 100644
index 0000000000000000000000000000000000000000..f9a72592be47b534ce22573775fd5a7e8e86d72d
--- /dev/null
+++ b/annotator/uniformer/mmcv/runner/hooks/logger/mlflow.py
@@ -0,0 +1,78 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from ...dist_utils import master_only
+from ..hook import HOOKS
+from .base import LoggerHook
+
+
+@HOOKS.register_module()
+class MlflowLoggerHook(LoggerHook):
+
+ def __init__(self,
+ exp_name=None,
+ tags=None,
+ log_model=True,
+ interval=10,
+ ignore_last=True,
+ reset_flag=False,
+ by_epoch=True):
+ """Class to log metrics and (optionally) a trained model to MLflow.
+
+ It requires `MLflow`_ to be installed.
+
+ Args:
+ exp_name (str, optional): Name of the experiment to be used.
+ Default None.
+ If not None, set the active experiment.
+                If the experiment does not exist, an experiment with the
+                provided name will be created.
+ tags (dict of str: str, optional): Tags for the current run.
+ Default None.
+ If not None, set tags for the current run.
+ log_model (bool, optional): Whether to log an MLflow artifact.
+ Default True.
+ If True, log runner.model as an MLflow artifact
+ for the current run.
+ interval (int): Logging interval (every k iterations).
+ ignore_last (bool): Ignore the log of last iterations in each epoch
+ if less than `interval`.
+ reset_flag (bool): Whether to clear the output buffer after logging
+ by_epoch (bool): Whether EpochBasedRunner is used.
+
+ .. _MLflow:
+ https://www.mlflow.org/docs/latest/index.html
+ """
+ super(MlflowLoggerHook, self).__init__(interval, ignore_last,
+ reset_flag, by_epoch)
+ self.import_mlflow()
+ self.exp_name = exp_name
+ self.tags = tags
+ self.log_model = log_model
+
+ def import_mlflow(self):
+ try:
+ import mlflow
+ import mlflow.pytorch as mlflow_pytorch
+ except ImportError:
+ raise ImportError(
+ 'Please run "pip install mlflow" to install mlflow')
+ self.mlflow = mlflow
+ self.mlflow_pytorch = mlflow_pytorch
+
+ @master_only
+ def before_run(self, runner):
+ super(MlflowLoggerHook, self).before_run(runner)
+ if self.exp_name is not None:
+ self.mlflow.set_experiment(self.exp_name)
+ if self.tags is not None:
+ self.mlflow.set_tags(self.tags)
+
+ @master_only
+ def log(self, runner):
+ tags = self.get_loggable_tags(runner)
+ if tags:
+ self.mlflow.log_metrics(tags, step=self.get_iter(runner))
+
+ @master_only
+ def after_run(self, runner):
+ if self.log_model:
+ self.mlflow_pytorch.log_model(runner.model, 'models')
diff --git a/annotator/uniformer/mmcv/runner/hooks/logger/neptune.py b/annotator/uniformer/mmcv/runner/hooks/logger/neptune.py
new file mode 100644
index 0000000000000000000000000000000000000000..7a38772b0c93a8608f32c6357b8616e77c139dc9
--- /dev/null
+++ b/annotator/uniformer/mmcv/runner/hooks/logger/neptune.py
@@ -0,0 +1,82 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from ...dist_utils import master_only
+from ..hook import HOOKS
+from .base import LoggerHook
+
+
+@HOOKS.register_module()
+class NeptuneLoggerHook(LoggerHook):
+ """Class to log metrics to NeptuneAI.
+
+ It requires `neptune-client` to be installed.
+
+ Args:
+        init_kwargs (dict): a dict containing the initialization keys below:
+ - project (str): Name of a project in a form of
+ namespace/project_name. If None, the value of
+ NEPTUNE_PROJECT environment variable will be taken.
+ - api_token (str): User’s API token.
+ If None, the value of NEPTUNE_API_TOKEN environment
+ variable will be taken. Note: It is strongly recommended
+ to use NEPTUNE_API_TOKEN environment variable rather than
+ placing your API token in plain text in your source code.
+ - name (str, optional, default is 'Untitled'): Editable name of
+ the run. Name is displayed in the run's Details and in
+ Runs table as a column.
+ Check https://docs.neptune.ai/api-reference/neptune#init for
+ more init arguments.
+ interval (int): Logging interval (every k iterations).
+ ignore_last (bool): Ignore the log of last iterations in each epoch
+ if less than `interval`.
+ reset_flag (bool): Whether to clear the output buffer after logging
+ by_epoch (bool): Whether EpochBasedRunner is used.
+
+ .. _NeptuneAI:
+ https://docs.neptune.ai/you-should-know/logging-metadata
+ """
+
+ def __init__(self,
+ init_kwargs=None,
+ interval=10,
+ ignore_last=True,
+ reset_flag=True,
+ with_step=True,
+ by_epoch=True):
+
+ super(NeptuneLoggerHook, self).__init__(interval, ignore_last,
+ reset_flag, by_epoch)
+ self.import_neptune()
+ self.init_kwargs = init_kwargs
+ self.with_step = with_step
+
+ def import_neptune(self):
+ try:
+ import neptune.new as neptune
+ except ImportError:
+ raise ImportError(
+ 'Please run "pip install neptune-client" to install neptune')
+ self.neptune = neptune
+ self.run = None
+
+ @master_only
+ def before_run(self, runner):
+ if self.init_kwargs:
+ self.run = self.neptune.init(**self.init_kwargs)
+ else:
+ self.run = self.neptune.init()
+
+ @master_only
+ def log(self, runner):
+ tags = self.get_loggable_tags(runner)
+ if tags:
+            if not self.with_step:
+                # add the step outside the loop: mutating ``tags`` while
+                # iterating over it would raise a RuntimeError
+                tags['global_step'] = self.get_iter(runner)
+            for tag_name, tag_value in tags.items():
+                if self.with_step:
+                    self.run[tag_name].log(
+                        tag_value, step=self.get_iter(runner))
+                else:
+                    self.run[tag_name].log(tags)
+
+ @master_only
+ def after_run(self, runner):
+ self.run.stop()
diff --git a/annotator/uniformer/mmcv/runner/hooks/logger/pavi.py b/annotator/uniformer/mmcv/runner/hooks/logger/pavi.py
new file mode 100644
index 0000000000000000000000000000000000000000..1dcf146d8163aff1363e9764999b0a74d674a595
--- /dev/null
+++ b/annotator/uniformer/mmcv/runner/hooks/logger/pavi.py
@@ -0,0 +1,117 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import json
+import os
+import os.path as osp
+
+import torch
+import yaml
+
+import annotator.uniformer.mmcv as mmcv
+from ....parallel.utils import is_module_wrapper
+from ...dist_utils import master_only
+from ..hook import HOOKS
+from .base import LoggerHook
+
+
+@HOOKS.register_module()
+class PaviLoggerHook(LoggerHook):
+
+ def __init__(self,
+ init_kwargs=None,
+ add_graph=False,
+ add_last_ckpt=False,
+ interval=10,
+ ignore_last=True,
+ reset_flag=False,
+ by_epoch=True,
+ img_key='img_info'):
+ super(PaviLoggerHook, self).__init__(interval, ignore_last, reset_flag,
+ by_epoch)
+ self.init_kwargs = init_kwargs
+ self.add_graph = add_graph
+ self.add_last_ckpt = add_last_ckpt
+ self.img_key = img_key
+
+ @master_only
+ def before_run(self, runner):
+ super(PaviLoggerHook, self).before_run(runner)
+ try:
+ from pavi import SummaryWriter
+ except ImportError:
+ raise ImportError('Please run "pip install pavi" to install pavi.')
+
+ self.run_name = runner.work_dir.split('/')[-1]
+
+ if not self.init_kwargs:
+ self.init_kwargs = dict()
+ self.init_kwargs['name'] = self.run_name
+ self.init_kwargs['model'] = runner._model_name
+ if runner.meta is not None:
+ if 'config_dict' in runner.meta:
+ config_dict = runner.meta['config_dict']
+ assert isinstance(
+ config_dict,
+                    dict), ('meta["config_dict"] has to be a dict, '
+ f'but got {type(config_dict)}')
+ elif 'config_file' in runner.meta:
+ config_file = runner.meta['config_file']
+ config_dict = dict(mmcv.Config.fromfile(config_file))
+ else:
+ config_dict = None
+ if config_dict is not None:
+ # 'max_.*iter' is parsed in pavi sdk as the maximum iterations
+ # to properly set up the progress bar.
+ config_dict = config_dict.copy()
+ config_dict.setdefault('max_iter', runner.max_iters)
+ # non-serializable values are first converted in
+ # mmcv.dump to json
+ config_dict = json.loads(
+ mmcv.dump(config_dict, file_format='json'))
+ session_text = yaml.dump(config_dict)
+ self.init_kwargs['session_text'] = session_text
+ self.writer = SummaryWriter(**self.init_kwargs)
+
+ def get_step(self, runner):
+ """Get the total training step/epoch."""
+ if self.get_mode(runner) == 'val' and self.by_epoch:
+ return self.get_epoch(runner)
+ else:
+ return self.get_iter(runner)
+
+ @master_only
+ def log(self, runner):
+ tags = self.get_loggable_tags(runner, add_mode=False)
+ if tags:
+ self.writer.add_scalars(
+ self.get_mode(runner), tags, self.get_step(runner))
+
+ @master_only
+ def after_run(self, runner):
+ if self.add_last_ckpt:
+ ckpt_path = osp.join(runner.work_dir, 'latest.pth')
+ if osp.islink(ckpt_path):
+ ckpt_path = osp.join(runner.work_dir, os.readlink(ckpt_path))
+
+ if osp.isfile(ckpt_path):
+ # runner.epoch += 1 has been done before `after_run`.
+ iteration = runner.epoch if self.by_epoch else runner.iter
+ return self.writer.add_snapshot_file(
+ tag=self.run_name,
+ snapshot_file_path=ckpt_path,
+ iteration=iteration)
+
+ # flush the buffer and send a task ending signal to Pavi
+ self.writer.close()
+
+ @master_only
+ def before_epoch(self, runner):
+ if runner.epoch == 0 and self.add_graph:
+ if is_module_wrapper(runner.model):
+ _model = runner.model.module
+ else:
+ _model = runner.model
+ device = next(_model.parameters()).device
+ data = next(iter(runner.data_loader))
+ image = data[self.img_key][0:1].to(device)
+ with torch.no_grad():
+ self.writer.add_graph(_model, image)
diff --git a/annotator/uniformer/mmcv/runner/hooks/logger/tensorboard.py b/annotator/uniformer/mmcv/runner/hooks/logger/tensorboard.py
new file mode 100644
index 0000000000000000000000000000000000000000..4dd5011dc08def6c09eef86d3ce5b124c9fc5372
--- /dev/null
+++ b/annotator/uniformer/mmcv/runner/hooks/logger/tensorboard.py
@@ -0,0 +1,57 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import os.path as osp
+
+from annotator.uniformer.mmcv.utils import TORCH_VERSION, digit_version
+from ...dist_utils import master_only
+from ..hook import HOOKS
+from .base import LoggerHook
+
+
+@HOOKS.register_module()
+class TensorboardLoggerHook(LoggerHook):
+
+ def __init__(self,
+ log_dir=None,
+ interval=10,
+ ignore_last=True,
+ reset_flag=False,
+ by_epoch=True):
+ super(TensorboardLoggerHook, self).__init__(interval, ignore_last,
+ reset_flag, by_epoch)
+ self.log_dir = log_dir
+
+ @master_only
+ def before_run(self, runner):
+ super(TensorboardLoggerHook, self).before_run(runner)
+ if (TORCH_VERSION == 'parrots'
+ or digit_version(TORCH_VERSION) < digit_version('1.1')):
+ try:
+ from tensorboardX import SummaryWriter
+ except ImportError:
+ raise ImportError('Please install tensorboardX to use '
+ 'TensorboardLoggerHook.')
+ else:
+ try:
+ from torch.utils.tensorboard import SummaryWriter
+ except ImportError:
+ raise ImportError(
+ 'Please run "pip install future tensorboard" to install '
+ 'the dependencies to use torch.utils.tensorboard '
+ '(applicable to PyTorch 1.1 or higher)')
+
+ if self.log_dir is None:
+ self.log_dir = osp.join(runner.work_dir, 'tf_logs')
+ self.writer = SummaryWriter(self.log_dir)
+
+ @master_only
+ def log(self, runner):
+ tags = self.get_loggable_tags(runner, allow_text=True)
+ for tag, val in tags.items():
+ if isinstance(val, str):
+ self.writer.add_text(tag, val, self.get_iter(runner))
+ else:
+ self.writer.add_scalar(tag, val, self.get_iter(runner))
+
+ @master_only
+ def after_run(self, runner):
+ self.writer.close()
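+
+# Typical usage (illustrative, not part of upstream mmcv): logger hooks such
+# as this one are usually enabled through the ``log_config`` entry of an
+# mmcv-style config, e.g.::
+#
+#     log_config = dict(
+#         interval=50,
+#         hooks=[
+#             dict(type='TextLoggerHook'),
+#             dict(type='TensorboardLoggerHook', log_dir=None),
+#         ])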
diff --git a/annotator/uniformer/mmcv/runner/hooks/logger/text.py b/annotator/uniformer/mmcv/runner/hooks/logger/text.py
new file mode 100644
index 0000000000000000000000000000000000000000..87b1a3eca9595a130121526f8b4c29915387ab35
--- /dev/null
+++ b/annotator/uniformer/mmcv/runner/hooks/logger/text.py
@@ -0,0 +1,256 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import datetime
+import os
+import os.path as osp
+from collections import OrderedDict
+
+import torch
+import torch.distributed as dist
+
+import annotator.uniformer.mmcv as mmcv
+from annotator.uniformer.mmcv.fileio.file_client import FileClient
+from annotator.uniformer.mmcv.utils import is_tuple_of, scandir
+from ..hook import HOOKS
+from .base import LoggerHook
+
+
+@HOOKS.register_module()
+class TextLoggerHook(LoggerHook):
+ """Logger hook in text.
+
+ In this logger hook, the information will be printed on terminal and
+ saved in json file.
+
+ Args:
+ by_epoch (bool, optional): Whether EpochBasedRunner is used.
+ Default: True.
+ interval (int, optional): Logging interval (every k iterations).
+ Default: 10.
+ ignore_last (bool, optional): Ignore the log of last iterations in each
+ epoch if less than :attr:`interval`. Default: True.
+ reset_flag (bool, optional): Whether to clear the output buffer after
+ logging. Default: False.
+ interval_exp_name (int, optional): Logging interval for experiment
+ name. This feature is to help users conveniently get the experiment
+ information from screen or log file. Default: 1000.
+        out_dir (str, optional): Logs are saved in ``runner.work_dir`` by default.
+ If ``out_dir`` is specified, logs will be copied to a new directory
+ which is the concatenation of ``out_dir`` and the last level
+ directory of ``runner.work_dir``. Default: None.
+ `New in version 1.3.16.`
+ out_suffix (str or tuple[str], optional): Those filenames ending with
+ ``out_suffix`` will be copied to ``out_dir``.
+ Default: ('.log.json', '.log', '.py').
+ `New in version 1.3.16.`
+ keep_local (bool, optional): Whether to keep local log when
+ :attr:`out_dir` is specified. If False, the local log will be
+ removed. Default: True.
+ `New in version 1.3.16.`
+ file_client_args (dict, optional): Arguments to instantiate a
+ FileClient. See :class:`mmcv.fileio.FileClient` for details.
+ Default: None.
+ `New in version 1.3.16.`
+ """
+
+ def __init__(self,
+ by_epoch=True,
+ interval=10,
+ ignore_last=True,
+ reset_flag=False,
+ interval_exp_name=1000,
+ out_dir=None,
+ out_suffix=('.log.json', '.log', '.py'),
+ keep_local=True,
+ file_client_args=None):
+ super(TextLoggerHook, self).__init__(interval, ignore_last, reset_flag,
+ by_epoch)
+ self.by_epoch = by_epoch
+ self.time_sec_tot = 0
+ self.interval_exp_name = interval_exp_name
+
+ if out_dir is None and file_client_args is not None:
+ raise ValueError(
+                'file_client_args should be "None" when `out_dir` is not '
+                'specified.')
+ self.out_dir = out_dir
+
+ if not (out_dir is None or isinstance(out_dir, str)
+ or is_tuple_of(out_dir, str)):
+            raise TypeError('out_dir should be "None" or string or tuple of '
+                            f'string, but got {out_dir}')
+ self.out_suffix = out_suffix
+
+ self.keep_local = keep_local
+ self.file_client_args = file_client_args
+ if self.out_dir is not None:
+ self.file_client = FileClient.infer_client(file_client_args,
+ self.out_dir)
+
+ def before_run(self, runner):
+ super(TextLoggerHook, self).before_run(runner)
+
+ if self.out_dir is not None:
+ self.file_client = FileClient.infer_client(self.file_client_args,
+ self.out_dir)
+ # The final `self.out_dir` is the concatenation of `self.out_dir`
+ # and the last level directory of `runner.work_dir`
+ basename = osp.basename(runner.work_dir.rstrip(osp.sep))
+ self.out_dir = self.file_client.join_path(self.out_dir, basename)
+ runner.logger.info(
+ (f'Text logs will be saved to {self.out_dir} by '
+ f'{self.file_client.name} after the training process.'))
+
+ self.start_iter = runner.iter
+ self.json_log_path = osp.join(runner.work_dir,
+ f'{runner.timestamp}.log.json')
+ if runner.meta is not None:
+ self._dump_log(runner.meta, runner)
+
+ def _get_max_memory(self, runner):
+ device = getattr(runner.model, 'output_device', None)
+ mem = torch.cuda.max_memory_allocated(device=device)
+ mem_mb = torch.tensor([mem / (1024 * 1024)],
+ dtype=torch.int,
+ device=device)
+ if runner.world_size > 1:
+ dist.reduce(mem_mb, 0, op=dist.ReduceOp.MAX)
+ return mem_mb.item()
+
+ def _log_info(self, log_dict, runner):
+ # print exp name for users to distinguish experiments
+ # at every ``interval_exp_name`` iterations and the end of each epoch
+ if runner.meta is not None and 'exp_name' in runner.meta:
+ if (self.every_n_iters(runner, self.interval_exp_name)) or (
+ self.by_epoch and self.end_of_epoch(runner)):
+ exp_info = f'Exp name: {runner.meta["exp_name"]}'
+ runner.logger.info(exp_info)
+
+ if log_dict['mode'] == 'train':
+ if isinstance(log_dict['lr'], dict):
+ lr_str = []
+ for k, val in log_dict['lr'].items():
+ lr_str.append(f'lr_{k}: {val:.3e}')
+ lr_str = ' '.join(lr_str)
+ else:
+ lr_str = f'lr: {log_dict["lr"]:.3e}'
+
+ # by epoch: Epoch [4][100/1000]
+ # by iter: Iter [100/100000]
+ if self.by_epoch:
+ log_str = f'Epoch [{log_dict["epoch"]}]' \
+ f'[{log_dict["iter"]}/{len(runner.data_loader)}]\t'
+ else:
+ log_str = f'Iter [{log_dict["iter"]}/{runner.max_iters}]\t'
+ log_str += f'{lr_str}, '
+
+ if 'time' in log_dict.keys():
+ self.time_sec_tot += (log_dict['time'] * self.interval)
+ time_sec_avg = self.time_sec_tot / (
+ runner.iter - self.start_iter + 1)
+ eta_sec = time_sec_avg * (runner.max_iters - runner.iter - 1)
+ eta_str = str(datetime.timedelta(seconds=int(eta_sec)))
+ log_str += f'eta: {eta_str}, '
+ log_str += f'time: {log_dict["time"]:.3f}, ' \
+ f'data_time: {log_dict["data_time"]:.3f}, '
+ # statistic memory
+ if torch.cuda.is_available():
+ log_str += f'memory: {log_dict["memory"]}, '
+ else:
+ # val/test time
+ # here 1000 is the length of the val dataloader
+ # by epoch: Epoch[val] [4][1000]
+ # by iter: Iter[val] [1000]
+ if self.by_epoch:
+ log_str = f'Epoch({log_dict["mode"]}) ' \
+ f'[{log_dict["epoch"]}][{log_dict["iter"]}]\t'
+ else:
+ log_str = f'Iter({log_dict["mode"]}) [{log_dict["iter"]}]\t'
+
+ log_items = []
+ for name, val in log_dict.items():
+ # TODO: resolve this hack
+ # these items have been in log_str
+ if name in [
+ 'mode', 'Epoch', 'iter', 'lr', 'time', 'data_time',
+ 'memory', 'epoch'
+ ]:
+ continue
+ if isinstance(val, float):
+ val = f'{val:.4f}'
+ log_items.append(f'{name}: {val}')
+ log_str += ', '.join(log_items)
+
+ runner.logger.info(log_str)
+
+ def _dump_log(self, log_dict, runner):
+ # dump log in json format
+ json_log = OrderedDict()
+ for k, v in log_dict.items():
+ json_log[k] = self._round_float(v)
+        # only the master (rank 0) process appends the json log
+ if runner.rank == 0:
+ with open(self.json_log_path, 'a+') as f:
+ mmcv.dump(json_log, f, file_format='json')
+ f.write('\n')
+
+ def _round_float(self, items):
+ if isinstance(items, list):
+ return [self._round_float(item) for item in items]
+ elif isinstance(items, float):
+ return round(items, 5)
+ else:
+ return items
+
+ def log(self, runner):
+ if 'eval_iter_num' in runner.log_buffer.output:
+            # this does not modify runner.iter and is independent of by_epoch
+ cur_iter = runner.log_buffer.output.pop('eval_iter_num')
+ else:
+ cur_iter = self.get_iter(runner, inner_iter=True)
+
+ log_dict = OrderedDict(
+ mode=self.get_mode(runner),
+ epoch=self.get_epoch(runner),
+ iter=cur_iter)
+
+ # only record lr of the first param group
+ cur_lr = runner.current_lr()
+ if isinstance(cur_lr, list):
+ log_dict['lr'] = cur_lr[0]
+ else:
+ assert isinstance(cur_lr, dict)
+ log_dict['lr'] = {}
+ for k, lr_ in cur_lr.items():
+ assert isinstance(lr_, list)
+ log_dict['lr'].update({k: lr_[0]})
+
+ if 'time' in runner.log_buffer.output:
+ # statistic memory
+ if torch.cuda.is_available():
+ log_dict['memory'] = self._get_max_memory(runner)
+
+ log_dict = dict(log_dict, **runner.log_buffer.output)
+
+ self._log_info(log_dict, runner)
+ self._dump_log(log_dict, runner)
+ return log_dict
+
+ def after_run(self, runner):
+ # copy or upload logs to self.out_dir
+ if self.out_dir is not None:
+ for filename in scandir(runner.work_dir, self.out_suffix, True):
+ local_filepath = osp.join(runner.work_dir, filename)
+ out_filepath = self.file_client.join_path(
+ self.out_dir, filename)
+ with open(local_filepath, 'r') as f:
+ self.file_client.put_text(f.read(), out_filepath)
+
+ runner.logger.info(
+ (f'The file {local_filepath} has been uploaded to '
+ f'{out_filepath}.'))
+
+ if not self.keep_local:
+ os.remove(local_filepath)
+ runner.logger.info(
+ (f'{local_filepath} was removed due to the '
+ '`self.keep_local=False`'))
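+
+
+if __name__ == '__main__':
+    # Illustrative sketch (not part of upstream mmcv): ``_round_float`` keeps
+    # the json log compact by rounding floats (recursively for lists) to five
+    # decimal places while leaving other values untouched. Guarded so nothing
+    # here runs on a normal import.
+    hook = TextLoggerHook()
+    print(hook._round_float([0.123456789, 3, 'acc']))  # [0.12346, 3, 'acc']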
diff --git a/annotator/uniformer/mmcv/runner/hooks/logger/wandb.py b/annotator/uniformer/mmcv/runner/hooks/logger/wandb.py
new file mode 100644
index 0000000000000000000000000000000000000000..9f6808462eb79ab2b04806a5d9f0d3dd079b5ea9
--- /dev/null
+++ b/annotator/uniformer/mmcv/runner/hooks/logger/wandb.py
@@ -0,0 +1,56 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from ...dist_utils import master_only
+from ..hook import HOOKS
+from .base import LoggerHook
+
+
+@HOOKS.register_module()
+class WandbLoggerHook(LoggerHook):
+
+ def __init__(self,
+ init_kwargs=None,
+ interval=10,
+ ignore_last=True,
+ reset_flag=False,
+ commit=True,
+ by_epoch=True,
+ with_step=True):
+ super(WandbLoggerHook, self).__init__(interval, ignore_last,
+ reset_flag, by_epoch)
+ self.import_wandb()
+ self.init_kwargs = init_kwargs
+ self.commit = commit
+ self.with_step = with_step
+
+ def import_wandb(self):
+ try:
+ import wandb
+ except ImportError:
+ raise ImportError(
+ 'Please run "pip install wandb" to install wandb')
+ self.wandb = wandb
+
+ @master_only
+ def before_run(self, runner):
+ super(WandbLoggerHook, self).before_run(runner)
+ if self.wandb is None:
+ self.import_wandb()
+ if self.init_kwargs:
+ self.wandb.init(**self.init_kwargs)
+ else:
+ self.wandb.init()
+
+ @master_only
+ def log(self, runner):
+ tags = self.get_loggable_tags(runner)
+ if tags:
+ if self.with_step:
+ self.wandb.log(
+ tags, step=self.get_iter(runner), commit=self.commit)
+ else:
+ tags['global_step'] = self.get_iter(runner)
+ self.wandb.log(tags, commit=self.commit)
+
+ @master_only
+ def after_run(self, runner):
+ self.wandb.join()
diff --git a/annotator/uniformer/mmcv/runner/hooks/lr_updater.py b/annotator/uniformer/mmcv/runner/hooks/lr_updater.py
new file mode 100644
index 0000000000000000000000000000000000000000..6365908ddf6070086de2ffc0afada46ed2f32256
--- /dev/null
+++ b/annotator/uniformer/mmcv/runner/hooks/lr_updater.py
@@ -0,0 +1,670 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import numbers
+from math import cos, pi
+
+import annotator.uniformer.mmcv as mmcv
+from .hook import HOOKS, Hook
+
+
+class LrUpdaterHook(Hook):
+ """LR Scheduler in MMCV.
+
+ Args:
+        by_epoch (bool): Whether the LR changes epoch by epoch.
+        warmup (string): Type of warmup used. It can be None (no warmup),
+            'constant', 'linear' or 'exp'.
+        warmup_iters (int): The number of iterations or epochs that warmup
+            lasts.
+        warmup_ratio (float): The LR used at the beginning of warmup equals
+            warmup_ratio * initial_lr.
+        warmup_by_epoch (bool): When warmup_by_epoch == True, warmup_iters
+            means the number of epochs that warmup lasts; otherwise it means
+            the number of iterations that warmup lasts.
+ """
+
+ def __init__(self,
+ by_epoch=True,
+ warmup=None,
+ warmup_iters=0,
+ warmup_ratio=0.1,
+ warmup_by_epoch=False):
+ # validate the "warmup" argument
+ if warmup is not None:
+ if warmup not in ['constant', 'linear', 'exp']:
+                raise ValueError(
+                    f'"{warmup}" is not a supported type for warming up, valid'
+                    ' types are "constant", "linear" and "exp"')
+ if warmup is not None:
+ assert warmup_iters > 0, \
+ '"warmup_iters" must be a positive integer'
+ assert 0 < warmup_ratio <= 1.0, \
+ '"warmup_ratio" must be in range (0,1]'
+
+ self.by_epoch = by_epoch
+ self.warmup = warmup
+ self.warmup_iters = warmup_iters
+ self.warmup_ratio = warmup_ratio
+ self.warmup_by_epoch = warmup_by_epoch
+
+ if self.warmup_by_epoch:
+ self.warmup_epochs = self.warmup_iters
+ self.warmup_iters = None
+ else:
+ self.warmup_epochs = None
+
+ self.base_lr = [] # initial lr for all param groups
+ self.regular_lr = [] # expected lr if no warming up is performed
+
+ def _set_lr(self, runner, lr_groups):
+ if isinstance(runner.optimizer, dict):
+ for k, optim in runner.optimizer.items():
+ for param_group, lr in zip(optim.param_groups, lr_groups[k]):
+ param_group['lr'] = lr
+ else:
+ for param_group, lr in zip(runner.optimizer.param_groups,
+ lr_groups):
+ param_group['lr'] = lr
+
+ def get_lr(self, runner, base_lr):
+ raise NotImplementedError
+
+ def get_regular_lr(self, runner):
+ if isinstance(runner.optimizer, dict):
+ lr_groups = {}
+ for k in runner.optimizer.keys():
+ _lr_group = [
+ self.get_lr(runner, _base_lr)
+ for _base_lr in self.base_lr[k]
+ ]
+ lr_groups.update({k: _lr_group})
+
+ return lr_groups
+ else:
+ return [self.get_lr(runner, _base_lr) for _base_lr in self.base_lr]
+
+ def get_warmup_lr(self, cur_iters):
+
+ def _get_warmup_lr(cur_iters, regular_lr):
+ if self.warmup == 'constant':
+ warmup_lr = [_lr * self.warmup_ratio for _lr in regular_lr]
+ elif self.warmup == 'linear':
+ k = (1 - cur_iters / self.warmup_iters) * (1 -
+ self.warmup_ratio)
+ warmup_lr = [_lr * (1 - k) for _lr in regular_lr]
+ elif self.warmup == 'exp':
+ k = self.warmup_ratio**(1 - cur_iters / self.warmup_iters)
+ warmup_lr = [_lr * k for _lr in regular_lr]
+ return warmup_lr
+
+ if isinstance(self.regular_lr, dict):
+ lr_groups = {}
+ for key, regular_lr in self.regular_lr.items():
+ lr_groups[key] = _get_warmup_lr(cur_iters, regular_lr)
+ return lr_groups
+ else:
+ return _get_warmup_lr(cur_iters, self.regular_lr)
+
+ def before_run(self, runner):
+ # NOTE: when resuming from a checkpoint, if 'initial_lr' is not saved,
+ # it will be set according to the optimizer params
+ if isinstance(runner.optimizer, dict):
+ self.base_lr = {}
+ for k, optim in runner.optimizer.items():
+ for group in optim.param_groups:
+ group.setdefault('initial_lr', group['lr'])
+ _base_lr = [
+ group['initial_lr'] for group in optim.param_groups
+ ]
+ self.base_lr.update({k: _base_lr})
+ else:
+ for group in runner.optimizer.param_groups:
+ group.setdefault('initial_lr', group['lr'])
+ self.base_lr = [
+ group['initial_lr'] for group in runner.optimizer.param_groups
+ ]
+
+ def before_train_epoch(self, runner):
+ if self.warmup_iters is None:
+ epoch_len = len(runner.data_loader)
+ self.warmup_iters = self.warmup_epochs * epoch_len
+
+ if not self.by_epoch:
+ return
+
+ self.regular_lr = self.get_regular_lr(runner)
+ self._set_lr(runner, self.regular_lr)
+
+ def before_train_iter(self, runner):
+ cur_iter = runner.iter
+ if not self.by_epoch:
+ self.regular_lr = self.get_regular_lr(runner)
+ if self.warmup is None or cur_iter >= self.warmup_iters:
+ self._set_lr(runner, self.regular_lr)
+ else:
+ warmup_lr = self.get_warmup_lr(cur_iter)
+ self._set_lr(runner, warmup_lr)
+ elif self.by_epoch:
+ if self.warmup is None or cur_iter > self.warmup_iters:
+ return
+ elif cur_iter == self.warmup_iters:
+ self._set_lr(runner, self.regular_lr)
+ else:
+ warmup_lr = self.get_warmup_lr(cur_iter)
+ self._set_lr(runner, warmup_lr)
+
+
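+# Illustrative sketch (not part of upstream mmcv): the three warmup schedules
+# implemented by ``LrUpdaterHook.get_warmup_lr`` above, written standalone.
+# With regular_lr=0.1, warmup_ratio=0.1 and warmup_iters=100 the LR starts at
+# 0.01 and approaches 0.1 by the end of warmup for the 'linear' and 'exp'
+# modes, while 'constant' keeps it at 0.01 throughout warmup.
+def _warmup_lr_sketch(cur_iter, regular_lr, warmup_ratio, warmup_iters, mode):
+    if mode == 'constant':
+        return regular_lr * warmup_ratio
+    if mode == 'linear':
+        k = (1 - cur_iter / warmup_iters) * (1 - warmup_ratio)
+        return regular_lr * (1 - k)
+    if mode == 'exp':
+        return regular_lr * warmup_ratio**(1 - cur_iter / warmup_iters)
+    raise ValueError(f'unknown warmup mode {mode}')
+
+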
+@HOOKS.register_module()
+class FixedLrUpdaterHook(LrUpdaterHook):
+
+ def __init__(self, **kwargs):
+ super(FixedLrUpdaterHook, self).__init__(**kwargs)
+
+ def get_lr(self, runner, base_lr):
+ return base_lr
+
+
+@HOOKS.register_module()
+class StepLrUpdaterHook(LrUpdaterHook):
+ """Step LR scheduler with min_lr clipping.
+
+ Args:
+ step (int | list[int]): Step to decay the LR. If an int value is given,
+ regard it as the decay interval. If a list is given, decay LR at
+ these steps.
+ gamma (float, optional): Decay LR ratio. Default: 0.1.
+ min_lr (float, optional): Minimum LR value to keep. If LR after decay
+ is lower than `min_lr`, it will be clipped to this value. If None
+ is given, we don't perform lr clipping. Default: None.
+ """
+
+ def __init__(self, step, gamma=0.1, min_lr=None, **kwargs):
+ if isinstance(step, list):
+ assert mmcv.is_list_of(step, int)
+ assert all([s > 0 for s in step])
+ elif isinstance(step, int):
+ assert step > 0
+ else:
+ raise TypeError('"step" must be a list or integer')
+ self.step = step
+ self.gamma = gamma
+ self.min_lr = min_lr
+ super(StepLrUpdaterHook, self).__init__(**kwargs)
+
+ def get_lr(self, runner, base_lr):
+ progress = runner.epoch if self.by_epoch else runner.iter
+
+ # calculate exponential term
+ if isinstance(self.step, int):
+ exp = progress // self.step
+ else:
+ exp = len(self.step)
+ for i, s in enumerate(self.step):
+ if progress < s:
+ exp = i
+ break
+
+ lr = base_lr * (self.gamma**exp)
+ if self.min_lr is not None:
+ # clip to a minimum value
+ lr = max(lr, self.min_lr)
+ return lr
+
+
+@HOOKS.register_module()
+class ExpLrUpdaterHook(LrUpdaterHook):
+
+ def __init__(self, gamma, **kwargs):
+ self.gamma = gamma
+ super(ExpLrUpdaterHook, self).__init__(**kwargs)
+
+ def get_lr(self, runner, base_lr):
+ progress = runner.epoch if self.by_epoch else runner.iter
+ return base_lr * self.gamma**progress
+
+
+@HOOKS.register_module()
+class PolyLrUpdaterHook(LrUpdaterHook):
+
+ def __init__(self, power=1., min_lr=0., **kwargs):
+ self.power = power
+ self.min_lr = min_lr
+ super(PolyLrUpdaterHook, self).__init__(**kwargs)
+
+ def get_lr(self, runner, base_lr):
+ if self.by_epoch:
+ progress = runner.epoch
+ max_progress = runner.max_epochs
+ else:
+ progress = runner.iter
+ max_progress = runner.max_iters
+ coeff = (1 - progress / max_progress)**self.power
+ return (base_lr - self.min_lr) * coeff + self.min_lr
+
+
+@HOOKS.register_module()
+class InvLrUpdaterHook(LrUpdaterHook):
+
+ def __init__(self, gamma, power=1., **kwargs):
+ self.gamma = gamma
+ self.power = power
+ super(InvLrUpdaterHook, self).__init__(**kwargs)
+
+ def get_lr(self, runner, base_lr):
+ progress = runner.epoch if self.by_epoch else runner.iter
+ return base_lr * (1 + self.gamma * progress)**(-self.power)
+
+
+@HOOKS.register_module()
+class CosineAnnealingLrUpdaterHook(LrUpdaterHook):
+
+ def __init__(self, min_lr=None, min_lr_ratio=None, **kwargs):
+ assert (min_lr is None) ^ (min_lr_ratio is None)
+ self.min_lr = min_lr
+ self.min_lr_ratio = min_lr_ratio
+ super(CosineAnnealingLrUpdaterHook, self).__init__(**kwargs)
+
+ def get_lr(self, runner, base_lr):
+ if self.by_epoch:
+ progress = runner.epoch
+ max_progress = runner.max_epochs
+ else:
+ progress = runner.iter
+ max_progress = runner.max_iters
+
+ if self.min_lr_ratio is not None:
+ target_lr = base_lr * self.min_lr_ratio
+ else:
+ target_lr = self.min_lr
+ return annealing_cos(base_lr, target_lr, progress / max_progress)
+
+
+@HOOKS.register_module()
+class FlatCosineAnnealingLrUpdaterHook(LrUpdaterHook):
+ """Flat + Cosine lr schedule.
+
+ Modified from https://github.com/fastai/fastai/blob/master/fastai/callback/schedule.py#L128 # noqa: E501
+
+ Args:
+        start_percent (float): The fraction of total training steps after
+            which annealing of the learning rate starts.
+ The value should be in range [0, 1).
+ Default: 0.75
+ min_lr (float, optional): The minimum lr. Default: None.
+ min_lr_ratio (float, optional): The ratio of minimum lr to the base lr.
+ Either `min_lr` or `min_lr_ratio` should be specified.
+ Default: None.
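+
+    Example (illustrative sketch; ``runner`` is assumed to be an existing
+    iteration-based runner):
+        >>> # assumes `runner` exists; keep the base LR flat for the first
+        >>> # half of training, then cosine-anneal it down to zero
+        >>> lr_hook = FlatCosineAnnealingLrUpdaterHook(
+        ...     start_percent=0.5, min_lr=0.0, by_epoch=False)
+        >>> runner.register_hook(lr_hook)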
+ """
+
+ def __init__(self,
+ start_percent=0.75,
+ min_lr=None,
+ min_lr_ratio=None,
+ **kwargs):
+ assert (min_lr is None) ^ (min_lr_ratio is None)
+ if start_percent < 0 or start_percent > 1 or not isinstance(
+ start_percent, float):
+ raise ValueError(
+                'expected float between 0 and 1 for start_percent, but '
+ f'got {start_percent}')
+ self.start_percent = start_percent
+ self.min_lr = min_lr
+ self.min_lr_ratio = min_lr_ratio
+ super(FlatCosineAnnealingLrUpdaterHook, self).__init__(**kwargs)
+
+ def get_lr(self, runner, base_lr):
+ if self.by_epoch:
+ start = round(runner.max_epochs * self.start_percent)
+ progress = runner.epoch - start
+ max_progress = runner.max_epochs - start
+ else:
+ start = round(runner.max_iters * self.start_percent)
+ progress = runner.iter - start
+ max_progress = runner.max_iters - start
+
+ if self.min_lr_ratio is not None:
+ target_lr = base_lr * self.min_lr_ratio
+ else:
+ target_lr = self.min_lr
+
+ if progress < 0:
+ return base_lr
+ else:
+ return annealing_cos(base_lr, target_lr, progress / max_progress)
+
+
+@HOOKS.register_module()
+class CosineRestartLrUpdaterHook(LrUpdaterHook):
+ """Cosine annealing with restarts learning rate scheme.
+
+ Args:
+        periods (list[int]): Periods for each cosine annealing cycle.
+ restart_weights (list[float], optional): Restart weights at each
+ restart iteration. Default: [1].
+ min_lr (float, optional): The minimum lr. Default: None.
+ min_lr_ratio (float, optional): The ratio of minimum lr to the base lr.
+ Either `min_lr` or `min_lr_ratio` should be specified.
+ Default: None.
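+
+    Example (illustrative sketch; the period lengths are arbitrary and
+    ``runner`` is assumed to be an existing iteration-based runner):
+        >>> # three cosine cycles of 10k/20k/30k iters with decaying restarts
+        >>> lr_hook = CosineRestartLrUpdaterHook(
+        ...     periods=[10000, 20000, 30000],
+        ...     restart_weights=[1.0, 0.5, 0.25],
+        ...     min_lr=1e-6,
+        ...     by_epoch=False)
+        >>> runner.register_hook(lr_hook)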
+ """
+
+ def __init__(self,
+ periods,
+ restart_weights=[1],
+ min_lr=None,
+ min_lr_ratio=None,
+ **kwargs):
+ assert (min_lr is None) ^ (min_lr_ratio is None)
+ self.periods = periods
+ self.min_lr = min_lr
+ self.min_lr_ratio = min_lr_ratio
+ self.restart_weights = restart_weights
+ assert (len(self.periods) == len(self.restart_weights)
+ ), 'periods and restart_weights should have the same length.'
+ super(CosineRestartLrUpdaterHook, self).__init__(**kwargs)
+
+ self.cumulative_periods = [
+ sum(self.periods[0:i + 1]) for i in range(0, len(self.periods))
+ ]
+
+ def get_lr(self, runner, base_lr):
+ if self.by_epoch:
+ progress = runner.epoch
+ else:
+ progress = runner.iter
+
+ if self.min_lr_ratio is not None:
+ target_lr = base_lr * self.min_lr_ratio
+ else:
+ target_lr = self.min_lr
+
+ idx = get_position_from_periods(progress, self.cumulative_periods)
+ current_weight = self.restart_weights[idx]
+ nearest_restart = 0 if idx == 0 else self.cumulative_periods[idx - 1]
+ current_periods = self.periods[idx]
+
+ alpha = min((progress - nearest_restart) / current_periods, 1)
+ return annealing_cos(base_lr, target_lr, alpha, current_weight)
+
+
+def get_position_from_periods(iteration, cumulative_periods):
+ """Get the position from a period list.
+
+ It will return the index of the right-closest number in the period list.
+ For example, the cumulative_periods = [100, 200, 300, 400],
+ if iteration == 50, return 0;
+ if iteration == 210, return 2;
+ if iteration == 300, return 3.
+
+ Args:
+ iteration (int): Current iteration.
+ cumulative_periods (list[int]): Cumulative period list.
+
+ Returns:
+ int: The position of the right-closest number in the period list.
+ """
+ for i, period in enumerate(cumulative_periods):
+ if iteration < period:
+ return i
+ raise ValueError(f'Current iteration {iteration} exceeds '
+ f'cumulative_periods {cumulative_periods}')
+
+
+@HOOKS.register_module()
+class CyclicLrUpdaterHook(LrUpdaterHook):
+ """Cyclic LR Scheduler.
+
+ Implement the cyclical learning rate policy (CLR) described in
+ https://arxiv.org/pdf/1506.01186.pdf
+
+ Different from the original paper, we use cosine annealing rather than
+ triangular policy inside a cycle. This improves the performance in the
+ 3D detection area.
+
+ Args:
+ by_epoch (bool): Whether to update LR by epoch.
+ target_ratio (tuple[float]): Relative ratio of the highest LR and the
+ lowest LR to the initial LR.
+ cyclic_times (int): Number of cycles during training
+ step_ratio_up (float): The ratio of the increasing process of LR in
+ the total cycle.
+ anneal_strategy (str): {'cos', 'linear'}
+ Specifies the annealing strategy: 'cos' for cosine annealing,
+ 'linear' for linear annealing. Default: 'cos'.
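+
+    Example (illustrative sketch; ``runner`` is assumed to be an existing
+    iteration-based runner):
+        >>> # one cycle over the whole run, 40% of it spent increasing the LR
+        >>> lr_hook = CyclicLrUpdaterHook(
+        ...     target_ratio=(10, 1e-4), cyclic_times=1, step_ratio_up=0.4)
+        >>> runner.register_hook(lr_hook)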
+ """
+
+ def __init__(self,
+ by_epoch=False,
+ target_ratio=(10, 1e-4),
+ cyclic_times=1,
+ step_ratio_up=0.4,
+ anneal_strategy='cos',
+ **kwargs):
+ if isinstance(target_ratio, float):
+ target_ratio = (target_ratio, target_ratio / 1e5)
+ elif isinstance(target_ratio, tuple):
+ target_ratio = (target_ratio[0], target_ratio[0] / 1e5) \
+ if len(target_ratio) == 1 else target_ratio
+ else:
+ raise ValueError('target_ratio should be either float '
+ f'or tuple, got {type(target_ratio)}')
+
+ assert len(target_ratio) == 2, \
+ '"target_ratio" must be list or tuple of two floats'
+ assert 0 <= step_ratio_up < 1.0, \
+ '"step_ratio_up" must be in range [0,1)'
+
+ self.target_ratio = target_ratio
+ self.cyclic_times = cyclic_times
+ self.step_ratio_up = step_ratio_up
+ self.lr_phases = [] # init lr_phases
+ # validate anneal_strategy
+ if anneal_strategy not in ['cos', 'linear']:
+ raise ValueError('anneal_strategy must be one of "cos" or '
+ f'"linear", instead got {anneal_strategy}')
+ elif anneal_strategy == 'cos':
+ self.anneal_func = annealing_cos
+ elif anneal_strategy == 'linear':
+ self.anneal_func = annealing_linear
+
+ assert not by_epoch, \
+ 'currently only support "by_epoch" = False'
+ super(CyclicLrUpdaterHook, self).__init__(by_epoch, **kwargs)
+
+ def before_run(self, runner):
+ super(CyclicLrUpdaterHook, self).before_run(runner)
+ # initiate lr_phases
+ # total lr_phases are separated as up and down
+ max_iter_per_phase = runner.max_iters // self.cyclic_times
+ iter_up_phase = int(self.step_ratio_up * max_iter_per_phase)
+ self.lr_phases.append(
+ [0, iter_up_phase, max_iter_per_phase, 1, self.target_ratio[0]])
+ self.lr_phases.append([
+ iter_up_phase, max_iter_per_phase, max_iter_per_phase,
+ self.target_ratio[0], self.target_ratio[1]
+ ])
+
+ def get_lr(self, runner, base_lr):
+ curr_iter = runner.iter
+ for (start_iter, end_iter, max_iter_per_phase, start_ratio,
+ end_ratio) in self.lr_phases:
+ curr_iter %= max_iter_per_phase
+ if start_iter <= curr_iter < end_iter:
+ progress = curr_iter - start_iter
+ return self.anneal_func(base_lr * start_ratio,
+ base_lr * end_ratio,
+ progress / (end_iter - start_iter))
+
+
+@HOOKS.register_module()
+class OneCycleLrUpdaterHook(LrUpdaterHook):
+ """One Cycle LR Scheduler.
+
+ The 1cycle learning rate policy changes the learning rate after every
+ batch. The one cycle learning rate policy is described in
+ https://arxiv.org/pdf/1708.07120.pdf
+
+ Args:
+ max_lr (float or list): Upper learning rate boundaries in the cycle
+ for each parameter group.
+ total_steps (int, optional): The total number of steps in the cycle.
+ Note that if a value is not provided here, it will be the max_iter
+ of runner. Default: None.
+ pct_start (float): The percentage of the cycle (in number of steps)
+ spent increasing the learning rate.
+ Default: 0.3
+ anneal_strategy (str): {'cos', 'linear'}
+ Specifies the annealing strategy: 'cos' for cosine annealing,
+ 'linear' for linear annealing.
+ Default: 'cos'
+ div_factor (float): Determines the initial learning rate via
+ initial_lr = max_lr/div_factor
+ Default: 25
+ final_div_factor (float): Determines the minimum learning rate via
+ min_lr = initial_lr/final_div_factor
+ Default: 1e4
+ three_phase (bool): If three_phase is True, use a third phase of the
+ schedule to annihilate the learning rate according to
+ final_div_factor instead of modifying the second phase (the first
+ two phases will be symmetrical about the step indicated by
+ pct_start).
+ Default: False
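+
+    Example (illustrative sketch; ``runner`` is assumed to be an existing
+    iteration-based runner):
+        >>> # LR rises to 0.01 over the first 30% of steps, starting from
+        >>> # 0.01 / 25 and finally decaying to 0.01 / 25 / 1e4
+        >>> lr_hook = OneCycleLrUpdaterHook(max_lr=0.01)
+        >>> runner.register_hook(lr_hook)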
+ """
+
+ def __init__(self,
+ max_lr,
+ total_steps=None,
+ pct_start=0.3,
+ anneal_strategy='cos',
+ div_factor=25,
+ final_div_factor=1e4,
+ three_phase=False,
+ **kwargs):
+ # validate by_epoch, currently only support by_epoch = False
+ if 'by_epoch' not in kwargs:
+ kwargs['by_epoch'] = False
+ else:
+ assert not kwargs['by_epoch'], \
+ 'currently only support "by_epoch" = False'
+ if not isinstance(max_lr, (numbers.Number, list, dict)):
+            raise ValueError('the type of max_lr must be number, list or '
+                             f'dict, but got {type(max_lr)}')
+ self._max_lr = max_lr
+ if total_steps is not None:
+ if not isinstance(total_steps, int):
+                raise ValueError('the type of total_steps must be int, but '
+ f'got {type(total_steps)}')
+ self.total_steps = total_steps
+ # validate pct_start
+ if pct_start < 0 or pct_start > 1 or not isinstance(pct_start, float):
+            raise ValueError('expected float between 0 and 1 for pct_start, '
+ f'got {pct_start}')
+ self.pct_start = pct_start
+ # validate anneal_strategy
+ if anneal_strategy not in ['cos', 'linear']:
+ raise ValueError('anneal_strategy must be one of "cos" or '
+ f'"linear", instead got {anneal_strategy}')
+ elif anneal_strategy == 'cos':
+ self.anneal_func = annealing_cos
+ elif anneal_strategy == 'linear':
+ self.anneal_func = annealing_linear
+ self.div_factor = div_factor
+ self.final_div_factor = final_div_factor
+ self.three_phase = three_phase
+ self.lr_phases = [] # init lr_phases
+ super(OneCycleLrUpdaterHook, self).__init__(**kwargs)
+
+ def before_run(self, runner):
+ if hasattr(self, 'total_steps'):
+ total_steps = self.total_steps
+ else:
+ total_steps = runner.max_iters
+ if total_steps < runner.max_iters:
+ raise ValueError(
+ 'The total steps must be greater than or equal to max '
+ f'iterations {runner.max_iters} of runner, but total steps '
+ f'is {total_steps}.')
+
+ if isinstance(runner.optimizer, dict):
+ self.base_lr = {}
+ for k, optim in runner.optimizer.items():
+ _max_lr = format_param(k, optim, self._max_lr)
+ self.base_lr[k] = [lr / self.div_factor for lr in _max_lr]
+ for group, lr in zip(optim.param_groups, self.base_lr[k]):
+ group.setdefault('initial_lr', lr)
+ else:
+ k = type(runner.optimizer).__name__
+ _max_lr = format_param(k, runner.optimizer, self._max_lr)
+ self.base_lr = [lr / self.div_factor for lr in _max_lr]
+ for group, lr in zip(runner.optimizer.param_groups, self.base_lr):
+ group.setdefault('initial_lr', lr)
+
+ if self.three_phase:
+ self.lr_phases.append(
+ [float(self.pct_start * total_steps) - 1, 1, self.div_factor])
+ self.lr_phases.append([
+ float(2 * self.pct_start * total_steps) - 2, self.div_factor, 1
+ ])
+ self.lr_phases.append(
+ [total_steps - 1, 1, 1 / self.final_div_factor])
+ else:
+ self.lr_phases.append(
+ [float(self.pct_start * total_steps) - 1, 1, self.div_factor])
+ self.lr_phases.append(
+ [total_steps - 1, self.div_factor, 1 / self.final_div_factor])
+
+ def get_lr(self, runner, base_lr):
+ curr_iter = runner.iter
+ start_iter = 0
+ for i, (end_iter, start_lr, end_lr) in enumerate(self.lr_phases):
+ if curr_iter <= end_iter:
+ pct = (curr_iter - start_iter) / (end_iter - start_iter)
+ lr = self.anneal_func(base_lr * start_lr, base_lr * end_lr,
+ pct)
+ break
+ start_iter = end_iter
+ return lr
+
+
+def annealing_cos(start, end, factor, weight=1):
+ """Calculate annealing cos learning rate.
+
+ Cosine anneal from `weight * start + (1 - weight) * end` to `end` as
+ percentage goes from 0.0 to 1.0.
+
+ Args:
+ start (float): The starting learning rate of the cosine annealing.
+        end (float): The ending learning rate of the cosine annealing.
+ factor (float): The coefficient of `pi` when calculating the current
+ percentage. Range from 0.0 to 1.0.
+ weight (float, optional): The combination factor of `start` and `end`
+ when calculating the actual starting learning rate. Default to 1.
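+
+    Example (illustrative values for a 1.0 -> 0.0 anneal):
+        >>> annealing_cos(1.0, 0.0, 0.0)
+        1.0
+        >>> round(annealing_cos(1.0, 0.0, 0.5), 6)
+        0.5
+        >>> annealing_cos(1.0, 0.0, 1.0)
+        0.0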
+ """
+ cos_out = cos(pi * factor) + 1
+ return end + 0.5 * weight * (start - end) * cos_out
+
+
+def annealing_linear(start, end, factor):
+ """Calculate annealing linear learning rate.
+
+ Linear anneal from `start` to `end` as percentage goes from 0.0 to 1.0.
+
+ Args:
+ start (float): The starting learning rate of the linear annealing.
+        end (float): The ending learning rate of the linear annealing.
+        factor (float): The current annealing percentage. Range from 0.0
+            to 1.0.
+ """
+ return start + (end - start) * factor
+
+
+def format_param(name, optim, param):
+ if isinstance(param, numbers.Number):
+ return [param] * len(optim.param_groups)
+ elif isinstance(param, (list, tuple)): # multi param groups
+ if len(param) != len(optim.param_groups):
+ raise ValueError(f'expected {len(optim.param_groups)} '
+ f'values for {name}, got {len(param)}')
+ return param
+ else: # multi optimizers
+ if name not in param:
+ raise KeyError(f'{name} is not found in {param.keys()}')
+ return param[name]
diff --git a/annotator/uniformer/mmcv/runner/hooks/memory.py b/annotator/uniformer/mmcv/runner/hooks/memory.py
new file mode 100644
index 0000000000000000000000000000000000000000..70cf9a838fb314e3bd3c07aadbc00921a81e83ed
--- /dev/null
+++ b/annotator/uniformer/mmcv/runner/hooks/memory.py
@@ -0,0 +1,25 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import torch
+
+from .hook import HOOKS, Hook
+
+
+@HOOKS.register_module()
+class EmptyCacheHook(Hook):
+
+ def __init__(self, before_epoch=False, after_epoch=True, after_iter=False):
+ self._before_epoch = before_epoch
+ self._after_epoch = after_epoch
+ self._after_iter = after_iter
+
+ def after_iter(self, runner):
+ if self._after_iter:
+ torch.cuda.empty_cache()
+
+ def before_epoch(self, runner):
+ if self._before_epoch:
+ torch.cuda.empty_cache()
+
+ def after_epoch(self, runner):
+ if self._after_epoch:
+ torch.cuda.empty_cache()
diff --git a/annotator/uniformer/mmcv/runner/hooks/momentum_updater.py b/annotator/uniformer/mmcv/runner/hooks/momentum_updater.py
new file mode 100644
index 0000000000000000000000000000000000000000..60437756ceedf06055ec349df69a25465738d3f0
--- /dev/null
+++ b/annotator/uniformer/mmcv/runner/hooks/momentum_updater.py
@@ -0,0 +1,493 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import annotator.uniformer.mmcv as mmcv
+from .hook import HOOKS, Hook
+from .lr_updater import annealing_cos, annealing_linear, format_param
+
+
+class MomentumUpdaterHook(Hook):
+
+ def __init__(self,
+ by_epoch=True,
+ warmup=None,
+ warmup_iters=0,
+ warmup_ratio=0.9):
+ # validate the "warmup" argument
+ if warmup is not None:
+ if warmup not in ['constant', 'linear', 'exp']:
+ raise ValueError(
+ f'"{warmup}" is not a supported type for warming up, valid'
+ ' types are "constant" and "linear"')
+ if warmup is not None:
+ assert warmup_iters > 0, \
+ '"warmup_iters" must be a positive integer'
+ assert 0 < warmup_ratio <= 1.0, \
+ '"warmup_momentum" must be in range (0,1]'
+
+ self.by_epoch = by_epoch
+ self.warmup = warmup
+ self.warmup_iters = warmup_iters
+ self.warmup_ratio = warmup_ratio
+
+ self.base_momentum = [] # initial momentum for all param groups
+ self.regular_momentum = [
+ ] # expected momentum if no warming up is performed
+
+ def _set_momentum(self, runner, momentum_groups):
+ if isinstance(runner.optimizer, dict):
+ for k, optim in runner.optimizer.items():
+ for param_group, mom in zip(optim.param_groups,
+ momentum_groups[k]):
+ if 'momentum' in param_group.keys():
+ param_group['momentum'] = mom
+ elif 'betas' in param_group.keys():
+ param_group['betas'] = (mom, param_group['betas'][1])
+ else:
+ for param_group, mom in zip(runner.optimizer.param_groups,
+ momentum_groups):
+ if 'momentum' in param_group.keys():
+ param_group['momentum'] = mom
+ elif 'betas' in param_group.keys():
+ param_group['betas'] = (mom, param_group['betas'][1])
+
+ def get_momentum(self, runner, base_momentum):
+ raise NotImplementedError
+
+ def get_regular_momentum(self, runner):
+ if isinstance(runner.optimizer, dict):
+ momentum_groups = {}
+ for k in runner.optimizer.keys():
+ _momentum_group = [
+ self.get_momentum(runner, _base_momentum)
+ for _base_momentum in self.base_momentum[k]
+ ]
+ momentum_groups.update({k: _momentum_group})
+ return momentum_groups
+ else:
+ return [
+ self.get_momentum(runner, _base_momentum)
+ for _base_momentum in self.base_momentum
+ ]
+
+ def get_warmup_momentum(self, cur_iters):
+
+ def _get_warmup_momentum(cur_iters, regular_momentum):
+ if self.warmup == 'constant':
+ warmup_momentum = [
+ _momentum / self.warmup_ratio
+                    for _momentum in regular_momentum
+ ]
+ elif self.warmup == 'linear':
+ k = (1 - cur_iters / self.warmup_iters) * (1 -
+ self.warmup_ratio)
+ warmup_momentum = [
+                    _momentum / (1 - k) for _momentum in regular_momentum
+ ]
+ elif self.warmup == 'exp':
+ k = self.warmup_ratio**(1 - cur_iters / self.warmup_iters)
+ warmup_momentum = [
+                    _momentum / k for _momentum in regular_momentum
+ ]
+ return warmup_momentum
+
+ if isinstance(self.regular_momentum, dict):
+ momentum_groups = {}
+ for key, regular_momentum in self.regular_momentum.items():
+ momentum_groups[key] = _get_warmup_momentum(
+ cur_iters, regular_momentum)
+ return momentum_groups
+ else:
+ return _get_warmup_momentum(cur_iters, self.regular_momentum)
+
+ def before_run(self, runner):
+ # NOTE: when resuming from a checkpoint,
+ # if 'initial_momentum' is not saved,
+ # it will be set according to the optimizer params
+ if isinstance(runner.optimizer, dict):
+ self.base_momentum = {}
+ for k, optim in runner.optimizer.items():
+ for group in optim.param_groups:
+ if 'momentum' in group.keys():
+ group.setdefault('initial_momentum', group['momentum'])
+ else:
+ group.setdefault('initial_momentum', group['betas'][0])
+ _base_momentum = [
+ group['initial_momentum'] for group in optim.param_groups
+ ]
+ self.base_momentum.update({k: _base_momentum})
+ else:
+ for group in runner.optimizer.param_groups:
+ if 'momentum' in group.keys():
+ group.setdefault('initial_momentum', group['momentum'])
+ else:
+ group.setdefault('initial_momentum', group['betas'][0])
+ self.base_momentum = [
+ group['initial_momentum']
+ for group in runner.optimizer.param_groups
+ ]
+
+ def before_train_epoch(self, runner):
+ if not self.by_epoch:
+ return
+        self.regular_momentum = self.get_regular_momentum(runner)
+        self._set_momentum(runner, self.regular_momentum)
+
+ def before_train_iter(self, runner):
+ cur_iter = runner.iter
+ if not self.by_epoch:
+            self.regular_momentum = self.get_regular_momentum(runner)
+            if self.warmup is None or cur_iter >= self.warmup_iters:
+                self._set_momentum(runner, self.regular_momentum)
+ else:
+ warmup_momentum = self.get_warmup_momentum(cur_iter)
+ self._set_momentum(runner, warmup_momentum)
+ elif self.by_epoch:
+ if self.warmup is None or cur_iter > self.warmup_iters:
+ return
+ elif cur_iter == self.warmup_iters:
+                self._set_momentum(runner, self.regular_momentum)
+ else:
+ warmup_momentum = self.get_warmup_momentum(cur_iter)
+ self._set_momentum(runner, warmup_momentum)
+
+
+@HOOKS.register_module()
+class StepMomentumUpdaterHook(MomentumUpdaterHook):
+ """Step momentum scheduler with min value clipping.
+
+ Args:
+ step (int | list[int]): Step to decay the momentum. If an int value is
+ given, regard it as the decay interval. If a list is given, decay
+ momentum at these steps.
+ gamma (float, optional): Decay momentum ratio. Default: 0.5.
+ min_momentum (float, optional): Minimum momentum value to keep. If
+ momentum after decay is lower than this value, it will be clipped
+            accordingly. If None is given, we don't perform momentum clipping.
+ Default: None.
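+
+    Example (illustrative sketch; ``runner`` is assumed to be an existing
+    runner whose optimizer exposes ``momentum`` or ``betas``):
+        >>> # halve the momentum at epochs 8 and 11
+        >>> mom_hook = StepMomentumUpdaterHook(step=[8, 11], gamma=0.5)
+        >>> runner.register_hook(mom_hook)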
+ """
+
+ def __init__(self, step, gamma=0.5, min_momentum=None, **kwargs):
+ if isinstance(step, list):
+ assert mmcv.is_list_of(step, int)
+ assert all([s > 0 for s in step])
+ elif isinstance(step, int):
+ assert step > 0
+ else:
+ raise TypeError('"step" must be a list or integer')
+ self.step = step
+ self.gamma = gamma
+ self.min_momentum = min_momentum
+ super(StepMomentumUpdaterHook, self).__init__(**kwargs)
+
+ def get_momentum(self, runner, base_momentum):
+ progress = runner.epoch if self.by_epoch else runner.iter
+
+ # calculate exponential term
+ if isinstance(self.step, int):
+ exp = progress // self.step
+ else:
+ exp = len(self.step)
+ for i, s in enumerate(self.step):
+ if progress < s:
+ exp = i
+ break
+
+ momentum = base_momentum * (self.gamma**exp)
+ if self.min_momentum is not None:
+ # clip to a minimum value
+ momentum = max(momentum, self.min_momentum)
+ return momentum
+
+
+@HOOKS.register_module()
+class CosineAnnealingMomentumUpdaterHook(MomentumUpdaterHook):
+
+ def __init__(self, min_momentum=None, min_momentum_ratio=None, **kwargs):
+ assert (min_momentum is None) ^ (min_momentum_ratio is None)
+ self.min_momentum = min_momentum
+ self.min_momentum_ratio = min_momentum_ratio
+ super(CosineAnnealingMomentumUpdaterHook, self).__init__(**kwargs)
+
+ def get_momentum(self, runner, base_momentum):
+ if self.by_epoch:
+ progress = runner.epoch
+ max_progress = runner.max_epochs
+ else:
+ progress = runner.iter
+ max_progress = runner.max_iters
+ if self.min_momentum_ratio is not None:
+ target_momentum = base_momentum * self.min_momentum_ratio
+ else:
+ target_momentum = self.min_momentum
+ return annealing_cos(base_momentum, target_momentum,
+ progress / max_progress)
+
+
+@HOOKS.register_module()
+class CyclicMomentumUpdaterHook(MomentumUpdaterHook):
+ """Cyclic momentum Scheduler.
+
+ Implement the cyclical momentum scheduler policy described in
+ https://arxiv.org/pdf/1708.07120.pdf
+
+    This momentum scheduler is usually used together with CyclicLrUpdaterHook
+    to improve performance in the 3D detection area.
+
+ Attributes:
+ target_ratio (tuple[float]): Relative ratio of the lowest momentum and
+ the highest momentum to the initial momentum.
+ cyclic_times (int): Number of cycles during training
+ step_ratio_up (float): The ratio of the increasing process of momentum
+ in the total cycle.
+ by_epoch (bool): Whether to update momentum by epoch.
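+
+    Example (illustrative sketch; ``runner`` is assumed to be an existing
+    iteration-based runner, typically also using CyclicLrUpdaterHook):
+        >>> mom_hook = CyclicMomentumUpdaterHook(
+        ...     target_ratio=(0.85 / 0.95, 1), cyclic_times=1,
+        ...     step_ratio_up=0.4)
+        >>> runner.register_hook(mom_hook)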
+ """
+
+ def __init__(self,
+ by_epoch=False,
+ target_ratio=(0.85 / 0.95, 1),
+ cyclic_times=1,
+ step_ratio_up=0.4,
+ **kwargs):
+ if isinstance(target_ratio, float):
+ target_ratio = (target_ratio, target_ratio / 1e5)
+ elif isinstance(target_ratio, tuple):
+ target_ratio = (target_ratio[0], target_ratio[0] / 1e5) \
+ if len(target_ratio) == 1 else target_ratio
+ else:
+ raise ValueError('target_ratio should be either float '
+ f'or tuple, got {type(target_ratio)}')
+
+ assert len(target_ratio) == 2, \
+ '"target_ratio" must be list or tuple of two floats'
+ assert 0 <= step_ratio_up < 1.0, \
+ '"step_ratio_up" must be in range [0,1)'
+
+ self.target_ratio = target_ratio
+ self.cyclic_times = cyclic_times
+ self.step_ratio_up = step_ratio_up
+ self.momentum_phases = [] # init momentum_phases
+ # currently only support by_epoch=False
+ assert not by_epoch, \
+ 'currently only support "by_epoch" = False'
+ super(CyclicMomentumUpdaterHook, self).__init__(by_epoch, **kwargs)
+
+ def before_run(self, runner):
+ super(CyclicMomentumUpdaterHook, self).before_run(runner)
+ # initiate momentum_phases
+ # total momentum_phases are separated as up and down
+ max_iter_per_phase = runner.max_iters // self.cyclic_times
+ iter_up_phase = int(self.step_ratio_up * max_iter_per_phase)
+ self.momentum_phases.append(
+ [0, iter_up_phase, max_iter_per_phase, 1, self.target_ratio[0]])
+ self.momentum_phases.append([
+ iter_up_phase, max_iter_per_phase, max_iter_per_phase,
+ self.target_ratio[0], self.target_ratio[1]
+ ])
+
+ def get_momentum(self, runner, base_momentum):
+ curr_iter = runner.iter
+ for (start_iter, end_iter, max_iter_per_phase, start_ratio,
+ end_ratio) in self.momentum_phases:
+ curr_iter %= max_iter_per_phase
+ if start_iter <= curr_iter < end_iter:
+ progress = curr_iter - start_iter
+ return annealing_cos(base_momentum * start_ratio,
+ base_momentum * end_ratio,
+ progress / (end_iter - start_iter))
+
+
+@HOOKS.register_module()
+class OneCycleMomentumUpdaterHook(MomentumUpdaterHook):
+ """OneCycle momentum Scheduler.
+
+    This momentum scheduler is usually used together with OneCycleLrUpdaterHook
+    to improve performance.
+
+ Args:
+ base_momentum (float or list): Lower momentum boundaries in the cycle
+ for each parameter group. Note that momentum is cycled inversely
+ to learning rate; at the peak of a cycle, momentum is
+ 'base_momentum' and learning rate is 'max_lr'.
+ Default: 0.85
+ max_momentum (float or list): Upper momentum boundaries in the cycle
+ for each parameter group. Functionally,
+ it defines the cycle amplitude (max_momentum - base_momentum).
+ Note that momentum is cycled inversely
+ to learning rate; at the start of a cycle, momentum is
+ 'max_momentum' and learning rate is 'base_lr'
+ Default: 0.95
+ pct_start (float): The percentage of the cycle (in number of steps)
+ spent increasing the learning rate.
+ Default: 0.3
+ anneal_strategy (str): {'cos', 'linear'}
+ Specifies the annealing strategy: 'cos' for cosine annealing,
+ 'linear' for linear annealing.
+ Default: 'cos'
+ three_phase (bool): If three_phase is True, use a third phase of the
+ schedule to annihilate the learning rate according to
+ final_div_factor instead of modifying the second phase (the first
+ two phases will be symmetrical about the step indicated by
+ pct_start).
+ Default: False
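+
+    Example (illustrative sketch; ``runner`` is assumed to be an existing
+    iteration-based runner, typically also using OneCycleLrUpdaterHook):
+        >>> mom_hook = OneCycleMomentumUpdaterHook(
+        ...     base_momentum=0.85, max_momentum=0.95, pct_start=0.3)
+        >>> runner.register_hook(mom_hook)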
+ """
+
+ def __init__(self,
+ base_momentum=0.85,
+ max_momentum=0.95,
+ pct_start=0.3,
+ anneal_strategy='cos',
+ three_phase=False,
+ **kwargs):
+ # validate by_epoch, currently only support by_epoch=False
+ if 'by_epoch' not in kwargs:
+ kwargs['by_epoch'] = False
+ else:
+ assert not kwargs['by_epoch'], \
+ 'currently only support "by_epoch" = False'
+ if not isinstance(base_momentum, (float, list, dict)):
+            raise ValueError('base_momentum must be a float, list or dict, '
+                             f'but got {type(base_momentum)}')
+ self._base_momentum = base_momentum
+ if not isinstance(max_momentum, (float, list, dict)):
+            raise ValueError('max_momentum must be a float, list or dict, '
+                             f'but got {type(max_momentum)}')
+ self._max_momentum = max_momentum
+ # validate pct_start
+ if pct_start < 0 or pct_start > 1 or not isinstance(pct_start, float):
+            raise ValueError('expected float between 0 and 1 for pct_start, '
+ f'got {pct_start}')
+ self.pct_start = pct_start
+ # validate anneal_strategy
+ if anneal_strategy not in ['cos', 'linear']:
+            raise ValueError('anneal_strategy must be one of "cos" or '
+ f'"linear", instead got {anneal_strategy}')
+ elif anneal_strategy == 'cos':
+ self.anneal_func = annealing_cos
+ elif anneal_strategy == 'linear':
+ self.anneal_func = annealing_linear
+ self.three_phase = three_phase
+ self.momentum_phases = [] # init momentum_phases
+ super(OneCycleMomentumUpdaterHook, self).__init__(**kwargs)
+
+ def before_run(self, runner):
+ if isinstance(runner.optimizer, dict):
+ for k, optim in runner.optimizer.items():
+ if ('momentum' not in optim.defaults
+ and 'betas' not in optim.defaults):
+                    raise ValueError('optimizer must support momentum (or '
+                                     'betas) to use this hook')
+ self.use_beta1 = 'betas' in optim.defaults
+ _base_momentum = format_param(k, optim, self._base_momentum)
+ _max_momentum = format_param(k, optim, self._max_momentum)
+ for group, b_momentum, m_momentum in zip(
+ optim.param_groups, _base_momentum, _max_momentum):
+ if self.use_beta1:
+ _, beta2 = group['betas']
+ group['betas'] = (m_momentum, beta2)
+ else:
+ group['momentum'] = m_momentum
+ group['base_momentum'] = b_momentum
+ group['max_momentum'] = m_momentum
+ else:
+ optim = runner.optimizer
+ if ('momentum' not in optim.defaults
+ and 'betas' not in optim.defaults):
+                raise ValueError('optimizer must support momentum (or betas) '
+                                 'to use this hook')
+ self.use_beta1 = 'betas' in optim.defaults
+ k = type(optim).__name__
+ _base_momentum = format_param(k, optim, self._base_momentum)
+ _max_momentum = format_param(k, optim, self._max_momentum)
+ for group, b_momentum, m_momentum in zip(optim.param_groups,
+ _base_momentum,
+ _max_momentum):
+ if self.use_beta1:
+ _, beta2 = group['betas']
+ group['betas'] = (m_momentum, beta2)
+ else:
+ group['momentum'] = m_momentum
+ group['base_momentum'] = b_momentum
+ group['max_momentum'] = m_momentum
+
+ if self.three_phase:
+ self.momentum_phases.append({
+ 'end_iter':
+ float(self.pct_start * runner.max_iters) - 1,
+ 'start_momentum':
+ 'max_momentum',
+ 'end_momentum':
+ 'base_momentum'
+ })
+ self.momentum_phases.append({
+ 'end_iter':
+ float(2 * self.pct_start * runner.max_iters) - 2,
+ 'start_momentum':
+ 'base_momentum',
+ 'end_momentum':
+ 'max_momentum'
+ })
+ self.momentum_phases.append({
+ 'end_iter': runner.max_iters - 1,
+ 'start_momentum': 'max_momentum',
+ 'end_momentum': 'max_momentum'
+ })
+ else:
+ self.momentum_phases.append({
+ 'end_iter':
+ float(self.pct_start * runner.max_iters) - 1,
+ 'start_momentum':
+ 'max_momentum',
+ 'end_momentum':
+ 'base_momentum'
+ })
+ self.momentum_phases.append({
+ 'end_iter': runner.max_iters - 1,
+ 'start_momentum': 'base_momentum',
+ 'end_momentum': 'max_momentum'
+ })
+
+ def _set_momentum(self, runner, momentum_groups):
+ if isinstance(runner.optimizer, dict):
+ for k, optim in runner.optimizer.items():
+ for param_group, mom in zip(optim.param_groups,
+ momentum_groups[k]):
+ if 'momentum' in param_group.keys():
+ param_group['momentum'] = mom
+ elif 'betas' in param_group.keys():
+ param_group['betas'] = (mom, param_group['betas'][1])
+ else:
+ for param_group, mom in zip(runner.optimizer.param_groups,
+ momentum_groups):
+ if 'momentum' in param_group.keys():
+ param_group['momentum'] = mom
+ elif 'betas' in param_group.keys():
+ param_group['betas'] = (mom, param_group['betas'][1])
+
+ def get_momentum(self, runner, param_group):
+ curr_iter = runner.iter
+ start_iter = 0
+ for i, phase in enumerate(self.momentum_phases):
+ end_iter = phase['end_iter']
+ if curr_iter <= end_iter or i == len(self.momentum_phases) - 1:
+ pct = (curr_iter - start_iter) / (end_iter - start_iter)
+ momentum = self.anneal_func(
+ param_group[phase['start_momentum']],
+ param_group[phase['end_momentum']], pct)
+ break
+ start_iter = end_iter
+ return momentum
+
+ def get_regular_momentum(self, runner):
+ if isinstance(runner.optimizer, dict):
+ momentum_groups = {}
+ for k, optim in runner.optimizer.items():
+ _momentum_group = [
+ self.get_momentum(runner, param_group)
+ for param_group in optim.param_groups
+ ]
+ momentum_groups.update({k: _momentum_group})
+ return momentum_groups
+ else:
+ momentum_groups = []
+ for param_group in runner.optimizer.param_groups:
+ momentum_groups.append(self.get_momentum(runner, param_group))
+ return momentum_groups
diff --git a/annotator/uniformer/mmcv/runner/hooks/optimizer.py b/annotator/uniformer/mmcv/runner/hooks/optimizer.py
new file mode 100644
index 0000000000000000000000000000000000000000..4ef3e9ff8f9c6926e32bdf027612267b64ed80df
--- /dev/null
+++ b/annotator/uniformer/mmcv/runner/hooks/optimizer.py
@@ -0,0 +1,508 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import copy
+from collections import defaultdict
+from itertools import chain
+
+from torch.nn.utils import clip_grad
+
+from annotator.uniformer.mmcv.utils import TORCH_VERSION, _BatchNorm, digit_version
+from ..dist_utils import allreduce_grads
+from ..fp16_utils import LossScaler, wrap_fp16_model
+from .hook import HOOKS, Hook
+
+try:
+ # If PyTorch version >= 1.6.0, torch.cuda.amp.GradScaler would be imported
+ # and used; otherwise, auto fp16 will adopt mmcv's implementation.
+ from torch.cuda.amp import GradScaler
+except ImportError:
+ pass
+
+
+@HOOKS.register_module()
+class OptimizerHook(Hook):
+
+ def __init__(self, grad_clip=None):
+ self.grad_clip = grad_clip
+
+ def clip_grads(self, params):
+ params = list(
+ filter(lambda p: p.requires_grad and p.grad is not None, params))
+ if len(params) > 0:
+ return clip_grad.clip_grad_norm_(params, **self.grad_clip)
+
+ def after_train_iter(self, runner):
+ runner.optimizer.zero_grad()
+ runner.outputs['loss'].backward()
+ if self.grad_clip is not None:
+ grad_norm = self.clip_grads(runner.model.parameters())
+ if grad_norm is not None:
+ # Add grad norm to the logger
+ runner.log_buffer.update({'grad_norm': float(grad_norm)},
+ runner.outputs['num_samples'])
+ runner.optimizer.step()
+
+
+@HOOKS.register_module()
+class GradientCumulativeOptimizerHook(OptimizerHook):
+ """Optimizer Hook implements multi-iters gradient cumulating.
+
+ Args:
+ cumulative_iters (int, optional): Num of gradient cumulative iters.
+ The optimizer will step every `cumulative_iters` iters.
+ Defaults to 1.
+
+ Examples:
+ >>> # Use cumulative_iters to simulate a large batch size
+ >>> # It is helpful when the hardware cannot handle a large batch size.
+ >>> loader = DataLoader(data, batch_size=64)
+ >>> optim_hook = GradientCumulativeOptimizerHook(cumulative_iters=4)
+ >>> # almost equals to
+ >>> loader = DataLoader(data, batch_size=256)
+ >>> optim_hook = OptimizerHook()
+ """
+
+ def __init__(self, cumulative_iters=1, **kwargs):
+ super(GradientCumulativeOptimizerHook, self).__init__(**kwargs)
+
+ assert isinstance(cumulative_iters, int) and cumulative_iters > 0, \
+ f'cumulative_iters only accepts positive int, but got ' \
+ f'{type(cumulative_iters)} instead.'
+
+ self.cumulative_iters = cumulative_iters
+ self.divisible_iters = 0
+ self.remainder_iters = 0
+ self.initialized = False
+
+ def has_batch_norm(self, module):
+ if isinstance(module, _BatchNorm):
+ return True
+ for m in module.children():
+ if self.has_batch_norm(m):
+ return True
+ return False
+
+ def _init(self, runner):
+ if runner.iter % self.cumulative_iters != 0:
+ runner.logger.warning(
+ 'Resume iter number is not divisible by cumulative_iters in '
+ 'GradientCumulativeOptimizerHook, which means the gradient of '
+ 'some iters is lost and the result may be influenced slightly.'
+ )
+
+ if self.has_batch_norm(runner.model) and self.cumulative_iters > 1:
+ runner.logger.warning(
+ 'GradientCumulativeOptimizerHook may slightly decrease '
+ 'performance if the model has BatchNorm layers.')
+
+ residual_iters = runner.max_iters - runner.iter
+
+ self.divisible_iters = (
+ residual_iters // self.cumulative_iters * self.cumulative_iters)
+ self.remainder_iters = residual_iters - self.divisible_iters
+
+ self.initialized = True
+
+ def after_train_iter(self, runner):
+ if not self.initialized:
+ self._init(runner)
+
+ if runner.iter < self.divisible_iters:
+ loss_factor = self.cumulative_iters
+ else:
+ loss_factor = self.remainder_iters
+ loss = runner.outputs['loss']
+ loss = loss / loss_factor
+ loss.backward()
+
+ if (self.every_n_iters(runner, self.cumulative_iters)
+ or self.is_last_iter(runner)):
+
+ if self.grad_clip is not None:
+ grad_norm = self.clip_grads(runner.model.parameters())
+ if grad_norm is not None:
+ # Add grad norm to the logger
+ runner.log_buffer.update({'grad_norm': float(grad_norm)},
+ runner.outputs['num_samples'])
+ runner.optimizer.step()
+ runner.optimizer.zero_grad()
+
+
+if (TORCH_VERSION != 'parrots'
+ and digit_version(TORCH_VERSION) >= digit_version('1.6.0')):
+
+ @HOOKS.register_module()
+ class Fp16OptimizerHook(OptimizerHook):
+ """FP16 optimizer hook (using PyTorch's implementation).
+
+ If you are using PyTorch >= 1.6, torch.cuda.amp is used as the backend,
+ to take care of the optimization procedure.
+
+ Args:
+ loss_scale (float | str | dict): Scale factor configuration.
+ If loss_scale is a float, static loss scaling will be used with
+ the specified scale. If loss_scale is a string, it must be
+ 'dynamic', then dynamic loss scaling will be used.
+                It can also be a dict containing arguments of GradScaler.
+ Defaults to 512. For Pytorch >= 1.6, mmcv uses official
+ implementation of GradScaler. If you use a dict version of
+ loss_scale to create GradScaler, please refer to:
+ https://pytorch.org/docs/stable/amp.html#torch.cuda.amp.GradScaler
+ for the parameters.
+
+ Examples:
+ >>> loss_scale = dict(
+ ... init_scale=65536.0,
+ ... growth_factor=2.0,
+ ... backoff_factor=0.5,
+ ... growth_interval=2000
+ ... )
+ >>> optimizer_hook = Fp16OptimizerHook(loss_scale=loss_scale)
+ """
+
+ def __init__(self,
+ grad_clip=None,
+ coalesce=True,
+ bucket_size_mb=-1,
+ loss_scale=512.,
+ distributed=True):
+ self.grad_clip = grad_clip
+ self.coalesce = coalesce
+ self.bucket_size_mb = bucket_size_mb
+ self.distributed = distributed
+ self._scale_update_param = None
+ if loss_scale == 'dynamic':
+ self.loss_scaler = GradScaler()
+ elif isinstance(loss_scale, float):
+ self._scale_update_param = loss_scale
+ self.loss_scaler = GradScaler(init_scale=loss_scale)
+ elif isinstance(loss_scale, dict):
+ self.loss_scaler = GradScaler(**loss_scale)
+ else:
+ raise ValueError('loss_scale must be of type float, dict, or '
+ f'"dynamic", got {loss_scale}')
+
+ def before_run(self, runner):
+ """Preparing steps before Mixed Precision Training."""
+ # wrap model mode to fp16
+ wrap_fp16_model(runner.model)
+ # resume from state dict
+ if 'fp16' in runner.meta and 'loss_scaler' in runner.meta['fp16']:
+ scaler_state_dict = runner.meta['fp16']['loss_scaler']
+ self.loss_scaler.load_state_dict(scaler_state_dict)
+
+ def copy_grads_to_fp32(self, fp16_net, fp32_weights):
+ """Copy gradients from fp16 model to fp32 weight copy."""
+ for fp32_param, fp16_param in zip(fp32_weights,
+ fp16_net.parameters()):
+ if fp16_param.grad is not None:
+ if fp32_param.grad is None:
+ fp32_param.grad = fp32_param.data.new(
+ fp32_param.size())
+ fp32_param.grad.copy_(fp16_param.grad)
+
+ def copy_params_to_fp16(self, fp16_net, fp32_weights):
+ """Copy updated params from fp32 weight copy to fp16 model."""
+ for fp16_param, fp32_param in zip(fp16_net.parameters(),
+ fp32_weights):
+ fp16_param.data.copy_(fp32_param.data)
+
+ def after_train_iter(self, runner):
+ """Backward optimization steps for Mixed Precision Training. For
+ dynamic loss scaling, please refer to
+ https://pytorch.org/docs/stable/amp.html#torch.cuda.amp.GradScaler.
+
+ 1. Scale the loss by a scale factor.
+ 2. Backward the loss to obtain the gradients.
+ 3. Unscale the optimizer’s gradient tensors.
+ 4. Call optimizer.step() and update scale factor.
+ 5. Save loss_scaler state_dict for resume purpose.
+ """
+ # clear grads of last iteration
+ runner.model.zero_grad()
+ runner.optimizer.zero_grad()
+
+ self.loss_scaler.scale(runner.outputs['loss']).backward()
+ self.loss_scaler.unscale_(runner.optimizer)
+ # grad clip
+ if self.grad_clip is not None:
+ grad_norm = self.clip_grads(runner.model.parameters())
+ if grad_norm is not None:
+ # Add grad norm to the logger
+ runner.log_buffer.update({'grad_norm': float(grad_norm)},
+ runner.outputs['num_samples'])
+ # backward and update scaler
+ self.loss_scaler.step(runner.optimizer)
+ self.loss_scaler.update(self._scale_update_param)
+
+ # save state_dict of loss_scaler
+ runner.meta.setdefault(
+ 'fp16', {})['loss_scaler'] = self.loss_scaler.state_dict()
+
+ @HOOKS.register_module()
+ class GradientCumulativeFp16OptimizerHook(GradientCumulativeOptimizerHook,
+ Fp16OptimizerHook):
+ """Fp16 optimizer Hook (using PyTorch's implementation) implements
+ multi-iters gradient cumulating.
+
+ If you are using PyTorch >= 1.6, torch.cuda.amp is used as the backend,
+ to take care of the optimization procedure.
+ """
+
+ def __init__(self, *args, **kwargs):
+ super(GradientCumulativeFp16OptimizerHook,
+ self).__init__(*args, **kwargs)
+
+ def after_train_iter(self, runner):
+ if not self.initialized:
+ self._init(runner)
+
+ if runner.iter < self.divisible_iters:
+ loss_factor = self.cumulative_iters
+ else:
+ loss_factor = self.remainder_iters
+ loss = runner.outputs['loss']
+ loss = loss / loss_factor
+
+ self.loss_scaler.scale(loss).backward()
+
+ if (self.every_n_iters(runner, self.cumulative_iters)
+ or self.is_last_iter(runner)):
+
+ # copy fp16 grads in the model to fp32 params in the optimizer
+ self.loss_scaler.unscale_(runner.optimizer)
+
+ if self.grad_clip is not None:
+ grad_norm = self.clip_grads(runner.model.parameters())
+ if grad_norm is not None:
+ # Add grad norm to the logger
+ runner.log_buffer.update(
+ {'grad_norm': float(grad_norm)},
+ runner.outputs['num_samples'])
+
+ # backward and update scaler
+ self.loss_scaler.step(runner.optimizer)
+ self.loss_scaler.update(self._scale_update_param)
+
+ # save state_dict of loss_scaler
+ runner.meta.setdefault(
+ 'fp16', {})['loss_scaler'] = self.loss_scaler.state_dict()
+
+ # clear grads
+ runner.model.zero_grad()
+ runner.optimizer.zero_grad()
+
+else:
+
+ @HOOKS.register_module()
+ class Fp16OptimizerHook(OptimizerHook):
+ """FP16 optimizer hook (mmcv's implementation).
+
+        The steps of the fp16 optimizer are as follows.
+        1. Scale the loss value.
+        2. Backpropagate in the fp16 model.
+        3. Copy gradients from the fp16 model to the fp32 weight copy.
+        4. Update the fp32 weights.
+        5. Copy updated params from the fp32 weights back to the fp16 model.
+
+ Refer to https://arxiv.org/abs/1710.03740 for more details.
+
+ Args:
+ loss_scale (float | str | dict): Scale factor configuration.
+ If loss_scale is a float, static loss scaling will be used with
+ the specified scale. If loss_scale is a string, it must be
+ 'dynamic', then dynamic loss scaling will be used.
+ It can also be a dict containing arguments of LossScaler.
+ Defaults to 512.
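+
+        Example (illustrative sketch; ``runner`` is assumed to be an existing
+        single-GPU runner):
+            >>> # dynamic loss scaling without distributed grad all-reduce
+            >>> optimizer_hook = Fp16OptimizerHook(
+            ...     loss_scale='dynamic', distributed=False)
+            >>> runner.register_hook(optimizer_hook)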
+ """
+
+ def __init__(self,
+ grad_clip=None,
+ coalesce=True,
+ bucket_size_mb=-1,
+ loss_scale=512.,
+ distributed=True):
+ self.grad_clip = grad_clip
+ self.coalesce = coalesce
+ self.bucket_size_mb = bucket_size_mb
+ self.distributed = distributed
+ if loss_scale == 'dynamic':
+ self.loss_scaler = LossScaler(mode='dynamic')
+ elif isinstance(loss_scale, float):
+ self.loss_scaler = LossScaler(
+ init_scale=loss_scale, mode='static')
+ elif isinstance(loss_scale, dict):
+ self.loss_scaler = LossScaler(**loss_scale)
+ else:
+ raise ValueError('loss_scale must be of type float, dict, or '
+ f'"dynamic", got {loss_scale}')
+
+ def before_run(self, runner):
+ """Preparing steps before Mixed Precision Training.
+
+ 1. Make a master copy of fp32 weights for optimization.
+ 2. Convert the main model from fp32 to fp16.
+ """
+ # keep a copy of fp32 weights
+ old_groups = runner.optimizer.param_groups
+ runner.optimizer.param_groups = copy.deepcopy(
+ runner.optimizer.param_groups)
+ state = defaultdict(dict)
+ p_map = {
+ old_p: p
+ for old_p, p in zip(
+ chain(*(g['params'] for g in old_groups)),
+ chain(*(g['params']
+ for g in runner.optimizer.param_groups)))
+ }
+ for k, v in runner.optimizer.state.items():
+ state[p_map[k]] = v
+ runner.optimizer.state = state
+ # convert model to fp16
+ wrap_fp16_model(runner.model)
+ # resume from state dict
+ if 'fp16' in runner.meta and 'loss_scaler' in runner.meta['fp16']:
+ scaler_state_dict = runner.meta['fp16']['loss_scaler']
+ self.loss_scaler.load_state_dict(scaler_state_dict)
+
+ def copy_grads_to_fp32(self, fp16_net, fp32_weights):
+ """Copy gradients from fp16 model to fp32 weight copy."""
+ for fp32_param, fp16_param in zip(fp32_weights,
+ fp16_net.parameters()):
+ if fp16_param.grad is not None:
+ if fp32_param.grad is None:
+ fp32_param.grad = fp32_param.data.new(
+ fp32_param.size())
+ fp32_param.grad.copy_(fp16_param.grad)
+
+ def copy_params_to_fp16(self, fp16_net, fp32_weights):
+ """Copy updated params from fp32 weight copy to fp16 model."""
+ for fp16_param, fp32_param in zip(fp16_net.parameters(),
+ fp32_weights):
+ fp16_param.data.copy_(fp32_param.data)
+
+ def after_train_iter(self, runner):
+ """Backward optimization steps for Mixed Precision Training. For
+ dynamic loss scaling, please refer `loss_scalar.py`
+
+ 1. Scale the loss by a scale factor.
+ 2. Backward the loss to obtain the gradients (fp16).
+ 3. Copy gradients from the model to the fp32 weight copy.
+ 4. Scale the gradients back and update the fp32 weight copy.
+ 5. Copy back the params from fp32 weight copy to the fp16 model.
+ 6. Save loss_scaler state_dict for resume purpose.
+ """
+ # clear grads of last iteration
+ runner.model.zero_grad()
+ runner.optimizer.zero_grad()
+ # scale the loss value
+ scaled_loss = runner.outputs['loss'] * self.loss_scaler.loss_scale
+ scaled_loss.backward()
+ # copy fp16 grads in the model to fp32 params in the optimizer
+
+ fp32_weights = []
+ for param_group in runner.optimizer.param_groups:
+ fp32_weights += param_group['params']
+ self.copy_grads_to_fp32(runner.model, fp32_weights)
+ # allreduce grads
+ if self.distributed:
+ allreduce_grads(fp32_weights, self.coalesce,
+ self.bucket_size_mb)
+
+ has_overflow = self.loss_scaler.has_overflow(fp32_weights)
+ # if has overflow, skip this iteration
+ if not has_overflow:
+ # scale the gradients back
+ for param in fp32_weights:
+ if param.grad is not None:
+ param.grad.div_(self.loss_scaler.loss_scale)
+ if self.grad_clip is not None:
+ grad_norm = self.clip_grads(fp32_weights)
+ if grad_norm is not None:
+ # Add grad norm to the logger
+ runner.log_buffer.update(
+ {'grad_norm': float(grad_norm)},
+ runner.outputs['num_samples'])
+ # update fp32 params
+ runner.optimizer.step()
+ # copy fp32 params to the fp16 model
+ self.copy_params_to_fp16(runner.model, fp32_weights)
+ self.loss_scaler.update_scale(has_overflow)
+ if has_overflow:
+ runner.logger.warning('Check overflow, downscale loss scale '
+ f'to {self.loss_scaler.cur_scale}')
+
+ # save state_dict of loss_scaler
+ runner.meta.setdefault(
+ 'fp16', {})['loss_scaler'] = self.loss_scaler.state_dict()
+
+ @HOOKS.register_module()
+ class GradientCumulativeFp16OptimizerHook(GradientCumulativeOptimizerHook,
+ Fp16OptimizerHook):
+ """Fp16 optimizer Hook (using mmcv implementation) implements multi-
+ iters gradient cumulating."""
+
+ def __init__(self, *args, **kwargs):
+ super(GradientCumulativeFp16OptimizerHook,
+ self).__init__(*args, **kwargs)
+
+ def after_train_iter(self, runner):
+ if not self.initialized:
+ self._init(runner)
+
+ if runner.iter < self.divisible_iters:
+ loss_factor = self.cumulative_iters
+ else:
+ loss_factor = self.remainder_iters
+
+ loss = runner.outputs['loss']
+ loss = loss / loss_factor
+
+ # scale the loss value
+ scaled_loss = loss * self.loss_scaler.loss_scale
+ scaled_loss.backward()
+
+ if (self.every_n_iters(runner, self.cumulative_iters)
+ or self.is_last_iter(runner)):
+
+ # copy fp16 grads in the model to fp32 params in the optimizer
+ fp32_weights = []
+ for param_group in runner.optimizer.param_groups:
+ fp32_weights += param_group['params']
+ self.copy_grads_to_fp32(runner.model, fp32_weights)
+ # allreduce grads
+ if self.distributed:
+ allreduce_grads(fp32_weights, self.coalesce,
+ self.bucket_size_mb)
+
+ has_overflow = self.loss_scaler.has_overflow(fp32_weights)
+ # if has overflow, skip this iteration
+ if not has_overflow:
+ # scale the gradients back
+ for param in fp32_weights:
+ if param.grad is not None:
+ param.grad.div_(self.loss_scaler.loss_scale)
+ if self.grad_clip is not None:
+ grad_norm = self.clip_grads(fp32_weights)
+ if grad_norm is not None:
+ # Add grad norm to the logger
+ runner.log_buffer.update(
+ {'grad_norm': float(grad_norm)},
+ runner.outputs['num_samples'])
+ # update fp32 params
+ runner.optimizer.step()
+ # copy fp32 params to the fp16 model
+ self.copy_params_to_fp16(runner.model, fp32_weights)
+ else:
+ runner.logger.warning(
+ 'Check overflow, downscale loss scale '
+ f'to {self.loss_scaler.cur_scale}')
+
+ self.loss_scaler.update_scale(has_overflow)
+
+ # save state_dict of loss_scaler
+ runner.meta.setdefault(
+ 'fp16', {})['loss_scaler'] = self.loss_scaler.state_dict()
+
+ # clear grads
+ runner.model.zero_grad()
+ runner.optimizer.zero_grad()
diff --git a/annotator/uniformer/mmcv/runner/hooks/profiler.py b/annotator/uniformer/mmcv/runner/hooks/profiler.py
new file mode 100644
index 0000000000000000000000000000000000000000..b70236997eec59c2209ef351ae38863b4112d0ec
--- /dev/null
+++ b/annotator/uniformer/mmcv/runner/hooks/profiler.py
@@ -0,0 +1,180 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import warnings
+from typing import Callable, List, Optional, Union
+
+import torch
+
+from ..dist_utils import master_only
+from .hook import HOOKS, Hook
+
+
+@HOOKS.register_module()
+class ProfilerHook(Hook):
+ """Profiler to analyze performance during training.
+
+ PyTorch Profiler is a tool that allows the collection of the performance
+ metrics during the training. More details on Profiler can be found at
+ https://pytorch.org/docs/1.8.1/profiler.html#torch.profiler.profile
+
+ Args:
+ by_epoch (bool): Profile performance by epoch or by iteration.
+ Default: True.
+ profile_iters (int): Number of iterations for profiling.
+            If ``by_epoch=True``, profiling covers the first
+            ``profile_iters`` epochs of training; otherwise it covers the
+            first ``profile_iters`` iterations. Default: 1.
+ activities (list[str]): List of activity groups (CPU, CUDA) to use in
+ profiling. Default: ['cpu', 'cuda'].
+ schedule (dict, optional): Config of generating the callable schedule.
+ if schedule is None, profiler will not add step markers into the
+ trace and table view. Default: None.
+        on_trace_ready (callable, dict): Either a trace-ready handler or a
+            dict config used to generate one. Default: None.
+ record_shapes (bool): Save information about operator's input shapes.
+ Default: False.
+ profile_memory (bool): Track tensor memory allocation/deallocation.
+ Default: False.
+ with_stack (bool): Record source information (file and line number)
+ for the ops. Default: False.
+ with_flops (bool): Use formula to estimate the FLOPS of specific
+ operators (matrix multiplication and 2D convolution).
+ Default: False.
+ json_trace_path (str, optional): Exports the collected trace in Chrome
+ JSON format. Default: None.
+
+ Example:
+ >>> runner = ... # instantiate a Runner
+ >>> # tensorboard trace
+ >>> trace_config = dict(type='tb_trace', dir_name='work_dir')
+ >>> profiler_config = dict(on_trace_ready=trace_config)
+ >>> runner.register_profiler_hook(profiler_config)
+ >>> runner.run(data_loaders=[trainloader], workflow=[('train', 1)])
+ """
+
+ def __init__(self,
+ by_epoch: bool = True,
+ profile_iters: int = 1,
+ activities: List[str] = ['cpu', 'cuda'],
+ schedule: Optional[dict] = None,
+ on_trace_ready: Optional[Union[Callable, dict]] = None,
+ record_shapes: bool = False,
+ profile_memory: bool = False,
+ with_stack: bool = False,
+ with_flops: bool = False,
+ json_trace_path: Optional[str] = None) -> None:
+ try:
+ from torch import profiler # torch version >= 1.8.1
+ except ImportError:
+            raise ImportError('profiler requires torch>=1.8.1, '
+ f'but your version is {torch.__version__}')
+
+ assert isinstance(by_epoch, bool), '``by_epoch`` should be a boolean.'
+ self.by_epoch = by_epoch
+
+ if profile_iters < 1:
+ raise ValueError('profile_iters should be greater than 0, but got '
+ f'{profile_iters}')
+ self.profile_iters = profile_iters
+
+ if not isinstance(activities, list):
+ raise ValueError(
+ f'activities should be list, but got {type(activities)}')
+ self.activities = []
+ for activity in activities:
+ activity = activity.lower()
+ if activity == 'cpu':
+ self.activities.append(profiler.ProfilerActivity.CPU)
+ elif activity == 'cuda':
+ self.activities.append(profiler.ProfilerActivity.CUDA)
+ else:
+ raise ValueError(
+ f'activity should be "cpu" or "cuda", but got {activity}')
+
+ if schedule is not None:
+ self.schedule = profiler.schedule(**schedule)
+ else:
+ self.schedule = None
+
+ self.on_trace_ready = on_trace_ready
+ self.record_shapes = record_shapes
+ self.profile_memory = profile_memory
+ self.with_stack = with_stack
+ self.with_flops = with_flops
+ self.json_trace_path = json_trace_path
+
+ @master_only
+ def before_run(self, runner):
+ if self.by_epoch and runner.max_epochs < self.profile_iters:
+ raise ValueError('self.profile_iters should not be greater than '
+ f'{runner.max_epochs}')
+
+ if not self.by_epoch and runner.max_iters < self.profile_iters:
+ raise ValueError('self.profile_iters should not be greater than '
+ f'{runner.max_iters}')
+
+ if callable(self.on_trace_ready): # handler
+ _on_trace_ready = self.on_trace_ready
+ elif isinstance(self.on_trace_ready, dict): # config of handler
+ trace_cfg = self.on_trace_ready.copy()
+ trace_type = trace_cfg.pop('type') # log_trace handler
+ if trace_type == 'log_trace':
+
+ def _log_handler(prof):
+ print(prof.key_averages().table(**trace_cfg))
+
+ _on_trace_ready = _log_handler
+ elif trace_type == 'tb_trace': # tensorboard_trace handler
+ try:
+ import torch_tb_profiler # noqa: F401
+ except ImportError:
+ raise ImportError('please run "pip install '
+ 'torch-tb-profiler" to install '
+ 'torch_tb_profiler')
+ _on_trace_ready = torch.profiler.tensorboard_trace_handler(
+ **trace_cfg)
+ else:
+ raise ValueError('trace_type should be "log_trace" or '
+ f'"tb_trace", but got {trace_type}')
+ elif self.on_trace_ready is None:
+ _on_trace_ready = None # type: ignore
+ else:
+ raise ValueError('on_trace_ready should be handler, dict or None, '
+ f'but got {type(self.on_trace_ready)}')
+
+ if runner.max_epochs > 1:
+ warnings.warn(f'profiler will profile {runner.max_epochs} epochs '
+ 'instead of 1 epoch. Since profiler will slow down '
+ 'the training, it is recommended to train 1 epoch '
+ 'with ProfilerHook and adjust your setting according'
+ ' to the profiler summary. During normal training '
+ '(epoch > 1), you may disable the ProfilerHook.')
+
+ self.profiler = torch.profiler.profile(
+ activities=self.activities,
+ schedule=self.schedule,
+ on_trace_ready=_on_trace_ready,
+ record_shapes=self.record_shapes,
+ profile_memory=self.profile_memory,
+ with_stack=self.with_stack,
+ with_flops=self.with_flops)
+
+ self.profiler.__enter__()
+ runner.logger.info('profiler is profiling...')
+
+ @master_only
+ def after_train_epoch(self, runner):
+ if self.by_epoch and runner.epoch == self.profile_iters - 1:
+ runner.logger.info('profiler may take a few minutes...')
+ self.profiler.__exit__(None, None, None)
+ if self.json_trace_path is not None:
+ self.profiler.export_chrome_trace(self.json_trace_path)
+
+ @master_only
+ def after_train_iter(self, runner):
+ self.profiler.step()
+ if not self.by_epoch and runner.iter == self.profile_iters - 1:
+ runner.logger.info('profiler may take a few minutes...')
+ self.profiler.__exit__(None, None, None)
+ if self.json_trace_path is not None:
+ self.profiler.export_chrome_trace(self.json_trace_path)
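+
+# Illustrative sketch (not part of the original file): a hypothetical hook
+# config exercising the `on_trace_ready` branches handled in `before_run`.
+# 'log_trace' forwards its remaining keys to `prof.key_averages().table(...)`,
+# while 'tb_trace' forwards them to
+# `torch.profiler.tensorboard_trace_handler(...)`. The registry name
+# 'ProfilerHook' is assumed here.
+# >>> profiler_hook_cfg = dict(
+# ...     type='ProfilerHook',
+# ...     by_epoch=True,
+# ...     profile_iters=1,
+# ...     activities=['cpu', 'cuda'],
+# ...     on_trace_ready=dict(type='tb_trace', dir_name='./tb_trace'),
+# ...     json_trace_path='./trace.json')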
diff --git a/annotator/uniformer/mmcv/runner/hooks/sampler_seed.py b/annotator/uniformer/mmcv/runner/hooks/sampler_seed.py
new file mode 100644
index 0000000000000000000000000000000000000000..ee0dc6bdd8df5775857028aaed5444c0f59caf80
--- /dev/null
+++ b/annotator/uniformer/mmcv/runner/hooks/sampler_seed.py
@@ -0,0 +1,20 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from .hook import HOOKS, Hook
+
+
+@HOOKS.register_module()
+class DistSamplerSeedHook(Hook):
+ """Set the epoch of the data-loading sampler during distributed training.
+
+ In distributed training, it is only useful in conjunction with
+ :obj:`EpochBasedRunner`; :obj:`IterBasedRunner` achieves the same
+ purpose with :obj:`IterLoader`.
+ """
+
+ def before_epoch(self, runner):
+ if hasattr(runner.data_loader.sampler, 'set_epoch'):
+ # in case the data loader uses `SequentialSampler` in Pytorch
+ runner.data_loader.sampler.set_epoch(runner.epoch)
+ elif hasattr(runner.data_loader.batch_sampler.sampler, 'set_epoch'):
+ # the batch sampler in PyTorch wraps the sampler as one of its attributes.
+ runner.data_loader.batch_sampler.sampler.set_epoch(runner.epoch)
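+
+# Illustrative sketch (not part of the original file): the hook simply forwards
+# `runner.epoch` to the sampler, which is what makes shuffling differ between
+# epochs (`dataset` and `epoch` below are placeholders):
+# >>> sampler = torch.utils.data.DistributedSampler(dataset, shuffle=True)
+# >>> sampler.set_epoch(epoch)  # the same call `before_epoch` performs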
diff --git a/annotator/uniformer/mmcv/runner/hooks/sync_buffer.py b/annotator/uniformer/mmcv/runner/hooks/sync_buffer.py
new file mode 100644
index 0000000000000000000000000000000000000000..6376b7ff894280cb2782243b25e8973650591577
--- /dev/null
+++ b/annotator/uniformer/mmcv/runner/hooks/sync_buffer.py
@@ -0,0 +1,22 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from ..dist_utils import allreduce_params
+from .hook import HOOKS, Hook
+
+
+@HOOKS.register_module()
+class SyncBuffersHook(Hook):
+ """Synchronize model buffers such as running_mean and running_var in BN at
+ the end of each epoch.
+
+ Args:
+ distributed (bool): Whether distributed training is used. It is
+ effective only for distributed training. Defaults to True.
+ """
+
+ def __init__(self, distributed=True):
+ self.distributed = distributed
+
+ def after_epoch(self, runner):
+ """All-reduce model buffers at the end of each epoch."""
+ if self.distributed:
+ allreduce_params(runner.model.buffers())
diff --git a/annotator/uniformer/mmcv/runner/iter_based_runner.py b/annotator/uniformer/mmcv/runner/iter_based_runner.py
new file mode 100644
index 0000000000000000000000000000000000000000..1df4de8c0285669dec9b014dfd1f3dd1600f0831
--- /dev/null
+++ b/annotator/uniformer/mmcv/runner/iter_based_runner.py
@@ -0,0 +1,273 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import os.path as osp
+import platform
+import shutil
+import time
+import warnings
+
+import torch
+from torch.optim import Optimizer
+
+import annotator.uniformer.mmcv as mmcv
+from .base_runner import BaseRunner
+from .builder import RUNNERS
+from .checkpoint import save_checkpoint
+from .hooks import IterTimerHook
+from .utils import get_host_info
+
+
+class IterLoader:
+
+ def __init__(self, dataloader):
+ self._dataloader = dataloader
+ self.iter_loader = iter(self._dataloader)
+ self._epoch = 0
+
+ @property
+ def epoch(self):
+ return self._epoch
+
+ def __next__(self):
+ try:
+ data = next(self.iter_loader)
+ except StopIteration:
+ self._epoch += 1
+ if hasattr(self._dataloader.sampler, 'set_epoch'):
+ self._dataloader.sampler.set_epoch(self._epoch)
+ time.sleep(2) # Prevent possible deadlock during epoch transition
+ self.iter_loader = iter(self._dataloader)
+ data = next(self.iter_loader)
+
+ return data
+
+ def __len__(self):
+ return len(self._dataloader)
+
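+# Illustrative sketch (not part of the original file): `IterLoader` cycles a
+# finite DataLoader indefinitely, incrementing `epoch` (and reseeding the
+# sampler, if it supports `set_epoch`) each time the iterator is exhausted.
+# >>> from torch.utils.data import DataLoader, TensorDataset
+# >>> loader = IterLoader(DataLoader(TensorDataset(torch.arange(4)), batch_size=2))
+# >>> batches = [next(loader) for _ in range(5)]  # 2 batches/epoch, restarts twice
+# >>> loader.epoch
+# 2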
+
+@RUNNERS.register_module()
+class IterBasedRunner(BaseRunner):
+ """Iteration-based Runner.
+
+ This runner trains models iteration by iteration.
+ """
+
+ def train(self, data_loader, **kwargs):
+ self.model.train()
+ self.mode = 'train'
+ self.data_loader = data_loader
+ self._epoch = data_loader.epoch
+ data_batch = next(data_loader)
+ self.call_hook('before_train_iter')
+ outputs = self.model.train_step(data_batch, self.optimizer, **kwargs)
+ if not isinstance(outputs, dict):
+ raise TypeError('model.train_step() must return a dict')
+ if 'log_vars' in outputs:
+ self.log_buffer.update(outputs['log_vars'], outputs['num_samples'])
+ self.outputs = outputs
+ self.call_hook('after_train_iter')
+ self._inner_iter += 1
+ self._iter += 1
+
+ @torch.no_grad()
+ def val(self, data_loader, **kwargs):
+ self.model.eval()
+ self.mode = 'val'
+ self.data_loader = data_loader
+ data_batch = next(data_loader)
+ self.call_hook('before_val_iter')
+ outputs = self.model.val_step(data_batch, **kwargs)
+ if not isinstance(outputs, dict):
+ raise TypeError('model.val_step() must return a dict')
+ if 'log_vars' in outputs:
+ self.log_buffer.update(outputs['log_vars'], outputs['num_samples'])
+ self.outputs = outputs
+ self.call_hook('after_val_iter')
+ self._inner_iter += 1
+
+ def run(self, data_loaders, workflow, max_iters=None, **kwargs):
+ """Start running.
+
+ Args:
+ data_loaders (list[:obj:`DataLoader`]): Dataloaders for training
+ and validation.
+ workflow (list[tuple]): A list of (phase, iters) to specify the
+ running order and iterations. E.g., [('train', 10000),
+ ('val', 1000)] means running 10000 iterations for training and
+ 1000 iterations for validation, iteratively.
+ """
+ assert isinstance(data_loaders, list)
+ assert mmcv.is_list_of(workflow, tuple)
+ assert len(data_loaders) == len(workflow)
+ if max_iters is not None:
+ warnings.warn(
+ 'setting max_iters in run is deprecated, '
+ 'please set max_iters in runner_config', DeprecationWarning)
+ self._max_iters = max_iters
+ assert self._max_iters is not None, (
+ 'max_iters must be specified during instantiation')
+
+ work_dir = self.work_dir if self.work_dir is not None else 'NONE'
+ self.logger.info('Start running, host: %s, work_dir: %s',
+ get_host_info(), work_dir)
+ self.logger.info('Hooks will be executed in the following order:\n%s',
+ self.get_hook_info())
+ self.logger.info('workflow: %s, max: %d iters', workflow,
+ self._max_iters)
+ self.call_hook('before_run')
+
+ iter_loaders = [IterLoader(x) for x in data_loaders]
+
+ self.call_hook('before_epoch')
+
+ while self.iter < self._max_iters:
+ for i, flow in enumerate(workflow):
+ self._inner_iter = 0
+ mode, iters = flow
+ if not isinstance(mode, str) or not hasattr(self, mode):
+ raise ValueError(
+ 'runner has no method named "{}" to run a workflow'.
+ format(mode))
+ iter_runner = getattr(self, mode)
+ for _ in range(iters):
+ if mode == 'train' and self.iter >= self._max_iters:
+ break
+ iter_runner(iter_loaders[i], **kwargs)
+
+ time.sleep(1) # wait for some hooks like loggers to finish
+ self.call_hook('after_epoch')
+ self.call_hook('after_run')
+
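+ # Illustrative sketch (not part of the original file): a hypothetical call,
+ # assuming the runner was built with ``max_iters=90000`` and two loaders:
+ # >>> runner.run([train_loader, val_loader], [('train', 1000), ('val', 50)])
+ # i.e. alternate 1000 training iterations with 50 validation iterations
+ # until ``self.iter`` reaches ``max_iters``.
+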
+ def resume(self,
+ checkpoint,
+ resume_optimizer=True,
+ map_location='default'):
+ """Resume model from checkpoint.
+
+ Args:
+ checkpoint (str): Checkpoint to resume from.
+ resume_optimizer (bool, optional): Whether to resume the optimizer(s)
+ if the checkpoint file includes optimizer(s). Defaults to True.
+ map_location (str, optional): Same as :func:`torch.load`.
+ Defaults to 'default'.
+ """
+ if map_location == 'default':
+ device_id = torch.cuda.current_device()
+ checkpoint = self.load_checkpoint(
+ checkpoint,
+ map_location=lambda storage, loc: storage.cuda(device_id))
+ else:
+ checkpoint = self.load_checkpoint(
+ checkpoint, map_location=map_location)
+
+ self._epoch = checkpoint['meta']['epoch']
+ self._iter = checkpoint['meta']['iter']
+ self._inner_iter = checkpoint['meta']['iter']
+ if 'optimizer' in checkpoint and resume_optimizer:
+ if isinstance(self.optimizer, Optimizer):
+ self.optimizer.load_state_dict(checkpoint['optimizer'])
+ elif isinstance(self.optimizer, dict):
+ for k in self.optimizer.keys():
+ self.optimizer[k].load_state_dict(
+ checkpoint['optimizer'][k])
+ else:
+ raise TypeError(
+ 'Optimizer should be dict or torch.optim.Optimizer '
+ f'but got {type(self.optimizer)}')
+
+ self.logger.info(f'resumed from epoch: {self.epoch}, iter {self.iter}')
+
+ def save_checkpoint(self,
+ out_dir,
+ filename_tmpl='iter_{}.pth',
+ meta=None,
+ save_optimizer=True,
+ create_symlink=True):
+ """Save checkpoint to file.
+
+ Args:
+ out_dir (str): Directory to save checkpoint files.
+ filename_tmpl (str, optional): Checkpoint file template.
+ Defaults to 'iter_{}.pth'.
+ meta (dict, optional): Metadata to be saved in checkpoint.
+ Defaults to None.
+ save_optimizer (bool, optional): Whether to save the optimizer.
+ Defaults to True.
+ create_symlink (bool, optional): Whether to create a symlink to the
+ latest checkpoint file. Defaults to True.
+ """
+ if meta is None:
+ meta = {}
+ elif not isinstance(meta, dict):
+ raise TypeError(
+ f'meta should be a dict or None, but got {type(meta)}')
+ if self.meta is not None:
+ meta.update(self.meta)
+ # Note: meta.update(self.meta) should be done before
+ # meta.update(epoch=self.epoch + 1, iter=self.iter) otherwise
+ # there will be problems with resumed checkpoints.
+ # More details in https://github.com/open-mmlab/mmcv/pull/1108
+ meta.update(epoch=self.epoch + 1, iter=self.iter)
+
+ filename = filename_tmpl.format(self.iter + 1)
+ filepath = osp.join(out_dir, filename)
+ optimizer = self.optimizer if save_optimizer else None
+ save_checkpoint(self.model, filepath, optimizer=optimizer, meta=meta)
+ # in some environments, `os.symlink` is not supported, you may need to
+ # set `create_symlink` to False
+ if create_symlink:
+ dst_file = osp.join(out_dir, 'latest.pth')
+ if platform.system() != 'Windows':
+ mmcv.symlink(filename, dst_file)
+ else:
+ shutil.copy(filepath, dst_file)
+
+ def register_training_hooks(self,
+ lr_config,
+ optimizer_config=None,
+ checkpoint_config=None,
+ log_config=None,
+ momentum_config=None,
+ custom_hooks_config=None):
+ """Register default hooks for iter-based training.
+
+ Checkpoint hook, optimizer stepper hook and logger hooks will be set to
+ `by_epoch=False` by default.
+
+ Default hooks include:
+
+ +----------------------+-------------------------+
+ | Hooks | Priority |
+ +======================+=========================+
+ | LrUpdaterHook | VERY_HIGH (10) |
+ +----------------------+-------------------------+
+ | MomentumUpdaterHook | HIGH (30) |
+ +----------------------+-------------------------+
+ | OptimizerStepperHook | ABOVE_NORMAL (40) |
+ +----------------------+-------------------------+
+ | CheckpointSaverHook | NORMAL (50) |
+ +----------------------+-------------------------+
+ | IterTimerHook | LOW (70) |
+ +----------------------+-------------------------+
+ | LoggerHook(s) | VERY_LOW (90) |
+ +----------------------+-------------------------+
+ | CustomHook(s) | defaults to NORMAL (50) |
+ +----------------------+-------------------------+
+
+ If custom hooks have the same priority as default hooks, the custom
+ hooks will be triggered after the default hooks.
+ """
+ if checkpoint_config is not None:
+ checkpoint_config.setdefault('by_epoch', False)
+ if lr_config is not None:
+ lr_config.setdefault('by_epoch', False)
+ if log_config is not None:
+ for info in log_config['hooks']:
+ info.setdefault('by_epoch', False)
+ super(IterBasedRunner, self).register_training_hooks(
+ lr_config=lr_config,
+ momentum_config=momentum_config,
+ optimizer_config=optimizer_config,
+ checkpoint_config=checkpoint_config,
+ log_config=log_config,
+ timer_config=IterTimerHook(),
+ custom_hooks_config=custom_hooks_config)
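+
+# Illustrative sketch (not part of the original file): hypothetical configs as
+# they might be passed to `register_training_hooks`; `by_epoch=False` is filled
+# in automatically for the lr, checkpoint and logger configs above.
+# >>> runner.register_training_hooks(
+# ...     lr_config=dict(policy='step', step=[60000, 80000]),
+# ...     optimizer_config=dict(grad_clip=None),
+# ...     checkpoint_config=dict(interval=10000),
+# ...     log_config=dict(interval=50, hooks=[dict(type='TextLoggerHook')]))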
diff --git a/annotator/uniformer/mmcv/runner/log_buffer.py b/annotator/uniformer/mmcv/runner/log_buffer.py
new file mode 100644
index 0000000000000000000000000000000000000000..d949e2941c5400088c7cd8a1dc893d8b233ae785
--- /dev/null
+++ b/annotator/uniformer/mmcv/runner/log_buffer.py
@@ -0,0 +1,41 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from collections import OrderedDict
+
+import numpy as np
+
+
+class LogBuffer:
+
+ def __init__(self):
+ self.val_history = OrderedDict()
+ self.n_history = OrderedDict()
+ self.output = OrderedDict()
+ self.ready = False
+
+ def clear(self):
+ self.val_history.clear()
+ self.n_history.clear()
+ self.clear_output()
+
+ def clear_output(self):
+ self.output.clear()
+ self.ready = False
+
+ def update(self, vars, count=1):
+ assert isinstance(vars, dict)
+ for key, var in vars.items():
+ if key not in self.val_history:
+ self.val_history[key] = []
+ self.n_history[key] = []
+ self.val_history[key].append(var)
+ self.n_history[key].append(count)
+
+ def average(self, n=0):
+ """Average latest n values or all values."""
+ assert n >= 0
+ for key in self.val_history:
+ values = np.array(self.val_history[key][-n:])
+ nums = np.array(self.n_history[key][-n:])
+ avg = np.sum(values * nums) / np.sum(nums)
+ self.output[key] = avg
+ self.ready = True
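+
+# Illustrative sketch (not part of the original file): accumulate weighted
+# values, then average the latest ``n`` entries (``n=0`` averages everything).
+# >>> buf = LogBuffer()
+# >>> buf.update(dict(loss=0.75), count=4)  # e.g. a batch of 4 samples
+# >>> buf.update(dict(loss=0.25), count=4)
+# >>> buf.average()
+# >>> buf.output['loss']
+# 0.5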
diff --git a/annotator/uniformer/mmcv/runner/optimizer/__init__.py b/annotator/uniformer/mmcv/runner/optimizer/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..53c34d0470992cbc374f29681fdd00dc0e57968d
--- /dev/null
+++ b/annotator/uniformer/mmcv/runner/optimizer/__init__.py
@@ -0,0 +1,9 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from .builder import (OPTIMIZER_BUILDERS, OPTIMIZERS, build_optimizer,
+ build_optimizer_constructor)
+from .default_constructor import DefaultOptimizerConstructor
+
+__all__ = [
+ 'OPTIMIZER_BUILDERS', 'OPTIMIZERS', 'DefaultOptimizerConstructor',
+ 'build_optimizer', 'build_optimizer_constructor'
+]
diff --git a/annotator/uniformer/mmcv/runner/optimizer/__pycache__/__init__.cpython-310.pyc b/annotator/uniformer/mmcv/runner/optimizer/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..be116b37b045c2e375a3b98e235560f1265f4fe3
Binary files /dev/null and b/annotator/uniformer/mmcv/runner/optimizer/__pycache__/__init__.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/runner/optimizer/__pycache__/__init__.cpython-38.pyc b/annotator/uniformer/mmcv/runner/optimizer/__pycache__/__init__.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cb308791760904d1bbc06ee2f7ad305e4e8c968d
Binary files /dev/null and b/annotator/uniformer/mmcv/runner/optimizer/__pycache__/__init__.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/runner/optimizer/__pycache__/builder.cpython-310.pyc b/annotator/uniformer/mmcv/runner/optimizer/__pycache__/builder.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..dc7e8868f11e91fe6fb76abd021e81875ecb977c
Binary files /dev/null and b/annotator/uniformer/mmcv/runner/optimizer/__pycache__/builder.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/runner/optimizer/__pycache__/builder.cpython-38.pyc b/annotator/uniformer/mmcv/runner/optimizer/__pycache__/builder.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f88ba29b0a5da8d2a7b502b455fe4dd31b7bea3c
Binary files /dev/null and b/annotator/uniformer/mmcv/runner/optimizer/__pycache__/builder.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/runner/optimizer/__pycache__/default_constructor.cpython-310.pyc b/annotator/uniformer/mmcv/runner/optimizer/__pycache__/default_constructor.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9ce6fa919ce18d411c6e18476c482e67c4d7f253
Binary files /dev/null and b/annotator/uniformer/mmcv/runner/optimizer/__pycache__/default_constructor.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/runner/optimizer/__pycache__/default_constructor.cpython-38.pyc b/annotator/uniformer/mmcv/runner/optimizer/__pycache__/default_constructor.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bb9fb38d745bf0603f8fc0dc7d69e7f47cc1e4bc
Binary files /dev/null and b/annotator/uniformer/mmcv/runner/optimizer/__pycache__/default_constructor.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/runner/optimizer/builder.py b/annotator/uniformer/mmcv/runner/optimizer/builder.py
new file mode 100644
index 0000000000000000000000000000000000000000..f9234eed8f1f186d9d8dfda34562157ee39bdb3a
--- /dev/null
+++ b/annotator/uniformer/mmcv/runner/optimizer/builder.py
@@ -0,0 +1,44 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import copy
+import inspect
+
+import torch
+
+from ...utils import Registry, build_from_cfg
+
+OPTIMIZERS = Registry('optimizer')
+OPTIMIZER_BUILDERS = Registry('optimizer builder')
+
+
+def register_torch_optimizers():
+ torch_optimizers = []
+ for module_name in dir(torch.optim):
+ if module_name.startswith('__'):
+ continue
+ _optim = getattr(torch.optim, module_name)
+ if inspect.isclass(_optim) and issubclass(_optim,
+ torch.optim.Optimizer):
+ OPTIMIZERS.register_module()(_optim)
+ torch_optimizers.append(module_name)
+ return torch_optimizers
+
+
+TORCH_OPTIMIZERS = register_torch_optimizers()
+
+
+def build_optimizer_constructor(cfg):
+ return build_from_cfg(cfg, OPTIMIZER_BUILDERS)
+
+
+def build_optimizer(model, cfg):
+ optimizer_cfg = copy.deepcopy(cfg)
+ constructor_type = optimizer_cfg.pop('constructor',
+ 'DefaultOptimizerConstructor')
+ paramwise_cfg = optimizer_cfg.pop('paramwise_cfg', None)
+ optim_constructor = build_optimizer_constructor(
+ dict(
+ type=constructor_type,
+ optimizer_cfg=optimizer_cfg,
+ paramwise_cfg=paramwise_cfg))
+ optimizer = optim_constructor(model)
+ return optimizer
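+
+# Illustrative sketch (not part of the original file): every optimizer class in
+# `torch.optim` is registered by `register_torch_optimizers`, so a plain config
+# is enough (the default constructor is used unless `constructor` is set).
+# >>> model = torch.nn.Linear(2, 2)
+# >>> optimizer = build_optimizer(
+# ...     model, dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001))
+# >>> isinstance(optimizer, torch.optim.SGD)
+# True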
diff --git a/annotator/uniformer/mmcv/runner/optimizer/default_constructor.py b/annotator/uniformer/mmcv/runner/optimizer/default_constructor.py
new file mode 100644
index 0000000000000000000000000000000000000000..2c0da3503b75441738efe38d70352b55a210a34a
--- /dev/null
+++ b/annotator/uniformer/mmcv/runner/optimizer/default_constructor.py
@@ -0,0 +1,249 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import warnings
+
+import torch
+from torch.nn import GroupNorm, LayerNorm
+
+from annotator.uniformer.mmcv.utils import _BatchNorm, _InstanceNorm, build_from_cfg, is_list_of
+from annotator.uniformer.mmcv.utils.ext_loader import check_ops_exist
+from .builder import OPTIMIZER_BUILDERS, OPTIMIZERS
+
+
+@OPTIMIZER_BUILDERS.register_module()
+class DefaultOptimizerConstructor:
+ """Default constructor for optimizers.
+
+ By default each parameter shares the same optimizer settings, and we
+ provide an argument ``paramwise_cfg`` to specify parameter-wise settings.
+ It is a dict and may contain the following fields:
+
+ - ``custom_keys`` (dict): Specifies parameter-wise settings by keys. If
+ one of the keys in ``custom_keys`` is a substring of the name of one
+ parameter, then the setting of the parameter will be specified by
+ ``custom_keys[key]`` and other settings like ``bias_lr_mult`` etc. will
+ be ignored. It should be noted that the aforementioned ``key`` is the
+ longest key that is a substring of the name of the parameter. If there
+ are multiple matched keys with the same length, then the key with the
+ lower alphabetical order will be chosen.
+ ``custom_keys[key]`` should be a dict and may contain fields ``lr_mult``
+ and ``decay_mult``. See Example 2 below.
+ - ``bias_lr_mult`` (float): It will be multiplied to the learning
+ rate for all bias parameters (except for those in normalization
+ layers and offset layers of DCN).
+ - ``bias_decay_mult`` (float): It will be multiplied to the weight
+ decay for all bias parameters (except for those in
+ normalization layers, depthwise conv layers, offset layers of DCN).
+ - ``norm_decay_mult`` (float): It will be multiplied to the weight
+ decay for all weight and bias parameters of normalization
+ layers.
+ - ``dwconv_decay_mult`` (float): It will be multiplied to the weight
+ decay for all weight and bias parameters of depthwise conv
+ layers.
+ - ``dcn_offset_lr_mult`` (float): It will be multiplied to the learning
+ rate for parameters of offset layer in the deformable convs
+ of a model.
+ - ``bypass_duplicate`` (bool): If True, duplicate parameters will not be
+ added to the optimizer. Default: False.
+
+ Note:
+ 1. If the option ``dcn_offset_lr_mult`` is used, the constructor will
+ override the effect of ``bias_lr_mult`` in the bias of offset
+ layer. So be careful when using both ``bias_lr_mult`` and
+ ``dcn_offset_lr_mult``. If you wish to apply both of them to the
+ offset layer in deformable convs, set ``dcn_offset_lr_mult``
+ to the original ``dcn_offset_lr_mult`` * ``bias_lr_mult``.
+ 2. If the option ``dcn_offset_lr_mult`` is used, the constructor will
+ apply it to all the DCN layers in the model. So be careful when
+ the model contains multiple DCN layers in places other than
+ backbone.
+
+ Args:
+ model (:obj:`nn.Module`): The model with parameters to be optimized.
+ optimizer_cfg (dict): The config dict of the optimizer.
+ Positional fields are
+
+ - `type`: class name of the optimizer.
+
+ Optional fields are
+
+ - any arguments of the corresponding optimizer type, e.g.,
+ lr, weight_decay, momentum, etc.
+ paramwise_cfg (dict, optional): Parameter-wise options.
+
+ Example 1:
+ >>> model = torch.nn.modules.Conv1d(1, 1, 1)
+ >>> optimizer_cfg = dict(type='SGD', lr=0.01, momentum=0.9,
+ >>> weight_decay=0.0001)
+ >>> paramwise_cfg = dict(norm_decay_mult=0.)
+ >>> optim_builder = DefaultOptimizerConstructor(
+ >>> optimizer_cfg, paramwise_cfg)
+ >>> optimizer = optim_builder(model)
+
+ Example 2:
+ >>> # assume model have attribute model.backbone and model.cls_head
+ >>> optimizer_cfg = dict(type='SGD', lr=0.01, weight_decay=0.95)
+ >>> paramwise_cfg = dict(custom_keys={
+ '.backbone': dict(lr_mult=0.1, decay_mult=0.9)})
+ >>> optim_builder = DefaultOptimizerConstructor(
+ >>> optimizer_cfg, paramwise_cfg)
+ >>> optimizer = optim_builder(model)
+ >>> # Then the `lr` and `weight_decay` for model.backbone is
+ >>> # (0.01 * 0.1, 0.95 * 0.9). `lr` and `weight_decay` for
+ >>> # model.cls_head is (0.01, 0.95).
+ """
+
+ def __init__(self, optimizer_cfg, paramwise_cfg=None):
+ if not isinstance(optimizer_cfg, dict):
+ raise TypeError('optimizer_cfg should be a dict, '
+ f'but got {type(optimizer_cfg)}')
+ self.optimizer_cfg = optimizer_cfg
+ self.paramwise_cfg = {} if paramwise_cfg is None else paramwise_cfg
+ self.base_lr = optimizer_cfg.get('lr', None)
+ self.base_wd = optimizer_cfg.get('weight_decay', None)
+ self._validate_cfg()
+
+ def _validate_cfg(self):
+ if not isinstance(self.paramwise_cfg, dict):
+ raise TypeError('paramwise_cfg should be None or a dict, '
+ f'but got {type(self.paramwise_cfg)}')
+
+ if 'custom_keys' in self.paramwise_cfg:
+ if not isinstance(self.paramwise_cfg['custom_keys'], dict):
+ raise TypeError(
+ 'If specified, custom_keys must be a dict, '
+ f'but got {type(self.paramwise_cfg["custom_keys"])}')
+ if self.base_wd is None:
+ for key in self.paramwise_cfg['custom_keys']:
+ if 'decay_mult' in self.paramwise_cfg['custom_keys'][key]:
+ raise ValueError('base_wd should not be None')
+
+ # get base lr and weight decay
+ # weight_decay must be explicitly specified if mult is specified
+ if ('bias_decay_mult' in self.paramwise_cfg
+ or 'norm_decay_mult' in self.paramwise_cfg
+ or 'dwconv_decay_mult' in self.paramwise_cfg):
+ if self.base_wd is None:
+ raise ValueError('base_wd should not be None')
+
+ def _is_in(self, param_group, param_group_list):
+ assert is_list_of(param_group_list, dict)
+ param = set(param_group['params'])
+ param_set = set()
+ for group in param_group_list:
+ param_set.update(set(group['params']))
+
+ return not param.isdisjoint(param_set)
+
+ def add_params(self, params, module, prefix='', is_dcn_module=None):
+ """Add all parameters of module to the params list.
+
+ The parameters of the given module will be added to the list of param
+ groups, with specific rules defined by paramwise_cfg.
+
+ Args:
+ params (list[dict]): A list of param groups, it will be modified
+ in place.
+ module (nn.Module): The module to be added.
+ prefix (str): The prefix of the module
+ is_dcn_module (int|float|None): If the current module is a
+ submodule of DCN, `is_dcn_module` will be passed to
+ control conv_offset layer's learning rate. Defaults to None.
+ """
+ # get param-wise options
+ custom_keys = self.paramwise_cfg.get('custom_keys', {})
+ # sort keys alphabetically first, then by descending length so the
+ # longest matching key takes precedence
+ sorted_keys = sorted(sorted(custom_keys.keys()), key=len, reverse=True)
+
+ bias_lr_mult = self.paramwise_cfg.get('bias_lr_mult', 1.)
+ bias_decay_mult = self.paramwise_cfg.get('bias_decay_mult', 1.)
+ norm_decay_mult = self.paramwise_cfg.get('norm_decay_mult', 1.)
+ dwconv_decay_mult = self.paramwise_cfg.get('dwconv_decay_mult', 1.)
+ bypass_duplicate = self.paramwise_cfg.get('bypass_duplicate', False)
+ dcn_offset_lr_mult = self.paramwise_cfg.get('dcn_offset_lr_mult', 1.)
+
+ # special rules for norm layers and depth-wise conv layers
+ is_norm = isinstance(module,
+ (_BatchNorm, _InstanceNorm, GroupNorm, LayerNorm))
+ is_dwconv = (
+ isinstance(module, torch.nn.Conv2d)
+ and module.in_channels == module.groups)
+
+ for name, param in module.named_parameters(recurse=False):
+ param_group = {'params': [param]}
+ if not param.requires_grad:
+ params.append(param_group)
+ continue
+ if bypass_duplicate and self._is_in(param_group, params):
+ warnings.warn(f'{prefix} is duplicate. It is skipped since '
+ f'bypass_duplicate={bypass_duplicate}')
+ continue
+ # if the parameter matches one of the custom keys, ignore other rules
+ is_custom = False
+ for key in sorted_keys:
+ if key in f'{prefix}.{name}':
+ is_custom = True
+ lr_mult = custom_keys[key].get('lr_mult', 1.)
+ param_group['lr'] = self.base_lr * lr_mult
+ if self.base_wd is not None:
+ decay_mult = custom_keys[key].get('decay_mult', 1.)
+ param_group['weight_decay'] = self.base_wd * decay_mult
+ break
+
+ if not is_custom:
+ # bias_lr_mult affects all bias parameters
+ # except for norm.bias and dcn.conv_offset.bias
+ if name == 'bias' and not (is_norm or is_dcn_module):
+ param_group['lr'] = self.base_lr * bias_lr_mult
+
+ if (prefix.find('conv_offset') != -1 and is_dcn_module
+ and isinstance(module, torch.nn.Conv2d)):
+ # deal with both dcn_offset's bias & weight
+ param_group['lr'] = self.base_lr * dcn_offset_lr_mult
+
+ # apply weight decay policies
+ if self.base_wd is not None:
+ # norm decay
+ if is_norm:
+ param_group[
+ 'weight_decay'] = self.base_wd * norm_decay_mult
+ # depth-wise conv
+ elif is_dwconv:
+ param_group[
+ 'weight_decay'] = self.base_wd * dwconv_decay_mult
+ # bias lr and decay
+ elif name == 'bias' and not is_dcn_module:
+ # TODO: the current bias_decay_mult will also have an effect on DCN
+ param_group[
+ 'weight_decay'] = self.base_wd * bias_decay_mult
+ params.append(param_group)
+
+ if check_ops_exist():
+ from annotator.uniformer.mmcv.ops import DeformConv2d, ModulatedDeformConv2d
+ is_dcn_module = isinstance(module,
+ (DeformConv2d, ModulatedDeformConv2d))
+ else:
+ is_dcn_module = False
+ for child_name, child_mod in module.named_children():
+ child_prefix = f'{prefix}.{child_name}' if prefix else child_name
+ self.add_params(
+ params,
+ child_mod,
+ prefix=child_prefix,
+ is_dcn_module=is_dcn_module)
+
+ def __call__(self, model):
+ if hasattr(model, 'module'):
+ model = model.module
+
+ optimizer_cfg = self.optimizer_cfg.copy()
+ # if no paramwise option is specified, just use the global setting
+ if not self.paramwise_cfg:
+ optimizer_cfg['params'] = model.parameters()
+ return build_from_cfg(optimizer_cfg, OPTIMIZERS)
+
+ # set param-wise lr and weight decay recursively
+ params = []
+ self.add_params(params, model)
+ optimizer_cfg['params'] = params
+
+ return build_from_cfg(optimizer_cfg, OPTIMIZERS)
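+
+# Illustrative sketch (not part of the original file): paramwise options yield
+# one param group per parameter, e.g. a 10x smaller lr for the conv bias and no
+# weight decay on the BatchNorm parameters.
+# >>> model = torch.nn.Sequential(torch.nn.Conv2d(3, 8, 3), torch.nn.BatchNorm2d(8))
+# >>> constructor = DefaultOptimizerConstructor(
+# ...     optimizer_cfg=dict(type='SGD', lr=0.1, weight_decay=1e-4),
+# ...     paramwise_cfg=dict(bias_lr_mult=0.1, norm_decay_mult=0.))
+# >>> optimizer = constructor(model)  # conv bias lr=0.01, BN weight_decay=0.0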
diff --git a/annotator/uniformer/mmcv/runner/priority.py b/annotator/uniformer/mmcv/runner/priority.py
new file mode 100644
index 0000000000000000000000000000000000000000..64cc4e3a05f8d5b89ab6eb32461e6e80f1d62e67
--- /dev/null
+++ b/annotator/uniformer/mmcv/runner/priority.py
@@ -0,0 +1,60 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from enum import Enum
+
+
+class Priority(Enum):
+ """Hook priority levels.
+
+ +--------------+------------+
+ | Level | Value |
+ +==============+============+
+ | HIGHEST | 0 |
+ +--------------+------------+
+ | VERY_HIGH | 10 |
+ +--------------+------------+
+ | HIGH | 30 |
+ +--------------+------------+
+ | ABOVE_NORMAL | 40 |
+ +--------------+------------+
+ | NORMAL | 50 |
+ +--------------+------------+
+ | BELOW_NORMAL | 60 |
+ +--------------+------------+
+ | LOW | 70 |
+ +--------------+------------+
+ | VERY_LOW | 90 |
+ +--------------+------------+
+ | LOWEST | 100 |
+ +--------------+------------+
+ """
+
+ HIGHEST = 0
+ VERY_HIGH = 10
+ HIGH = 30
+ ABOVE_NORMAL = 40
+ NORMAL = 50
+ BELOW_NORMAL = 60
+ LOW = 70
+ VERY_LOW = 90
+ LOWEST = 100
+
+
+def get_priority(priority):
+ """Get priority value.
+
+ Args:
+ priority (int or str or :obj:`Priority`): Priority.
+
+ Returns:
+ int: The priority value.
+ """
+ if isinstance(priority, int):
+ if priority < 0 or priority > 100:
+ raise ValueError('priority must be between 0 and 100')
+ return priority
+ elif isinstance(priority, Priority):
+ return priority.value
+ elif isinstance(priority, str):
+ return Priority[priority.upper()].value
+ else:
+ raise TypeError('priority must be an integer or Priority enum value')
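+
+# Illustrative sketch (not part of the original file): all three accepted forms
+# resolve to the same kind of integer value.
+# >>> get_priority('NORMAL')
+# 50
+# >>> get_priority(Priority.HIGH)
+# 30
+# >>> get_priority(70)
+# 70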
diff --git a/annotator/uniformer/mmcv/runner/utils.py b/annotator/uniformer/mmcv/runner/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..c5befb8e56ece50b5fecfd007b26f8a29124c0bd
--- /dev/null
+++ b/annotator/uniformer/mmcv/runner/utils.py
@@ -0,0 +1,93 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import os
+import random
+import sys
+import time
+import warnings
+from getpass import getuser
+from socket import gethostname
+
+import numpy as np
+import torch
+
+import annotator.uniformer.mmcv as mmcv
+
+
+def get_host_info():
+ """Get hostname and username.
+
+ Return an empty string if an exception is raised, e.g. ``getpass.getuser()``
+ may fail inside a Docker container.
+ """
+ host = ''
+ try:
+ host = f'{getuser()}@{gethostname()}'
+ except Exception as e:
+ warnings.warn(f'Host or user not found: {str(e)}')
+ finally:
+ return host
+
+
+def get_time_str():
+ return time.strftime('%Y%m%d_%H%M%S', time.localtime())
+
+
+def obj_from_dict(info, parent=None, default_args=None):
+ """Initialize an object from dict.
+
+ The dict must contain the key "type", which indicates the object type; it
+ can be either a string or a type, such as "list" or ``list``. Remaining
+ fields are treated as the arguments for constructing the object.
+
+ Args:
+ info (dict): Object types and arguments.
+ parent (:class:`module`): Module which may contain the expected object
+ classes.
+ default_args (dict, optional): Default arguments for initializing the
+ object.
+
+ Returns:
+ any type: Object built from the dict.
+ """
+ assert isinstance(info, dict) and 'type' in info
+ assert isinstance(default_args, dict) or default_args is None
+ args = info.copy()
+ obj_type = args.pop('type')
+ if mmcv.is_str(obj_type):
+ if parent is not None:
+ obj_type = getattr(parent, obj_type)
+ else:
+ obj_type = sys.modules[obj_type]
+ elif not isinstance(obj_type, type):
+ raise TypeError('type must be a str or valid type, but '
+ f'got {type(obj_type)}')
+ if default_args is not None:
+ for name, value in default_args.items():
+ args.setdefault(name, value)
+ return obj_type(**args)
+
+
+def set_random_seed(seed, deterministic=False, use_rank_shift=False):
+ """Set random seed.
+
+ Args:
+ seed (int): Seed to be used.
+ deterministic (bool): Whether to set the deterministic option for
+ CUDNN backend, i.e., set `torch.backends.cudnn.deterministic`
+ to True and `torch.backends.cudnn.benchmark` to False.
+ Default: False.
+ use_rank_shift (bool): Whether to add the rank number to the random
+ seed so that different processes use different seeds. Default: False.
+ """
+ if use_rank_shift:
+ rank, _ = mmcv.runner.get_dist_info()
+ seed += rank
+ random.seed(seed)
+ np.random.seed(seed)
+ torch.manual_seed(seed)
+ torch.cuda.manual_seed(seed)
+ torch.cuda.manual_seed_all(seed)
+ os.environ['PYTHONHASHSEED'] = str(seed)
+ if deterministic:
+ torch.backends.cudnn.deterministic = True
+ torch.backends.cudnn.benchmark = False
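+
+# Illustrative sketch (not part of the original file): `obj_from_dict` builds an
+# attribute of a parent module from a config dict (`model` is a placeholder):
+# >>> obj_from_dict(dict(type='SGD', lr=0.01), parent=torch.optim,
+# ...               default_args=dict(params=model.parameters()))
+# while `set_random_seed(42, deterministic=True)` makes runs repeatable at the
+# cost of disabling cudnn benchmarking.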
diff --git a/annotator/uniformer/mmcv/utils/__init__.py b/annotator/uniformer/mmcv/utils/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..378a0068432a371af364de9d73785901c0f83383
--- /dev/null
+++ b/annotator/uniformer/mmcv/utils/__init__.py
@@ -0,0 +1,69 @@
+# flake8: noqa
+# Copyright (c) OpenMMLab. All rights reserved.
+from .config import Config, ConfigDict, DictAction
+from .misc import (check_prerequisites, concat_list, deprecated_api_warning,
+ has_method, import_modules_from_strings, is_list_of,
+ is_method_overridden, is_seq_of, is_str, is_tuple_of,
+ iter_cast, list_cast, requires_executable, requires_package,
+ slice_list, to_1tuple, to_2tuple, to_3tuple, to_4tuple,
+ to_ntuple, tuple_cast)
+from .path import (check_file_exist, fopen, is_filepath, mkdir_or_exist,
+ scandir, symlink)
+from .progressbar import (ProgressBar, track_iter_progress,
+ track_parallel_progress, track_progress)
+from .testing import (assert_attrs_equal, assert_dict_contains_subset,
+ assert_dict_has_keys, assert_is_norm_layer,
+ assert_keys_equal, assert_params_all_zeros,
+ check_python_script)
+from .timer import Timer, TimerError, check_time
+from .version_utils import digit_version, get_git_hash
+
+try:
+ import torch
+except ImportError:
+ __all__ = [
+ 'Config', 'ConfigDict', 'DictAction', 'is_str', 'iter_cast',
+ 'list_cast', 'tuple_cast', 'is_seq_of', 'is_list_of', 'is_tuple_of',
+ 'slice_list', 'concat_list', 'check_prerequisites', 'requires_package',
+ 'requires_executable', 'is_filepath', 'fopen', 'check_file_exist',
+ 'mkdir_or_exist', 'symlink', 'scandir', 'ProgressBar',
+ 'track_progress', 'track_iter_progress', 'track_parallel_progress',
+ 'Timer', 'TimerError', 'check_time', 'deprecated_api_warning',
+ 'digit_version', 'get_git_hash', 'import_modules_from_strings',
+ 'assert_dict_contains_subset', 'assert_attrs_equal',
+ 'assert_dict_has_keys', 'assert_keys_equal', 'check_python_script',
+ 'to_1tuple', 'to_2tuple', 'to_3tuple', 'to_4tuple', 'to_ntuple',
+ 'is_method_overridden', 'has_method'
+ ]
+else:
+ from .env import collect_env
+ from .logging import get_logger, print_log
+ from .parrots_jit import jit, skip_no_elena
+ from .parrots_wrapper import (
+ TORCH_VERSION, BuildExtension, CppExtension, CUDAExtension, DataLoader,
+ PoolDataLoader, SyncBatchNorm, _AdaptiveAvgPoolNd, _AdaptiveMaxPoolNd,
+ _AvgPoolNd, _BatchNorm, _ConvNd, _ConvTransposeMixin, _InstanceNorm,
+ _MaxPoolNd, get_build_config, is_rocm_pytorch, _get_cuda_home)
+ from .registry import Registry, build_from_cfg
+ from .trace import is_jit_tracing
+ __all__ = [
+ 'Config', 'ConfigDict', 'DictAction', 'collect_env', 'get_logger',
+ 'print_log', 'is_str', 'iter_cast', 'list_cast', 'tuple_cast',
+ 'is_seq_of', 'is_list_of', 'is_tuple_of', 'slice_list', 'concat_list',
+ 'check_prerequisites', 'requires_package', 'requires_executable',
+ 'is_filepath', 'fopen', 'check_file_exist', 'mkdir_or_exist',
+ 'symlink', 'scandir', 'ProgressBar', 'track_progress',
+ 'track_iter_progress', 'track_parallel_progress', 'Registry',
+ 'build_from_cfg', 'Timer', 'TimerError', 'check_time', 'SyncBatchNorm',
+ '_AdaptiveAvgPoolNd', '_AdaptiveMaxPoolNd', '_AvgPoolNd', '_BatchNorm',
+ '_ConvNd', '_ConvTransposeMixin', '_InstanceNorm', '_MaxPoolNd',
+ 'get_build_config', 'BuildExtension', 'CppExtension', 'CUDAExtension',
+ 'DataLoader', 'PoolDataLoader', 'TORCH_VERSION',
+ 'deprecated_api_warning', 'digit_version', 'get_git_hash',
+ 'import_modules_from_strings', 'jit', 'skip_no_elena',
+ 'assert_dict_contains_subset', 'assert_attrs_equal',
+ 'assert_dict_has_keys', 'assert_keys_equal', 'assert_is_norm_layer',
+ 'assert_params_all_zeros', 'check_python_script',
+ 'is_method_overridden', 'is_jit_tracing', 'is_rocm_pytorch',
+ '_get_cuda_home', 'has_method'
+ ]
diff --git a/annotator/uniformer/mmcv/utils/__pycache__/__init__.cpython-310.pyc b/annotator/uniformer/mmcv/utils/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2d7924ab5f2c6c954e334e0540e63022b1399082
Binary files /dev/null and b/annotator/uniformer/mmcv/utils/__pycache__/__init__.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/utils/__pycache__/__init__.cpython-38.pyc b/annotator/uniformer/mmcv/utils/__pycache__/__init__.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..57a4d9bc19fef9df7a6a7f6237af3993a7ff32c1
Binary files /dev/null and b/annotator/uniformer/mmcv/utils/__pycache__/__init__.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/utils/__pycache__/config.cpython-310.pyc b/annotator/uniformer/mmcv/utils/__pycache__/config.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7f75f877a6d209037171c6915bee68ff7c466f21
Binary files /dev/null and b/annotator/uniformer/mmcv/utils/__pycache__/config.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/utils/__pycache__/config.cpython-38.pyc b/annotator/uniformer/mmcv/utils/__pycache__/config.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f38c01db943ea3807aaaacaadf62e5b5ebf1e295
Binary files /dev/null and b/annotator/uniformer/mmcv/utils/__pycache__/config.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/utils/__pycache__/env.cpython-310.pyc b/annotator/uniformer/mmcv/utils/__pycache__/env.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0de553346c6c9378d7f3cb5e68cf35274b6cbc6c
Binary files /dev/null and b/annotator/uniformer/mmcv/utils/__pycache__/env.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/utils/__pycache__/env.cpython-38.pyc b/annotator/uniformer/mmcv/utils/__pycache__/env.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..70a2ae37a535f15c97a2abaa0823109f2a2e1a6f
Binary files /dev/null and b/annotator/uniformer/mmcv/utils/__pycache__/env.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/utils/__pycache__/ext_loader.cpython-310.pyc b/annotator/uniformer/mmcv/utils/__pycache__/ext_loader.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..32a680767edcdf0d9d28d435cf700e0940514631
Binary files /dev/null and b/annotator/uniformer/mmcv/utils/__pycache__/ext_loader.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/utils/__pycache__/ext_loader.cpython-38.pyc b/annotator/uniformer/mmcv/utils/__pycache__/ext_loader.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..de0468c914e7377b80cef931e24760d05349300e
Binary files /dev/null and b/annotator/uniformer/mmcv/utils/__pycache__/ext_loader.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/utils/__pycache__/logging.cpython-310.pyc b/annotator/uniformer/mmcv/utils/__pycache__/logging.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3364ed73551555ab0821dde961d666d38f9a60a5
Binary files /dev/null and b/annotator/uniformer/mmcv/utils/__pycache__/logging.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/utils/__pycache__/logging.cpython-38.pyc b/annotator/uniformer/mmcv/utils/__pycache__/logging.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1774dab2ac1e6cabbbd5292ca26da0c2c19a0884
Binary files /dev/null and b/annotator/uniformer/mmcv/utils/__pycache__/logging.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/utils/__pycache__/misc.cpython-310.pyc b/annotator/uniformer/mmcv/utils/__pycache__/misc.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2ea9ac6e7a1341f85265990393772af3adf94394
Binary files /dev/null and b/annotator/uniformer/mmcv/utils/__pycache__/misc.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/utils/__pycache__/misc.cpython-38.pyc b/annotator/uniformer/mmcv/utils/__pycache__/misc.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5fb1b84e302b3feed24c162daba58d2a32f479fa
Binary files /dev/null and b/annotator/uniformer/mmcv/utils/__pycache__/misc.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/utils/__pycache__/parrots_jit.cpython-310.pyc b/annotator/uniformer/mmcv/utils/__pycache__/parrots_jit.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2a5a66d3d536a88211b4c0d54d381294bb13f59c
Binary files /dev/null and b/annotator/uniformer/mmcv/utils/__pycache__/parrots_jit.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/utils/__pycache__/parrots_jit.cpython-38.pyc b/annotator/uniformer/mmcv/utils/__pycache__/parrots_jit.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ded0e827aabc6ac0b37399e1ea14af6040590c4a
Binary files /dev/null and b/annotator/uniformer/mmcv/utils/__pycache__/parrots_jit.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/utils/__pycache__/parrots_wrapper.cpython-310.pyc b/annotator/uniformer/mmcv/utils/__pycache__/parrots_wrapper.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8d1d3c5cc99afaba7495b635d7f68735e8745d29
Binary files /dev/null and b/annotator/uniformer/mmcv/utils/__pycache__/parrots_wrapper.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/utils/__pycache__/parrots_wrapper.cpython-38.pyc b/annotator/uniformer/mmcv/utils/__pycache__/parrots_wrapper.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a5a0fd777777313a71e1f9e0f02ad507b0a4b9e9
Binary files /dev/null and b/annotator/uniformer/mmcv/utils/__pycache__/parrots_wrapper.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/utils/__pycache__/path.cpython-310.pyc b/annotator/uniformer/mmcv/utils/__pycache__/path.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..90290a361b51ac206552d52b7bf29940de7dcd5f
Binary files /dev/null and b/annotator/uniformer/mmcv/utils/__pycache__/path.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/utils/__pycache__/path.cpython-38.pyc b/annotator/uniformer/mmcv/utils/__pycache__/path.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b8cd580116d3dd0a7b20d34bea27187b611c2eaa
Binary files /dev/null and b/annotator/uniformer/mmcv/utils/__pycache__/path.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/utils/__pycache__/progressbar.cpython-310.pyc b/annotator/uniformer/mmcv/utils/__pycache__/progressbar.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..83ad734fb2be8f457585be44358b213f43e615c0
Binary files /dev/null and b/annotator/uniformer/mmcv/utils/__pycache__/progressbar.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/utils/__pycache__/progressbar.cpython-38.pyc b/annotator/uniformer/mmcv/utils/__pycache__/progressbar.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..437eb0c0da0f0b756ff0af322c12717f38b56043
Binary files /dev/null and b/annotator/uniformer/mmcv/utils/__pycache__/progressbar.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/utils/__pycache__/registry.cpython-310.pyc b/annotator/uniformer/mmcv/utils/__pycache__/registry.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f0a7843f5112b3e3a8423a954f0197d8a79c4078
Binary files /dev/null and b/annotator/uniformer/mmcv/utils/__pycache__/registry.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/utils/__pycache__/registry.cpython-38.pyc b/annotator/uniformer/mmcv/utils/__pycache__/registry.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ddab078adcce12cad56a8cfa8471219cd0470eff
Binary files /dev/null and b/annotator/uniformer/mmcv/utils/__pycache__/registry.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/utils/__pycache__/testing.cpython-310.pyc b/annotator/uniformer/mmcv/utils/__pycache__/testing.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4cbb902f0de1b7a940ff014bac6b148eff1086d0
Binary files /dev/null and b/annotator/uniformer/mmcv/utils/__pycache__/testing.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/utils/__pycache__/testing.cpython-38.pyc b/annotator/uniformer/mmcv/utils/__pycache__/testing.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..285ece15489cdc8b747e40a15b248164f550f7a0
Binary files /dev/null and b/annotator/uniformer/mmcv/utils/__pycache__/testing.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/utils/__pycache__/timer.cpython-310.pyc b/annotator/uniformer/mmcv/utils/__pycache__/timer.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f98deb14b1c11275cb185bd4e1d8ae6bffde53da
Binary files /dev/null and b/annotator/uniformer/mmcv/utils/__pycache__/timer.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/utils/__pycache__/timer.cpython-38.pyc b/annotator/uniformer/mmcv/utils/__pycache__/timer.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a1fa963a05b4fdb6d92285033f6ffe572b3a20ac
Binary files /dev/null and b/annotator/uniformer/mmcv/utils/__pycache__/timer.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/utils/__pycache__/trace.cpython-310.pyc b/annotator/uniformer/mmcv/utils/__pycache__/trace.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..59e3f9bece0b200d50f017f1d0053ad1db3226b8
Binary files /dev/null and b/annotator/uniformer/mmcv/utils/__pycache__/trace.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/utils/__pycache__/trace.cpython-38.pyc b/annotator/uniformer/mmcv/utils/__pycache__/trace.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2ebbcd8694ba95a2246426d1302dd37b9cce0c55
Binary files /dev/null and b/annotator/uniformer/mmcv/utils/__pycache__/trace.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/utils/__pycache__/version_utils.cpython-310.pyc b/annotator/uniformer/mmcv/utils/__pycache__/version_utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7012ff32a5251c0c2edd3f31fb74cef450efbf32
Binary files /dev/null and b/annotator/uniformer/mmcv/utils/__pycache__/version_utils.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/utils/__pycache__/version_utils.cpython-38.pyc b/annotator/uniformer/mmcv/utils/__pycache__/version_utils.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9152258f68bb8fdab09c014b71cd4226c33c2f8b
Binary files /dev/null and b/annotator/uniformer/mmcv/utils/__pycache__/version_utils.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/utils/config.py b/annotator/uniformer/mmcv/utils/config.py
new file mode 100644
index 0000000000000000000000000000000000000000..17149353aefac6d737c67bb2f35a3a6cd2147b0a
--- /dev/null
+++ b/annotator/uniformer/mmcv/utils/config.py
@@ -0,0 +1,688 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import ast
+import copy
+import os
+import os.path as osp
+import platform
+import shutil
+import sys
+import tempfile
+import uuid
+import warnings
+from argparse import Action, ArgumentParser
+from collections import abc
+from importlib import import_module
+
+from addict import Dict
+from yapf.yapflib.yapf_api import FormatCode
+
+from .misc import import_modules_from_strings
+from .path import check_file_exist
+
+if platform.system() == 'Windows':
+ import regex as re
+else:
+ import re
+
+BASE_KEY = '_base_'
+DELETE_KEY = '_delete_'
+DEPRECATION_KEY = '_deprecation_'
+RESERVED_KEYS = ['filename', 'text', 'pretty_text']
+
+
+class ConfigDict(Dict):
+
+ def __missing__(self, name):
+ raise KeyError(name)
+
+ def __getattr__(self, name):
+ try:
+ value = super(ConfigDict, self).__getattr__(name)
+ except KeyError:
+ ex = AttributeError(f"'{self.__class__.__name__}' object has no "
+ f"attribute '{name}'")
+ except Exception as e:
+ ex = e
+ else:
+ return value
+ raise ex
+
+
+def add_args(parser, cfg, prefix=''):
+ for k, v in cfg.items():
+ if isinstance(v, str):
+ parser.add_argument('--' + prefix + k)
+ elif isinstance(v, int):
+ parser.add_argument('--' + prefix + k, type=int)
+ elif isinstance(v, float):
+ parser.add_argument('--' + prefix + k, type=float)
+ elif isinstance(v, bool):
+ parser.add_argument('--' + prefix + k, action='store_true')
+ elif isinstance(v, dict):
+ add_args(parser, v, prefix + k + '.')
+ elif isinstance(v, abc.Iterable):
+ parser.add_argument('--' + prefix + k, type=type(v[0]), nargs='+')
+ else:
+ print(f'cannot parse key {prefix + k} of type {type(v)}')
+ return parser
+
+
+class Config:
+ """A facility for config and config files.
+
+ It supports common file formats as configs: python/json/yaml. The interface
+ is the same as a dict object and also allows accessing config values as
+ attributes.
+
+ Example:
+ >>> cfg = Config(dict(a=1, b=dict(b1=[0, 1])))
+ >>> cfg.a
+ 1
+ >>> cfg.b
+ {'b1': [0, 1]}
+ >>> cfg.b.b1
+ [0, 1]
+ >>> cfg = Config.fromfile('tests/data/config/a.py')
+ >>> cfg.filename
+ "/home/kchen/projects/mmcv/tests/data/config/a.py"
+ >>> cfg.item4
+ 'test'
+ >>> cfg
+ "Config [path: /home/kchen/projects/mmcv/tests/data/config/a.py]: "
+ "{'item1': [1, 2], 'item2': {'a': 0}, 'item3': True, 'item4': 'test'}"
+ """
+
+ @staticmethod
+ def _validate_py_syntax(filename):
+ with open(filename, 'r', encoding='utf-8') as f:
+ # Setting encoding explicitly to resolve coding issue on windows
+ content = f.read()
+ try:
+ ast.parse(content)
+ except SyntaxError as e:
+ raise SyntaxError('There are syntax errors in config '
+ f'file {filename}: {e}')
+
+ @staticmethod
+ def _substitute_predefined_vars(filename, temp_config_name):
+ file_dirname = osp.dirname(filename)
+ file_basename = osp.basename(filename)
+ file_basename_no_extension = osp.splitext(file_basename)[0]
+ file_extname = osp.splitext(filename)[1]
+ support_templates = dict(
+ fileDirname=file_dirname,
+ fileBasename=file_basename,
+ fileBasenameNoExtension=file_basename_no_extension,
+ fileExtname=file_extname)
+ with open(filename, 'r', encoding='utf-8') as f:
+ # Setting encoding explicitly to resolve coding issue on windows
+ config_file = f.read()
+ for key, value in support_templates.items():
+ regexp = r'\{\{\s*' + str(key) + r'\s*\}\}'
+ value = value.replace('\\', '/')
+ config_file = re.sub(regexp, value, config_file)
+ with open(temp_config_name, 'w', encoding='utf-8') as tmp_config_file:
+ tmp_config_file.write(config_file)
+
+ @staticmethod
+ def _pre_substitute_base_vars(filename, temp_config_name):
+ """Substitute base variable placeholders with strings so that parsing
+ works."""
+ with open(filename, 'r', encoding='utf-8') as f:
+ # Setting encoding explicitly to resolve coding issue on windows
+ config_file = f.read()
+ base_var_dict = {}
+ regexp = r'\{\{\s*' + BASE_KEY + r'\.([\w\.]+)\s*\}\}'
+ base_vars = set(re.findall(regexp, config_file))
+ for base_var in base_vars:
+ randstr = f'_{base_var}_{uuid.uuid4().hex.lower()[:6]}'
+ base_var_dict[randstr] = base_var
+ regexp = r'\{\{\s*' + BASE_KEY + r'\.' + base_var + r'\s*\}\}'
+ config_file = re.sub(regexp, f'"{randstr}"', config_file)
+ with open(temp_config_name, 'w', encoding='utf-8') as tmp_config_file:
+ tmp_config_file.write(config_file)
+ return base_var_dict
+
+ @staticmethod
+ def _substitute_base_vars(cfg, base_var_dict, base_cfg):
+ """Substitute variable strings with their actual values."""
+ cfg = copy.deepcopy(cfg)
+
+ if isinstance(cfg, dict):
+ for k, v in cfg.items():
+ if isinstance(v, str) and v in base_var_dict:
+ new_v = base_cfg
+ for new_k in base_var_dict[v].split('.'):
+ new_v = new_v[new_k]
+ cfg[k] = new_v
+ elif isinstance(v, (list, tuple, dict)):
+ cfg[k] = Config._substitute_base_vars(
+ v, base_var_dict, base_cfg)
+ elif isinstance(cfg, tuple):
+ cfg = tuple(
+ Config._substitute_base_vars(c, base_var_dict, base_cfg)
+ for c in cfg)
+ elif isinstance(cfg, list):
+ cfg = [
+ Config._substitute_base_vars(c, base_var_dict, base_cfg)
+ for c in cfg
+ ]
+ elif isinstance(cfg, str) and cfg in base_var_dict:
+ new_v = base_cfg
+ for new_k in base_var_dict[cfg].split('.'):
+ new_v = new_v[new_k]
+ cfg = new_v
+
+ return cfg
+
+ @staticmethod
+ def _file2dict(filename, use_predefined_variables=True):
+ filename = osp.abspath(osp.expanduser(filename))
+ check_file_exist(filename)
+ fileExtname = osp.splitext(filename)[1]
+ if fileExtname not in ['.py', '.json', '.yaml', '.yml']:
+ raise IOError('Only py/yml/yaml/json types are supported now!')
+
+ with tempfile.TemporaryDirectory() as temp_config_dir:
+ temp_config_file = tempfile.NamedTemporaryFile(
+ dir=temp_config_dir, suffix=fileExtname)
+ if platform.system() == 'Windows':
+ temp_config_file.close()
+ temp_config_name = osp.basename(temp_config_file.name)
+ # Substitute predefined variables
+ if use_predefined_variables:
+ Config._substitute_predefined_vars(filename,
+ temp_config_file.name)
+ else:
+ shutil.copyfile(filename, temp_config_file.name)
+ # Substitute base variables from placeholders to strings
+ base_var_dict = Config._pre_substitute_base_vars(
+ temp_config_file.name, temp_config_file.name)
+
+ if filename.endswith('.py'):
+ temp_module_name = osp.splitext(temp_config_name)[0]
+ sys.path.insert(0, temp_config_dir)
+ Config._validate_py_syntax(filename)
+ mod = import_module(temp_module_name)
+ sys.path.pop(0)
+ cfg_dict = {
+ name: value
+ for name, value in mod.__dict__.items()
+ if not name.startswith('__')
+ }
+ # delete imported module
+ del sys.modules[temp_module_name]
+ elif filename.endswith(('.yml', '.yaml', '.json')):
+ import annotator.uniformer.mmcv as mmcv
+ cfg_dict = mmcv.load(temp_config_file.name)
+ # close temp file
+ temp_config_file.close()
+
+ # check deprecation information
+ if DEPRECATION_KEY in cfg_dict:
+ deprecation_info = cfg_dict.pop(DEPRECATION_KEY)
+ warning_msg = f'The config file {filename} will be deprecated ' \
+ 'in the future.'
+ if 'expected' in deprecation_info:
+ warning_msg += f' Please use {deprecation_info["expected"]} ' \
+ 'instead.'
+ if 'reference' in deprecation_info:
+ warning_msg += ' More information can be found at ' \
+ f'{deprecation_info["reference"]}'
+ warnings.warn(warning_msg)
+
+ cfg_text = filename + '\n'
+ with open(filename, 'r', encoding='utf-8') as f:
+ # Setting encoding explicitly to resolve coding issue on windows
+ cfg_text += f.read()
+
+ if BASE_KEY in cfg_dict:
+ cfg_dir = osp.dirname(filename)
+ base_filename = cfg_dict.pop(BASE_KEY)
+ base_filename = base_filename if isinstance(
+ base_filename, list) else [base_filename]
+
+ cfg_dict_list = list()
+ cfg_text_list = list()
+ for f in base_filename:
+ _cfg_dict, _cfg_text = Config._file2dict(osp.join(cfg_dir, f))
+ cfg_dict_list.append(_cfg_dict)
+ cfg_text_list.append(_cfg_text)
+
+ base_cfg_dict = dict()
+ for c in cfg_dict_list:
+ duplicate_keys = base_cfg_dict.keys() & c.keys()
+ if len(duplicate_keys) > 0:
+ raise KeyError('Duplicate key is not allowed among bases. '
+ f'Duplicate keys: {duplicate_keys}')
+ base_cfg_dict.update(c)
+
+ # Substitute base variables from strings to their actual values
+ cfg_dict = Config._substitute_base_vars(cfg_dict, base_var_dict,
+ base_cfg_dict)
+
+ base_cfg_dict = Config._merge_a_into_b(cfg_dict, base_cfg_dict)
+ cfg_dict = base_cfg_dict
+
+ # merge cfg_text
+ cfg_text_list.append(cfg_text)
+ cfg_text = '\n'.join(cfg_text_list)
+
+ return cfg_dict, cfg_text
+
+ @staticmethod
+ def _merge_a_into_b(a, b, allow_list_keys=False):
+ """merge dict ``a`` into dict ``b`` (non-inplace).
+
+ Values in ``a`` will overwrite ``b``. ``b`` is copied first to avoid
+ in-place modifications.
+
+ Args:
+ a (dict): The source dict to be merged into ``b``.
+ b (dict): The base dict into which keys from ``a`` are merged.
+ allow_list_keys (bool): If True, int string keys (e.g. '0', '1')
+ are allowed in source ``a`` and will replace the element of the
+ corresponding index in b if b is a list. Default: False.
+
+ Returns:
+ dict: The modified dict of ``b`` using ``a``.
+
+ Examples:
+ # Normally merge a into b.
+ >>> Config._merge_a_into_b(
+ ... dict(obj=dict(a=2)), dict(obj=dict(a=1)))
+ {'obj': {'a': 2}}
+
+ # Delete b first and merge a into b.
+ >>> Config._merge_a_into_b(
+ ... dict(obj=dict(_delete_=True, a=2)), dict(obj=dict(a=1)))
+ {'obj': {'a': 2}}
+
+ # b is a list
+ >>> Config._merge_a_into_b(
+ ... {'0': dict(a=2)}, [dict(a=1), dict(b=2)], True)
+ [{'a': 2}, {'b': 2}]
+ """
+ b = b.copy()
+ for k, v in a.items():
+ if allow_list_keys and k.isdigit() and isinstance(b, list):
+ k = int(k)
+ if len(b) <= k:
+ raise KeyError(f'Index {k} exceeds the length of list {b}')
+ b[k] = Config._merge_a_into_b(v, b[k], allow_list_keys)
+ elif isinstance(v,
+ dict) and k in b and not v.pop(DELETE_KEY, False):
+ allowed_types = (dict, list) if allow_list_keys else dict
+ if not isinstance(b[k], allowed_types):
+ raise TypeError(
+ f'{k}={v} in child config cannot inherit from base '
+ f'because {k} is a dict in the child config but is of '
+ f'type {type(b[k])} in base config. You may set '
+ f'`{DELETE_KEY}=True` to ignore the base config')
+ b[k] = Config._merge_a_into_b(v, b[k], allow_list_keys)
+ else:
+ b[k] = v
+ return b
+
+ @staticmethod
+ def fromfile(filename,
+ use_predefined_variables=True,
+ import_custom_modules=True):
+ cfg_dict, cfg_text = Config._file2dict(filename,
+ use_predefined_variables)
+ if import_custom_modules and cfg_dict.get('custom_imports', None):
+ import_modules_from_strings(**cfg_dict['custom_imports'])
+ return Config(cfg_dict, cfg_text=cfg_text, filename=filename)
+
+ @staticmethod
+ def fromstring(cfg_str, file_format):
+ """Generate config from config str.
+
+ Args:
+ cfg_str (str): Config str.
+ file_format (str): Config file format corresponding to the
+ config str. Only py/yml/yaml/json types are supported now!
+
+ Returns:
+ obj:`Config`: Config obj.
+ """
+ if file_format not in ['.py', '.json', '.yaml', '.yml']:
+ raise IOError('Only py/yml/yaml/json types are supported now!')
+ if file_format != '.py' and 'dict(' in cfg_str:
+ # check if users specify a wrong suffix for python
+ warnings.warn(
+ 'Please check "file_format", the file format may be .py')
+ with tempfile.NamedTemporaryFile(
+ 'w', encoding='utf-8', suffix=file_format,
+ delete=False) as temp_file:
+ temp_file.write(cfg_str)
+ # on Windows, the previous implementation caused an error
+ # see PR 1077 for details
+ cfg = Config.fromfile(temp_file.name)
+ os.remove(temp_file.name)
+ return cfg
+
+ @staticmethod
+ def auto_argparser(description=None):
+ """Generate argparser from config file automatically (experimental)"""
+ partial_parser = ArgumentParser(description=description)
+ partial_parser.add_argument('config', help='config file path')
+ cfg_file = partial_parser.parse_known_args()[0].config
+ cfg = Config.fromfile(cfg_file)
+ parser = ArgumentParser(description=description)
+ parser.add_argument('config', help='config file path')
+ add_args(parser, cfg)
+ return parser, cfg
+
+ def __init__(self, cfg_dict=None, cfg_text=None, filename=None):
+ if cfg_dict is None:
+ cfg_dict = dict()
+ elif not isinstance(cfg_dict, dict):
+ raise TypeError('cfg_dict must be a dict, but '
+ f'got {type(cfg_dict)}')
+ for key in cfg_dict:
+ if key in RESERVED_KEYS:
+ raise KeyError(f'{key} is reserved for config file')
+
+ super(Config, self).__setattr__('_cfg_dict', ConfigDict(cfg_dict))
+ super(Config, self).__setattr__('_filename', filename)
+ if cfg_text:
+ text = cfg_text
+ elif filename:
+ with open(filename, 'r') as f:
+ text = f.read()
+ else:
+ text = ''
+ super(Config, self).__setattr__('_text', text)
+
+ @property
+ def filename(self):
+ return self._filename
+
+ @property
+ def text(self):
+ return self._text
+
+ @property
+ def pretty_text(self):
+
+ indent = 4
+
+ def _indent(s_, num_spaces):
+ s = s_.split('\n')
+ if len(s) == 1:
+ return s_
+ first = s.pop(0)
+ s = [(num_spaces * ' ') + line for line in s]
+ s = '\n'.join(s)
+ s = first + '\n' + s
+ return s
+
+ def _format_basic_types(k, v, use_mapping=False):
+ if isinstance(v, str):
+ v_str = f"'{v}'"
+ else:
+ v_str = str(v)
+
+ if use_mapping:
+ k_str = f"'{k}'" if isinstance(k, str) else str(k)
+ attr_str = f'{k_str}: {v_str}'
+ else:
+ attr_str = f'{str(k)}={v_str}'
+ attr_str = _indent(attr_str, indent)
+
+ return attr_str
+
+ def _format_list(k, v, use_mapping=False):
+ # check if all items in the list are dict
+ if all(isinstance(_, dict) for _ in v):
+ v_str = '[\n'
+ v_str += '\n'.join(
+ f'dict({_indent(_format_dict(v_), indent)}),'
+ for v_ in v).rstrip(',')
+ if use_mapping:
+ k_str = f"'{k}'" if isinstance(k, str) else str(k)
+ attr_str = f'{k_str}: {v_str}'
+ else:
+ attr_str = f'{str(k)}={v_str}'
+ attr_str = _indent(attr_str, indent) + ']'
+ else:
+ attr_str = _format_basic_types(k, v, use_mapping)
+ return attr_str
+
+ def _contain_invalid_identifier(dict_str):
+ contain_invalid_identifier = False
+ for key_name in dict_str:
+ contain_invalid_identifier |= \
+ (not str(key_name).isidentifier())
+ return contain_invalid_identifier
+
+ def _format_dict(input_dict, outest_level=False):
+ r = ''
+ s = []
+
+ use_mapping = _contain_invalid_identifier(input_dict)
+ if use_mapping:
+ r += '{'
+ for idx, (k, v) in enumerate(input_dict.items()):
+ is_last = idx >= len(input_dict) - 1
+ end = '' if outest_level or is_last else ','
+ if isinstance(v, dict):
+ v_str = '\n' + _format_dict(v)
+ if use_mapping:
+ k_str = f"'{k}'" if isinstance(k, str) else str(k)
+ attr_str = f'{k_str}: dict({v_str}'
+ else:
+ attr_str = f'{str(k)}=dict({v_str}'
+ attr_str = _indent(attr_str, indent) + ')' + end
+ elif isinstance(v, list):
+ attr_str = _format_list(k, v, use_mapping) + end
+ else:
+ attr_str = _format_basic_types(k, v, use_mapping) + end
+
+ s.append(attr_str)
+ r += '\n'.join(s)
+ if use_mapping:
+ r += '}'
+ return r
+
+ cfg_dict = self._cfg_dict.to_dict()
+ text = _format_dict(cfg_dict, outest_level=True)
+ # copied from setup.cfg
+ yapf_style = dict(
+ based_on_style='pep8',
+ blank_line_before_nested_class_or_def=True,
+ split_before_expression_after_opening_paren=True)
+ text, _ = FormatCode(text, style_config=yapf_style, verify=True)
+
+ return text
+
+ def __repr__(self):
+ return f'Config (path: {self.filename}): {self._cfg_dict.__repr__()}'
+
+ def __len__(self):
+ return len(self._cfg_dict)
+
+ def __getattr__(self, name):
+ return getattr(self._cfg_dict, name)
+
+ def __getitem__(self, name):
+ return self._cfg_dict.__getitem__(name)
+
+ def __setattr__(self, name, value):
+ if isinstance(value, dict):
+ value = ConfigDict(value)
+ self._cfg_dict.__setattr__(name, value)
+
+ def __setitem__(self, name, value):
+ if isinstance(value, dict):
+ value = ConfigDict(value)
+ self._cfg_dict.__setitem__(name, value)
+
+ def __iter__(self):
+ return iter(self._cfg_dict)
+
+ def __getstate__(self):
+ return (self._cfg_dict, self._filename, self._text)
+
+ def __setstate__(self, state):
+ _cfg_dict, _filename, _text = state
+ super(Config, self).__setattr__('_cfg_dict', _cfg_dict)
+ super(Config, self).__setattr__('_filename', _filename)
+ super(Config, self).__setattr__('_text', _text)
+
+ def dump(self, file=None):
+ cfg_dict = super(Config, self).__getattribute__('_cfg_dict').to_dict()
+ if self.filename.endswith('.py'):
+ if file is None:
+ return self.pretty_text
+ else:
+ with open(file, 'w', encoding='utf-8') as f:
+ f.write(self.pretty_text)
+ else:
+ import annotator.uniformer.mmcv as mmcv
+ if file is None:
+ file_format = self.filename.split('.')[-1]
+ return mmcv.dump(cfg_dict, file_format=file_format)
+ else:
+ mmcv.dump(cfg_dict, file)
+
+ def merge_from_dict(self, options, allow_list_keys=True):
+ """Merge list into cfg_dict.
+
+ Merge the dict parsed by MultipleKVAction into this cfg.
+
+ Examples:
+ >>> options = {'model.backbone.depth': 50,
+ ... 'model.backbone.with_cp':True}
+ >>> cfg = Config(dict(model=dict(backbone=dict(type='ResNet'))))
+ >>> cfg.merge_from_dict(options)
+ >>> cfg_dict = super(Config, self).__getattribute__('_cfg_dict')
+ >>> assert cfg_dict == dict(
+ ... model=dict(backbone=dict(depth=50, with_cp=True)))
+
+ # Merge list element
+ >>> cfg = Config(dict(pipeline=[
+ ... dict(type='LoadImage'), dict(type='LoadAnnotations')]))
+ >>> options = dict(pipeline={'0': dict(type='SelfLoadImage')})
+ >>> cfg.merge_from_dict(options, allow_list_keys=True)
+ >>> cfg_dict = super(Config, self).__getattribute__('_cfg_dict')
+ >>> assert cfg_dict == dict(pipeline=[
+ ... dict(type='SelfLoadImage'), dict(type='LoadAnnotations')])
+
+ Args:
+ options (dict): dict of configs to merge from.
+ allow_list_keys (bool): If True, int string keys (e.g. '0', '1')
+ are allowed in ``options`` and will replace the element of the
+ corresponding index in the config if the config is a list.
+ Default: True.
+ """
+ option_cfg_dict = {}
+ for full_key, v in options.items():
+ d = option_cfg_dict
+ key_list = full_key.split('.')
+ for subkey in key_list[:-1]:
+ d.setdefault(subkey, ConfigDict())
+ d = d[subkey]
+ subkey = key_list[-1]
+ d[subkey] = v
+
+ cfg_dict = super(Config, self).__getattribute__('_cfg_dict')
+ super(Config, self).__setattr__(
+ '_cfg_dict',
+ Config._merge_a_into_b(
+ option_cfg_dict, cfg_dict, allow_list_keys=allow_list_keys))
+
+
+class DictAction(Action):
+ """
+ argparse action to split an argument into KEY=VALUE form
+ on the first '=' and append to a dictionary. List options can
+ be passed as comma-separated values, e.g. 'KEY=V1,V2,V3', or with explicit
+ brackets, e.g. 'KEY=[V1,V2,V3]'. It also supports nested brackets to build
+ list/tuple values, e.g. 'KEY=[(V1,V2),(V3,V4)]'.
+ """
+
+ @staticmethod
+ def _parse_int_float_bool(val):
+ try:
+ return int(val)
+ except ValueError:
+ pass
+ try:
+ return float(val)
+ except ValueError:
+ pass
+ if val.lower() in ['true', 'false']:
+ return val.lower() == 'true'
+ return val
+
+ @staticmethod
+ def _parse_iterable(val):
+ """Parse iterable values in the string.
+
+ All elements inside '()' or '[]' are treated as iterable values.
+
+ Args:
+ val (str): Value string.
+
+ Returns:
+ list | tuple: The expanded list or tuple from the string.
+
+ Examples:
+ >>> DictAction._parse_iterable('1,2,3')
+ [1, 2, 3]
+ >>> DictAction._parse_iterable('[a, b, c]')
+ ['a', 'b', 'c']
+ >>> DictAction._parse_iterable('[(1, 2, 3), [a, b], c]')
+ [(1, 2, 3), ['a', 'b'], 'c']
+ """
+
+ def find_next_comma(string):
+ """Find the position of next comma in the string.
+
+ If no ',' is found in the string, return the string length. All
+ chars inside '()' and '[]' are treated as one element and thus ','
+ inside these brackets are ignored.
+ """
+ assert (string.count('(') == string.count(')')) and (
+ string.count('[') == string.count(']')), \
+ f'Imbalanced brackets exist in {string}'
+ end = len(string)
+ for idx, char in enumerate(string):
+ pre = string[:idx]
+ # The string before this ',' is balanced
+ if ((char == ',') and (pre.count('(') == pre.count(')'))
+ and (pre.count('[') == pre.count(']'))):
+ end = idx
+ break
+ return end
+
+ # Strip ' and " characters and replace whitespace.
+ val = val.strip('\'\"').replace(' ', '')
+ is_tuple = False
+ if val.startswith('(') and val.endswith(')'):
+ is_tuple = True
+ val = val[1:-1]
+ elif val.startswith('[') and val.endswith(']'):
+ val = val[1:-1]
+ elif ',' not in val:
+ # val is a single value
+ return DictAction._parse_int_float_bool(val)
+
+ values = []
+ while len(val) > 0:
+ comma_idx = find_next_comma(val)
+ element = DictAction._parse_iterable(val[:comma_idx])
+ values.append(element)
+ val = val[comma_idx + 1:]
+ if is_tuple:
+ values = tuple(values)
+ return values
+
+ def __call__(self, parser, namespace, values, option_string=None):
+ options = {}
+ for kv in values:
+ key, val = kv.split('=', maxsplit=1)
+ options[key] = self._parse_iterable(val)
+ setattr(namespace, self.dest, options)
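+
+
+if __name__ == '__main__':
+    # Minimal usage sketch (illustrative only, not part of the upstream mmcv
+    # API): parse KEY=VALUE overrides with DictAction and merge them into a
+    # Config. The option name '--cfg-options' and the values are hypothetical.
+    parser = ArgumentParser(description='DictAction demo')
+    parser.add_argument('--cfg-options', nargs='+', action=DictAction, default={})
+    args = parser.parse_args(['--cfg-options', 'model.depth=50', 'lr=[0.01,0.001]'])
+    cfg = Config(dict(model=dict(depth=18), lr=0.1))
+    cfg.merge_from_dict(args.cfg_options)
+    assert cfg['model']['depth'] == 50 and cfg['lr'] == [0.01, 0.001]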
diff --git a/annotator/uniformer/mmcv/utils/env.py b/annotator/uniformer/mmcv/utils/env.py
new file mode 100644
index 0000000000000000000000000000000000000000..e3f0d92529e193e6d8339419bcd9bed7901a7769
--- /dev/null
+++ b/annotator/uniformer/mmcv/utils/env.py
@@ -0,0 +1,95 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+"""This file holding some environment constant for sharing by other files."""
+
+import os.path as osp
+import subprocess
+import sys
+from collections import defaultdict
+
+import cv2
+import torch
+
+import annotator.uniformer.mmcv as mmcv
+from .parrots_wrapper import get_build_config
+
+
+def collect_env():
+ """Collect the information of the running environments.
+
+ Returns:
+ dict: The environment information. The following fields are contained.
+
+ - sys.platform: The variable of ``sys.platform``.
+ - Python: Python version.
+ - CUDA available: Bool, indicating if CUDA is available.
+ - GPU devices: Device type of each GPU.
+ - CUDA_HOME (optional): The env var ``CUDA_HOME``.
+ - NVCC (optional): NVCC version.
+ - GCC: GCC version, "n/a" if GCC is not installed.
+ - PyTorch: PyTorch version.
+ - PyTorch compiling details: The output of \
+ ``torch.__config__.show()``.
+ - TorchVision (optional): TorchVision version.
+ - OpenCV: OpenCV version.
+ - MMCV: MMCV version.
+ - MMCV Compiler: The GCC version for compiling MMCV ops.
+ - MMCV CUDA Compiler: The CUDA version for compiling MMCV ops.
+ """
+ env_info = {}
+ env_info['sys.platform'] = sys.platform
+ env_info['Python'] = sys.version.replace('\n', '')
+
+ cuda_available = torch.cuda.is_available()
+ env_info['CUDA available'] = cuda_available
+
+ if cuda_available:
+ devices = defaultdict(list)
+ for k in range(torch.cuda.device_count()):
+ devices[torch.cuda.get_device_name(k)].append(str(k))
+ for name, device_ids in devices.items():
+ env_info['GPU ' + ','.join(device_ids)] = name
+
+ from annotator.uniformer.mmcv.utils.parrots_wrapper import _get_cuda_home
+ CUDA_HOME = _get_cuda_home()
+ env_info['CUDA_HOME'] = CUDA_HOME
+
+ if CUDA_HOME is not None and osp.isdir(CUDA_HOME):
+ try:
+ nvcc = osp.join(CUDA_HOME, 'bin/nvcc')
+ nvcc = subprocess.check_output(
+ f'"{nvcc}" -V | tail -n1', shell=True)
+ nvcc = nvcc.decode('utf-8').strip()
+ except subprocess.SubprocessError:
+ nvcc = 'Not Available'
+ env_info['NVCC'] = nvcc
+
+ try:
+ gcc = subprocess.check_output('gcc --version | head -n1', shell=True)
+ gcc = gcc.decode('utf-8').strip()
+ env_info['GCC'] = gcc
+ except subprocess.CalledProcessError: # gcc is unavailable
+ env_info['GCC'] = 'n/a'
+
+ env_info['PyTorch'] = torch.__version__
+ env_info['PyTorch compiling details'] = get_build_config()
+
+ try:
+ import torchvision
+ env_info['TorchVision'] = torchvision.__version__
+ except ModuleNotFoundError:
+ pass
+
+ env_info['OpenCV'] = cv2.__version__
+
+ env_info['MMCV'] = mmcv.__version__
+
+ try:
+ from annotator.uniformer.mmcv.ops import get_compiler_version, get_compiling_cuda_version
+ except ModuleNotFoundError:
+ env_info['MMCV Compiler'] = 'n/a'
+ env_info['MMCV CUDA Compiler'] = 'n/a'
+ else:
+ env_info['MMCV Compiler'] = get_compiler_version()
+ env_info['MMCV CUDA Compiler'] = get_compiling_cuda_version()
+
+ return env_info
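+
+
+if __name__ == '__main__':
+    # Usage sketch (illustrative only): print the collected environment
+    # information, one "name: value" pair per line.
+    for key, value in collect_env().items():
+        print(f'{key}: {value}')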
diff --git a/annotator/uniformer/mmcv/utils/ext_loader.py b/annotator/uniformer/mmcv/utils/ext_loader.py
new file mode 100644
index 0000000000000000000000000000000000000000..cd8044f71184fa1081566da0ab771caf5f5b39f8
--- /dev/null
+++ b/annotator/uniformer/mmcv/utils/ext_loader.py
@@ -0,0 +1,71 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import importlib
+import os
+import pkgutil
+import warnings
+from collections import namedtuple
+
+import torch
+
+if torch.__version__ != 'parrots':
+
+ def load_ext(name, funcs):
+ ext = importlib.import_module('annotator.uniformer.mmcv.' + name)
+ for fun in funcs:
+ assert hasattr(ext, fun), f'{fun} is missing in module {name}'
+ return ext
+else:
+ from parrots import extension
+ from parrots.base import ParrotsException
+
+ has_return_value_ops = [
+ 'nms',
+ 'softnms',
+ 'nms_match',
+ 'nms_rotated',
+ 'top_pool_forward',
+ 'top_pool_backward',
+ 'bottom_pool_forward',
+ 'bottom_pool_backward',
+ 'left_pool_forward',
+ 'left_pool_backward',
+ 'right_pool_forward',
+ 'right_pool_backward',
+ 'fused_bias_leakyrelu',
+ 'upfirdn2d',
+ 'ms_deform_attn_forward',
+ 'pixel_group',
+ 'contour_expand',
+ ]
+
+ def get_fake_func(name, e):
+
+ def fake_func(*args, **kwargs):
+ warnings.warn(f'{name} is not supported in parrots now')
+ raise e
+
+ return fake_func
+
+ def load_ext(name, funcs):
+ ExtModule = namedtuple('ExtModule', funcs)
+ ext_list = []
+ lib_root = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
+ for fun in funcs:
+ try:
+ ext_fun = extension.load(fun, name, lib_dir=lib_root)
+ except ParrotsException as e:
+ if 'No element registered' not in e.message:
+ warnings.warn(e.message)
+ ext_fun = get_fake_func(fun, e)
+ ext_list.append(ext_fun)
+ else:
+ if fun in has_return_value_ops:
+ ext_list.append(ext_fun.op)
+ else:
+ ext_list.append(ext_fun.op_)
+ return ExtModule(*ext_list)
+
+
+def check_ops_exist():
+ ext_loader = pkgutil.find_loader('mmcv._ext')
+ return ext_loader is not None
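+
+
+if __name__ == '__main__':
+    # Usage sketch (illustrative only): report whether the compiled
+    # ``mmcv._ext`` extension is importable in this environment.
+    print('mmcv._ext available:', check_ops_exist())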
diff --git a/annotator/uniformer/mmcv/utils/logging.py b/annotator/uniformer/mmcv/utils/logging.py
new file mode 100644
index 0000000000000000000000000000000000000000..4aa0e04bb9b3ab2a4bfbc4def50404ccbac2c6e6
--- /dev/null
+++ b/annotator/uniformer/mmcv/utils/logging.py
@@ -0,0 +1,110 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import logging
+
+import torch.distributed as dist
+
+logger_initialized = {}
+
+
+def get_logger(name, log_file=None, log_level=logging.INFO, file_mode='w'):
+ """Initialize and get a logger by name.
+
+ If the logger has not been initialized, this method will initialize the
+ logger by adding one or two handlers, otherwise the initialized logger will
+ be directly returned. During initialization, a StreamHandler will always be
+ added. If `log_file` is specified and the process rank is 0, a FileHandler
+ will also be added.
+
+ Args:
+ name (str): Logger name.
+ log_file (str | None): The log filename. If specified, a FileHandler
+ will be added to the logger.
+ log_level (int): The logger level. Note that only the process of
+ rank 0 is affected, and other processes will set the level to
+ "Error" thus be silent most of the time.
+ file_mode (str): The file mode used in opening log file.
+ Defaults to 'w'.
+
+ Returns:
+ logging.Logger: The expected logger.
+ """
+ logger = logging.getLogger(name)
+ if name in logger_initialized:
+ return logger
+ # handle hierarchical names
+ # e.g., logger "a" is initialized, then logger "a.b" will skip the
+ # initialization since it is a child of "a".
+ for logger_name in logger_initialized:
+ if name.startswith(logger_name):
+ return logger
+
+ # handle duplicate logs to the console
+ # Starting in 1.8.0, PyTorch DDP attaches a StreamHandler (NOTSET)
+ # to the root logger. As logger.propagate is True by default, this root
+ # level handler causes logging messages from rank>0 processes to
+ # unexpectedly show up on the console, creating much unwanted clutter.
+ # To fix this issue, we set the root logger's StreamHandler, if any, to log
+ # at the ERROR level.
+ for handler in logger.root.handlers:
+ if type(handler) is logging.StreamHandler:
+ handler.setLevel(logging.ERROR)
+
+ stream_handler = logging.StreamHandler()
+ handlers = [stream_handler]
+
+ if dist.is_available() and dist.is_initialized():
+ rank = dist.get_rank()
+ else:
+ rank = 0
+
+ # only rank 0 will add a FileHandler
+ if rank == 0 and log_file is not None:
+ # The official FileHandler defaults to mode 'a' (append). We expose
+ # ``file_mode`` (default 'w' here) so callers can choose the mode
+ # explicitly.
+ file_handler = logging.FileHandler(log_file, file_mode)
+ handlers.append(file_handler)
+
+ formatter = logging.Formatter(
+ '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+ for handler in handlers:
+ handler.setFormatter(formatter)
+ handler.setLevel(log_level)
+ logger.addHandler(handler)
+
+ if rank == 0:
+ logger.setLevel(log_level)
+ else:
+ logger.setLevel(logging.ERROR)
+
+ logger_initialized[name] = True
+
+ return logger
+
+
+def print_log(msg, logger=None, level=logging.INFO):
+ """Print a log message.
+
+ Args:
+ msg (str): The message to be logged.
+ logger (logging.Logger | str | None): The logger to be used.
+ Some special loggers are:
+ - "silent": no message will be printed.
+ - other str: the logger obtained with `get_root_logger(logger)`.
+ - None: The `print()` method will be used to print log messages.
+ level (int): Logging level. Only available when `logger` is a Logger
+ object or "root".
+ """
+ if logger is None:
+ print(msg)
+ elif isinstance(logger, logging.Logger):
+ logger.log(level, msg)
+ elif logger == 'silent':
+ pass
+ elif isinstance(logger, str):
+ _logger = get_logger(logger)
+ _logger.log(level, msg)
+ else:
+ raise TypeError(
+ 'logger should be either a logging.Logger object, str, '
+ f'"silent" or None, but got {type(logger)}')
diff --git a/annotator/uniformer/mmcv/utils/misc.py b/annotator/uniformer/mmcv/utils/misc.py
new file mode 100644
index 0000000000000000000000000000000000000000..2c58d0d7fee9fe3d4519270ad8c1e998d0d8a18c
--- /dev/null
+++ b/annotator/uniformer/mmcv/utils/misc.py
@@ -0,0 +1,377 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import collections.abc
+import functools
+import itertools
+import subprocess
+import warnings
+from collections import abc
+from importlib import import_module
+from inspect import getfullargspec
+from itertools import repeat
+
+
+# From PyTorch internals
+def _ntuple(n):
+
+ def parse(x):
+ if isinstance(x, collections.abc.Iterable):
+ return x
+ return tuple(repeat(x, n))
+
+ return parse
+
+
+to_1tuple = _ntuple(1)
+to_2tuple = _ntuple(2)
+to_3tuple = _ntuple(3)
+to_4tuple = _ntuple(4)
+to_ntuple = _ntuple
+
+
+def is_str(x):
+ """Whether the input is an string instance.
+
+ Note: This method is deprecated since Python 2 is no longer supported.
+ """
+ return isinstance(x, str)
+
+
+def import_modules_from_strings(imports, allow_failed_imports=False):
+ """Import modules from the given list of strings.
+
+ Args:
+ imports (list | str | None): The given module names to be imported.
+ allow_failed_imports (bool): If True, the failed imports will return
+ None. Otherwise, an ImportError is raised. Default: False.
+
+ Returns:
+ list[module] | module | None: The imported modules.
+
+ Examples:
+ >>> osp, sys = import_modules_from_strings(
+ ... ['os.path', 'sys'])
+ >>> import os.path as osp_
+ >>> import sys as sys_
+ >>> assert osp == osp_
+ >>> assert sys == sys_
+ """
+ if not imports:
+ return
+ single_import = False
+ if isinstance(imports, str):
+ single_import = True
+ imports = [imports]
+ if not isinstance(imports, list):
+ raise TypeError(
+ f'custom_imports must be a list but got type {type(imports)}')
+ imported = []
+ for imp in imports:
+ if not isinstance(imp, str):
+ raise TypeError(
+ f'{imp} is of type {type(imp)} and cannot be imported.')
+ try:
+ imported_tmp = import_module(imp)
+ except ImportError:
+ if allow_failed_imports:
+ warnings.warn(f'{imp} failed to import and is ignored.',
+ UserWarning)
+ imported_tmp = None
+ else:
+ raise ImportError
+ imported.append(imported_tmp)
+ if single_import:
+ imported = imported[0]
+ return imported
+
+
+def iter_cast(inputs, dst_type, return_type=None):
+ """Cast elements of an iterable object into some type.
+
+ Args:
+ inputs (Iterable): The input object.
+ dst_type (type): Destination type.
+ return_type (type, optional): If specified, the output object will be
+ converted to this type, otherwise an iterator.
+
+ Returns:
+ iterator or specified type: The converted object.
+ """
+ if not isinstance(inputs, abc.Iterable):
+ raise TypeError('inputs must be an iterable object')
+ if not isinstance(dst_type, type):
+ raise TypeError('"dst_type" must be a valid type')
+
+ out_iterable = map(dst_type, inputs)
+
+ if return_type is None:
+ return out_iterable
+ else:
+ return return_type(out_iterable)
+
+
+def list_cast(inputs, dst_type):
+ """Cast elements of an iterable object into a list of some type.
+
+ A partial method of :func:`iter_cast`.
+ """
+ return iter_cast(inputs, dst_type, return_type=list)
+
+
+def tuple_cast(inputs, dst_type):
+ """Cast elements of an iterable object into a tuple of some type.
+
+ A partial method of :func:`iter_cast`.
+ """
+ return iter_cast(inputs, dst_type, return_type=tuple)
+
+
+def is_seq_of(seq, expected_type, seq_type=None):
+ """Check whether it is a sequence of some type.
+
+ Args:
+ seq (Sequence): The sequence to be checked.
+ expected_type (type): Expected type of sequence items.
+ seq_type (type, optional): Expected sequence type.
+
+ Returns:
+ bool: Whether the sequence is valid.
+ """
+ if seq_type is None:
+ exp_seq_type = abc.Sequence
+ else:
+ assert isinstance(seq_type, type)
+ exp_seq_type = seq_type
+ if not isinstance(seq, exp_seq_type):
+ return False
+ for item in seq:
+ if not isinstance(item, expected_type):
+ return False
+ return True
+
+
+def is_list_of(seq, expected_type):
+ """Check whether it is a list of some type.
+
+ A partial method of :func:`is_seq_of`.
+ """
+ return is_seq_of(seq, expected_type, seq_type=list)
+
+
+def is_tuple_of(seq, expected_type):
+ """Check whether it is a tuple of some type.
+
+ A partial method of :func:`is_seq_of`.
+ """
+ return is_seq_of(seq, expected_type, seq_type=tuple)
+
+
+def slice_list(in_list, lens):
+ """Slice a list into several sub lists by a list of given length.
+
+ Args:
+ in_list (list): The list to be sliced.
+ lens (int or list): The expected length of each output sub-list.
+
+ Returns:
+ list: A list of sliced sub-lists.
+ """
+ if isinstance(lens, int):
+ assert len(in_list) % lens == 0
+ lens = [lens] * int(len(in_list) / lens)
+ if not isinstance(lens, list):
+ raise TypeError('"indices" must be an integer or a list of integers')
+ elif sum(lens) != len(in_list):
+ raise ValueError('sum of lens and list length does not '
+ f'match: {sum(lens)} != {len(in_list)}')
+ out_list = []
+ idx = 0
+ for i in range(len(lens)):
+ out_list.append(in_list[idx:idx + lens[i]])
+ idx += lens[i]
+ return out_list
+
+
+def concat_list(in_list):
+ """Concatenate a list of list into a single list.
+
+ Args:
+ in_list (list): The list of lists to be concatenated.
+
+ Returns:
+ list: The concatenated flat list.
+ """
+ return list(itertools.chain(*in_list))
+
+
+def check_prerequisites(
+ prerequisites,
+ checker,
+ msg_tmpl='Prerequisites "{}" are required in method "{}" but not '
+ 'found, please install them first.'): # yapf: disable
+ """A decorator factory to check if prerequisites are satisfied.
+
+ Args:
+ prerequisites (str or list[str]): Prerequisites to be checked.
+ checker (callable): The checker method that returns True if a
+ prerequisite is met, False otherwise.
+ msg_tmpl (str): The message template with two variables.
+
+ Returns:
+ decorator: A specific decorator.
+ """
+
+ def wrap(func):
+
+ @functools.wraps(func)
+ def wrapped_func(*args, **kwargs):
+ requirements = [prerequisites] if isinstance(
+ prerequisites, str) else prerequisites
+ missing = []
+ for item in requirements:
+ if not checker(item):
+ missing.append(item)
+ if missing:
+ print(msg_tmpl.format(', '.join(missing), func.__name__))
+ raise RuntimeError('Prerequisites not met.')
+ else:
+ return func(*args, **kwargs)
+
+ return wrapped_func
+
+ return wrap
+
+
+def _check_py_package(package):
+ try:
+ import_module(package)
+ except ImportError:
+ return False
+ else:
+ return True
+
+
+def _check_executable(cmd):
+ if subprocess.call(f'which {cmd}', shell=True) != 0:
+ return False
+ else:
+ return True
+
+
+def requires_package(prerequisites):
+ """A decorator to check if some python packages are installed.
+
+ Example:
+ >>> @requires_package('numpy')
+ >>> func(arg1, args):
+ >>> return numpy.zeros(1)
+ array([0.])
+ >>> @requires_package(['numpy', 'non_package'])
+ >>> func(arg1, args):
+ >>> return numpy.zeros(1)
+ ImportError
+ """
+ return check_prerequisites(prerequisites, checker=_check_py_package)
+
+
+def requires_executable(prerequisites):
+ """A decorator to check if some executable files are installed.
+
+ Example:
+ >>> @requires_executable('ffmpeg')
+ >>> func(arg1, args):
+ >>> print(1)
+ 1
+ """
+ return check_prerequisites(prerequisites, checker=_check_executable)
+
+
+def deprecated_api_warning(name_dict, cls_name=None):
+ """A decorator to check if some arguments are deprecate and try to replace
+ deprecate src_arg_name to dst_arg_name.
+
+ Args:
+ name_dict(dict):
+ key (str): Deprecate argument names.
+ val (str): Expected argument names.
+
+ Returns:
+ func: New function.
+ """
+
+ def api_warning_wrapper(old_func):
+
+ @functools.wraps(old_func)
+ def new_func(*args, **kwargs):
+ # get the arg spec of the decorated method
+ args_info = getfullargspec(old_func)
+ # get name of the function
+ func_name = old_func.__name__
+ if cls_name is not None:
+ func_name = f'{cls_name}.{func_name}'
+ if args:
+ arg_names = args_info.args[:len(args)]
+ for src_arg_name, dst_arg_name in name_dict.items():
+ if src_arg_name in arg_names:
+ warnings.warn(
+ f'"{src_arg_name}" is deprecated in '
+ f'`{func_name}`, please use "{dst_arg_name}" '
+ 'instead')
+ arg_names[arg_names.index(src_arg_name)] = dst_arg_name
+ if kwargs:
+ for src_arg_name, dst_arg_name in name_dict.items():
+ if src_arg_name in kwargs:
+
+ assert dst_arg_name not in kwargs, (
+ f'The expected behavior is to replace '
+ f'the deprecated key `{src_arg_name}` to '
+ f'new key `{dst_arg_name}`, but got them '
+ f'in the arguments at the same time, which '
+ f'is confusing. `{src_arg_name}` will be '
+ f'deprecated in the future, please '
+ f'use `{dst_arg_name}` instead.')
+
+ warnings.warn(
+ f'"{src_arg_name}" is deprecated in '
+ f'`{func_name}`, please use "{dst_arg_name}" '
+ 'instead')
+ kwargs[dst_arg_name] = kwargs.pop(src_arg_name)
+
+ # apply converted arguments to the decorated method
+ output = old_func(*args, **kwargs)
+ return output
+
+ return new_func
+
+ return api_warning_wrapper
+
+
+def is_method_overridden(method, base_class, derived_class):
+ """Check if a method of base class is overridden in derived class.
+
+ Args:
+ method (str): The method name to check.
+ base_class (type): The base class.
+ derived_class (type | Any): The class or instance of the derived class.
+ """
+ assert isinstance(base_class, type), \
+ "base_class doesn't accept instance, Please pass class instead."
+
+ if not isinstance(derived_class, type):
+ derived_class = derived_class.__class__
+
+ base_method = getattr(base_class, method)
+ derived_method = getattr(derived_class, method)
+ return derived_method != base_method
+
+
+def has_method(obj: object, method: str) -> bool:
+ """Check whether the object has a method.
+
+ Args:
+ method (str): The method name to check.
+ obj (object): The object to check.
+
+ Returns:
+ bool: True if the object has the method else False.
+ """
+ return hasattr(obj, method) and callable(getattr(obj, method))
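+
+
+if __name__ == '__main__':
+    # Usage sketch (illustrative only) exercising a few helpers defined above.
+    assert to_2tuple(3) == (3, 3)
+    assert list_cast(['1', '2'], int) == [1, 2]
+    assert is_list_of([1, 2, 3], int)
+    assert slice_list([1, 2, 3, 4], [1, 3]) == [[1], [2, 3, 4]]
+    assert concat_list([[1, 2], [3]]) == [1, 2, 3]
+    assert has_method('abc', 'upper')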
diff --git a/annotator/uniformer/mmcv/utils/parrots_jit.py b/annotator/uniformer/mmcv/utils/parrots_jit.py
new file mode 100644
index 0000000000000000000000000000000000000000..61873f6dbb9b10ed972c90aa8faa321e3cb3249e
--- /dev/null
+++ b/annotator/uniformer/mmcv/utils/parrots_jit.py
@@ -0,0 +1,41 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import os
+
+from .parrots_wrapper import TORCH_VERSION
+
+parrots_jit_option = os.getenv('PARROTS_JIT_OPTION')
+
+if TORCH_VERSION == 'parrots' and parrots_jit_option == 'ON':
+ from parrots.jit import pat as jit
+else:
+
+ def jit(func=None,
+ check_input=None,
+ full_shape=True,
+ derivate=False,
+ coderize=False,
+ optimize=False):
+
+ def wrapper(func):
+
+ def wrapper_inner(*args, **kargs):
+ return func(*args, **kargs)
+
+ return wrapper_inner
+
+ if func is None:
+ return wrapper
+ else:
+ return func
+
+
+if TORCH_VERSION == 'parrots':
+ from parrots.utils.tester import skip_no_elena
+else:
+
+ def skip_no_elena(func):
+
+ def wrapper(*args, **kargs):
+ return func(*args, **kargs)
+
+ return wrapper
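+
+
+if __name__ == '__main__':
+    # Usage sketch (illustrative only): under plain PyTorch both decorators
+    # above are no-ops that simply return the wrapped function's result.
+    @jit(coderize=True, optimize=True)
+    def _add(a, b):
+        return a + b
+
+    assert _add(1, 2) == 3
+    assert skip_no_elena(lambda x: x * 2)(4) == 8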
diff --git a/annotator/uniformer/mmcv/utils/parrots_wrapper.py b/annotator/uniformer/mmcv/utils/parrots_wrapper.py
new file mode 100644
index 0000000000000000000000000000000000000000..93c97640d4b9ed088ca82cfe03e6efebfcfa9dbf
--- /dev/null
+++ b/annotator/uniformer/mmcv/utils/parrots_wrapper.py
@@ -0,0 +1,107 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from functools import partial
+
+import torch
+
+TORCH_VERSION = torch.__version__
+
+
+def is_rocm_pytorch() -> bool:
+ is_rocm = False
+ if TORCH_VERSION != 'parrots':
+ try:
+ from torch.utils.cpp_extension import ROCM_HOME
+ is_rocm = True if ((torch.version.hip is not None) and
+ (ROCM_HOME is not None)) else False
+ except ImportError:
+ pass
+ return is_rocm
+
+
+def _get_cuda_home():
+ if TORCH_VERSION == 'parrots':
+ from parrots.utils.build_extension import CUDA_HOME
+ else:
+ if is_rocm_pytorch():
+ from torch.utils.cpp_extension import ROCM_HOME
+ CUDA_HOME = ROCM_HOME
+ else:
+ from torch.utils.cpp_extension import CUDA_HOME
+ return CUDA_HOME
+
+
+def get_build_config():
+ if TORCH_VERSION == 'parrots':
+ from parrots.config import get_build_info
+ return get_build_info()
+ else:
+ return torch.__config__.show()
+
+
+def _get_conv():
+ if TORCH_VERSION == 'parrots':
+ from parrots.nn.modules.conv import _ConvNd, _ConvTransposeMixin
+ else:
+ from torch.nn.modules.conv import _ConvNd, _ConvTransposeMixin
+ return _ConvNd, _ConvTransposeMixin
+
+
+def _get_dataloader():
+ if TORCH_VERSION == 'parrots':
+ from torch.utils.data import DataLoader, PoolDataLoader
+ else:
+ from torch.utils.data import DataLoader
+ PoolDataLoader = DataLoader
+ return DataLoader, PoolDataLoader
+
+
+def _get_extension():
+ if TORCH_VERSION == 'parrots':
+ from parrots.utils.build_extension import BuildExtension, Extension
+ CppExtension = partial(Extension, cuda=False)
+ CUDAExtension = partial(Extension, cuda=True)
+ else:
+ from torch.utils.cpp_extension import (BuildExtension, CppExtension,
+ CUDAExtension)
+ return BuildExtension, CppExtension, CUDAExtension
+
+
+def _get_pool():
+ if TORCH_VERSION == 'parrots':
+ from parrots.nn.modules.pool import (_AdaptiveAvgPoolNd,
+ _AdaptiveMaxPoolNd, _AvgPoolNd,
+ _MaxPoolNd)
+ else:
+ from torch.nn.modules.pooling import (_AdaptiveAvgPoolNd,
+ _AdaptiveMaxPoolNd, _AvgPoolNd,
+ _MaxPoolNd)
+ return _AdaptiveAvgPoolNd, _AdaptiveMaxPoolNd, _AvgPoolNd, _MaxPoolNd
+
+
+def _get_norm():
+ if TORCH_VERSION == 'parrots':
+ from parrots.nn.modules.batchnorm import _BatchNorm, _InstanceNorm
+ SyncBatchNorm_ = torch.nn.SyncBatchNorm2d
+ else:
+ from torch.nn.modules.instancenorm import _InstanceNorm
+ from torch.nn.modules.batchnorm import _BatchNorm
+ SyncBatchNorm_ = torch.nn.SyncBatchNorm
+ return _BatchNorm, _InstanceNorm, SyncBatchNorm_
+
+
+_ConvNd, _ConvTransposeMixin = _get_conv()
+DataLoader, PoolDataLoader = _get_dataloader()
+BuildExtension, CppExtension, CUDAExtension = _get_extension()
+_BatchNorm, _InstanceNorm, SyncBatchNorm_ = _get_norm()
+_AdaptiveAvgPoolNd, _AdaptiveMaxPoolNd, _AvgPoolNd, _MaxPoolNd = _get_pool()
+
+
+class SyncBatchNorm(SyncBatchNorm_):
+
+ def _check_input_dim(self, input):
+ if TORCH_VERSION == 'parrots':
+ if input.dim() < 2:
+ raise ValueError(
+ f'expected at least 2D input (got {input.dim()}D input)')
+ else:
+ super()._check_input_dim(input)
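+
+
+if __name__ == '__main__':
+    # Usage sketch (illustrative only): show what the wrapper resolved for
+    # the current installation.
+    print('torch version:', TORCH_VERSION)
+    print('rocm build:', is_rocm_pytorch())
+    print('CUDA_HOME:', _get_cuda_home())
+    print('norm classes:', _BatchNorm, _InstanceNorm, SyncBatchNorm_)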
diff --git a/annotator/uniformer/mmcv/utils/path.py b/annotator/uniformer/mmcv/utils/path.py
new file mode 100644
index 0000000000000000000000000000000000000000..7dab4b3041413b1432b0f434b8b14783097d33c6
--- /dev/null
+++ b/annotator/uniformer/mmcv/utils/path.py
@@ -0,0 +1,101 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import os
+import os.path as osp
+from pathlib import Path
+
+from .misc import is_str
+
+
+def is_filepath(x):
+ return is_str(x) or isinstance(x, Path)
+
+
+def fopen(filepath, *args, **kwargs):
+ if is_str(filepath):
+ return open(filepath, *args, **kwargs)
+ elif isinstance(filepath, Path):
+ return filepath.open(*args, **kwargs)
+ raise ValueError('`filepath` should be a string or a Path')
+
+
+def check_file_exist(filename, msg_tmpl='file "{}" does not exist'):
+ if not osp.isfile(filename):
+ raise FileNotFoundError(msg_tmpl.format(filename))
+
+
+def mkdir_or_exist(dir_name, mode=0o777):
+ if dir_name == '':
+ return
+ dir_name = osp.expanduser(dir_name)
+ os.makedirs(dir_name, mode=mode, exist_ok=True)
+
+
+def symlink(src, dst, overwrite=True, **kwargs):
+ if os.path.lexists(dst) and overwrite:
+ os.remove(dst)
+ os.symlink(src, dst, **kwargs)
+
+
+def scandir(dir_path, suffix=None, recursive=False, case_sensitive=True):
+ """Scan a directory to find the interested files.
+
+ Args:
+ dir_path (str | obj:`Path`): Path of the directory.
+ suffix (str | tuple(str), optional): File suffix that we are
+ interested in. Default: None.
+ recursive (bool, optional): If set to True, recursively scan the
+ directory. Default: False.
+ case_sensitive (bool, optional): If set to False, ignore the case of
+ suffix. Default: True.
+
+ Returns:
+ A generator yielding the relative paths of all matched files.
+ """
+ if isinstance(dir_path, (str, Path)):
+ dir_path = str(dir_path)
+ else:
+ raise TypeError('"dir_path" must be a string or Path object')
+
+ if (suffix is not None) and not isinstance(suffix, (str, tuple)):
+ raise TypeError('"suffix" must be a string or tuple of strings')
+
+ if suffix is not None and not case_sensitive:
+ suffix = suffix.lower() if isinstance(suffix, str) else tuple(
+ item.lower() for item in suffix)
+
+ root = dir_path
+
+ def _scandir(dir_path, suffix, recursive, case_sensitive):
+ for entry in os.scandir(dir_path):
+ if not entry.name.startswith('.') and entry.is_file():
+ rel_path = osp.relpath(entry.path, root)
+ _rel_path = rel_path if case_sensitive else rel_path.lower()
+ if suffix is None or _rel_path.endswith(suffix):
+ yield rel_path
+ elif recursive and os.path.isdir(entry.path):
+ # scan recursively if entry.path is a directory
+ yield from _scandir(entry.path, suffix, recursive,
+ case_sensitive)
+
+ return _scandir(dir_path, suffix, recursive, case_sensitive)
+
+
+def find_vcs_root(path, markers=('.git', )):
+ """Finds the root directory (including itself) of specified markers.
+
+ Args:
+ path (str): Path of directory or file.
+ markers (list[str], optional): List of file or directory names.
+
+ Returns:
+ The directory containing one of the markers, or None if not found.
+ """
+ if osp.isfile(path):
+ path = osp.dirname(path)
+
+ prev, cur = None, osp.abspath(osp.expanduser(path))
+ while cur != prev:
+ if any(osp.exists(osp.join(cur, marker)) for marker in markers):
+ return cur
+ prev, cur = cur, osp.split(cur)[0]
+ return None
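+
+
+if __name__ == '__main__':
+    # Usage sketch (illustrative only): list the python files under the
+    # current directory and locate the enclosing VCS root, if any.
+    for rel_path in scandir('.', suffix='.py', recursive=True):
+        print(rel_path)
+    print('vcs root:', find_vcs_root('.'))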
diff --git a/annotator/uniformer/mmcv/utils/progressbar.py b/annotator/uniformer/mmcv/utils/progressbar.py
new file mode 100644
index 0000000000000000000000000000000000000000..0062f670dd94fa9da559ab26ef85517dcf5211c7
--- /dev/null
+++ b/annotator/uniformer/mmcv/utils/progressbar.py
@@ -0,0 +1,208 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import sys
+from collections.abc import Iterable
+from multiprocessing import Pool
+from shutil import get_terminal_size
+
+from .timer import Timer
+
+
+class ProgressBar:
+ """A progress bar which can print the progress."""
+
+ def __init__(self, task_num=0, bar_width=50, start=True, file=sys.stdout):
+ self.task_num = task_num
+ self.bar_width = bar_width
+ self.completed = 0
+ self.file = file
+ if start:
+ self.start()
+
+ @property
+ def terminal_width(self):
+ width, _ = get_terminal_size()
+ return width
+
+ def start(self):
+ if self.task_num > 0:
+ self.file.write(f'[{" " * self.bar_width}] 0/{self.task_num}, '
+ 'elapsed: 0s, ETA:')
+ else:
+ self.file.write('completed: 0, elapsed: 0s')
+ self.file.flush()
+ self.timer = Timer()
+
+ def update(self, num_tasks=1):
+ assert num_tasks > 0
+ self.completed += num_tasks
+ elapsed = self.timer.since_start()
+ if elapsed > 0:
+ fps = self.completed / elapsed
+ else:
+ fps = float('inf')
+ if self.task_num > 0:
+ percentage = self.completed / float(self.task_num)
+ eta = int(elapsed * (1 - percentage) / percentage + 0.5)
+ msg = f'\r[{{}}] {self.completed}/{self.task_num}, ' \
+ f'{fps:.1f} task/s, elapsed: {int(elapsed + 0.5)}s, ' \
+ f'ETA: {eta:5}s'
+
+ bar_width = min(self.bar_width,
+ int(self.terminal_width - len(msg)) + 2,
+ int(self.terminal_width * 0.6))
+ bar_width = max(2, bar_width)
+ mark_width = int(bar_width * percentage)
+ bar_chars = '>' * mark_width + ' ' * (bar_width - mark_width)
+ self.file.write(msg.format(bar_chars))
+ else:
+ self.file.write(
+ f'completed: {self.completed}, elapsed: {int(elapsed + 0.5)}s,'
+ f' {fps:.1f} tasks/s')
+ self.file.flush()
+
+
+def track_progress(func, tasks, bar_width=50, file=sys.stdout, **kwargs):
+ """Track the progress of tasks execution with a progress bar.
+
+ Tasks are done with a simple for-loop.
+
+ Args:
+ func (callable): The function to be applied to each task.
+ tasks (list or tuple[Iterable, int]): A list of tasks or
+ (tasks, total num).
+ bar_width (int): Width of progress bar.
+
+ Returns:
+ list: The task results.
+ """
+ if isinstance(tasks, tuple):
+ assert len(tasks) == 2
+ assert isinstance(tasks[0], Iterable)
+ assert isinstance(tasks[1], int)
+ task_num = tasks[1]
+ tasks = tasks[0]
+ elif isinstance(tasks, Iterable):
+ task_num = len(tasks)
+ else:
+ raise TypeError(
+ '"tasks" must be an iterable object or a (iterator, int) tuple')
+ prog_bar = ProgressBar(task_num, bar_width, file=file)
+ results = []
+ for task in tasks:
+ results.append(func(task, **kwargs))
+ prog_bar.update()
+ prog_bar.file.write('\n')
+ return results
+
+
+def init_pool(process_num, initializer=None, initargs=None):
+ if initializer is None:
+ return Pool(process_num)
+ elif initargs is None:
+ return Pool(process_num, initializer)
+ else:
+ if not isinstance(initargs, tuple):
+ raise TypeError('"initargs" must be a tuple')
+ return Pool(process_num, initializer, initargs)
+
+
+def track_parallel_progress(func,
+ tasks,
+ nproc,
+ initializer=None,
+ initargs=None,
+ bar_width=50,
+ chunksize=1,
+ skip_first=False,
+ keep_order=True,
+ file=sys.stdout):
+ """Track the progress of parallel task execution with a progress bar.
+
+ The built-in :mod:`multiprocessing` module is used for process pools and
+ tasks are done with :func:`Pool.map` or :func:`Pool.imap_unordered`.
+
+ Args:
+ func (callable): The function to be applied to each task.
+ tasks (list or tuple[Iterable, int]): A list of tasks or
+ (tasks, total num).
+ nproc (int): Process (worker) number.
+ initializer (None or callable): Refer to :class:`multiprocessing.Pool`
+ for details.
+ initargs (None or tuple): Refer to :class:`multiprocessing.Pool` for
+ details.
+ chunksize (int): Refer to :class:`multiprocessing.Pool` for details.
+ bar_width (int): Width of progress bar.
+ skip_first (bool): Whether to skip the first sample for each worker
+ when estimating fps, since the initialization step may takes
+ longer.
+ keep_order (bool): If True, :func:`Pool.imap` is used, otherwise
+ :func:`Pool.imap_unordered` is used.
+
+ Returns:
+ list: The task results.
+ """
+ if isinstance(tasks, tuple):
+ assert len(tasks) == 2
+ assert isinstance(tasks[0], Iterable)
+ assert isinstance(tasks[1], int)
+ task_num = tasks[1]
+ tasks = tasks[0]
+ elif isinstance(tasks, Iterable):
+ task_num = len(tasks)
+ else:
+ raise TypeError(
+ '"tasks" must be an iterable object or a (iterator, int) tuple')
+ pool = init_pool(nproc, initializer, initargs)
+ start = not skip_first
+ task_num -= nproc * chunksize * int(skip_first)
+ prog_bar = ProgressBar(task_num, bar_width, start, file=file)
+ results = []
+ if keep_order:
+ gen = pool.imap(func, tasks, chunksize)
+ else:
+ gen = pool.imap_unordered(func, tasks, chunksize)
+ for result in gen:
+ results.append(result)
+ if skip_first:
+ if len(results) < nproc * chunksize:
+ continue
+ elif len(results) == nproc * chunksize:
+ prog_bar.start()
+ continue
+ prog_bar.update()
+ prog_bar.file.write('\n')
+ pool.close()
+ pool.join()
+ return results
+
+
+def track_iter_progress(tasks, bar_width=50, file=sys.stdout):
+ """Track the progress of tasks iteration or enumeration with a progress
+ bar.
+
+ Tasks are yielded with a simple for-loop.
+
+ Args:
+ tasks (list or tuple[Iterable, int]): A list of tasks or
+ (tasks, total num).
+ bar_width (int): Width of progress bar.
+
+ Yields:
+ The next task in ``tasks``.
+ """
+ if isinstance(tasks, tuple):
+ assert len(tasks) == 2
+ assert isinstance(tasks[0], Iterable)
+ assert isinstance(tasks[1], int)
+ task_num = tasks[1]
+ tasks = tasks[0]
+ elif isinstance(tasks, Iterable):
+ task_num = len(tasks)
+ else:
+ raise TypeError(
+ '"tasks" must be an iterable object or a (iterator, int) tuple')
+ prog_bar = ProgressBar(task_num, bar_width, file=file)
+ for task in tasks:
+ yield task
+ prog_bar.update()
+ prog_bar.file.write('\n')
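+
+
+if __name__ == '__main__':
+    # Usage sketch (illustrative only): track a trivial workload with the
+    # sequential helper; the parallel variant takes the same (func, tasks).
+    import time
+
+    def _square(x):
+        time.sleep(0.01)
+        return x * x
+
+    results = track_progress(_square, list(range(20)), bar_width=40)
+    assert results[3] == 9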
diff --git a/annotator/uniformer/mmcv/utils/registry.py b/annotator/uniformer/mmcv/utils/registry.py
new file mode 100644
index 0000000000000000000000000000000000000000..fa9df39bc9f3d8d568361e7250ab35468f2b74e0
--- /dev/null
+++ b/annotator/uniformer/mmcv/utils/registry.py
@@ -0,0 +1,315 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import inspect
+import warnings
+from functools import partial
+
+from .misc import is_seq_of
+
+
+def build_from_cfg(cfg, registry, default_args=None):
+ """Build a module from config dict.
+
+ Args:
+ cfg (dict): Config dict. It should at least contain the key "type".
+ registry (:obj:`Registry`): The registry to search the type from.
+ default_args (dict, optional): Default initialization arguments.
+
+ Returns:
+ object: The constructed object.
+ """
+ if not isinstance(cfg, dict):
+ raise TypeError(f'cfg must be a dict, but got {type(cfg)}')
+ if 'type' not in cfg:
+ if default_args is None or 'type' not in default_args:
+ raise KeyError(
+ '`cfg` or `default_args` must contain the key "type", '
+ f'but got {cfg}\n{default_args}')
+ if not isinstance(registry, Registry):
+ raise TypeError('registry must be an mmcv.Registry object, '
+ f'but got {type(registry)}')
+ if not (isinstance(default_args, dict) or default_args is None):
+ raise TypeError('default_args must be a dict or None, '
+ f'but got {type(default_args)}')
+
+ args = cfg.copy()
+
+ if default_args is not None:
+ for name, value in default_args.items():
+ args.setdefault(name, value)
+
+ obj_type = args.pop('type')
+ if isinstance(obj_type, str):
+ obj_cls = registry.get(obj_type)
+ if obj_cls is None:
+ raise KeyError(
+ f'{obj_type} is not in the {registry.name} registry')
+ elif inspect.isclass(obj_type):
+ obj_cls = obj_type
+ else:
+ raise TypeError(
+ f'type must be a str or valid type, but got {type(obj_type)}')
+ try:
+ return obj_cls(**args)
+ except Exception as e:
+ # Normal TypeError does not print class name.
+ raise type(e)(f'{obj_cls.__name__}: {e}')
+
+
+class Registry:
+ """A registry to map strings to classes.
+
+ Registered objects can be built from the registry.
+ Example:
+ >>> MODELS = Registry('models')
+ >>> @MODELS.register_module()
+ >>> class ResNet:
+ >>> pass
+ >>> resnet = MODELS.build(dict(type='ResNet'))
+
+ Please refer to
+ https://mmcv.readthedocs.io/en/latest/understand_mmcv/registry.html for
+ advanced usage.
+
+ Args:
+ name (str): Registry name.
+ build_func (callable, optional): Build function to construct an instance
+ from the registry; :func:`build_from_cfg` is used if neither ``parent`` nor
+ ``build_func`` is specified. If ``parent`` is specified and
+ ``build_func`` is not given, ``build_func`` will be inherited
+ from ``parent``. Default: None.
+ parent (Registry, optional): Parent registry. Classes registered in a
+ child registry can be built from the parent. Default: None.
+ scope (str, optional): The scope of registry. It is the key to search
+ for children registry. If not specified, scope will be the name of
+ the package where class is defined, e.g. mmdet, mmcls, mmseg.
+ Default: None.
+ """
+
+ def __init__(self, name, build_func=None, parent=None, scope=None):
+ self._name = name
+ self._module_dict = dict()
+ self._children = dict()
+ self._scope = self.infer_scope() if scope is None else scope
+
+ # self.build_func will be set with the following priority:
+ # 1. build_func
+ # 2. parent.build_func
+ # 3. build_from_cfg
+ if build_func is None:
+ if parent is not None:
+ self.build_func = parent.build_func
+ else:
+ self.build_func = build_from_cfg
+ else:
+ self.build_func = build_func
+ if parent is not None:
+ assert isinstance(parent, Registry)
+ parent._add_children(self)
+ self.parent = parent
+ else:
+ self.parent = None
+
+ def __len__(self):
+ return len(self._module_dict)
+
+ def __contains__(self, key):
+ return self.get(key) is not None
+
+ def __repr__(self):
+ format_str = self.__class__.__name__ + \
+ f'(name={self._name}, ' \
+ f'items={self._module_dict})'
+ return format_str
+
+ @staticmethod
+ def infer_scope():
+ """Infer the scope of registry.
+
+ The name of the package where registry is defined will be returned.
+
+ Example:
+ # in mmdet/models/backbone/resnet.py
+ >>> MODELS = Registry('models')
+ >>> @MODELS.register_module()
+ >>> class ResNet:
+ >>> pass
+ The scope of ``ResNet`` will be ``mmdet``.
+
+
+ Returns:
+ scope (str): The inferred scope name.
+ """
+ # inspect.stack() traces the call chain; index 2 is the frame in which
+ # the Registry is instantiated, and its package name gives the scope
+ filename = inspect.getmodule(inspect.stack()[2][0]).__name__
+ split_filename = filename.split('.')
+ return split_filename[0]
+
+ @staticmethod
+ def split_scope_key(key):
+ """Split scope and key.
+
+ The first scope will be split from key.
+
+ Examples:
+ >>> Registry.split_scope_key('mmdet.ResNet')
+ 'mmdet', 'ResNet'
+ >>> Registry.split_scope_key('ResNet')
+ None, 'ResNet'
+
+ Returns:
+ scope (str, None): The first scope.
+ key (str): The remaining key.
+ """
+ split_index = key.find('.')
+ if split_index != -1:
+ return key[:split_index], key[split_index + 1:]
+ else:
+ return None, key
+
+ @property
+ def name(self):
+ return self._name
+
+ @property
+ def scope(self):
+ return self._scope
+
+ @property
+ def module_dict(self):
+ return self._module_dict
+
+ @property
+ def children(self):
+ return self._children
+
+ def get(self, key):
+ """Get the registry record.
+
+ Args:
+ key (str): The class name in string format.
+
+ Returns:
+ class: The corresponding class.
+ """
+ scope, real_key = self.split_scope_key(key)
+ if scope is None or scope == self._scope:
+ # get from self
+ if real_key in self._module_dict:
+ return self._module_dict[real_key]
+ else:
+ # get from self._children
+ if scope in self._children:
+ return self._children[scope].get(real_key)
+ else:
+ # goto root
+ parent = self.parent
+ while parent.parent is not None:
+ parent = parent.parent
+ return parent.get(key)
+
+ def build(self, *args, **kwargs):
+ return self.build_func(*args, **kwargs, registry=self)
+
+ def _add_children(self, registry):
+ """Add children for a registry.
+
+ The ``registry`` will be added as children based on its scope.
+ The parent registry could build objects from children registry.
+
+ Example:
+ >>> models = Registry('models')
+ >>> mmdet_models = Registry('models', parent=models)
+ >>> @mmdet_models.register_module()
+ >>> class ResNet:
+ >>> pass
+ >>> resnet = models.build(dict(type='mmdet.ResNet'))
+ """
+
+ assert isinstance(registry, Registry)
+ assert registry.scope is not None
+ assert registry.scope not in self.children, \
+ f'scope {registry.scope} exists in {self.name} registry'
+ self.children[registry.scope] = registry
+
+ def _register_module(self, module_class, module_name=None, force=False):
+ if not inspect.isclass(module_class):
+ raise TypeError('module must be a class, '
+ f'but got {type(module_class)}')
+
+ if module_name is None:
+ module_name = module_class.__name__
+ if isinstance(module_name, str):
+ module_name = [module_name]
+ for name in module_name:
+ if not force and name in self._module_dict:
+ raise KeyError(f'{name} is already registered '
+ f'in {self.name}')
+ self._module_dict[name] = module_class
+
+ def deprecated_register_module(self, cls=None, force=False):
+ warnings.warn(
+ 'The old API of register_module(module, force=False) '
+ 'is deprecated and will be removed, please use the new API '
+ 'register_module(name=None, force=False, module=None) instead.')
+ if cls is None:
+ return partial(self.deprecated_register_module, force=force)
+ self._register_module(cls, force=force)
+ return cls
+
+ def register_module(self, name=None, force=False, module=None):
+ """Register a module.
+
+ A record will be added to `self._module_dict`, whose key is the class
+ name or the specified name, and value is the class itself.
+ It can be used as a decorator or a normal function.
+
+ Example:
+ >>> backbones = Registry('backbone')
+ >>> @backbones.register_module()
+ >>> class ResNet:
+ >>> pass
+
+ >>> backbones = Registry('backbone')
+ >>> @backbones.register_module(name='mnet')
+ >>> class MobileNet:
+ >>> pass
+
+ >>> backbones = Registry('backbone')
+ >>> class ResNet:
+ >>> pass
+ >>> backbones.register_module(ResNet)
+
+ Args:
+ name (str | None): The module name to be registered. If not
+ specified, the class name will be used.
+ force (bool, optional): Whether to override an existing class with
+ the same name. Default: False.
+ module (type): Module class to be registered.
+ """
+ if not isinstance(force, bool):
+ raise TypeError(f'force must be a boolean, but got {type(force)}')
+ # NOTE: This is a workaround to be compatible with the old API,
+ # while it may introduce unexpected bugs.
+ if isinstance(name, type):
+ return self.deprecated_register_module(name, force=force)
+
+ # raise the error ahead of time
+ if not (name is None or isinstance(name, str) or is_seq_of(name, str)):
+ raise TypeError(
+ 'name must be either of None, an instance of str or a sequence'
+ f' of str, but got {type(name)}')
+
+ # use it as a normal method: x.register_module(module=SomeClass)
+ if module is not None:
+ self._register_module(
+ module_class=module, module_name=name, force=force)
+ return module
+
+ # use it as a decorator: @x.register_module()
+ def _register(cls):
+ self._register_module(
+ module_class=cls, module_name=name, force=force)
+ return cls
+
+ return _register
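+
+
+if __name__ == '__main__':
+    # Usage sketch (illustrative only); the registry name 'demo_models' and
+    # the Linear class are hypothetical examples.
+    DEMO_MODELS = Registry('demo_models')
+
+    @DEMO_MODELS.register_module()
+    class Linear:
+
+        def __init__(self, in_dim, out_dim):
+            self.in_dim = in_dim
+            self.out_dim = out_dim
+
+    layer = DEMO_MODELS.build(dict(type='Linear', in_dim=8, out_dim=2))
+    assert isinstance(layer, Linear) and layer.out_dim == 2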
diff --git a/annotator/uniformer/mmcv/utils/testing.py b/annotator/uniformer/mmcv/utils/testing.py
new file mode 100644
index 0000000000000000000000000000000000000000..a27f936da8ec14bac18562ede0a79d476d82f797
--- /dev/null
+++ b/annotator/uniformer/mmcv/utils/testing.py
@@ -0,0 +1,140 @@
+# Copyright (c) Open-MMLab.
+import sys
+from collections.abc import Iterable
+from runpy import run_path
+from shlex import split
+from typing import Any, Dict, List
+from unittest.mock import patch
+
+
+def check_python_script(cmd):
+ """Run the python cmd script with `__main__`. The difference between
+ `os.system` is that, this function exectues code in the current process, so
+ that it can be tracked by coverage tools. Currently it supports two forms:
+
+ - ./tests/data/scripts/hello.py zz
+ - python tests/data/scripts/hello.py zz
+ """
+ args = split(cmd)
+ if args[0] == 'python':
+ args = args[1:]
+ with patch.object(sys, 'argv', args):
+ run_path(args[0], run_name='__main__')
+
+
+def _any(judge_result):
+ """Since built-in ``any`` works only when the element of iterable is not
+ iterable, implement the function."""
+ if not isinstance(judge_result, Iterable):
+ return judge_result
+
+ try:
+ for element in judge_result:
+ if _any(element):
+ return True
+ except TypeError:
+ # Maybe encounter the case: torch.tensor(True) | torch.tensor(False)
+ if judge_result:
+ return True
+ return False
+
+
+def assert_dict_contains_subset(dict_obj: Dict[Any, Any],
+ expected_subset: Dict[Any, Any]) -> bool:
+ """Check if the dict_obj contains the expected_subset.
+
+ Args:
+ dict_obj (Dict[Any, Any]): Dict object to be checked.
+ expected_subset (Dict[Any, Any]): Subset expected to be contained in
+ dict_obj.
+
+ Returns:
+ bool: Whether the dict_obj contains the expected_subset.
+ """
+
+ for key, value in expected_subset.items():
+ if key not in dict_obj.keys() or _any(dict_obj[key] != value):
+ return False
+ return True
+
+
+def assert_attrs_equal(obj: Any, expected_attrs: Dict[str, Any]) -> bool:
+ """Check if attribute of class object is correct.
+
+ Args:
+ obj (object): Class object to be checked.
+ expected_attrs (Dict[str, Any]): Dict of the expected attrs.
+
+ Returns:
+ bool: Whether the attribute of class object is correct.
+ """
+ for attr, value in expected_attrs.items():
+ if not hasattr(obj, attr) or _any(getattr(obj, attr) != value):
+ return False
+ return True
+
+
+def assert_dict_has_keys(obj: Dict[str, Any],
+ expected_keys: List[str]) -> bool:
+ """Check if the obj has all the expected_keys.
+
+ Args:
+ obj (Dict[str, Any]): Object to be checked.
+ expected_keys (List[str]): Keys expected to contained in the keys of
+ the obj.
+
+ Returns:
+ bool: Whether the obj has the expected keys.
+ """
+ return set(expected_keys).issubset(set(obj.keys()))
+
+
+def assert_keys_equal(result_keys: List[str], target_keys: List[str]) -> bool:
+ """Check if target_keys is equal to result_keys.
+
+ Args:
+ result_keys (List[str]): Result keys to be checked.
+ target_keys (List[str]): Target keys to be checked.
+
+ Returns:
+ bool: Whether target_keys is equal to result_keys.
+ """
+ return set(result_keys) == set(target_keys)
+
+
+def assert_is_norm_layer(module) -> bool:
+ """Check if the module is a norm layer.
+
+ Args:
+ module (nn.Module): The module to be checked.
+
+ Returns:
+ bool: Whether the module is a norm layer.
+ """
+ from .parrots_wrapper import _BatchNorm, _InstanceNorm
+ from torch.nn import GroupNorm, LayerNorm
+ norm_layer_candidates = (_BatchNorm, _InstanceNorm, GroupNorm, LayerNorm)
+ return isinstance(module, norm_layer_candidates)
+
+
+def assert_params_all_zeros(module) -> bool:
+    """Check if the parameters of the module are all zeros.
+
+ Args:
+ module (nn.Module): The module to be checked.
+
+ Returns:
+        bool: Whether the parameters of the module are all zeros.
+ """
+ weight_data = module.weight.data
+ is_weight_zero = weight_data.allclose(
+ weight_data.new_zeros(weight_data.size()))
+
+ if hasattr(module, 'bias') and module.bias is not None:
+ bias_data = module.bias.data
+ is_bias_zero = bias_data.allclose(
+ bias_data.new_zeros(bias_data.size()))
+ else:
+ is_bias_zero = True
+
+ return is_weight_zero and is_bias_zero
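+
+
+# Illustrative usage sketch (not part of the upstream mmcv file): a quick,
+# self-contained check of the helpers above, runnable as a script.
+if __name__ == '__main__':
+    assert assert_dict_contains_subset({'a': 1, 'b': 2}, {'a': 1})
+    assert not assert_dict_contains_subset({'a': 1}, {'a': 2})
+    assert assert_dict_has_keys({'a': 1, 'b': 2}, ['a'])
+    assert assert_keys_equal(['a', 'b'], ['b', 'a'])
+    print('testing helpers behave as documented')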
diff --git a/annotator/uniformer/mmcv/utils/timer.py b/annotator/uniformer/mmcv/utils/timer.py
new file mode 100644
index 0000000000000000000000000000000000000000..e3db7d497d8b374e18b5297e0a1d6eb186fd8cba
--- /dev/null
+++ b/annotator/uniformer/mmcv/utils/timer.py
@@ -0,0 +1,118 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from time import time
+
+
+class TimerError(Exception):
+
+ def __init__(self, message):
+ self.message = message
+ super(TimerError, self).__init__(message)
+
+
+class Timer:
+ """A flexible Timer class.
+
+ :Example:
+
+ >>> import time
+ >>> import annotator.uniformer.mmcv as mmcv
+ >>> with mmcv.Timer():
+ >>> # simulate a code block that will run for 1s
+ >>> time.sleep(1)
+ 1.000
+ >>> with mmcv.Timer(print_tmpl='it takes {:.1f} seconds'):
+ >>> # simulate a code block that will run for 1s
+ >>> time.sleep(1)
+ it takes 1.0 seconds
+ >>> timer = mmcv.Timer()
+ >>> time.sleep(0.5)
+ >>> print(timer.since_start())
+ 0.500
+ >>> time.sleep(0.5)
+ >>> print(timer.since_last_check())
+ 0.500
+ >>> print(timer.since_start())
+ 1.000
+ """
+
+ def __init__(self, start=True, print_tmpl=None):
+ self._is_running = False
+ self.print_tmpl = print_tmpl if print_tmpl else '{:.3f}'
+ if start:
+ self.start()
+
+ @property
+ def is_running(self):
+ """bool: indicate whether the timer is running"""
+ return self._is_running
+
+ def __enter__(self):
+ self.start()
+ return self
+
+ def __exit__(self, type, value, traceback):
+ print(self.print_tmpl.format(self.since_last_check()))
+ self._is_running = False
+
+ def start(self):
+ """Start the timer."""
+ if not self._is_running:
+ self._t_start = time()
+ self._is_running = True
+ self._t_last = time()
+
+    def since_start(self):
+        """Total time since the timer was started.
+
+ Returns (float): Time in seconds.
+ """
+ if not self._is_running:
+ raise TimerError('timer is not running')
+ self._t_last = time()
+ return self._t_last - self._t_start
+
+    def since_last_check(self):
+        """Time since the last check.
+
+        Either :func:`since_start` or :func:`since_last_check` counts as a
+        checking operation.
+
+ Returns (float): Time in seconds.
+ """
+ if not self._is_running:
+ raise TimerError('timer is not running')
+ dur = time() - self._t_last
+ self._t_last = time()
+ return dur
+
+
+_g_timers = {} # global timers
+
+
+def check_time(timer_id):
+ """Add check points in a single line.
+
+ This method is suitable for running a task on a list of items. A timer will
+ be registered when the method is called for the first time.
+
+ :Example:
+
+ >>> import time
+ >>> import annotator.uniformer.mmcv as mmcv
+ >>> for i in range(1, 6):
+ >>> # simulate a code block
+ >>> time.sleep(i)
+ >>> mmcv.check_time('task1')
+ 2.000
+ 3.000
+ 4.000
+ 5.000
+
+ Args:
+ timer_id (str): Timer identifier.
+ """
+ if timer_id not in _g_timers:
+ _g_timers[timer_id] = Timer()
+ return 0
+ else:
+ return _g_timers[timer_id].since_last_check()
diff --git a/annotator/uniformer/mmcv/utils/trace.py b/annotator/uniformer/mmcv/utils/trace.py
new file mode 100644
index 0000000000000000000000000000000000000000..5ca99dc3eda05ef980d9a4249b50deca8273b6cc
--- /dev/null
+++ b/annotator/uniformer/mmcv/utils/trace.py
@@ -0,0 +1,23 @@
+import warnings
+
+import torch
+
+from annotator.uniformer.mmcv.utils import digit_version
+
+
+def is_jit_tracing() -> bool:
+ if (torch.__version__ != 'parrots'
+ and digit_version(torch.__version__) >= digit_version('1.6.0')):
+ on_trace = torch.jit.is_tracing()
+ # In PyTorch 1.6, torch.jit.is_tracing has a bug.
+ # Refers to https://github.com/pytorch/pytorch/issues/42448
+ if isinstance(on_trace, bool):
+ return on_trace
+ else:
+ return torch._C._is_tracing()
+ else:
+ warnings.warn(
+ 'torch.jit.is_tracing is only supported after v1.6.0. '
+ 'Therefore is_tracing returns False automatically. Please '
+ 'set on_trace manually if you are using trace.', UserWarning)
+ return False
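+
+
+# Illustrative usage sketch (not part of the upstream mmcv file): outside an
+# actual torch.jit.trace call this simply reports False.
+if __name__ == '__main__':
+    print('currently tracing:', is_jit_tracing())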
diff --git a/annotator/uniformer/mmcv/utils/version_utils.py b/annotator/uniformer/mmcv/utils/version_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..963c45a2e8a86a88413ab6c18c22481fb9831985
--- /dev/null
+++ b/annotator/uniformer/mmcv/utils/version_utils.py
@@ -0,0 +1,90 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import os
+import subprocess
+import warnings
+
+from packaging.version import parse
+
+
+def digit_version(version_str: str, length: int = 4):
+ """Convert a version string into a tuple of integers.
+
+ This method is usually used for comparing two versions. For pre-release
+ versions: alpha < beta < rc.
+
+ Args:
+ version_str (str): The version string.
+ length (int): The maximum number of version levels. Default: 4.
+
+ Returns:
+ tuple[int]: The version info in digits (integers).
+ """
+ assert 'parrots' not in version_str
+ version = parse(version_str)
+ assert version.release, f'failed to parse version {version_str}'
+ release = list(version.release)
+ release = release[:length]
+ if len(release) < length:
+ release = release + [0] * (length - len(release))
+ if version.is_prerelease:
+ mapping = {'a': -3, 'b': -2, 'rc': -1}
+ val = -4
+ # version.pre can be None
+ if version.pre:
+ if version.pre[0] not in mapping:
+ warnings.warn(f'unknown prerelease version {version.pre[0]}, '
+ 'version checking may go wrong')
+ else:
+ val = mapping[version.pre[0]]
+ release.extend([val, version.pre[-1]])
+ else:
+ release.extend([val, 0])
+
+ elif version.is_postrelease:
+ release.extend([1, version.post])
+ else:
+ release.extend([0, 0])
+ return tuple(release)
+
+
+def _minimal_ext_cmd(cmd):
+ # construct minimal environment
+ env = {}
+ for k in ['SYSTEMROOT', 'PATH', 'HOME']:
+ v = os.environ.get(k)
+ if v is not None:
+ env[k] = v
+ # LANGUAGE is used on win32
+ env['LANGUAGE'] = 'C'
+ env['LANG'] = 'C'
+ env['LC_ALL'] = 'C'
+ out = subprocess.Popen(
+ cmd, stdout=subprocess.PIPE, env=env).communicate()[0]
+ return out
+
+
+def get_git_hash(fallback='unknown', digits=None):
+ """Get the git hash of the current repo.
+
+ Args:
+ fallback (str, optional): The fallback string when git hash is
+ unavailable. Defaults to 'unknown'.
+ digits (int, optional): kept digits of the hash. Defaults to None,
+ meaning all digits are kept.
+
+ Returns:
+ str: Git commit hash.
+ """
+
+ if digits is not None and not isinstance(digits, int):
+ raise TypeError('digits must be None or an integer')
+
+ try:
+ out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD'])
+ sha = out.strip().decode('ascii')
+ if digits is not None:
+ sha = sha[:digits]
+ except OSError:
+ sha = fallback
+
+ return sha
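+
+
+# Illustrative sanity check (not part of the upstream mmcv file): worked
+# examples of the digit_version() mapping documented above.
+if __name__ == '__main__':
+    assert digit_version('1.6.0') == (1, 6, 0, 0, 0, 0)
+    assert digit_version('1.7.0rc1') == (1, 7, 0, 0, -1, 1)  # 'rc' -> -1
+    assert digit_version('1.6.0a1') < digit_version('1.6.0rc1')
+    assert digit_version('1.6.0rc1') < digit_version('1.6.0')
+    print('digit_version examples hold')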
diff --git a/annotator/uniformer/mmcv/version.py b/annotator/uniformer/mmcv/version.py
new file mode 100644
index 0000000000000000000000000000000000000000..1cce4e50bd692d4002e3cac3c545a3fb2efe95d0
--- /dev/null
+++ b/annotator/uniformer/mmcv/version.py
@@ -0,0 +1,35 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+__version__ = '1.3.17'
+
+
+def parse_version_info(version_str: str, length: int = 4) -> tuple:
+ """Parse a version string into a tuple.
+
+ Args:
+ version_str (str): The version string.
+ length (int): The maximum number of version levels. Default: 4.
+
+ Returns:
+ tuple[int | str]: The version info, e.g., "1.3.0" is parsed into
+ (1, 3, 0, 0, 0, 0), and "2.0.0rc1" is parsed into
+ (2, 0, 0, 0, 'rc', 1) (when length is set to 4).
+ """
+ from packaging.version import parse
+ version = parse(version_str)
+ assert version.release, f'failed to parse version {version_str}'
+ release = list(version.release)
+ release = release[:length]
+ if len(release) < length:
+ release = release + [0] * (length - len(release))
+ if version.is_prerelease:
+ release.extend(list(version.pre))
+ elif version.is_postrelease:
+ release.extend(list(version.post))
+ else:
+ release.extend([0, 0])
+ return tuple(release)
+
+
+version_info = tuple(int(x) for x in __version__.split('.')[:3])
+
+__all__ = ['__version__', 'version_info', 'parse_version_info']
diff --git a/annotator/uniformer/mmcv/video/__init__.py b/annotator/uniformer/mmcv/video/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..73199b01dec52820dc6ca0139903536344d5a1eb
--- /dev/null
+++ b/annotator/uniformer/mmcv/video/__init__.py
@@ -0,0 +1,11 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from .io import Cache, VideoReader, frames2video
+from .optflow import (dequantize_flow, flow_from_bytes, flow_warp, flowread,
+ flowwrite, quantize_flow, sparse_flow_from_bytes)
+from .processing import concat_video, convert_video, cut_video, resize_video
+
+__all__ = [
+ 'Cache', 'VideoReader', 'frames2video', 'convert_video', 'resize_video',
+ 'cut_video', 'concat_video', 'flowread', 'flowwrite', 'quantize_flow',
+ 'dequantize_flow', 'flow_warp', 'flow_from_bytes', 'sparse_flow_from_bytes'
+]
diff --git a/annotator/uniformer/mmcv/video/__pycache__/__init__.cpython-310.pyc b/annotator/uniformer/mmcv/video/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..19c432ebec36c35e6c2504b675b843fb34048760
Binary files /dev/null and b/annotator/uniformer/mmcv/video/__pycache__/__init__.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/video/__pycache__/__init__.cpython-38.pyc b/annotator/uniformer/mmcv/video/__pycache__/__init__.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8ba9f8002a8e00b644eb7c32f92b9b2787995b55
Binary files /dev/null and b/annotator/uniformer/mmcv/video/__pycache__/__init__.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/video/__pycache__/io.cpython-310.pyc b/annotator/uniformer/mmcv/video/__pycache__/io.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a5c59ac1763628eb6a9ba552cc1f9df0463b41d0
Binary files /dev/null and b/annotator/uniformer/mmcv/video/__pycache__/io.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/video/__pycache__/io.cpython-38.pyc b/annotator/uniformer/mmcv/video/__pycache__/io.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b4ae4beef29531217e43b6b49c6fd23ccec1f657
Binary files /dev/null and b/annotator/uniformer/mmcv/video/__pycache__/io.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/video/__pycache__/optflow.cpython-310.pyc b/annotator/uniformer/mmcv/video/__pycache__/optflow.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b9c77ae1a5f59c1bfc61aa0716f06ef465a44ac4
Binary files /dev/null and b/annotator/uniformer/mmcv/video/__pycache__/optflow.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/video/__pycache__/optflow.cpython-38.pyc b/annotator/uniformer/mmcv/video/__pycache__/optflow.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f4e8aede85c25d1e18ddd18a6486e3a68a714ad7
Binary files /dev/null and b/annotator/uniformer/mmcv/video/__pycache__/optflow.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/video/__pycache__/processing.cpython-310.pyc b/annotator/uniformer/mmcv/video/__pycache__/processing.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5b504d080fd42c89cc85d22936e9fd0baaf86fe9
Binary files /dev/null and b/annotator/uniformer/mmcv/video/__pycache__/processing.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/video/__pycache__/processing.cpython-38.pyc b/annotator/uniformer/mmcv/video/__pycache__/processing.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0ce8d6f4e695541c37e7da8b541d26a26cb5654f
Binary files /dev/null and b/annotator/uniformer/mmcv/video/__pycache__/processing.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/video/io.py b/annotator/uniformer/mmcv/video/io.py
new file mode 100644
index 0000000000000000000000000000000000000000..9879154227f640c262853b92c219461c6f67ee8e
--- /dev/null
+++ b/annotator/uniformer/mmcv/video/io.py
@@ -0,0 +1,318 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import os.path as osp
+from collections import OrderedDict
+
+import cv2
+from cv2 import (CAP_PROP_FOURCC, CAP_PROP_FPS, CAP_PROP_FRAME_COUNT,
+ CAP_PROP_FRAME_HEIGHT, CAP_PROP_FRAME_WIDTH,
+ CAP_PROP_POS_FRAMES, VideoWriter_fourcc)
+
+from annotator.uniformer.mmcv.utils import (check_file_exist, mkdir_or_exist, scandir,
+ track_progress)
+
+
+class Cache:
+
+ def __init__(self, capacity):
+ self._cache = OrderedDict()
+ self._capacity = int(capacity)
+ if capacity <= 0:
+ raise ValueError('capacity must be a positive integer')
+
+ @property
+ def capacity(self):
+ return self._capacity
+
+ @property
+ def size(self):
+ return len(self._cache)
+
+ def put(self, key, val):
+ if key in self._cache:
+ return
+ if len(self._cache) >= self.capacity:
+ self._cache.popitem(last=False)
+ self._cache[key] = val
+
+ def get(self, key, default=None):
+ val = self._cache[key] if key in self._cache else default
+ return val
+
+
+class VideoReader:
+ """Video class with similar usage to a list object.
+
+    This video wrapper class provides convenient APIs to access frames.
+    OpenCV's VideoCapture class has an issue that jumping to a certain frame
+    may be inaccurate; it is fixed in this class by checking the position
+    after each jump.
+    A cache is used when decoding videos, so if the same frame is visited a
+    second time and is stored in the cache, there is no need to decode it
+    again.
+
+ :Example:
+
+ >>> import annotator.uniformer.mmcv as mmcv
+ >>> v = mmcv.VideoReader('sample.mp4')
+ >>> len(v) # get the total frame number with `len()`
+ 120
+ >>> for img in v: # v is iterable
+ >>> mmcv.imshow(img)
+ >>> v[5] # get the 6th frame
+ """
+
+ def __init__(self, filename, cache_capacity=10):
+ # Check whether the video path is a url
+ if not filename.startswith(('https://', 'http://')):
+ check_file_exist(filename, 'Video file not found: ' + filename)
+ self._vcap = cv2.VideoCapture(filename)
+ assert cache_capacity > 0
+ self._cache = Cache(cache_capacity)
+ self._position = 0
+ # get basic info
+ self._width = int(self._vcap.get(CAP_PROP_FRAME_WIDTH))
+ self._height = int(self._vcap.get(CAP_PROP_FRAME_HEIGHT))
+ self._fps = self._vcap.get(CAP_PROP_FPS)
+ self._frame_cnt = int(self._vcap.get(CAP_PROP_FRAME_COUNT))
+ self._fourcc = self._vcap.get(CAP_PROP_FOURCC)
+
+ @property
+ def vcap(self):
+ """:obj:`cv2.VideoCapture`: The raw VideoCapture object."""
+ return self._vcap
+
+ @property
+ def opened(self):
+ """bool: Indicate whether the video is opened."""
+ return self._vcap.isOpened()
+
+ @property
+ def width(self):
+ """int: Width of video frames."""
+ return self._width
+
+ @property
+ def height(self):
+ """int: Height of video frames."""
+ return self._height
+
+ @property
+ def resolution(self):
+ """tuple: Video resolution (width, height)."""
+ return (self._width, self._height)
+
+ @property
+ def fps(self):
+ """float: FPS of the video."""
+ return self._fps
+
+ @property
+ def frame_cnt(self):
+ """int: Total frames of the video."""
+ return self._frame_cnt
+
+ @property
+ def fourcc(self):
+ """str: "Four character code" of the video."""
+ return self._fourcc
+
+ @property
+ def position(self):
+ """int: Current cursor position, indicating frame decoded."""
+ return self._position
+
+ def _get_real_position(self):
+ return int(round(self._vcap.get(CAP_PROP_POS_FRAMES)))
+
+ def _set_real_position(self, frame_id):
+ self._vcap.set(CAP_PROP_POS_FRAMES, frame_id)
+ pos = self._get_real_position()
+ for _ in range(frame_id - pos):
+ self._vcap.read()
+ self._position = frame_id
+
+ def read(self):
+ """Read the next frame.
+
+        If the next frame has been decoded before and is in the cache, it is
+        returned directly; otherwise it is decoded, cached and returned.
+
+ Returns:
+ ndarray or None: Return the frame if successful, otherwise None.
+ """
+ # pos = self._position
+ if self._cache:
+ img = self._cache.get(self._position)
+ if img is not None:
+ ret = True
+ else:
+ if self._position != self._get_real_position():
+ self._set_real_position(self._position)
+ ret, img = self._vcap.read()
+ if ret:
+ self._cache.put(self._position, img)
+ else:
+ ret, img = self._vcap.read()
+ if ret:
+ self._position += 1
+ return img
+
+ def get_frame(self, frame_id):
+ """Get frame by index.
+
+ Args:
+ frame_id (int): Index of the expected frame, 0-based.
+
+ Returns:
+ ndarray or None: Return the frame if successful, otherwise None.
+ """
+ if frame_id < 0 or frame_id >= self._frame_cnt:
+ raise IndexError(
+ f'"frame_id" must be between 0 and {self._frame_cnt - 1}')
+ if frame_id == self._position:
+ return self.read()
+ if self._cache:
+ img = self._cache.get(frame_id)
+ if img is not None:
+ self._position = frame_id + 1
+ return img
+ self._set_real_position(frame_id)
+ ret, img = self._vcap.read()
+ if ret:
+ if self._cache:
+ self._cache.put(self._position, img)
+ self._position += 1
+ return img
+
+ def current_frame(self):
+ """Get the current frame (frame that is just visited).
+
+ Returns:
+ ndarray or None: If the video is fresh, return None, otherwise
+ return the frame.
+ """
+ if self._position == 0:
+ return None
+ return self._cache.get(self._position - 1)
+
+ def cvt2frames(self,
+ frame_dir,
+ file_start=0,
+ filename_tmpl='{:06d}.jpg',
+ start=0,
+ max_num=0,
+ show_progress=True):
+ """Convert a video to frame images.
+
+ Args:
+ frame_dir (str): Output directory to store all the frame images.
+ file_start (int): Filenames will start from the specified number.
+ filename_tmpl (str): Filename template with the index as the
+ placeholder.
+ start (int): The starting frame index.
+ max_num (int): Maximum number of frames to be written.
+ show_progress (bool): Whether to show a progress bar.
+ """
+ mkdir_or_exist(frame_dir)
+ if max_num == 0:
+ task_num = self.frame_cnt - start
+ else:
+ task_num = min(self.frame_cnt - start, max_num)
+ if task_num <= 0:
+ raise ValueError('start must be less than total frame number')
+ if start > 0:
+ self._set_real_position(start)
+
+ def write_frame(file_idx):
+ img = self.read()
+ if img is None:
+ return
+ filename = osp.join(frame_dir, filename_tmpl.format(file_idx))
+ cv2.imwrite(filename, img)
+
+ if show_progress:
+ track_progress(write_frame, range(file_start,
+ file_start + task_num))
+ else:
+ for i in range(task_num):
+ write_frame(file_start + i)
+
+ def __len__(self):
+ return self.frame_cnt
+
+ def __getitem__(self, index):
+ if isinstance(index, slice):
+ return [
+ self.get_frame(i)
+ for i in range(*index.indices(self.frame_cnt))
+ ]
+ # support negative indexing
+ if index < 0:
+ index += self.frame_cnt
+ if index < 0:
+ raise IndexError('index out of range')
+ return self.get_frame(index)
+
+ def __iter__(self):
+ self._set_real_position(0)
+ return self
+
+ def __next__(self):
+ img = self.read()
+ if img is not None:
+ return img
+ else:
+ raise StopIteration
+
+ next = __next__
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ self._vcap.release()
+
+
+def frames2video(frame_dir,
+ video_file,
+ fps=30,
+ fourcc='XVID',
+ filename_tmpl='{:06d}.jpg',
+ start=0,
+ end=0,
+ show_progress=True):
+ """Read the frame images from a directory and join them as a video.
+
+ Args:
+ frame_dir (str): The directory containing video frames.
+ video_file (str): Output filename.
+ fps (float): FPS of the output video.
+ fourcc (str): Fourcc of the output video, this should be compatible
+ with the output file type.
+ filename_tmpl (str): Filename template with the index as the variable.
+ start (int): Starting frame index.
+ end (int): Ending frame index.
+ show_progress (bool): Whether to show a progress bar.
+ """
+ if end == 0:
+ ext = filename_tmpl.split('.')[-1]
+ end = len([name for name in scandir(frame_dir, ext)])
+ first_file = osp.join(frame_dir, filename_tmpl.format(start))
+    check_file_exist(first_file, 'Start frame not found: ' + first_file)
+ img = cv2.imread(first_file)
+ height, width = img.shape[:2]
+ resolution = (width, height)
+ vwriter = cv2.VideoWriter(video_file, VideoWriter_fourcc(*fourcc), fps,
+ resolution)
+
+ def write_frame(file_idx):
+ filename = osp.join(frame_dir, filename_tmpl.format(file_idx))
+ img = cv2.imread(filename)
+ vwriter.write(img)
+
+ if show_progress:
+ track_progress(write_frame, range(start, end))
+ else:
+ for i in range(start, end):
+ write_frame(i)
+ vwriter.release()
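+
+
+# Illustrative usage sketch (not part of the upstream mmcv file). The file
+# names below are hypothetical placeholders; the demo only runs if such a
+# video exists next to this module.
+if __name__ == '__main__':
+    if osp.isfile('sample.mp4'):
+        video = VideoReader('sample.mp4', cache_capacity=20)
+        print(f'{video.frame_cnt} frames at {video.fps:.1f} fps, '
+              f'resolution {video.resolution}')
+        # Dump all frames to disk, then stitch them back into a video.
+        video.cvt2frames('sample_frames', show_progress=False)
+        frames2video('sample_frames', 'sample_rebuilt.avi', fps=video.fps,
+                     show_progress=False)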
diff --git a/annotator/uniformer/mmcv/video/optflow.py b/annotator/uniformer/mmcv/video/optflow.py
new file mode 100644
index 0000000000000000000000000000000000000000..84160f8d6ef9fceb5a2f89e7481593109fc1905d
--- /dev/null
+++ b/annotator/uniformer/mmcv/video/optflow.py
@@ -0,0 +1,254 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import warnings
+
+import cv2
+import numpy as np
+
+from annotator.uniformer.mmcv.arraymisc import dequantize, quantize
+from annotator.uniformer.mmcv.image import imread, imwrite
+from annotator.uniformer.mmcv.utils import is_str
+
+
+def flowread(flow_or_path, quantize=False, concat_axis=0, *args, **kwargs):
+ """Read an optical flow map.
+
+ Args:
+ flow_or_path (ndarray or str): A flow map or filepath.
+ quantize (bool): whether to read quantized pair, if set to True,
+ remaining args will be passed to :func:`dequantize_flow`.
+ concat_axis (int): The axis that dx and dy are concatenated,
+ can be either 0 or 1. Ignored if quantize is False.
+
+ Returns:
+ ndarray: Optical flow represented as a (h, w, 2) numpy array
+ """
+ if isinstance(flow_or_path, np.ndarray):
+ if (flow_or_path.ndim != 3) or (flow_or_path.shape[-1] != 2):
+ raise ValueError(f'Invalid flow with shape {flow_or_path.shape}')
+ return flow_or_path
+ elif not is_str(flow_or_path):
+ raise TypeError(f'"flow_or_path" must be a filename or numpy array, '
+ f'not {type(flow_or_path)}')
+
+ if not quantize:
+ with open(flow_or_path, 'rb') as f:
+ try:
+ header = f.read(4).decode('utf-8')
+ except Exception:
+ raise IOError(f'Invalid flow file: {flow_or_path}')
+ else:
+ if header != 'PIEH':
+ raise IOError(f'Invalid flow file: {flow_or_path}, '
+ 'header does not contain PIEH')
+
+ w = np.fromfile(f, np.int32, 1).squeeze()
+ h = np.fromfile(f, np.int32, 1).squeeze()
+ flow = np.fromfile(f, np.float32, w * h * 2).reshape((h, w, 2))
+ else:
+ assert concat_axis in [0, 1]
+ cat_flow = imread(flow_or_path, flag='unchanged')
+ if cat_flow.ndim != 2:
+ raise IOError(
+ f'{flow_or_path} is not a valid quantized flow file, '
+ f'its dimension is {cat_flow.ndim}.')
+ assert cat_flow.shape[concat_axis] % 2 == 0
+ dx, dy = np.split(cat_flow, 2, axis=concat_axis)
+ flow = dequantize_flow(dx, dy, *args, **kwargs)
+
+ return flow.astype(np.float32)
+
+
+def flowwrite(flow, filename, quantize=False, concat_axis=0, *args, **kwargs):
+ """Write optical flow to file.
+
+    If the flow is not quantized, it will be saved as a .flo file losslessly;
+    otherwise it is saved as a jpeg image, which is lossy but of much smaller
+    size (dx and dy are concatenated along ``concat_axis`` into a single image
+    if quantize is True).
+
+ Args:
+ flow (ndarray): (h, w, 2) array of optical flow.
+ filename (str): Output filepath.
+        quantize (bool): Whether to quantize the flow and save it as a jpeg
+            image. If set to True, remaining args will be passed to
+ :func:`quantize_flow`.
+ concat_axis (int): The axis that dx and dy are concatenated,
+ can be either 0 or 1. Ignored if quantize is False.
+ """
+ if not quantize:
+ with open(filename, 'wb') as f:
+ f.write('PIEH'.encode('utf-8'))
+ np.array([flow.shape[1], flow.shape[0]], dtype=np.int32).tofile(f)
+ flow = flow.astype(np.float32)
+ flow.tofile(f)
+ f.flush()
+ else:
+ assert concat_axis in [0, 1]
+ dx, dy = quantize_flow(flow, *args, **kwargs)
+ dxdy = np.concatenate((dx, dy), axis=concat_axis)
+ imwrite(dxdy, filename)
+
+
+def quantize_flow(flow, max_val=0.02, norm=True):
+ """Quantize flow to [0, 255].
+
+    After this step, the size of the flow will be much smaller, and it can be
+    dumped as jpeg images.
+
+ Args:
+ flow (ndarray): (h, w, 2) array of optical flow.
+ max_val (float): Maximum value of flow, values beyond
+ [-max_val, max_val] will be truncated.
+ norm (bool): Whether to divide flow values by image width/height.
+
+ Returns:
+ tuple[ndarray]: Quantized dx and dy.
+ """
+ h, w, _ = flow.shape
+ dx = flow[..., 0]
+ dy = flow[..., 1]
+ if norm:
+ dx = dx / w # avoid inplace operations
+ dy = dy / h
+ # use 255 levels instead of 256 to make sure 0 is 0 after dequantization.
+ flow_comps = [
+ quantize(d, -max_val, max_val, 255, np.uint8) for d in [dx, dy]
+ ]
+ return tuple(flow_comps)
+
+
+def dequantize_flow(dx, dy, max_val=0.02, denorm=True):
+ """Recover from quantized flow.
+
+ Args:
+ dx (ndarray): Quantized dx.
+ dy (ndarray): Quantized dy.
+ max_val (float): Maximum value used when quantizing.
+ denorm (bool): Whether to multiply flow values with width/height.
+
+ Returns:
+ ndarray: Dequantized flow.
+ """
+ assert dx.shape == dy.shape
+ assert dx.ndim == 2 or (dx.ndim == 3 and dx.shape[-1] == 1)
+
+ dx, dy = [dequantize(d, -max_val, max_val, 255) for d in [dx, dy]]
+
+ if denorm:
+ dx *= dx.shape[1]
+ dy *= dx.shape[0]
+ flow = np.dstack((dx, dy))
+ return flow
+
+
+def flow_warp(img, flow, filling_value=0, interpolate_mode='nearest'):
+ """Use flow to warp img.
+
+ Args:
+ img (ndarray, float or uint8): Image to be warped.
+ flow (ndarray, float): Optical Flow.
+        filling_value (int): The missing pixels will be set to filling_value.
+ interpolate_mode (str): bilinear -> Bilinear Interpolation;
+ nearest -> Nearest Neighbor.
+
+ Returns:
+        ndarray: Warped image with the same shape as img.
+ """
+ warnings.warn('This function is just for prototyping and cannot '
+ 'guarantee the computational efficiency.')
+ assert flow.ndim == 3, 'Flow must be in 3D arrays.'
+ height = flow.shape[0]
+ width = flow.shape[1]
+ channels = img.shape[2]
+
+ output = np.ones(
+ (height, width, channels), dtype=img.dtype) * filling_value
+
+ grid = np.indices((height, width)).swapaxes(0, 1).swapaxes(1, 2)
+ dx = grid[:, :, 0] + flow[:, :, 1]
+ dy = grid[:, :, 1] + flow[:, :, 0]
+ sx = np.floor(dx).astype(int)
+ sy = np.floor(dy).astype(int)
+ valid = (sx >= 0) & (sx < height - 1) & (sy >= 0) & (sy < width - 1)
+
+ if interpolate_mode == 'nearest':
+ output[valid, :] = img[dx[valid].round().astype(int),
+ dy[valid].round().astype(int), :]
+ elif interpolate_mode == 'bilinear':
+        # dirty workaround for integer positions
+ eps_ = 1e-6
+ dx, dy = dx + eps_, dy + eps_
+ left_top_ = img[np.floor(dx[valid]).astype(int),
+ np.floor(dy[valid]).astype(int), :] * (
+ np.ceil(dx[valid]) - dx[valid])[:, None] * (
+ np.ceil(dy[valid]) - dy[valid])[:, None]
+ left_down_ = img[np.ceil(dx[valid]).astype(int),
+ np.floor(dy[valid]).astype(int), :] * (
+ dx[valid] - np.floor(dx[valid]))[:, None] * (
+ np.ceil(dy[valid]) - dy[valid])[:, None]
+ right_top_ = img[np.floor(dx[valid]).astype(int),
+ np.ceil(dy[valid]).astype(int), :] * (
+ np.ceil(dx[valid]) - dx[valid])[:, None] * (
+ dy[valid] - np.floor(dy[valid]))[:, None]
+ right_down_ = img[np.ceil(dx[valid]).astype(int),
+ np.ceil(dy[valid]).astype(int), :] * (
+ dx[valid] - np.floor(dx[valid]))[:, None] * (
+ dy[valid] - np.floor(dy[valid]))[:, None]
+ output[valid, :] = left_top_ + left_down_ + right_top_ + right_down_
+ else:
+ raise NotImplementedError(
+ 'We only support interpolation modes of nearest and bilinear, '
+ f'but got {interpolate_mode}.')
+ return output.astype(img.dtype)
+
+
+def flow_from_bytes(content):
+ """Read dense optical flow from bytes.
+
+ .. note::
+        This optical flow loading function works for the FlyingChairs,
+        FlyingThings3D, Sintel and FlyingChairsOcc datasets, but cannot load
+        the data from ChairsSDHom.
+
+ Args:
+ content (bytes): Optical flow bytes got from files or other streams.
+
+ Returns:
+ ndarray: Loaded optical flow with the shape (H, W, 2).
+ """
+
+ # header in first 4 bytes
+ header = content[:4]
+ if header.decode('utf-8') != 'PIEH':
+ raise Exception('Flow file header does not contain PIEH')
+ # width in second 4 bytes
+ width = np.frombuffer(content[4:], np.int32, 1).squeeze()
+ # height in third 4 bytes
+ height = np.frombuffer(content[8:], np.int32, 1).squeeze()
+ # after first 12 bytes, all bytes are flow
+ flow = np.frombuffer(content[12:], np.float32, width * height * 2).reshape(
+ (height, width, 2))
+
+ return flow
+
+
+def sparse_flow_from_bytes(content):
+ """Read the optical flow in KITTI datasets from bytes.
+
+    This function is modified from the way RAFT loads the KITTI datasets.
+
+ Args:
+ content (bytes): Optical flow bytes got from files or other streams.
+
+ Returns:
+ Tuple(ndarray, ndarray): Loaded optical flow with the shape (H, W, 2)
+        and flow valid mask with the shape (H, W).
+    """  # noqa
+
+ content = np.frombuffer(content, np.uint8)
+ flow = cv2.imdecode(content, cv2.IMREAD_ANYDEPTH | cv2.IMREAD_COLOR)
+ flow = flow[:, :, ::-1].astype(np.float32)
+ # flow shape (H, W, 2) valid shape (H, W)
+ flow, valid = flow[:, :, :2], flow[:, :, 2]
+ flow = (flow - 2**15) / 64.0
+ return flow, valid
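+
+
+# Illustrative usage sketch (not part of the upstream mmcv file): a
+# self-contained round trip through the readers/writers above, using a
+# temporary directory, plus a shape check for flow_warp.
+if __name__ == '__main__':
+    import os
+    import tempfile
+
+    demo_flow = ((np.random.rand(8, 12, 2) - 0.5) * 0.1).astype(np.float32)
+    with tempfile.TemporaryDirectory() as tmp_dir:
+        flo_path = os.path.join(tmp_dir, 'demo.flo')
+        flowwrite(demo_flow, flo_path)                 # lossless .flo file
+        assert np.allclose(flowread(flo_path), demo_flow)
+
+        jpg_path = os.path.join(tmp_dir, 'demo.jpg')
+        flowwrite(demo_flow, jpg_path, quantize=True)  # lossy, much smaller
+        approx = flowread(jpg_path, quantize=True)
+        print('max quantization error:',
+              float(np.abs(approx - demo_flow).max()))
+
+    demo_img = (np.random.rand(8, 12, 3) * 255).astype(np.uint8)
+    warped = flow_warp(demo_img, demo_flow, interpolate_mode='bilinear')
+    assert warped.shape == demo_img.shape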
diff --git a/annotator/uniformer/mmcv/video/processing.py b/annotator/uniformer/mmcv/video/processing.py
new file mode 100644
index 0000000000000000000000000000000000000000..3d90b96e0823d5f116755e7f498d25d17017224a
--- /dev/null
+++ b/annotator/uniformer/mmcv/video/processing.py
@@ -0,0 +1,160 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import os
+import os.path as osp
+import subprocess
+import tempfile
+
+from annotator.uniformer.mmcv.utils import requires_executable
+
+
+@requires_executable('ffmpeg')
+def convert_video(in_file,
+ out_file,
+ print_cmd=False,
+ pre_options='',
+ **kwargs):
+ """Convert a video with ffmpeg.
+
+    This provides a general API to ffmpeg; the executed command is::
+
+        `ffmpeg -y <pre_options> -i <in_file> <options> <out_file>`
+
+ Options(kwargs) are mapped to ffmpeg commands with the following rules:
+
+ - key=val: "-key val"
+ - key=True: "-key"
+ - key=False: ""
+
+ Args:
+ in_file (str): Input video filename.
+ out_file (str): Output video filename.
+        pre_options (str): Options appearing before "-i <in_file>".
+ print_cmd (bool): Whether to print the final ffmpeg command.
+ """
+ options = []
+ for k, v in kwargs.items():
+ if isinstance(v, bool):
+ if v:
+ options.append(f'-{k}')
+ elif k == 'log_level':
+ assert v in [
+ 'quiet', 'panic', 'fatal', 'error', 'warning', 'info',
+ 'verbose', 'debug', 'trace'
+ ]
+ options.append(f'-loglevel {v}')
+ else:
+ options.append(f'-{k} {v}')
+ cmd = f'ffmpeg -y {pre_options} -i {in_file} {" ".join(options)} ' \
+ f'{out_file}'
+ if print_cmd:
+ print(cmd)
+ subprocess.call(cmd, shell=True)
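+
+# Illustrative note (not upstream documentation): with the option-mapping
+# rules above, a call such as
+#     convert_video('in.mp4', 'out.mp4', vcodec='libx264', an=True,
+#                   log_level='error', print_cmd=True)
+# builds and runs roughly:
+#     ffmpeg -y -i in.mp4 -vcodec libx264 -an -loglevel error out.mp4
+# ('in.mp4' / 'out.mp4' are hypothetical file names).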
+
+
+@requires_executable('ffmpeg')
+def resize_video(in_file,
+ out_file,
+ size=None,
+ ratio=None,
+ keep_ar=False,
+ log_level='info',
+ print_cmd=False):
+ """Resize a video.
+
+ Args:
+ in_file (str): Input video filename.
+ out_file (str): Output video filename.
+        size (tuple): Expected size (w, h), e.g., (320, 240) or (320, -1).
+ ratio (tuple or float): Expected resize ratio, (2, 0.5) means
+ (w*2, h*0.5).
+ keep_ar (bool): Whether to keep original aspect ratio.
+ log_level (str): Logging level of ffmpeg.
+ print_cmd (bool): Whether to print the final ffmpeg command.
+ """
+ if size is None and ratio is None:
+ raise ValueError('expected size or ratio must be specified')
+ if size is not None and ratio is not None:
+ raise ValueError('size and ratio cannot be specified at the same time')
+ options = {'log_level': log_level}
+ if size:
+ if not keep_ar:
+ options['vf'] = f'scale={size[0]}:{size[1]}'
+ else:
+ options['vf'] = f'scale=w={size[0]}:h={size[1]}:' \
+ 'force_original_aspect_ratio=decrease'
+ else:
+ if not isinstance(ratio, tuple):
+ ratio = (ratio, ratio)
+ options['vf'] = f'scale="trunc(iw*{ratio[0]}):trunc(ih*{ratio[1]})"'
+ convert_video(in_file, out_file, print_cmd, **options)
+
+
+@requires_executable('ffmpeg')
+def cut_video(in_file,
+ out_file,
+ start=None,
+ end=None,
+ vcodec=None,
+ acodec=None,
+ log_level='info',
+ print_cmd=False):
+ """Cut a clip from a video.
+
+ Args:
+ in_file (str): Input video filename.
+ out_file (str): Output video filename.
+ start (None or float): Start time (in seconds).
+ end (None or float): End time (in seconds).
+ vcodec (None or str): Output video codec, None for unchanged.
+ acodec (None or str): Output audio codec, None for unchanged.
+ log_level (str): Logging level of ffmpeg.
+ print_cmd (bool): Whether to print the final ffmpeg command.
+ """
+ options = {'log_level': log_level}
+ if vcodec is None:
+ options['vcodec'] = 'copy'
+ if acodec is None:
+ options['acodec'] = 'copy'
+ if start:
+ options['ss'] = start
+ else:
+ start = 0
+ if end:
+ options['t'] = end - start
+ convert_video(in_file, out_file, print_cmd, **options)
+
+
+@requires_executable('ffmpeg')
+def concat_video(video_list,
+ out_file,
+ vcodec=None,
+ acodec=None,
+ log_level='info',
+ print_cmd=False):
+ """Concatenate multiple videos into a single one.
+
+ Args:
+        video_list (list): A list of video filenames.
+        out_file (str): Output video filename.
+        vcodec (None or str): Output video codec, None for unchanged.
+        acodec (None or str): Output audio codec, None for unchanged.
+ log_level (str): Logging level of ffmpeg.
+ print_cmd (bool): Whether to print the final ffmpeg command.
+ """
+ tmp_filehandler, tmp_filename = tempfile.mkstemp(suffix='.txt', text=True)
+ with open(tmp_filename, 'w') as f:
+ for filename in video_list:
+ f.write(f'file {osp.abspath(filename)}\n')
+ options = {'log_level': log_level}
+ if vcodec is None:
+ options['vcodec'] = 'copy'
+ if acodec is None:
+ options['acodec'] = 'copy'
+ convert_video(
+ tmp_filename,
+ out_file,
+ print_cmd,
+ pre_options='-f concat -safe 0',
+ **options)
+ os.close(tmp_filehandler)
+ os.remove(tmp_filename)
diff --git a/annotator/uniformer/mmcv/visualization/__init__.py b/annotator/uniformer/mmcv/visualization/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..835df136bdcf69348281d22914d41aa84cdf92b1
--- /dev/null
+++ b/annotator/uniformer/mmcv/visualization/__init__.py
@@ -0,0 +1,9 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from .color import Color, color_val
+from .image import imshow, imshow_bboxes, imshow_det_bboxes
+from .optflow import flow2rgb, flowshow, make_color_wheel
+
+__all__ = [
+ 'Color', 'color_val', 'imshow', 'imshow_bboxes', 'imshow_det_bboxes',
+ 'flowshow', 'flow2rgb', 'make_color_wheel'
+]
diff --git a/annotator/uniformer/mmcv/visualization/__pycache__/__init__.cpython-310.pyc b/annotator/uniformer/mmcv/visualization/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..486c92655a4720d37265aece3bf9c718bc0f7863
Binary files /dev/null and b/annotator/uniformer/mmcv/visualization/__pycache__/__init__.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/visualization/__pycache__/__init__.cpython-38.pyc b/annotator/uniformer/mmcv/visualization/__pycache__/__init__.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7412768f9875041da1719f01701dca033388c1ce
Binary files /dev/null and b/annotator/uniformer/mmcv/visualization/__pycache__/__init__.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/visualization/__pycache__/color.cpython-310.pyc b/annotator/uniformer/mmcv/visualization/__pycache__/color.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d5613cfd03f700d6fb7aff012ab20e31814bc5ca
Binary files /dev/null and b/annotator/uniformer/mmcv/visualization/__pycache__/color.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/visualization/__pycache__/color.cpython-38.pyc b/annotator/uniformer/mmcv/visualization/__pycache__/color.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1fc9401e4aafe03167c2d86c3ca08bd7928c0e80
Binary files /dev/null and b/annotator/uniformer/mmcv/visualization/__pycache__/color.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/visualization/__pycache__/image.cpython-310.pyc b/annotator/uniformer/mmcv/visualization/__pycache__/image.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3953024f4cbdd743c4585215c4a0ef45e0ce6d3c
Binary files /dev/null and b/annotator/uniformer/mmcv/visualization/__pycache__/image.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/visualization/__pycache__/image.cpython-38.pyc b/annotator/uniformer/mmcv/visualization/__pycache__/image.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5ad44f2a6cd10dcdd951f96a6ab7c7638ebfad94
Binary files /dev/null and b/annotator/uniformer/mmcv/visualization/__pycache__/image.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/visualization/__pycache__/optflow.cpython-310.pyc b/annotator/uniformer/mmcv/visualization/__pycache__/optflow.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7b9bc81f4acd8e76dd9fc45aec8c05142151d4bb
Binary files /dev/null and b/annotator/uniformer/mmcv/visualization/__pycache__/optflow.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv/visualization/__pycache__/optflow.cpython-38.pyc b/annotator/uniformer/mmcv/visualization/__pycache__/optflow.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..56721c60a03cdb13b99abda5d0e01a8f9c884cb5
Binary files /dev/null and b/annotator/uniformer/mmcv/visualization/__pycache__/optflow.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv/visualization/color.py b/annotator/uniformer/mmcv/visualization/color.py
new file mode 100644
index 0000000000000000000000000000000000000000..9041e0e6b7581c3356795d6a3c5e84667c88f025
--- /dev/null
+++ b/annotator/uniformer/mmcv/visualization/color.py
@@ -0,0 +1,51 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from enum import Enum
+
+import numpy as np
+
+from annotator.uniformer.mmcv.utils import is_str
+
+
+class Color(Enum):
+ """An enum that defines common colors.
+
+ Contains red, green, blue, cyan, yellow, magenta, white and black.
+ """
+ red = (0, 0, 255)
+ green = (0, 255, 0)
+ blue = (255, 0, 0)
+ cyan = (255, 255, 0)
+ yellow = (0, 255, 255)
+ magenta = (255, 0, 255)
+ white = (255, 255, 255)
+ black = (0, 0, 0)
+
+
+def color_val(color):
+    """Convert various types of input to color tuples.
+
+ Args:
+ color (:obj:`Color`/str/tuple/int/ndarray): Color inputs
+
+ Returns:
+ tuple[int]: A tuple of 3 integers indicating BGR channels.
+ """
+ if is_str(color):
+ return Color[color].value
+ elif isinstance(color, Color):
+ return color.value
+ elif isinstance(color, tuple):
+ assert len(color) == 3
+ for channel in color:
+ assert 0 <= channel <= 255
+ return color
+ elif isinstance(color, int):
+ assert 0 <= color <= 255
+ return color, color, color
+ elif isinstance(color, np.ndarray):
+ assert color.ndim == 1 and color.size == 3
+ assert np.all((color >= 0) & (color <= 255))
+ color = color.astype(np.uint8)
+ return tuple(color)
+ else:
+ raise TypeError(f'Invalid type for color: {type(color)}')
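+
+
+# Illustrative usage sketch (not part of the upstream mmcv file); all outputs
+# are BGR tuples.
+if __name__ == '__main__':
+    assert color_val('green') == (0, 255, 0)
+    assert color_val(Color.red) == (0, 0, 255)
+    assert color_val((255, 0, 0)) == (255, 0, 0)
+    assert color_val(128) == (128, 128, 128)
+    assert color_val(np.array([0, 255, 255])) == (0, 255, 255)
+    print('color_val examples hold')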
diff --git a/annotator/uniformer/mmcv/visualization/image.py b/annotator/uniformer/mmcv/visualization/image.py
new file mode 100644
index 0000000000000000000000000000000000000000..61a56c75b67f593c298408462c63c0468be8e276
--- /dev/null
+++ b/annotator/uniformer/mmcv/visualization/image.py
@@ -0,0 +1,152 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import cv2
+import numpy as np
+
+from annotator.uniformer.mmcv.image import imread, imwrite
+from .color import color_val
+
+
+def imshow(img, win_name='', wait_time=0):
+ """Show an image.
+
+ Args:
+ img (str or ndarray): The image to be displayed.
+ win_name (str): The window name.
+ wait_time (int): Value of waitKey param.
+ """
+ cv2.imshow(win_name, imread(img))
+    if wait_time == 0:  # prevent from hanging if the window was closed
+ while True:
+ ret = cv2.waitKey(1)
+
+ closed = cv2.getWindowProperty(win_name, cv2.WND_PROP_VISIBLE) < 1
+ # if user closed window or if some key pressed
+ if closed or ret != -1:
+ break
+ else:
+ ret = cv2.waitKey(wait_time)
+
+
+def imshow_bboxes(img,
+ bboxes,
+ colors='green',
+ top_k=-1,
+ thickness=1,
+ show=True,
+ win_name='',
+ wait_time=0,
+ out_file=None):
+ """Draw bboxes on an image.
+
+ Args:
+ img (str or ndarray): The image to be displayed.
+ bboxes (list or ndarray): A list of ndarray of shape (k, 4).
+ colors (list[str or tuple or Color]): A list of colors.
+ top_k (int): Plot the first k bboxes only if set positive.
+ thickness (int): Thickness of lines.
+ show (bool): Whether to show the image.
+ win_name (str): The window name.
+ wait_time (int): Value of waitKey param.
+ out_file (str, optional): The filename to write the image.
+
+ Returns:
+ ndarray: The image with bboxes drawn on it.
+ """
+ img = imread(img)
+ img = np.ascontiguousarray(img)
+
+ if isinstance(bboxes, np.ndarray):
+ bboxes = [bboxes]
+ if not isinstance(colors, list):
+ colors = [colors for _ in range(len(bboxes))]
+ colors = [color_val(c) for c in colors]
+ assert len(bboxes) == len(colors)
+
+ for i, _bboxes in enumerate(bboxes):
+ _bboxes = _bboxes.astype(np.int32)
+ if top_k <= 0:
+ _top_k = _bboxes.shape[0]
+ else:
+ _top_k = min(top_k, _bboxes.shape[0])
+ for j in range(_top_k):
+ left_top = (_bboxes[j, 0], _bboxes[j, 1])
+ right_bottom = (_bboxes[j, 2], _bboxes[j, 3])
+ cv2.rectangle(
+ img, left_top, right_bottom, colors[i], thickness=thickness)
+
+ if show:
+ imshow(img, win_name, wait_time)
+ if out_file is not None:
+ imwrite(img, out_file)
+ return img
+
+
+def imshow_det_bboxes(img,
+ bboxes,
+ labels,
+ class_names=None,
+ score_thr=0,
+ bbox_color='green',
+ text_color='green',
+ thickness=1,
+ font_scale=0.5,
+ show=True,
+ win_name='',
+ wait_time=0,
+ out_file=None):
+ """Draw bboxes and class labels (with scores) on an image.
+
+ Args:
+ img (str or ndarray): The image to be displayed.
+ bboxes (ndarray): Bounding boxes (with scores), shaped (n, 4) or
+ (n, 5).
+ labels (ndarray): Labels of bboxes.
+        class_names (list[str]): Names of each class.
+ score_thr (float): Minimum score of bboxes to be shown.
+ bbox_color (str or tuple or :obj:`Color`): Color of bbox lines.
+ text_color (str or tuple or :obj:`Color`): Color of texts.
+ thickness (int): Thickness of lines.
+        font_scale (float): Font scale of texts.
+ show (bool): Whether to show the image.
+ win_name (str): The window name.
+ wait_time (int): Value of waitKey param.
+ out_file (str or None): The filename to write the image.
+
+ Returns:
+ ndarray: The image with bboxes drawn on it.
+ """
+ assert bboxes.ndim == 2
+ assert labels.ndim == 1
+ assert bboxes.shape[0] == labels.shape[0]
+ assert bboxes.shape[1] == 4 or bboxes.shape[1] == 5
+ img = imread(img)
+ img = np.ascontiguousarray(img)
+
+ if score_thr > 0:
+ assert bboxes.shape[1] == 5
+ scores = bboxes[:, -1]
+ inds = scores > score_thr
+ bboxes = bboxes[inds, :]
+ labels = labels[inds]
+
+ bbox_color = color_val(bbox_color)
+ text_color = color_val(text_color)
+
+ for bbox, label in zip(bboxes, labels):
+ bbox_int = bbox.astype(np.int32)
+ left_top = (bbox_int[0], bbox_int[1])
+ right_bottom = (bbox_int[2], bbox_int[3])
+ cv2.rectangle(
+ img, left_top, right_bottom, bbox_color, thickness=thickness)
+ label_text = class_names[
+ label] if class_names is not None else f'cls {label}'
+ if len(bbox) > 4:
+ label_text += f'|{bbox[-1]:.02f}'
+ cv2.putText(img, label_text, (bbox_int[0], bbox_int[1] - 2),
+ cv2.FONT_HERSHEY_COMPLEX, font_scale, text_color)
+
+ if show:
+ imshow(img, win_name, wait_time)
+ if out_file is not None:
+ imwrite(img, out_file)
+ return img
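+
+
+# Illustrative usage sketch (not part of the upstream mmcv file): draw boxes
+# on a synthetic image without opening a window and write the result to a
+# temporary file.
+if __name__ == '__main__':
+    import os
+    import tempfile
+
+    canvas = np.full((120, 160, 3), 255, dtype=np.uint8)
+    boxes = np.array([[10, 10, 60, 60], [70, 30, 150, 110]], dtype=np.float32)
+    with tempfile.TemporaryDirectory() as tmp_dir:
+        out_path = os.path.join(tmp_dir, 'boxes.png')
+        imshow_bboxes(canvas, boxes, colors='red', show=False,
+                      out_file=out_path)
+        print('wrote', out_path, os.path.getsize(out_path), 'bytes')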
diff --git a/annotator/uniformer/mmcv/visualization/optflow.py b/annotator/uniformer/mmcv/visualization/optflow.py
new file mode 100644
index 0000000000000000000000000000000000000000..c3870c700f7c946177ee5d536ce3f6c814a77ce7
--- /dev/null
+++ b/annotator/uniformer/mmcv/visualization/optflow.py
@@ -0,0 +1,112 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from __future__ import division
+
+import numpy as np
+
+from annotator.uniformer.mmcv.image import rgb2bgr
+from annotator.uniformer.mmcv.video import flowread
+from .image import imshow
+
+
+def flowshow(flow, win_name='', wait_time=0):
+ """Show optical flow.
+
+ Args:
+ flow (ndarray or str): The optical flow to be displayed.
+ win_name (str): The window name.
+ wait_time (int): Value of waitKey param.
+ """
+ flow = flowread(flow)
+ flow_img = flow2rgb(flow)
+ imshow(rgb2bgr(flow_img), win_name, wait_time)
+
+
+def flow2rgb(flow, color_wheel=None, unknown_thr=1e6):
+ """Convert flow map to RGB image.
+
+ Args:
+ flow (ndarray): Array of optical flow.
+ color_wheel (ndarray or None): Color wheel used to map flow field to
+ RGB colorspace. Default color wheel will be used if not specified.
+        unknown_thr (float): Values above this threshold will be marked as
+ unknown and thus ignored.
+
+ Returns:
+ ndarray: RGB image that can be visualized.
+ """
+ assert flow.ndim == 3 and flow.shape[-1] == 2
+ if color_wheel is None:
+ color_wheel = make_color_wheel()
+ assert color_wheel.ndim == 2 and color_wheel.shape[1] == 3
+ num_bins = color_wheel.shape[0]
+
+ dx = flow[:, :, 0].copy()
+ dy = flow[:, :, 1].copy()
+
+ ignore_inds = (
+ np.isnan(dx) | np.isnan(dy) | (np.abs(dx) > unknown_thr) |
+ (np.abs(dy) > unknown_thr))
+ dx[ignore_inds] = 0
+ dy[ignore_inds] = 0
+
+ rad = np.sqrt(dx**2 + dy**2)
+ if np.any(rad > np.finfo(float).eps):
+ max_rad = np.max(rad)
+ dx /= max_rad
+ dy /= max_rad
+
+ rad = np.sqrt(dx**2 + dy**2)
+ angle = np.arctan2(-dy, -dx) / np.pi
+
+ bin_real = (angle + 1) / 2 * (num_bins - 1)
+ bin_left = np.floor(bin_real).astype(int)
+ bin_right = (bin_left + 1) % num_bins
+ w = (bin_real - bin_left.astype(np.float32))[..., None]
+ flow_img = (1 -
+ w) * color_wheel[bin_left, :] + w * color_wheel[bin_right, :]
+ small_ind = rad <= 1
+ flow_img[small_ind] = 1 - rad[small_ind, None] * (1 - flow_img[small_ind])
+ flow_img[np.logical_not(small_ind)] *= 0.75
+
+ flow_img[ignore_inds, :] = 0
+
+ return flow_img
+
+
+def make_color_wheel(bins=None):
+ """Build a color wheel.
+
+ Args:
+ bins(list or tuple, optional): Specify the number of bins for each
+ color range, corresponding to six ranges: red -> yellow,
+ yellow -> green, green -> cyan, cyan -> blue, blue -> magenta,
+ magenta -> red. [15, 6, 4, 11, 13, 6] is used for default
+ (see Middlebury).
+
+ Returns:
+ ndarray: Color wheel of shape (total_bins, 3).
+ """
+ if bins is None:
+ bins = [15, 6, 4, 11, 13, 6]
+ assert len(bins) == 6
+
+ RY, YG, GC, CB, BM, MR = tuple(bins)
+
+ ry = [1, np.arange(RY) / RY, 0]
+ yg = [1 - np.arange(YG) / YG, 1, 0]
+ gc = [0, 1, np.arange(GC) / GC]
+ cb = [0, 1 - np.arange(CB) / CB, 1]
+ bm = [np.arange(BM) / BM, 0, 1]
+ mr = [1, 0, 1 - np.arange(MR) / MR]
+
+ num_bins = RY + YG + GC + CB + BM + MR
+
+ color_wheel = np.zeros((3, num_bins), dtype=np.float32)
+
+ col = 0
+ for i, color in enumerate([ry, yg, gc, cb, bm, mr]):
+ for j in range(3):
+ color_wheel[j, col:col + bins[i]] = color[j]
+ col += bins[i]
+
+ return color_wheel.T
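+
+
+# Illustrative usage sketch (not part of the upstream mmcv file): map a random
+# flow field to an RGB image and check the documented output properties.
+if __name__ == '__main__':
+    demo_flow = ((np.random.rand(32, 48, 2) - 0.5) * 4).astype(np.float32)
+    rgb = flow2rgb(demo_flow)
+    assert rgb.shape == (32, 48, 3)
+    assert 0.0 <= rgb.min() <= rgb.max() <= 1.0
+    print('flow2rgb produced a valid RGB image in [0, 1]')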
diff --git a/annotator/uniformer/mmcv_custom/__init__.py b/annotator/uniformer/mmcv_custom/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..4b958738b9fd93bfcec239c550df1d9a44b8c536
--- /dev/null
+++ b/annotator/uniformer/mmcv_custom/__init__.py
@@ -0,0 +1,5 @@
+# -*- coding: utf-8 -*-
+
+from .checkpoint import load_checkpoint
+
+__all__ = ['load_checkpoint']
\ No newline at end of file
diff --git a/annotator/uniformer/mmcv_custom/__pycache__/__init__.cpython-310.pyc b/annotator/uniformer/mmcv_custom/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ed497b6325bae4b8a31f994265b1fcf213b1e334
Binary files /dev/null and b/annotator/uniformer/mmcv_custom/__pycache__/__init__.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv_custom/__pycache__/__init__.cpython-38.pyc b/annotator/uniformer/mmcv_custom/__pycache__/__init__.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5bfe61aea884c612ee519f808e8bb9807b536964
Binary files /dev/null and b/annotator/uniformer/mmcv_custom/__pycache__/__init__.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv_custom/__pycache__/checkpoint.cpython-310.pyc b/annotator/uniformer/mmcv_custom/__pycache__/checkpoint.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1e0c88e2ef993d3137886e13af411f0dc2b5c381
Binary files /dev/null and b/annotator/uniformer/mmcv_custom/__pycache__/checkpoint.cpython-310.pyc differ
diff --git a/annotator/uniformer/mmcv_custom/__pycache__/checkpoint.cpython-38.pyc b/annotator/uniformer/mmcv_custom/__pycache__/checkpoint.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..23b0b1413aa48d4ea30299217f79088b027e6ce3
Binary files /dev/null and b/annotator/uniformer/mmcv_custom/__pycache__/checkpoint.cpython-38.pyc differ
diff --git a/annotator/uniformer/mmcv_custom/checkpoint.py b/annotator/uniformer/mmcv_custom/checkpoint.py
new file mode 100644
index 0000000000000000000000000000000000000000..19b87fef0a52d31babcdb3edb8f3089b6420173f
--- /dev/null
+++ b/annotator/uniformer/mmcv_custom/checkpoint.py
@@ -0,0 +1,500 @@
+# Copyright (c) Open-MMLab. All rights reserved.
+import io
+import os
+import os.path as osp
+import pkgutil
+import time
+import warnings
+from collections import OrderedDict
+from importlib import import_module
+from tempfile import TemporaryDirectory
+
+import torch
+import torchvision
+from torch.optim import Optimizer
+from torch.utils import model_zoo
+from torch.nn import functional as F
+
+import annotator.uniformer.mmcv as mmcv
+from annotator.uniformer.mmcv.fileio import FileClient
+from annotator.uniformer.mmcv.fileio import load as load_file
+from annotator.uniformer.mmcv.parallel import is_module_wrapper
+from annotator.uniformer.mmcv.utils import mkdir_or_exist
+from annotator.uniformer.mmcv.runner import get_dist_info
+
+ENV_MMCV_HOME = 'MMCV_HOME'
+ENV_XDG_CACHE_HOME = 'XDG_CACHE_HOME'
+DEFAULT_CACHE_DIR = '~/.cache'
+
+
+def _get_mmcv_home():
+ mmcv_home = os.path.expanduser(
+ os.getenv(
+ ENV_MMCV_HOME,
+ os.path.join(
+ os.getenv(ENV_XDG_CACHE_HOME, DEFAULT_CACHE_DIR), 'mmcv')))
+
+ mkdir_or_exist(mmcv_home)
+ return mmcv_home
+
+
+def load_state_dict(module, state_dict, strict=False, logger=None):
+ """Load state_dict to a module.
+
+ This method is modified from :meth:`torch.nn.Module.load_state_dict`.
+ Default value for ``strict`` is set to ``False`` and the message for
+ param mismatch will be shown even if strict is False.
+
+ Args:
+ module (Module): Module that receives the state_dict.
+ state_dict (OrderedDict): Weights.
+ strict (bool): whether to strictly enforce that the keys
+ in :attr:`state_dict` match the keys returned by this module's
+ :meth:`~torch.nn.Module.state_dict` function. Default: ``False``.
+ logger (:obj:`logging.Logger`, optional): Logger to log the error
+ message. If not specified, print function will be used.
+ """
+ unexpected_keys = []
+ all_missing_keys = []
+ err_msg = []
+
+ metadata = getattr(state_dict, '_metadata', None)
+ state_dict = state_dict.copy()
+ if metadata is not None:
+ state_dict._metadata = metadata
+
+ # use _load_from_state_dict to enable checkpoint version control
+ def load(module, prefix=''):
+ # recursively check parallel module in case that the model has a
+ # complicated structure, e.g., nn.Module(nn.Module(DDP))
+ if is_module_wrapper(module):
+ module = module.module
+ local_metadata = {} if metadata is None else metadata.get(
+ prefix[:-1], {})
+ module._load_from_state_dict(state_dict, prefix, local_metadata, True,
+ all_missing_keys, unexpected_keys,
+ err_msg)
+ for name, child in module._modules.items():
+ if child is not None:
+ load(child, prefix + name + '.')
+
+ load(module)
+ load = None # break load->load reference cycle
+
+ # ignore "num_batches_tracked" of BN layers
+ missing_keys = [
+ key for key in all_missing_keys if 'num_batches_tracked' not in key
+ ]
+
+ if unexpected_keys:
+ err_msg.append('unexpected key in source '
+ f'state_dict: {", ".join(unexpected_keys)}\n')
+ if missing_keys:
+ err_msg.append(
+ f'missing keys in source state_dict: {", ".join(missing_keys)}\n')
+
+ rank, _ = get_dist_info()
+ if len(err_msg) > 0 and rank == 0:
+ err_msg.insert(
+ 0, 'The model and loaded state dict do not match exactly\n')
+ err_msg = '\n'.join(err_msg)
+ if strict:
+ raise RuntimeError(err_msg)
+ elif logger is not None:
+ logger.warning(err_msg)
+ else:
+ print(err_msg)
+
+
+def load_url_dist(url, model_dir=None):
+    """In the distributed setting, this function only downloads the checkpoint
+    at local rank 0."""
+ rank, world_size = get_dist_info()
+ rank = int(os.environ.get('LOCAL_RANK', rank))
+ if rank == 0:
+ checkpoint = model_zoo.load_url(url, model_dir=model_dir)
+ if world_size > 1:
+ torch.distributed.barrier()
+ if rank > 0:
+ checkpoint = model_zoo.load_url(url, model_dir=model_dir)
+ return checkpoint
+
+
+def load_pavimodel_dist(model_path, map_location=None):
+    """In the distributed setting, this function only downloads the checkpoint
+    at local rank 0."""
+ try:
+ from pavi import modelcloud
+ except ImportError:
+ raise ImportError(
+ 'Please install pavi to load checkpoint from modelcloud.')
+ rank, world_size = get_dist_info()
+ rank = int(os.environ.get('LOCAL_RANK', rank))
+ if rank == 0:
+ model = modelcloud.get(model_path)
+ with TemporaryDirectory() as tmp_dir:
+ downloaded_file = osp.join(tmp_dir, model.name)
+ model.download(downloaded_file)
+ checkpoint = torch.load(downloaded_file, map_location=map_location)
+ if world_size > 1:
+ torch.distributed.barrier()
+ if rank > 0:
+ model = modelcloud.get(model_path)
+ with TemporaryDirectory() as tmp_dir:
+ downloaded_file = osp.join(tmp_dir, model.name)
+ model.download(downloaded_file)
+ checkpoint = torch.load(
+ downloaded_file, map_location=map_location)
+ return checkpoint
+
+
+def load_fileclient_dist(filename, backend, map_location):
+    """In the distributed setting, this function only downloads the checkpoint
+    at local rank 0."""
+ rank, world_size = get_dist_info()
+ rank = int(os.environ.get('LOCAL_RANK', rank))
+ allowed_backends = ['ceph']
+ if backend not in allowed_backends:
+ raise ValueError(f'Load from Backend {backend} is not supported.')
+ if rank == 0:
+ fileclient = FileClient(backend=backend)
+ buffer = io.BytesIO(fileclient.get(filename))
+ checkpoint = torch.load(buffer, map_location=map_location)
+ if world_size > 1:
+ torch.distributed.barrier()
+ if rank > 0:
+ fileclient = FileClient(backend=backend)
+ buffer = io.BytesIO(fileclient.get(filename))
+ checkpoint = torch.load(buffer, map_location=map_location)
+ return checkpoint
+
+
+def get_torchvision_models():
+ model_urls = dict()
+ for _, name, ispkg in pkgutil.walk_packages(torchvision.models.__path__):
+ if ispkg:
+ continue
+ _zoo = import_module(f'torchvision.models.{name}')
+ if hasattr(_zoo, 'model_urls'):
+ _urls = getattr(_zoo, 'model_urls')
+ model_urls.update(_urls)
+ return model_urls
+
+
+def get_external_models():
+ mmcv_home = _get_mmcv_home()
+ default_json_path = osp.join(mmcv.__path__[0], 'model_zoo/open_mmlab.json')
+ default_urls = load_file(default_json_path)
+ assert isinstance(default_urls, dict)
+ external_json_path = osp.join(mmcv_home, 'open_mmlab.json')
+ if osp.exists(external_json_path):
+ external_urls = load_file(external_json_path)
+ assert isinstance(external_urls, dict)
+ default_urls.update(external_urls)
+
+ return default_urls
+
+
+def get_mmcls_models():
+ mmcls_json_path = osp.join(mmcv.__path__[0], 'model_zoo/mmcls.json')
+ mmcls_urls = load_file(mmcls_json_path)
+
+ return mmcls_urls
+
+
+def get_deprecated_model_names():
+ deprecate_json_path = osp.join(mmcv.__path__[0],
+ 'model_zoo/deprecated.json')
+ deprecate_urls = load_file(deprecate_json_path)
+ assert isinstance(deprecate_urls, dict)
+
+ return deprecate_urls
+
+
+def _process_mmcls_checkpoint(checkpoint):
+ state_dict = checkpoint['state_dict']
+ new_state_dict = OrderedDict()
+ for k, v in state_dict.items():
+ if k.startswith('backbone.'):
+ new_state_dict[k[9:]] = v
+ new_checkpoint = dict(state_dict=new_state_dict)
+
+ return new_checkpoint
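+
+# Sketch of the effect: a key such as 'backbone.layer1.0.conv1.weight' becomes
+# 'layer1.0.conv1.weight'; keys without the 'backbone.' prefix (e.g. 'head.fc.weight')
+# are dropped from the returned state_dict.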
+
+
+def _load_checkpoint(filename, map_location=None):
+ """Load checkpoint from somewhere (modelzoo, file, url).
+
+ Args:
+ filename (str): Accept local filepath, URL, ``torchvision://xxx``,
+ ``open-mmlab://xxx``. Please refer to ``docs/model_zoo.md`` for
+ details.
+ map_location (str | None): Same as :func:`torch.load`. Default: None.
+
+ Returns:
+ dict | OrderedDict: The loaded checkpoint. It can be either an
+ OrderedDict storing model weights or a dict containing other
+ information, which depends on the checkpoint.
+ """
+ if filename.startswith('modelzoo://'):
+ warnings.warn('The URL scheme of "modelzoo://" is deprecated, please '
+ 'use "torchvision://" instead')
+ model_urls = get_torchvision_models()
+ model_name = filename[11:]
+ checkpoint = load_url_dist(model_urls[model_name])
+ elif filename.startswith('torchvision://'):
+ model_urls = get_torchvision_models()
+ model_name = filename[14:]
+ checkpoint = load_url_dist(model_urls[model_name])
+ elif filename.startswith('open-mmlab://'):
+ model_urls = get_external_models()
+ model_name = filename[13:]
+ deprecated_urls = get_deprecated_model_names()
+ if model_name in deprecated_urls:
+ warnings.warn(f'open-mmlab://{model_name} is deprecated in favor '
+ f'of open-mmlab://{deprecated_urls[model_name]}')
+ model_name = deprecated_urls[model_name]
+ model_url = model_urls[model_name]
+ # check if is url
+ if model_url.startswith(('http://', 'https://')):
+ checkpoint = load_url_dist(model_url)
+ else:
+ filename = osp.join(_get_mmcv_home(), model_url)
+ if not osp.isfile(filename):
+ raise IOError(f'{filename} is not a checkpoint file')
+ checkpoint = torch.load(filename, map_location=map_location)
+ elif filename.startswith('mmcls://'):
+ model_urls = get_mmcls_models()
+ model_name = filename[8:]
+ checkpoint = load_url_dist(model_urls[model_name])
+ checkpoint = _process_mmcls_checkpoint(checkpoint)
+ elif filename.startswith(('http://', 'https://')):
+ checkpoint = load_url_dist(filename)
+ elif filename.startswith('pavi://'):
+ model_path = filename[7:]
+ checkpoint = load_pavimodel_dist(model_path, map_location=map_location)
+ elif filename.startswith('s3://'):
+ checkpoint = load_fileclient_dist(
+ filename, backend='ceph', map_location=map_location)
+ else:
+ if not osp.isfile(filename):
+ raise IOError(f'{filename} is not a checkpoint file')
+ checkpoint = torch.load(filename, map_location=map_location)
+ return checkpoint
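+
+# Illustrative only: the URI schemes `_load_checkpoint` resolves. The model names
+# below are placeholders and assume matching entries in the respective zoo JSONs.
+#
+#   ckpt = _load_checkpoint('torchvision://resnet50')                    # torchvision zoo
+#   ckpt = _load_checkpoint('open-mmlab://some_model')                   # open-mmlab zoo
+#   ckpt = _load_checkpoint('/path/to/weights.pth', map_location='cpu')  # local file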
+
+
+def load_checkpoint(model,
+ filename,
+ map_location='cpu',
+ strict=False,
+ logger=None):
+ """Load checkpoint from a file or URI.
+
+ Args:
+ model (Module): Module to load checkpoint.
+ filename (str): Accept local filepath, URL, ``torchvision://xxx``,
+ ``open-mmlab://xxx``. Please refer to ``docs/model_zoo.md`` for
+ details.
+ map_location (str): Same as :func:`torch.load`.
+ strict (bool): Whether to allow different params for the model and
+ checkpoint.
+ logger (:mod:`logging.Logger` or None): The logger for error message.
+
+ Returns:
+ dict or OrderedDict: The loaded checkpoint.
+ """
+ checkpoint = _load_checkpoint(filename, map_location)
+ # OrderedDict is a subclass of dict
+ if not isinstance(checkpoint, dict):
+ raise RuntimeError(
+ f'No state_dict found in checkpoint file {filename}')
+ # get state_dict from checkpoint
+ if 'state_dict' in checkpoint:
+ state_dict = checkpoint['state_dict']
+ elif 'model' in checkpoint:
+ state_dict = checkpoint['model']
+ else:
+ state_dict = checkpoint
+ # strip prefix of state_dict
+ if list(state_dict.keys())[0].startswith('module.'):
+ state_dict = {k[7:]: v for k, v in state_dict.items()}
+
+ # for MoBY, load model of online branch
+ if sorted(list(state_dict.keys()))[0].startswith('encoder'):
+ state_dict = {k.replace('encoder.', ''): v for k, v in state_dict.items() if k.startswith('encoder.')}
+
+ # reshape absolute position embedding
+ if state_dict.get('absolute_pos_embed') is not None:
+ absolute_pos_embed = state_dict['absolute_pos_embed']
+ N1, L, C1 = absolute_pos_embed.size()
+ N2, C2, H, W = model.absolute_pos_embed.size()
+ if N1 != N2 or C1 != C2 or L != H*W:
+            if logger is not None:
+                logger.warning("Error in loading absolute_pos_embed, pass")
+            else:
+                warnings.warn("Error in loading absolute_pos_embed, pass")
+ else:
+ state_dict['absolute_pos_embed'] = absolute_pos_embed.view(N2, H, W, C2).permute(0, 3, 1, 2)
+
+ # interpolate position bias table if needed
+ relative_position_bias_table_keys = [k for k in state_dict.keys() if "relative_position_bias_table" in k]
+ for table_key in relative_position_bias_table_keys:
+ table_pretrained = state_dict[table_key]
+ table_current = model.state_dict()[table_key]
+ L1, nH1 = table_pretrained.size()
+ L2, nH2 = table_current.size()
+ if nH1 != nH2:
+            if logger is not None:
+                logger.warning(f"Error in loading {table_key}, pass")
+            else:
+                warnings.warn(f"Error in loading {table_key}, pass")
+ else:
+ if L1 != L2:
+ S1 = int(L1 ** 0.5)
+ S2 = int(L2 ** 0.5)
+ table_pretrained_resized = F.interpolate(
+ table_pretrained.permute(1, 0).view(1, nH1, S1, S1),
+ size=(S2, S2), mode='bicubic')
+ state_dict[table_key] = table_pretrained_resized.view(nH2, L2).permute(1, 0)
+
+ # load state_dict
+ load_state_dict(model, state_dict, strict, logger)
+ return checkpoint
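+
+# Sketch of typical usage; `backbone` and the URL are hypothetical. Passing a logger
+# is optional: without one, shape-mismatch messages fall back to the warnings module.
+#
+#   import logging
+#   ckpt = load_checkpoint(backbone, 'https://example.com/swin_tiny.pth',
+#                          map_location='cpu', strict=False,
+#                          logger=logging.getLogger(__name__))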
+
+
+def weights_to_cpu(state_dict):
+ """Copy a model state_dict to cpu.
+
+ Args:
+ state_dict (OrderedDict): Model weights on GPU.
+
+ Returns:
+        OrderedDict: Model weights on CPU.
+ """
+ state_dict_cpu = OrderedDict()
+ for key, val in state_dict.items():
+ state_dict_cpu[key] = val.cpu()
+ return state_dict_cpu
+
+
+def _save_to_state_dict(module, destination, prefix, keep_vars):
+ """Saves module state to `destination` dictionary.
+
+ This method is modified from :meth:`torch.nn.Module._save_to_state_dict`.
+
+ Args:
+ module (nn.Module): The module to generate state_dict.
+ destination (dict): A dict where state will be stored.
+ prefix (str): The prefix for parameters and buffers used in this
+            module.
+        keep_vars (bool): Whether to keep the variable property of the
+            parameters. Default: False.
+    """
+ for name, param in module._parameters.items():
+ if param is not None:
+ destination[prefix + name] = param if keep_vars else param.detach()
+ for name, buf in module._buffers.items():
+ # remove check of _non_persistent_buffers_set to allow nn.BatchNorm2d
+ if buf is not None:
+ destination[prefix + name] = buf if keep_vars else buf.detach()
+
+
+def get_state_dict(module, destination=None, prefix='', keep_vars=False):
+ """Returns a dictionary containing a whole state of the module.
+
+ Both parameters and persistent buffers (e.g. running averages) are
+ included. Keys are corresponding parameter and buffer names.
+
+ This method is modified from :meth:`torch.nn.Module.state_dict` to
+ recursively check parallel module in case that the model has a complicated
+ structure, e.g., nn.Module(nn.Module(DDP)).
+
+ Args:
+ module (nn.Module): The module to generate state_dict.
+ destination (OrderedDict): Returned dict for the state of the
+ module.
+ prefix (str): Prefix of the key.
+ keep_vars (bool): Whether to keep the variable property of the
+ parameters. Default: False.
+
+ Returns:
+ dict: A dictionary containing a whole state of the module.
+ """
+ # recursively check parallel module in case that the model has a
+ # complicated structure, e.g., nn.Module(nn.Module(DDP))
+ if is_module_wrapper(module):
+ module = module.module
+
+ # below is the same as torch.nn.Module.state_dict()
+ if destination is None:
+ destination = OrderedDict()
+ destination._metadata = OrderedDict()
+ destination._metadata[prefix[:-1]] = local_metadata = dict(
+ version=module._version)
+ _save_to_state_dict(module, destination, prefix, keep_vars)
+ for name, child in module._modules.items():
+ if child is not None:
+ get_state_dict(
+ child, destination, prefix + name + '.', keep_vars=keep_vars)
+ for hook in module._state_dict_hooks.values():
+ hook_result = hook(module, destination, prefix, local_metadata)
+ if hook_result is not None:
+ destination = hook_result
+ return destination
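+
+# Sketch: unlike a bare module.state_dict() call, this unwraps DataParallel-style
+# wrappers first, so keys come out without the 'module.' prefix. `wrapped_model`
+# is a placeholder.
+#
+#   sd = weights_to_cpu(get_state_dict(wrapped_model))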
+
+
+def save_checkpoint(model, filename, optimizer=None, meta=None):
+ """Save checkpoint to file.
+
+ The checkpoint will have 3 fields: ``meta``, ``state_dict`` and
+ ``optimizer``. By default ``meta`` will contain version and time info.
+
+ Args:
+ model (Module): Module whose params are to be saved.
+ filename (str): Checkpoint filename.
+ optimizer (:obj:`Optimizer`, optional): Optimizer to be saved.
+ meta (dict, optional): Metadata to be saved in checkpoint.
+ """
+ if meta is None:
+ meta = {}
+ elif not isinstance(meta, dict):
+ raise TypeError(f'meta must be a dict or None, but got {type(meta)}')
+ meta.update(mmcv_version=mmcv.__version__, time=time.asctime())
+
+ if is_module_wrapper(model):
+ model = model.module
+
+ if hasattr(model, 'CLASSES') and model.CLASSES is not None:
+ # save class name to the meta
+ meta.update(CLASSES=model.CLASSES)
+
+ checkpoint = {
+ 'meta': meta,
+ 'state_dict': weights_to_cpu(get_state_dict(model))
+ }
+ # save optimizer state dict in the checkpoint
+ if isinstance(optimizer, Optimizer):
+ checkpoint['optimizer'] = optimizer.state_dict()
+ elif isinstance(optimizer, dict):
+ checkpoint['optimizer'] = {}
+ for name, optim in optimizer.items():
+ checkpoint['optimizer'][name] = optim.state_dict()
+
+ if filename.startswith('pavi://'):
+ try:
+ from pavi import modelcloud
+ from pavi.exception import NodeNotFoundError
+ except ImportError:
+ raise ImportError(
+ 'Please install pavi to load checkpoint from modelcloud.')
+ model_path = filename[7:]
+ root = modelcloud.Folder()
+ model_dir, model_name = osp.split(model_path)
+ try:
+ model = modelcloud.get(model_dir)
+ except NodeNotFoundError:
+ model = root.create_training_model(model_dir)
+ with TemporaryDirectory() as tmp_dir:
+ checkpoint_file = osp.join(tmp_dir, model_name)
+ with open(checkpoint_file, 'wb') as f:
+ torch.save(checkpoint, f)
+ f.flush()
+ model.create_file(checkpoint_file, name=model_name)
+ else:
+ mmcv.mkdir_or_exist(osp.dirname(filename))
+ # immediately flush buffer
+ with open(filename, 'wb') as f:
+ torch.save(checkpoint, f)
+ f.flush()
\ No newline at end of file
diff --git a/annotator/uniformer/mmdet/__init__.py b/annotator/uniformer/mmdet/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..ce2930f62a0091e06b37575b96db2ae51ca7908e
--- /dev/null
+++ b/annotator/uniformer/mmdet/__init__.py
@@ -0,0 +1,28 @@
+import mmcv
+
+from .version import __version__, short_version
+
+
+def digit_version(version_str):
+ digit_version = []
+ for x in version_str.split('.'):
+ if x.isdigit():
+ digit_version.append(int(x))
+ elif x.find('rc') != -1:
+ patch_version = x.split('rc')
+ digit_version.append(int(patch_version[0]) - 1)
+ digit_version.append(int(patch_version[1]))
+ return digit_version
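+
+# Worked example: digit_version('1.3.0') -> [1, 3, 0], while
+# digit_version('1.3.0rc1') -> [1, 3, -1, 1], so a release candidate compares
+# lower than the corresponding release.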
+
+
+mmcv_minimum_version = '1.2.4'
+mmcv_maximum_version = '1.4.0'
+mmcv_version = digit_version(mmcv.__version__)
+
+
+assert (mmcv_version >= digit_version(mmcv_minimum_version)
+ and mmcv_version <= digit_version(mmcv_maximum_version)), \
+ f'MMCV=={mmcv.__version__} is used but incompatible. ' \
+ f'Please install mmcv>={mmcv_minimum_version}, <={mmcv_maximum_version}.'
+
+__all__ = ['__version__', 'short_version']
diff --git a/annotator/uniformer/mmdet/apis/__init__.py b/annotator/uniformer/mmdet/apis/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..1d8035b74877fdeccaa41cbc10a9f1f9924eac85
--- /dev/null
+++ b/annotator/uniformer/mmdet/apis/__init__.py
@@ -0,0 +1,10 @@
+from .inference import (async_inference_detector, inference_detector,
+ init_detector, show_result_pyplot)
+from .test import multi_gpu_test, single_gpu_test
+from .train import get_root_logger, set_random_seed, train_detector
+
+__all__ = [
+ 'get_root_logger', 'set_random_seed', 'train_detector', 'init_detector',
+ 'async_inference_detector', 'inference_detector', 'show_result_pyplot',
+ 'multi_gpu_test', 'single_gpu_test'
+]
diff --git a/annotator/uniformer/mmdet/apis/inference.py b/annotator/uniformer/mmdet/apis/inference.py
new file mode 100644
index 0000000000000000000000000000000000000000..df7bd4b7316cd8fad4cd10dbdfcbfeba59d95bbc
--- /dev/null
+++ b/annotator/uniformer/mmdet/apis/inference.py
@@ -0,0 +1,217 @@
+import warnings
+
+import mmcv
+import numpy as np
+import torch
+# from mmcv.ops import RoIPool
+from mmcv.parallel import collate, scatter
+from mmcv.runner import load_checkpoint
+
+from mmdet.core import get_classes
+from mmdet.datasets import replace_ImageToTensor
+from mmdet.datasets.pipelines import Compose
+from mmdet.models import build_detector
+
+
+def init_detector(config, checkpoint=None, device='cuda:0', cfg_options=None):
+ """Initialize a detector from config file.
+
+ Args:
+ config (str or :obj:`mmcv.Config`): Config file path or the config
+ object.
+ checkpoint (str, optional): Checkpoint path. If left as None, the model
+ will not load any weights.
+ cfg_options (dict): Options to override some settings in the used
+ config.
+
+ Returns:
+ nn.Module: The constructed detector.
+ """
+ if isinstance(config, str):
+ config = mmcv.Config.fromfile(config)
+ elif not isinstance(config, mmcv.Config):
+ raise TypeError('config must be a filename or Config object, '
+ f'but got {type(config)}')
+ if cfg_options is not None:
+ config.merge_from_dict(cfg_options)
+ config.model.pretrained = None
+ config.model.train_cfg = None
+ model = build_detector(config.model, test_cfg=config.get('test_cfg'))
+ if checkpoint is not None:
+ map_loc = 'cpu' if device == 'cpu' else None
+ checkpoint = load_checkpoint(model, checkpoint, map_location=map_loc)
+ if 'CLASSES' in checkpoint.get('meta', {}):
+ model.CLASSES = checkpoint['meta']['CLASSES']
+ else:
+ warnings.simplefilter('once')
+ warnings.warn('Class names are not saved in the checkpoint\'s '
+ 'meta data, use COCO classes by default.')
+ model.CLASSES = get_classes('coco')
+ model.cfg = config # save the config in the model for convenience
+ model.to(device)
+ model.eval()
+ return model
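+
+# Sketch (both paths are placeholders): build the detector once, then reuse it
+# for repeated inference calls.
+#
+#   model = init_detector('configs/some_det.py', 'some_det.pth', device='cpu')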
+
+
+class LoadImage(object):
+ """Deprecated.
+
+ A simple pipeline to load image.
+ """
+
+ def __call__(self, results):
+ """Call function to load images into results.
+
+ Args:
+ results (dict): A result dict contains the file name
+ of the image to be read.
+ Returns:
+ dict: ``results`` will be returned containing loaded image.
+ """
+ warnings.simplefilter('once')
+ warnings.warn('`LoadImage` is deprecated and will be removed in '
+ 'future releases. You may use `LoadImageFromWebcam` '
+ 'from `mmdet.datasets.pipelines.` instead.')
+ if isinstance(results['img'], str):
+ results['filename'] = results['img']
+ results['ori_filename'] = results['img']
+ else:
+ results['filename'] = None
+ results['ori_filename'] = None
+ img = mmcv.imread(results['img'])
+ results['img'] = img
+ results['img_fields'] = ['img']
+ results['img_shape'] = img.shape
+ results['ori_shape'] = img.shape
+ return results
+
+
+def inference_detector(model, imgs):
+ """Inference image(s) with the detector.
+
+ Args:
+ model (nn.Module): The loaded detector.
+ imgs (str/ndarray or list[str/ndarray] or tuple[str/ndarray]):
+ Either image files or loaded images.
+
+ Returns:
+        If imgs is a list or tuple, a list of results with the same length is
+        returned; otherwise the detection result is returned directly.
+ """
+
+ if isinstance(imgs, (list, tuple)):
+ is_batch = True
+ else:
+ imgs = [imgs]
+ is_batch = False
+
+ cfg = model.cfg
+ device = next(model.parameters()).device # model device
+
+ if isinstance(imgs[0], np.ndarray):
+ cfg = cfg.copy()
+ # set loading pipeline type
+ cfg.data.test.pipeline[0].type = 'LoadImageFromWebcam'
+
+ cfg.data.test.pipeline = replace_ImageToTensor(cfg.data.test.pipeline)
+ test_pipeline = Compose(cfg.data.test.pipeline)
+
+ datas = []
+ for img in imgs:
+ # prepare data
+ if isinstance(img, np.ndarray):
+ # directly add img
+ data = dict(img=img)
+ else:
+ # add information into dict
+ data = dict(img_info=dict(filename=img), img_prefix=None)
+ # build the data pipeline
+ data = test_pipeline(data)
+ datas.append(data)
+
+ data = collate(datas, samples_per_gpu=len(imgs))
+ # just get the actual data from DataContainer
+ data['img_metas'] = [img_metas.data[0] for img_metas in data['img_metas']]
+ data['img'] = [img.data[0] for img in data['img']]
+ if next(model.parameters()).is_cuda:
+ # scatter to specified GPU
+ data = scatter(data, [device])[0]
+ else:
+        for m in model.modules():
+            # `RoIPool` is not imported in this copy (the mmcv.ops import above
+            # is commented out), so check by class name to avoid a NameError on
+            # the CPU-only path.
+            assert type(m).__name__ != 'RoIPool', \
+                'CPU inference with RoIPool is not supported currently.'
+
+ # forward the model
+ with torch.no_grad():
+ results = model(return_loss=False, rescale=True, **data)
+
+ if not is_batch:
+ return results[0]
+ else:
+ return results
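+
+# Sketch ('demo.jpg' etc. are placeholder paths): a single input returns a single
+# result, a list input returns a list of results.
+#
+#   result = inference_detector(model, 'demo.jpg')
+#   results = inference_detector(model, ['a.jpg', 'b.jpg'])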
+
+
+async def async_inference_detector(model, img):
+ """Async inference image(s) with the detector.
+
+ Args:
+ model (nn.Module): The loaded detector.
+        img (str | ndarray): Either an image file path or a loaded image.
+
+ Returns:
+ Awaitable detection results.
+ """
+ cfg = model.cfg
+ device = next(model.parameters()).device # model device
+ # prepare data
+ if isinstance(img, np.ndarray):
+ # directly add img
+ data = dict(img=img)
+ cfg = cfg.copy()
+ # set loading pipeline type
+ cfg.data.test.pipeline[0].type = 'LoadImageFromWebcam'
+ else:
+ # add information into dict
+ data = dict(img_info=dict(filename=img), img_prefix=None)
+ # build the data pipeline
+ test_pipeline = Compose(cfg.data.test.pipeline)
+ data = test_pipeline(data)
+ data = scatter(collate([data], samples_per_gpu=1), [device])[0]
+
+ # We don't restore `torch.is_grad_enabled()` value during concurrent
+ # inference since execution can overlap
+ torch.set_grad_enabled(False)
+ result = await model.aforward_test(rescale=True, **data)
+ return result
+
+
+def show_result_pyplot(model,
+ img,
+ result,
+ score_thr=0.3,
+ title='result',
+ wait_time=0):
+ """Visualize the detection results on the image.
+
+ Args:
+ model (nn.Module): The loaded detector.
+ img (str or np.ndarray): Image filename or loaded image.
+ result (tuple[list] or list): The detection result, can be either
+ (bbox, segm) or just bbox.
+ score_thr (float): The threshold to visualize the bboxes and masks.
+ title (str): Title of the pyplot figure.
+ wait_time (float): Value of waitKey param.
+ Default: 0.
+ """
+ if hasattr(model, 'module'):
+ model = model.module
+ model.show_result(
+ img,
+ result,
+ score_thr=score_thr,
+ show=True,
+ wait_time=wait_time,
+ win_name=title,
+ bbox_color=(72, 101, 241),
+ text_color=(72, 101, 241))
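+
+# Sketch: draw the boxes returned by inference_detector on the input image; this
+# opens a window, so it assumes a display (or an interactive matplotlib backend).
+#
+#   show_result_pyplot(model, 'demo.jpg', result, score_thr=0.3)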
diff --git a/annotator/uniformer/mmdet/apis/test.py b/annotator/uniformer/mmdet/apis/test.py
new file mode 100644
index 0000000000000000000000000000000000000000..e54b1b8c24efc448972c31ee5da63041d7f97a47
--- /dev/null
+++ b/annotator/uniformer/mmdet/apis/test.py
@@ -0,0 +1,190 @@
+import os.path as osp
+import pickle
+import shutil
+import tempfile
+import time
+
+import mmcv
+import torch
+import torch.distributed as dist
+from mmcv.image import tensor2imgs
+from mmcv.runner import get_dist_info
+
+from mmdet.core import encode_mask_results
+
+
+def single_gpu_test(model,
+ data_loader,
+ show=False,
+ out_dir=None,
+ show_score_thr=0.3):
+ model.eval()
+ results = []
+ dataset = data_loader.dataset
+ prog_bar = mmcv.ProgressBar(len(dataset))
+ for i, data in enumerate(data_loader):
+ with torch.no_grad():
+ result = model(return_loss=False, rescale=True, **data)
+
+ batch_size = len(result)
+ if show or out_dir:
+ if batch_size == 1 and isinstance(data['img'][0], torch.Tensor):
+ img_tensor = data['img'][0]
+ else:
+ img_tensor = data['img'][0].data[0]
+ img_metas = data['img_metas'][0].data[0]
+ imgs = tensor2imgs(img_tensor, **img_metas[0]['img_norm_cfg'])
+ assert len(imgs) == len(img_metas)
+
+ for i, (img, img_meta) in enumerate(zip(imgs, img_metas)):
+ h, w, _ = img_meta['img_shape']
+ img_show = img[:h, :w, :]
+
+ ori_h, ori_w = img_meta['ori_shape'][:-1]
+ img_show = mmcv.imresize(img_show, (ori_w, ori_h))
+
+ if out_dir:
+ out_file = osp.join(out_dir, img_meta['ori_filename'])
+ else:
+ out_file = None
+
+ model.module.show_result(
+ img_show,
+ result[i],
+ show=show,
+ out_file=out_file,
+ score_thr=show_score_thr)
+
+ # encode mask results
+ if isinstance(result[0], tuple):
+ result = [(bbox_results, encode_mask_results(mask_results))
+ for bbox_results, mask_results in result]
+ results.extend(result)
+
+ for _ in range(batch_size):
+ prog_bar.update()
+ return results
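+
+# Sketch: `parallel_model` is a placeholder for an MMDataParallel-wrapped detector
+# and `data_loader` for a loader built via build_dataloader.
+#
+#   outputs = single_gpu_test(parallel_model, data_loader, show=False)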
+
+
+def multi_gpu_test(model, data_loader, tmpdir=None, gpu_collect=False):
+ """Test model with multiple gpus.
+
+ This method tests model with multiple gpus and collects the results
+ under two different modes: gpu and cpu modes. By setting 'gpu_collect=True'
+    it encodes results to gpu tensors and uses gpu communication for results
+    collection. In cpu mode it saves the results on different gpus to 'tmpdir'
+ and collects them by the rank 0 worker.
+
+ Args:
+ model (nn.Module): Model to be tested.
+ data_loader (nn.Dataloader): Pytorch data loader.
+ tmpdir (str): Path of directory to save the temporary results from
+ different gpus under cpu mode.
+ gpu_collect (bool): Option to use either gpu or cpu to collect results.
+
+ Returns:
+ list: The prediction results.
+ """
+ model.eval()
+ results = []
+ dataset = data_loader.dataset
+ rank, world_size = get_dist_info()
+ if rank == 0:
+ prog_bar = mmcv.ProgressBar(len(dataset))
+ time.sleep(2) # This line can prevent deadlock problem in some cases.
+ for i, data in enumerate(data_loader):
+ with torch.no_grad():
+ result = model(return_loss=False, rescale=True, **data)
+ # encode mask results
+ if isinstance(result[0], tuple):
+ result = [(bbox_results, encode_mask_results(mask_results))
+ for bbox_results, mask_results in result]
+ results.extend(result)
+
+ if rank == 0:
+ batch_size = len(result)
+ for _ in range(batch_size * world_size):
+ prog_bar.update()
+
+ # collect results from all ranks
+ if gpu_collect:
+ results = collect_results_gpu(results, len(dataset))
+ else:
+ results = collect_results_cpu(results, len(dataset), tmpdir)
+ return results
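+
+# Sketch: every rank calls this under torch.distributed; only rank 0 receives the
+# merged list (other ranks get None from the collection step). `ddp_model` and
+# `data_loader` are placeholders.
+#
+#   outputs = multi_gpu_test(ddp_model, data_loader, tmpdir='.dist_test')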
+
+
+def collect_results_cpu(result_part, size, tmpdir=None):
+ rank, world_size = get_dist_info()
+ # create a tmp dir if it is not specified
+ if tmpdir is None:
+ MAX_LEN = 512
+ # 32 is whitespace
+ dir_tensor = torch.full((MAX_LEN, ),
+ 32,
+ dtype=torch.uint8,
+ device='cuda')
+ if rank == 0:
+ mmcv.mkdir_or_exist('.dist_test')
+ tmpdir = tempfile.mkdtemp(dir='.dist_test')
+ tmpdir = torch.tensor(
+ bytearray(tmpdir.encode()), dtype=torch.uint8, device='cuda')
+ dir_tensor[:len(tmpdir)] = tmpdir
+ dist.broadcast(dir_tensor, 0)
+ tmpdir = dir_tensor.cpu().numpy().tobytes().decode().rstrip()
+ else:
+ mmcv.mkdir_or_exist(tmpdir)
+ # dump the part result to the dir
+ mmcv.dump(result_part, osp.join(tmpdir, f'part_{rank}.pkl'))
+ dist.barrier()
+ # collect all parts
+ if rank != 0:
+ return None
+ else:
+ # load results of all parts from tmp dir
+ part_list = []
+ for i in range(world_size):
+ part_file = osp.join(tmpdir, f'part_{i}.pkl')
+ part_list.append(mmcv.load(part_file))
+ # sort the results
+ ordered_results = []
+ for res in zip(*part_list):
+ ordered_results.extend(list(res))
+ # the dataloader may pad some samples
+ ordered_results = ordered_results[:size]
+ # remove tmp dir
+ shutil.rmtree(tmpdir)
+ return ordered_results
+
+
+def collect_results_gpu(result_part, size):
+ rank, world_size = get_dist_info()
+ # dump result part to tensor with pickle
+ part_tensor = torch.tensor(
+ bytearray(pickle.dumps(result_part)), dtype=torch.uint8, device='cuda')
+ # gather all result part tensor shape
+ shape_tensor = torch.tensor(part_tensor.shape, device='cuda')
+ shape_list = [shape_tensor.clone() for _ in range(world_size)]
+ dist.all_gather(shape_list, shape_tensor)
+ # padding result part tensor to max length
+ shape_max = torch.tensor(shape_list).max()
+ part_send = torch.zeros(shape_max, dtype=torch.uint8, device='cuda')
+ part_send[:shape_tensor[0]] = part_tensor
+ part_recv_list = [
+ part_tensor.new_zeros(shape_max) for _ in range(world_size)
+ ]
+ # gather all result part
+ dist.all_gather(part_recv_list, part_send)
+
+ if rank == 0:
+ part_list = []
+ for recv, shape in zip(part_recv_list, shape_list):
+ part_list.append(
+ pickle.loads(recv[:shape[0]].cpu().numpy().tobytes()))
+ # sort the results
+ ordered_results = []
+ for res in zip(*part_list):
+ ordered_results.extend(list(res))
+ # the dataloader may pad some samples
+ ordered_results = ordered_results[:size]
+ return ordered_results
diff --git a/annotator/uniformer/mmdet/apis/train.py b/annotator/uniformer/mmdet/apis/train.py
new file mode 100644
index 0000000000000000000000000000000000000000..7f2f1f95c0a8e7c9232f7aa490e8104f8e37c4f5
--- /dev/null
+++ b/annotator/uniformer/mmdet/apis/train.py
@@ -0,0 +1,185 @@
+import random
+import warnings
+
+import numpy as np
+import torch
+from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
+from mmcv.runner import (HOOKS, DistSamplerSeedHook, EpochBasedRunner,
+ Fp16OptimizerHook, OptimizerHook, build_optimizer,
+ build_runner)
+from mmcv.utils import build_from_cfg
+
+from mmdet.core import DistEvalHook, EvalHook
+from mmdet.datasets import (build_dataloader, build_dataset,
+ replace_ImageToTensor)
+from mmdet.utils import get_root_logger
+from mmcv_custom.runner import EpochBasedRunnerAmp
+try:
+ import apex
+except ImportError:
+ print('apex is not installed')
+
+
+def set_random_seed(seed, deterministic=False):
+ """Set random seed.
+
+ Args:
+ seed (int): Seed to be used.
+ deterministic (bool): Whether to set the deterministic option for
+ CUDNN backend, i.e., set `torch.backends.cudnn.deterministic`
+ to True and `torch.backends.cudnn.benchmark` to False.
+ Default: False.
+ """
+ random.seed(seed)
+ np.random.seed(seed)
+ torch.manual_seed(seed)
+ torch.cuda.manual_seed_all(seed)
+ if deterministic:
+ torch.backends.cudnn.deterministic = True
+ torch.backends.cudnn.benchmark = False
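+
+# Sketch: call once before building datasets and models so runs are reproducible.
+#
+#   set_random_seed(42, deterministic=True)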
+
+
+def train_detector(model,
+ dataset,
+ cfg,
+ distributed=False,
+ validate=False,
+ timestamp=None,
+ meta=None):
+ logger = get_root_logger(cfg.log_level)
+
+ # prepare data loaders
+ dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset]
+ if 'imgs_per_gpu' in cfg.data:
+ logger.warning('"imgs_per_gpu" is deprecated in MMDet V2.0. '
+ 'Please use "samples_per_gpu" instead')
+ if 'samples_per_gpu' in cfg.data:
+ logger.warning(
+ f'Got "imgs_per_gpu"={cfg.data.imgs_per_gpu} and '
+ f'"samples_per_gpu"={cfg.data.samples_per_gpu}, "imgs_per_gpu"'
+                f'={cfg.data.imgs_per_gpu} is used in this experiment')
+ else:
+ logger.warning(
+ 'Automatically set "samples_per_gpu"="imgs_per_gpu"='
+                f'{cfg.data.imgs_per_gpu} in this experiment')
+ cfg.data.samples_per_gpu = cfg.data.imgs_per_gpu
+
+ data_loaders = [
+ build_dataloader(
+ ds,
+ cfg.data.samples_per_gpu,
+ cfg.data.workers_per_gpu,
+ # cfg.gpus will be ignored if distributed
+ len(cfg.gpu_ids),
+ dist=distributed,
+ seed=cfg.seed) for ds in dataset
+ ]
+
+ # build optimizer
+ optimizer = build_optimizer(model, cfg.optimizer)
+
+ # use apex fp16 optimizer
+ if cfg.optimizer_config.get("type", None) and cfg.optimizer_config["type"] == "DistOptimizerHook":
+ if cfg.optimizer_config.get("use_fp16", False):
+ model, optimizer = apex.amp.initialize(
+ model.cuda(), optimizer, opt_level="O1")
+ for m in model.modules():
+ if hasattr(m, "fp16_enabled"):
+ m.fp16_enabled = True
+
+ # put model on gpus
+ if distributed:
+ find_unused_parameters = cfg.get('find_unused_parameters', False)
+ # Sets the `find_unused_parameters` parameter in
+ # torch.nn.parallel.DistributedDataParallel
+ model = MMDistributedDataParallel(
+ model.cuda(),
+ device_ids=[torch.cuda.current_device()],
+ broadcast_buffers=False,
+ find_unused_parameters=find_unused_parameters)
+ else:
+ model = MMDataParallel(
+ model.cuda(cfg.gpu_ids[0]), device_ids=cfg.gpu_ids)
+
+ if 'runner' not in cfg:
+ cfg.runner = {
+ 'type': 'EpochBasedRunner',
+ 'max_epochs': cfg.total_epochs
+ }
+ warnings.warn(
+ 'config is now expected to have a `runner` section, '
+ 'please set `runner` in your config.', UserWarning)
+ else:
+ if 'total_epochs' in cfg:
+ assert cfg.total_epochs == cfg.runner.max_epochs
+
+ # build runner
+ runner = build_runner(
+ cfg.runner,
+ default_args=dict(
+ model=model,
+ optimizer=optimizer,
+ work_dir=cfg.work_dir,
+ logger=logger,
+ meta=meta))
+
+ # an ugly workaround to make .log and .log.json filenames the same
+ runner.timestamp = timestamp
+
+ # fp16 setting
+ fp16_cfg = cfg.get('fp16', None)
+ if fp16_cfg is not None:
+ optimizer_config = Fp16OptimizerHook(
+ **cfg.optimizer_config, **fp16_cfg, distributed=distributed)
+ elif distributed and 'type' not in cfg.optimizer_config:
+ optimizer_config = OptimizerHook(**cfg.optimizer_config)
+ else:
+ optimizer_config = cfg.optimizer_config
+
+ # register hooks
+ runner.register_training_hooks(cfg.lr_config, optimizer_config,
+ cfg.checkpoint_config, cfg.log_config,
+ cfg.get('momentum_config', None))
+ if distributed:
+ if isinstance(runner, EpochBasedRunner):
+ runner.register_hook(DistSamplerSeedHook())
+
+ # register eval hooks
+ if validate:
+ # Support batch_size > 1 in validation
+ val_samples_per_gpu = cfg.data.val.pop('samples_per_gpu', 1)
+ if val_samples_per_gpu > 1:
+ # Replace 'ImageToTensor' to 'DefaultFormatBundle'
+ cfg.data.val.pipeline = replace_ImageToTensor(
+ cfg.data.val.pipeline)
+ val_dataset = build_dataset(cfg.data.val, dict(test_mode=True))
+ val_dataloader = build_dataloader(
+ val_dataset,
+ samples_per_gpu=val_samples_per_gpu,
+ workers_per_gpu=cfg.data.workers_per_gpu,
+ dist=distributed,
+ shuffle=False)
+ eval_cfg = cfg.get('evaluation', {})
+ eval_cfg['by_epoch'] = cfg.runner['type'] != 'IterBasedRunner'
+ eval_hook = DistEvalHook if distributed else EvalHook
+ runner.register_hook(eval_hook(val_dataloader, **eval_cfg))
+
+ # user-defined hooks
+ if cfg.get('custom_hooks', None):
+ custom_hooks = cfg.custom_hooks
+ assert isinstance(custom_hooks, list), \
+ f'custom_hooks expect list type, but got {type(custom_hooks)}'
+ for hook_cfg in cfg.custom_hooks:
+ assert isinstance(hook_cfg, dict), \
+ 'Each item in custom_hooks expects dict type, but got ' \
+ f'{type(hook_cfg)}'
+ hook_cfg = hook_cfg.copy()
+ priority = hook_cfg.pop('priority', 'NORMAL')
+ hook = build_from_cfg(hook_cfg, HOOKS)
+ runner.register_hook(hook, priority=priority)
+
+ if cfg.resume_from:
+ runner.resume(cfg.resume_from)
+ elif cfg.load_from:
+ runner.load_checkpoint(cfg.load_from)
+ runner.run(data_loaders, cfg.workflow)
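+
+# Sketch of the expected call site; all arguments are placeholders for objects
+# built from an mmdet config file:
+#
+#   train_detector(model, datasets, cfg, distributed=False, validate=True,
+#                  timestamp=timestamp, meta=meta)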
diff --git a/annotator/uniformer/mmdet/core/__init__.py b/annotator/uniformer/mmdet/core/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e812391e23894ef296755381386d4849f774418a
--- /dev/null
+++ b/annotator/uniformer/mmdet/core/__init__.py
@@ -0,0 +1,7 @@
+from .anchor import * # noqa: F401, F403
+from .bbox import * # noqa: F401, F403
+from .evaluation import * # noqa: F401, F403
+from .export import * # noqa: F401, F403
+from .mask import * # noqa: F401, F403
+from .post_processing import * # noqa: F401, F403
+from .utils import * # noqa: F401, F403
diff --git a/annotator/uniformer/mmdet/core/anchor/__init__.py b/annotator/uniformer/mmdet/core/anchor/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..5838ff3eefb03bc83928fa13848cea9ff8647827
--- /dev/null
+++ b/annotator/uniformer/mmdet/core/anchor/__init__.py
@@ -0,0 +1,11 @@
+from .anchor_generator import (AnchorGenerator, LegacyAnchorGenerator,
+ YOLOAnchorGenerator)
+from .builder import ANCHOR_GENERATORS, build_anchor_generator
+from .point_generator import PointGenerator
+from .utils import anchor_inside_flags, calc_region, images_to_levels
+
+__all__ = [
+ 'AnchorGenerator', 'LegacyAnchorGenerator', 'anchor_inside_flags',
+ 'PointGenerator', 'images_to_levels', 'calc_region',
+ 'build_anchor_generator', 'ANCHOR_GENERATORS', 'YOLOAnchorGenerator'
+]
diff --git a/annotator/uniformer/mmdet/core/anchor/anchor_generator.py b/annotator/uniformer/mmdet/core/anchor/anchor_generator.py
new file mode 100644
index 0000000000000000000000000000000000000000..388d2608b8138da13d1208b99595fbd1db59d178
--- /dev/null
+++ b/annotator/uniformer/mmdet/core/anchor/anchor_generator.py
@@ -0,0 +1,727 @@
+import mmcv
+import numpy as np
+import torch
+from torch.nn.modules.utils import _pair
+
+from .builder import ANCHOR_GENERATORS
+
+
+@ANCHOR_GENERATORS.register_module()
+class AnchorGenerator(object):
+ """Standard anchor generator for 2D anchor-based detectors.
+
+ Args:
+ strides (list[int] | list[tuple[int, int]]): Strides of anchors
+ in multiple feature levels in order (w, h).
+ ratios (list[float]): The list of ratios between the height and width
+ of anchors in a single level.
+ scales (list[int] | None): Anchor scales for anchors in a single level.
+            It cannot be set at the same time as `octave_base_scale` and
+            `scales_per_octave`.
+        base_sizes (list[int] | None): The basic sizes of anchors in multiple
+            levels. If None is given, strides will be used as base_sizes.
+            (If strides are non-square, the shortest stride is taken.)
+ scale_major (bool): Whether to multiply scales first when generating
+ base anchors. If true, the anchors in the same row will have the
+ same scales. By default it is True in V2.0
+ octave_base_scale (int): The base scale of octave.
+ scales_per_octave (int): Number of scales for each octave.
+ `octave_base_scale` and `scales_per_octave` are usually used in
+ retinanet and the `scales` should be None when they are set.
+ centers (list[tuple[float, float]] | None): The centers of the anchor
+ relative to the feature grid center in multiple feature levels.
+ By default it is set to be None and not used. If a list of tuple of
+ float is given, they will be used to shift the centers of anchors.
+ center_offset (float): The offset of center in proportion to anchors'
+ width and height. By default it is 0 in V2.0.
+
+ Examples:
+ >>> from mmdet.core import AnchorGenerator
+ >>> self = AnchorGenerator([16], [1.], [1.], [9])
+ >>> all_anchors = self.grid_anchors([(2, 2)], device='cpu')
+ >>> print(all_anchors)
+ [tensor([[-4.5000, -4.5000, 4.5000, 4.5000],
+ [11.5000, -4.5000, 20.5000, 4.5000],
+ [-4.5000, 11.5000, 4.5000, 20.5000],
+ [11.5000, 11.5000, 20.5000, 20.5000]])]
+ >>> self = AnchorGenerator([16, 32], [1.], [1.], [9, 18])
+ >>> all_anchors = self.grid_anchors([(2, 2), (1, 1)], device='cpu')
+ >>> print(all_anchors)
+ [tensor([[-4.5000, -4.5000, 4.5000, 4.5000],
+ [11.5000, -4.5000, 20.5000, 4.5000],
+ [-4.5000, 11.5000, 4.5000, 20.5000],
+ [11.5000, 11.5000, 20.5000, 20.5000]]), \
+ tensor([[-9., -9., 9., 9.]])]
+ """
+
+ def __init__(self,
+ strides,
+ ratios,
+ scales=None,
+ base_sizes=None,
+ scale_major=True,
+ octave_base_scale=None,
+ scales_per_octave=None,
+ centers=None,
+ center_offset=0.):
+ # check center and center_offset
+ if center_offset != 0:
+ assert centers is None, 'center cannot be set when center_offset' \
+ f'!=0, {centers} is given.'
+ if not (0 <= center_offset <= 1):
+ raise ValueError('center_offset should be in range [0, 1], '
+ f'{center_offset} is given.')
+ if centers is not None:
+ assert len(centers) == len(strides), \
+ 'The number of strides should be the same as centers, got ' \
+ f'{strides} and {centers}'
+
+ # calculate base sizes of anchors
+ self.strides = [_pair(stride) for stride in strides]
+ self.base_sizes = [min(stride) for stride in self.strides
+ ] if base_sizes is None else base_sizes
+ assert len(self.base_sizes) == len(self.strides), \
+ 'The number of strides should be the same as base sizes, got ' \
+ f'{self.strides} and {self.base_sizes}'
+
+ # calculate scales of anchors
+ assert ((octave_base_scale is not None
+ and scales_per_octave is not None) ^ (scales is not None)), \
+ 'scales and octave_base_scale with scales_per_octave cannot' \
+ ' be set at the same time'
+ if scales is not None:
+ self.scales = torch.Tensor(scales)
+ elif octave_base_scale is not None and scales_per_octave is not None:
+ octave_scales = np.array(
+ [2**(i / scales_per_octave) for i in range(scales_per_octave)])
+ scales = octave_scales * octave_base_scale
+ self.scales = torch.Tensor(scales)
+ else:
+ raise ValueError('Either scales or octave_base_scale with '
+ 'scales_per_octave should be set')
+
+ self.octave_base_scale = octave_base_scale
+ self.scales_per_octave = scales_per_octave
+ self.ratios = torch.Tensor(ratios)
+ self.scale_major = scale_major
+ self.centers = centers
+ self.center_offset = center_offset
+ self.base_anchors = self.gen_base_anchors()
+
+ @property
+ def num_base_anchors(self):
+ """list[int]: total number of base anchors in a feature grid"""
+ return [base_anchors.size(0) for base_anchors in self.base_anchors]
+
+ @property
+ def num_levels(self):
+        """int: number of feature levels that the generator will be applied to"""
+ return len(self.strides)
+
+ def gen_base_anchors(self):
+ """Generate base anchors.
+
+ Returns:
+ list(torch.Tensor): Base anchors of a feature grid in multiple \
+ feature levels.
+ """
+ multi_level_base_anchors = []
+ for i, base_size in enumerate(self.base_sizes):
+ center = None
+ if self.centers is not None:
+ center = self.centers[i]
+ multi_level_base_anchors.append(
+ self.gen_single_level_base_anchors(
+ base_size,
+ scales=self.scales,
+ ratios=self.ratios,
+ center=center))
+ return multi_level_base_anchors
+
+ def gen_single_level_base_anchors(self,
+ base_size,
+ scales,
+ ratios,
+ center=None):
+ """Generate base anchors of a single level.
+
+ Args:
+ base_size (int | float): Basic size of an anchor.
+ scales (torch.Tensor): Scales of the anchor.
+            ratios (torch.Tensor): The ratio between the height
+ and width of anchors in a single level.
+ center (tuple[float], optional): The center of the base anchor
+ related to a single feature grid. Defaults to None.
+
+ Returns:
+ torch.Tensor: Anchors in a single-level feature maps.
+ """
+ w = base_size
+ h = base_size
+ if center is None:
+ x_center = self.center_offset * w
+ y_center = self.center_offset * h
+ else:
+ x_center, y_center = center
+
+ h_ratios = torch.sqrt(ratios)
+ w_ratios = 1 / h_ratios
+ if self.scale_major:
+ ws = (w * w_ratios[:, None] * scales[None, :]).view(-1)
+ hs = (h * h_ratios[:, None] * scales[None, :]).view(-1)
+ else:
+ ws = (w * scales[:, None] * w_ratios[None, :]).view(-1)
+ hs = (h * scales[:, None] * h_ratios[None, :]).view(-1)
+
+ # use float anchor and the anchor's center is aligned with the
+ # pixel center
+ base_anchors = [
+ x_center - 0.5 * ws, y_center - 0.5 * hs, x_center + 0.5 * ws,
+ y_center + 0.5 * hs
+ ]
+ base_anchors = torch.stack(base_anchors, dim=-1)
+
+ return base_anchors
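+
+    # Worked example: base_size=9, scales=torch.Tensor([1.]), ratios=torch.Tensor([1.])
+    # and center_offset=0 give ws = hs = tensor([9.]) and a single base anchor
+    # [-4.5, -4.5, 4.5, 4.5], matching the class docstring above.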
+
+ def _meshgrid(self, x, y, row_major=True):
+ """Generate mesh grid of x and y.
+
+ Args:
+ x (torch.Tensor): Grids of x dimension.
+ y (torch.Tensor): Grids of y dimension.
+ row_major (bool, optional): Whether to return y grids first.
+ Defaults to True.
+
+ Returns:
+ tuple[torch.Tensor]: The mesh grids of x and y.
+ """
+ # use shape instead of len to keep tracing while exporting to onnx
+ xx = x.repeat(y.shape[0])
+ yy = y.view(-1, 1).repeat(1, x.shape[0]).view(-1)
+ if row_major:
+ return xx, yy
+ else:
+ return yy, xx
+
+ def grid_anchors(self, featmap_sizes, device='cuda'):
+ """Generate grid anchors in multiple feature levels.
+
+ Args:
+ featmap_sizes (list[tuple]): List of feature map sizes in
+ multiple feature levels.
+ device (str): Device where the anchors will be put on.
+
+ Return:
+ list[torch.Tensor]: Anchors in multiple feature levels. \
+ The sizes of each tensor should be [N, 4], where \
+ N = width * height * num_base_anchors, width and height \
+ are the sizes of the corresponding feature level, \
+ num_base_anchors is the number of anchors for that level.
+ """
+ assert self.num_levels == len(featmap_sizes)
+ multi_level_anchors = []
+ for i in range(self.num_levels):
+ anchors = self.single_level_grid_anchors(
+ self.base_anchors[i].to(device),
+ featmap_sizes[i],
+ self.strides[i],
+ device=device)
+ multi_level_anchors.append(anchors)
+ return multi_level_anchors
+
+ def single_level_grid_anchors(self,
+ base_anchors,
+ featmap_size,
+ stride=(16, 16),
+ device='cuda'):
+ """Generate grid anchors of a single level.
+
+ Note:
+ This function is usually called by method ``self.grid_anchors``.
+
+ Args:
+ base_anchors (torch.Tensor): The base anchors of a feature grid.
+ featmap_size (tuple[int]): Size of the feature maps.
+ stride (tuple[int], optional): Stride of the feature map in order
+ (w, h). Defaults to (16, 16).
+ device (str, optional): Device the tensor will be put on.
+ Defaults to 'cuda'.
+
+ Returns:
+ torch.Tensor: Anchors in the overall feature maps.
+ """
+ # keep as Tensor, so that we can covert to ONNX correctly
+ feat_h, feat_w = featmap_size
+ shift_x = torch.arange(0, feat_w, device=device) * stride[0]
+ shift_y = torch.arange(0, feat_h, device=device) * stride[1]
+
+ shift_xx, shift_yy = self._meshgrid(shift_x, shift_y)
+ shifts = torch.stack([shift_xx, shift_yy, shift_xx, shift_yy], dim=-1)
+ shifts = shifts.type_as(base_anchors)
+ # first feat_w elements correspond to the first row of shifts
+ # add A anchors (1, A, 4) to K shifts (K, 1, 4) to get
+ # shifted anchors (K, A, 4), reshape to (K*A, 4)
+
+ all_anchors = base_anchors[None, :, :] + shifts[:, None, :]
+ all_anchors = all_anchors.view(-1, 4)
+ # first A rows correspond to A anchors of (0, 0) in feature map,
+ # then (0, 1), (0, 2), ...
+ return all_anchors
+
+ def valid_flags(self, featmap_sizes, pad_shape, device='cuda'):
+ """Generate valid flags of anchors in multiple feature levels.
+
+ Args:
+ featmap_sizes (list(tuple)): List of feature map sizes in
+ multiple feature levels.
+ pad_shape (tuple): The padded shape of the image.
+ device (str): Device where the anchors will be put on.
+
+ Return:
+ list(torch.Tensor): Valid flags of anchors in multiple levels.
+ """
+ assert self.num_levels == len(featmap_sizes)
+ multi_level_flags = []
+ for i in range(self.num_levels):
+ anchor_stride = self.strides[i]
+ feat_h, feat_w = featmap_sizes[i]
+ h, w = pad_shape[:2]
+ valid_feat_h = min(int(np.ceil(h / anchor_stride[1])), feat_h)
+ valid_feat_w = min(int(np.ceil(w / anchor_stride[0])), feat_w)
+ flags = self.single_level_valid_flags((feat_h, feat_w),
+ (valid_feat_h, valid_feat_w),
+ self.num_base_anchors[i],
+ device=device)
+ multi_level_flags.append(flags)
+ return multi_level_flags
+
+ def single_level_valid_flags(self,
+ featmap_size,
+ valid_size,
+ num_base_anchors,
+ device='cuda'):
+ """Generate the valid flags of anchor in a single feature map.
+
+ Args:
+ featmap_size (tuple[int]): The size of feature maps.
+ valid_size (tuple[int]): The valid size of the feature maps.
+ num_base_anchors (int): The number of base anchors.
+ device (str, optional): Device where the flags will be put on.
+ Defaults to 'cuda'.
+
+ Returns:
+ torch.Tensor: The valid flags of each anchor in a single level \
+ feature map.
+ """
+ feat_h, feat_w = featmap_size
+ valid_h, valid_w = valid_size
+ assert valid_h <= feat_h and valid_w <= feat_w
+ valid_x = torch.zeros(feat_w, dtype=torch.bool, device=device)
+ valid_y = torch.zeros(feat_h, dtype=torch.bool, device=device)
+ valid_x[:valid_w] = 1
+ valid_y[:valid_h] = 1
+ valid_xx, valid_yy = self._meshgrid(valid_x, valid_y)
+ valid = valid_xx & valid_yy
+ valid = valid[:, None].expand(valid.size(0),
+ num_base_anchors).contiguous().view(-1)
+ return valid
+
+ def __repr__(self):
+ """str: a string that describes the module"""
+ indent_str = ' '
+ repr_str = self.__class__.__name__ + '(\n'
+ repr_str += f'{indent_str}strides={self.strides},\n'
+ repr_str += f'{indent_str}ratios={self.ratios},\n'
+ repr_str += f'{indent_str}scales={self.scales},\n'
+ repr_str += f'{indent_str}base_sizes={self.base_sizes},\n'
+ repr_str += f'{indent_str}scale_major={self.scale_major},\n'
+ repr_str += f'{indent_str}octave_base_scale='
+ repr_str += f'{self.octave_base_scale},\n'
+ repr_str += f'{indent_str}scales_per_octave='
+ repr_str += f'{self.scales_per_octave},\n'
+        repr_str += f'{indent_str}num_levels={self.num_levels},\n'
+ repr_str += f'{indent_str}centers={self.centers},\n'
+ repr_str += f'{indent_str}center_offset={self.center_offset})'
+ return repr_str
+
+
+@ANCHOR_GENERATORS.register_module()
+class SSDAnchorGenerator(AnchorGenerator):
+ """Anchor generator for SSD.
+
+ Args:
+ strides (list[int] | list[tuple[int, int]]): Strides of anchors
+ in multiple feature levels.
+ ratios (list[float]): The list of ratios between the height and width
+ of anchors in a single level.
+ basesize_ratio_range (tuple(float)): Ratio range of anchors.
+ input_size (int): Size of feature map, 300 for SSD300,
+ 512 for SSD512.
+ scale_major (bool): Whether to multiply scales first when generating
+ base anchors. If true, the anchors in the same row will have the
+ same scales. It is always set to be False in SSD.
+ """
+
+ def __init__(self,
+ strides,
+ ratios,
+ basesize_ratio_range,
+ input_size=300,
+ scale_major=True):
+ assert len(strides) == len(ratios)
+ assert mmcv.is_tuple_of(basesize_ratio_range, float)
+
+ self.strides = [_pair(stride) for stride in strides]
+ self.input_size = input_size
+ self.centers = [(stride[0] / 2., stride[1] / 2.)
+ for stride in self.strides]
+ self.basesize_ratio_range = basesize_ratio_range
+
+ # calculate anchor ratios and sizes
+ min_ratio, max_ratio = basesize_ratio_range
+ min_ratio = int(min_ratio * 100)
+ max_ratio = int(max_ratio * 100)
+ step = int(np.floor(max_ratio - min_ratio) / (self.num_levels - 2))
+ min_sizes = []
+ max_sizes = []
+ for ratio in range(int(min_ratio), int(max_ratio) + 1, step):
+ min_sizes.append(int(self.input_size * ratio / 100))
+ max_sizes.append(int(self.input_size * (ratio + step) / 100))
+ if self.input_size == 300:
+ if basesize_ratio_range[0] == 0.15: # SSD300 COCO
+ min_sizes.insert(0, int(self.input_size * 7 / 100))
+ max_sizes.insert(0, int(self.input_size * 15 / 100))
+ elif basesize_ratio_range[0] == 0.2: # SSD300 VOC
+ min_sizes.insert(0, int(self.input_size * 10 / 100))
+ max_sizes.insert(0, int(self.input_size * 20 / 100))
+ else:
+ raise ValueError(
+ 'basesize_ratio_range[0] should be either 0.15'
+ 'or 0.2 when input_size is 300, got '
+ f'{basesize_ratio_range[0]}.')
+ elif self.input_size == 512:
+ if basesize_ratio_range[0] == 0.1: # SSD512 COCO
+ min_sizes.insert(0, int(self.input_size * 4 / 100))
+ max_sizes.insert(0, int(self.input_size * 10 / 100))
+ elif basesize_ratio_range[0] == 0.15: # SSD512 VOC
+ min_sizes.insert(0, int(self.input_size * 7 / 100))
+ max_sizes.insert(0, int(self.input_size * 15 / 100))
+ else:
+ raise ValueError('basesize_ratio_range[0] should be either 0.1'
+ 'or 0.15 when input_size is 512, got'
+ f' {basesize_ratio_range[0]}.')
+ else:
+ raise ValueError('Only support 300 or 512 in SSDAnchorGenerator'
+ f', got {self.input_size}.')
+
+ anchor_ratios = []
+ anchor_scales = []
+ for k in range(len(self.strides)):
+ scales = [1., np.sqrt(max_sizes[k] / min_sizes[k])]
+ anchor_ratio = [1.]
+ for r in ratios[k]:
+ anchor_ratio += [1 / r, r] # 4 or 6 ratio
+ anchor_ratios.append(torch.Tensor(anchor_ratio))
+ anchor_scales.append(torch.Tensor(scales))
+
+ self.base_sizes = min_sizes
+ self.scales = anchor_scales
+ self.ratios = anchor_ratios
+ self.scale_major = scale_major
+ self.center_offset = 0
+ self.base_anchors = self.gen_base_anchors()
+
+ def gen_base_anchors(self):
+ """Generate base anchors.
+
+ Returns:
+ list(torch.Tensor): Base anchors of a feature grid in multiple \
+ feature levels.
+ """
+ multi_level_base_anchors = []
+ for i, base_size in enumerate(self.base_sizes):
+ base_anchors = self.gen_single_level_base_anchors(
+ base_size,
+ scales=self.scales[i],
+ ratios=self.ratios[i],
+ center=self.centers[i])
+ indices = list(range(len(self.ratios[i])))
+ indices.insert(1, len(indices))
+ base_anchors = torch.index_select(base_anchors, 0,
+ torch.LongTensor(indices))
+ multi_level_base_anchors.append(base_anchors)
+ return multi_level_base_anchors
+
+ def __repr__(self):
+ """str: a string that describes the module"""
+ indent_str = ' '
+ repr_str = self.__class__.__name__ + '(\n'
+ repr_str += f'{indent_str}strides={self.strides},\n'
+ repr_str += f'{indent_str}scales={self.scales},\n'
+ repr_str += f'{indent_str}scale_major={self.scale_major},\n'
+ repr_str += f'{indent_str}input_size={self.input_size},\n'
+ repr_str += f'{indent_str}ratios={self.ratios},\n'
+ repr_str += f'{indent_str}num_levels={self.num_levels},\n'
+ repr_str += f'{indent_str}base_sizes={self.base_sizes},\n'
+ repr_str += f'{indent_str}basesize_ratio_range='
+ repr_str += f'{self.basesize_ratio_range})'
+ return repr_str
+
+
+@ANCHOR_GENERATORS.register_module()
+class LegacyAnchorGenerator(AnchorGenerator):
+ """Legacy anchor generator used in MMDetection V1.x.
+
+ Note:
+ Difference to the V2.0 anchor generator:
+
+        1. The center offset of V1.x anchors is set to be 0.5 rather than 0.
+        2. The width/height are reduced by 1 when calculating the anchors' \
+ centers and corners to meet the V1.x coordinate system.
+ 3. The anchors' corners are quantized.
+
+ Args:
+ strides (list[int] | list[tuple[int]]): Strides of anchors
+ in multiple feature levels.
+ ratios (list[float]): The list of ratios between the height and width
+ of anchors in a single level.
+ scales (list[int] | None): Anchor scales for anchors in a single level.
+            It cannot be set at the same time as `octave_base_scale` and
+            `scales_per_octave`.
+ base_sizes (list[int]): The basic sizes of anchors in multiple levels.
+ If None is given, strides will be used to generate base_sizes.
+ scale_major (bool): Whether to multiply scales first when generating
+ base anchors. If true, the anchors in the same row will have the
+ same scales. By default it is True in V2.0
+ octave_base_scale (int): The base scale of octave.
+ scales_per_octave (int): Number of scales for each octave.
+ `octave_base_scale` and `scales_per_octave` are usually used in
+ retinanet and the `scales` should be None when they are set.
+ centers (list[tuple[float, float]] | None): The centers of the anchor
+ relative to the feature grid center in multiple feature levels.
+            By default it is set to be None and not used. If a list of float
+            is given, this list will be used to shift the centers of anchors.
+        center_offset (float): The offset of center in proportion to anchors'
+            width and height. By default it is 0 in V2.0, but it should be 0.5
+            in v1.x models.
+
+ Examples:
+ >>> from mmdet.core import LegacyAnchorGenerator
+ >>> self = LegacyAnchorGenerator(
+ >>> [16], [1.], [1.], [9], center_offset=0.5)
+ >>> all_anchors = self.grid_anchors(((2, 2),), device='cpu')
+ >>> print(all_anchors)
+ [tensor([[ 0., 0., 8., 8.],
+ [16., 0., 24., 8.],
+ [ 0., 16., 8., 24.],
+ [16., 16., 24., 24.]])]
+ """
+
+ def gen_single_level_base_anchors(self,
+ base_size,
+ scales,
+ ratios,
+ center=None):
+ """Generate base anchors of a single level.
+
+ Note:
+            The width/height of anchors are reduced by 1 when calculating \
+ the centers and corners to meet the V1.x coordinate system.
+
+ Args:
+ base_size (int | float): Basic size of an anchor.
+ scales (torch.Tensor): Scales of the anchor.
+            ratios (torch.Tensor): The ratio between the height
+ and width of anchors in a single level.
+ center (tuple[float], optional): The center of the base anchor
+ related to a single feature grid. Defaults to None.
+
+ Returns:
+ torch.Tensor: Anchors in a single-level feature map.
+ """
+ w = base_size
+ h = base_size
+ if center is None:
+ x_center = self.center_offset * (w - 1)
+ y_center = self.center_offset * (h - 1)
+ else:
+ x_center, y_center = center
+
+ h_ratios = torch.sqrt(ratios)
+ w_ratios = 1 / h_ratios
+ if self.scale_major:
+ ws = (w * w_ratios[:, None] * scales[None, :]).view(-1)
+ hs = (h * h_ratios[:, None] * scales[None, :]).view(-1)
+ else:
+ ws = (w * scales[:, None] * w_ratios[None, :]).view(-1)
+ hs = (h * scales[:, None] * h_ratios[None, :]).view(-1)
+
+ # use float anchor and the anchor's center is aligned with the
+ # pixel center
+ base_anchors = [
+ x_center - 0.5 * (ws - 1), y_center - 0.5 * (hs - 1),
+ x_center + 0.5 * (ws - 1), y_center + 0.5 * (hs - 1)
+ ]
+ base_anchors = torch.stack(base_anchors, dim=-1).round()
+
+ return base_anchors
+
+
+@ANCHOR_GENERATORS.register_module()
+class LegacySSDAnchorGenerator(SSDAnchorGenerator, LegacyAnchorGenerator):
+ """Legacy anchor generator used in MMDetection V1.x.
+
+ The difference between `LegacySSDAnchorGenerator` and `SSDAnchorGenerator`
+ can be found in `LegacyAnchorGenerator`.
+ """
+
+ def __init__(self,
+ strides,
+ ratios,
+ basesize_ratio_range,
+ input_size=300,
+ scale_major=True):
+ super(LegacySSDAnchorGenerator,
+ self).__init__(strides, ratios, basesize_ratio_range, input_size,
+ scale_major)
+ self.centers = [((stride - 1) / 2., (stride - 1) / 2.)
+ for stride in strides]
+ self.base_anchors = self.gen_base_anchors()
+
+
+@ANCHOR_GENERATORS.register_module()
+class YOLOAnchorGenerator(AnchorGenerator):
+ """Anchor generator for YOLO.
+
+ Args:
+ strides (list[int] | list[tuple[int, int]]): Strides of anchors
+ in multiple feature levels.
+ base_sizes (list[list[tuple[int, int]]]): The basic sizes
+ of anchors in multiple levels.
+ """
+
+ def __init__(self, strides, base_sizes):
+ self.strides = [_pair(stride) for stride in strides]
+ self.centers = [(stride[0] / 2., stride[1] / 2.)
+ for stride in self.strides]
+ self.base_sizes = []
+ num_anchor_per_level = len(base_sizes[0])
+ for base_sizes_per_level in base_sizes:
+ assert num_anchor_per_level == len(base_sizes_per_level)
+ self.base_sizes.append(
+ [_pair(base_size) for base_size in base_sizes_per_level])
+ self.base_anchors = self.gen_base_anchors()
+
+ @property
+ def num_levels(self):
+        """int: number of feature levels that the generator will be applied to"""
+ return len(self.base_sizes)
+
+ def gen_base_anchors(self):
+ """Generate base anchors.
+
+ Returns:
+ list(torch.Tensor): Base anchors of a feature grid in multiple \
+ feature levels.
+ """
+ multi_level_base_anchors = []
+ for i, base_sizes_per_level in enumerate(self.base_sizes):
+ center = None
+ if self.centers is not None:
+ center = self.centers[i]
+ multi_level_base_anchors.append(
+ self.gen_single_level_base_anchors(base_sizes_per_level,
+ center))
+ return multi_level_base_anchors
+
+ def gen_single_level_base_anchors(self, base_sizes_per_level, center=None):
+ """Generate base anchors of a single level.
+
+ Args:
+ base_sizes_per_level (list[tuple[int, int]]): Basic sizes of
+ anchors.
+ center (tuple[float], optional): The center of the base anchor
+ related to a single feature grid. Defaults to None.
+
+ Returns:
+ torch.Tensor: Anchors in a single-level feature maps.
+ """
+ x_center, y_center = center
+ base_anchors = []
+ for base_size in base_sizes_per_level:
+ w, h = base_size
+
+ # use float anchor and the anchor's center is aligned with the
+ # pixel center
+ base_anchor = torch.Tensor([
+ x_center - 0.5 * w, y_center - 0.5 * h, x_center + 0.5 * w,
+ y_center + 0.5 * h
+ ])
+ base_anchors.append(base_anchor)
+ base_anchors = torch.stack(base_anchors, dim=0)
+
+ return base_anchors
+
+ def responsible_flags(self, featmap_sizes, gt_bboxes, device='cuda'):
+ """Generate responsible anchor flags of grid cells in multiple scales.
+
+ Args:
+ featmap_sizes (list(tuple)): List of feature map sizes in multiple
+ feature levels.
+ gt_bboxes (Tensor): Ground truth boxes, shape (n, 4).
+ device (str): Device where the anchors will be put on.
+
+ Return:
+            list(torch.Tensor): responsible flags of anchors in multiple levels.
+ """
+ assert self.num_levels == len(featmap_sizes)
+ multi_level_responsible_flags = []
+ for i in range(self.num_levels):
+ anchor_stride = self.strides[i]
+ flags = self.single_level_responsible_flags(
+ featmap_sizes[i],
+ gt_bboxes,
+ anchor_stride,
+ self.num_base_anchors[i],
+ device=device)
+ multi_level_responsible_flags.append(flags)
+ return multi_level_responsible_flags
+
+ def single_level_responsible_flags(self,
+ featmap_size,
+ gt_bboxes,
+ stride,
+ num_base_anchors,
+ device='cuda'):
+ """Generate the responsible flags of anchor in a single feature map.
+
+ Args:
+ featmap_size (tuple[int]): The size of feature maps.
+ gt_bboxes (Tensor): Ground truth boxes, shape (n, 4).
+ stride (tuple(int)): stride of current level
+ num_base_anchors (int): The number of base anchors.
+ device (str, optional): Device where the flags will be put on.
+ Defaults to 'cuda'.
+
+ Returns:
+ torch.Tensor: The valid flags of each anchor in a single level \
+ feature map.
+ """
+ feat_h, feat_w = featmap_size
+ gt_bboxes_cx = ((gt_bboxes[:, 0] + gt_bboxes[:, 2]) * 0.5).to(device)
+ gt_bboxes_cy = ((gt_bboxes[:, 1] + gt_bboxes[:, 3]) * 0.5).to(device)
+ gt_bboxes_grid_x = torch.floor(gt_bboxes_cx / stride[0]).long()
+ gt_bboxes_grid_y = torch.floor(gt_bboxes_cy / stride[1]).long()
+
+ # row major indexing
+ gt_bboxes_grid_idx = gt_bboxes_grid_y * feat_w + gt_bboxes_grid_x
+
+ responsible_grid = torch.zeros(
+ feat_h * feat_w, dtype=torch.uint8, device=device)
+ responsible_grid[gt_bboxes_grid_idx] = 1
+
+ responsible_grid = responsible_grid[:, None].expand(
+ responsible_grid.size(0), num_base_anchors).contiguous().view(-1)
+ return responsible_grid
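+ # Illustrative worked example (not part of upstream mmdet): on a 4x4 feature
+ # map with stride (8, 8) and a gt whose center is (12, 20), grid_x =
+ # floor(12 / 8) = 1 and grid_y = floor(20 / 8) = 2, so the flat cell index is
+ # 2 * 4 + 1 = 9; only that cell, repeated num_base_anchors times, is flagged
+ # as responsible.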
diff --git a/annotator/uniformer/mmdet/core/anchor/builder.py b/annotator/uniformer/mmdet/core/anchor/builder.py
new file mode 100644
index 0000000000000000000000000000000000000000..d79b448ebca9f2b21d455046623172c48c5c3ef0
--- /dev/null
+++ b/annotator/uniformer/mmdet/core/anchor/builder.py
@@ -0,0 +1,7 @@
+from mmcv.utils import Registry, build_from_cfg
+
+ANCHOR_GENERATORS = Registry('Anchor generator')
+
+
+def build_anchor_generator(cfg, default_args=None):
+ return build_from_cfg(cfg, ANCHOR_GENERATORS, default_args)
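+
+ # Usage sketch (illustrative, not part of upstream mmdet; assumes the module
+ # that registers the desired generator has been imported so the class is in
+ # ANCHOR_GENERATORS), e.g. with the PointGenerator added elsewhere in this diff:
+ #     cfg = dict(type='PointGenerator')
+ #     point_generator = build_anchor_generator(cfg)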
diff --git a/annotator/uniformer/mmdet/core/anchor/point_generator.py b/annotator/uniformer/mmdet/core/anchor/point_generator.py
new file mode 100644
index 0000000000000000000000000000000000000000..e6fbd988c317992c092c68c827dc4c53223b4a4a
--- /dev/null
+++ b/annotator/uniformer/mmdet/core/anchor/point_generator.py
@@ -0,0 +1,37 @@
+import torch
+
+from .builder import ANCHOR_GENERATORS
+
+
+@ANCHOR_GENERATORS.register_module()
+class PointGenerator(object):
+
+ def _meshgrid(self, x, y, row_major=True):
+ xx = x.repeat(len(y))
+ yy = y.view(-1, 1).repeat(1, len(x)).view(-1)
+ if row_major:
+ return xx, yy
+ else:
+ return yy, xx
+
+ def grid_points(self, featmap_size, stride=16, device='cuda'):
+ feat_h, feat_w = featmap_size
+ shift_x = torch.arange(0., feat_w, device=device) * stride
+ shift_y = torch.arange(0., feat_h, device=device) * stride
+ shift_xx, shift_yy = self._meshgrid(shift_x, shift_y)
+ stride = shift_x.new_full((shift_xx.shape[0], ), stride)
+ shifts = torch.stack([shift_xx, shift_yy, stride], dim=-1)
+ all_points = shifts.to(device)
+ return all_points
+
+ def valid_flags(self, featmap_size, valid_size, device='cuda'):
+ feat_h, feat_w = featmap_size
+ valid_h, valid_w = valid_size
+ assert valid_h <= feat_h and valid_w <= feat_w
+ valid_x = torch.zeros(feat_w, dtype=torch.bool, device=device)
+ valid_y = torch.zeros(feat_h, dtype=torch.bool, device=device)
+ valid_x[:valid_w] = 1
+ valid_y[:valid_h] = 1
+ valid_xx, valid_yy = self._meshgrid(valid_x, valid_y)
+ valid = valid_xx & valid_yy
+ return valid
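+
+ # Usage sketch (illustrative, not part of upstream mmdet):
+ #     gen = PointGenerator()
+ #     pts = gen.grid_points((2, 3), stride=16, device='cpu')
+ # The resulting `pts` has shape (6, 3); each row is (x, y, stride) for one
+ # grid location, enumerated in row-major order.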
diff --git a/annotator/uniformer/mmdet/core/anchor/utils.py b/annotator/uniformer/mmdet/core/anchor/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..ab9b53f37f7be1f52fe63c5e53df64ac1303b9e0
--- /dev/null
+++ b/annotator/uniformer/mmdet/core/anchor/utils.py
@@ -0,0 +1,71 @@
+import torch
+
+
+def images_to_levels(target, num_levels):
+ """Convert targets by image to targets by feature level.
+
+ [target_img0, target_img1] -> [target_level0, target_level1, ...]
+ """
+ target = torch.stack(target, 0)
+ level_targets = []
+ start = 0
+ for n in num_levels:
+ end = start + n
+ # level_targets.append(target[:, start:end].squeeze(0))
+ level_targets.append(target[:, start:end])
+ start = end
+ return level_targets
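+ # Shape sketch (illustrative, not part of upstream mmdet): with two images
+ # whose per-anchor targets are each of shape (6, 4) and num_levels = [4, 2],
+ # the stacked tensor is (2, 6, 4) and the output is
+ # [target[:, 0:4], target[:, 4:6]], i.e. tensors of shape (2, 4, 4) and
+ # (2, 2, 4).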
+
+
+def anchor_inside_flags(flat_anchors,
+ valid_flags,
+ img_shape,
+ allowed_border=0):
+ """Check whether the anchors are inside the border.
+
+ Args:
+ flat_anchors (torch.Tensor): Flatten anchors, shape (n, 4).
+ valid_flags (torch.Tensor): An existing valid flags of anchors.
+ img_shape (tuple(int)): Shape of current image.
+ allowed_border (int, optional): The border to allow the valid anchor.
+ Defaults to 0.
+
+ Returns:
+ torch.Tensor: Flags indicating whether the anchors are inside a \
+ valid range.
+ """
+ img_h, img_w = img_shape[:2]
+ if allowed_border >= 0:
+ inside_flags = valid_flags & \
+ (flat_anchors[:, 0] >= -allowed_border) & \
+ (flat_anchors[:, 1] >= -allowed_border) & \
+ (flat_anchors[:, 2] < img_w + allowed_border) & \
+ (flat_anchors[:, 3] < img_h + allowed_border)
+ else:
+ inside_flags = valid_flags
+ return inside_flags
+
+
+def calc_region(bbox, ratio, featmap_size=None):
+ """Calculate a proportional bbox region.
+
+ The bbox center are fixed and the new h' and w' is h * ratio and w * ratio.
+
+ Args:
+ bbox (Tensor): Bboxes to calculate regions, shape (n, 4).
+ ratio (float): Ratio of the output region.
+ featmap_size (tuple): Feature map size used for clipping the boundary.
+
+ Returns:
+ tuple: x1, y1, x2, y2
+ """
+ x1 = torch.round((1 - ratio) * bbox[0] + ratio * bbox[2]).long()
+ y1 = torch.round((1 - ratio) * bbox[1] + ratio * bbox[3]).long()
+ x2 = torch.round(ratio * bbox[0] + (1 - ratio) * bbox[2]).long()
+ y2 = torch.round(ratio * bbox[1] + (1 - ratio) * bbox[3]).long()
+ if featmap_size is not None:
+ x1 = x1.clamp(min=0, max=featmap_size[1])
+ y1 = y1.clamp(min=0, max=featmap_size[0])
+ x2 = x2.clamp(min=0, max=featmap_size[1])
+ y2 = y2.clamp(min=0, max=featmap_size[0])
+ return (x1, y1, x2, y2)
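+
+ # Worked example (illustrative, not part of upstream mmdet): for
+ # bbox = torch.tensor([0., 0., 100., 100.]) and ratio = 0.2,
+ # x1 = round(0.8 * 0 + 0.2 * 100) = 20, y1 = 20, x2 = round(0.2 * 0 +
+ # 0.8 * 100) = 80, y2 = 80, i.e. a central region covering 60% of each side.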
diff --git a/annotator/uniformer/mmdet/core/bbox/__init__.py b/annotator/uniformer/mmdet/core/bbox/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..a3537297f57e4c3670afdb97b5fcb1b2d775e5f3
--- /dev/null
+++ b/annotator/uniformer/mmdet/core/bbox/__init__.py
@@ -0,0 +1,27 @@
+from .assigners import (AssignResult, BaseAssigner, CenterRegionAssigner,
+ MaxIoUAssigner, RegionAssigner)
+from .builder import build_assigner, build_bbox_coder, build_sampler
+from .coder import (BaseBBoxCoder, DeltaXYWHBBoxCoder, PseudoBBoxCoder,
+ TBLRBBoxCoder)
+from .iou_calculators import BboxOverlaps2D, bbox_overlaps
+from .samplers import (BaseSampler, CombinedSampler,
+ InstanceBalancedPosSampler, IoUBalancedNegSampler,
+ OHEMSampler, PseudoSampler, RandomSampler,
+ SamplingResult, ScoreHLRSampler)
+from .transforms import (bbox2distance, bbox2result, bbox2roi,
+ bbox_cxcywh_to_xyxy, bbox_flip, bbox_mapping,
+ bbox_mapping_back, bbox_rescale, bbox_xyxy_to_cxcywh,
+ distance2bbox, roi2bbox)
+
+__all__ = [
+ 'bbox_overlaps', 'BboxOverlaps2D', 'BaseAssigner', 'MaxIoUAssigner',
+ 'AssignResult', 'BaseSampler', 'PseudoSampler', 'RandomSampler',
+ 'InstanceBalancedPosSampler', 'IoUBalancedNegSampler', 'CombinedSampler',
+ 'OHEMSampler', 'SamplingResult', 'ScoreHLRSampler', 'build_assigner',
+ 'build_sampler', 'bbox_flip', 'bbox_mapping', 'bbox_mapping_back',
+ 'bbox2roi', 'roi2bbox', 'bbox2result', 'distance2bbox', 'bbox2distance',
+ 'build_bbox_coder', 'BaseBBoxCoder', 'PseudoBBoxCoder',
+ 'DeltaXYWHBBoxCoder', 'TBLRBBoxCoder', 'CenterRegionAssigner',
+ 'bbox_rescale', 'bbox_cxcywh_to_xyxy', 'bbox_xyxy_to_cxcywh',
+ 'RegionAssigner'
+]
diff --git a/annotator/uniformer/mmdet/core/bbox/assigners/__init__.py b/annotator/uniformer/mmdet/core/bbox/assigners/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..95e34a848652f2ab3ca6d3489aa2934d24817888
--- /dev/null
+++ b/annotator/uniformer/mmdet/core/bbox/assigners/__init__.py
@@ -0,0 +1,16 @@
+from .approx_max_iou_assigner import ApproxMaxIoUAssigner
+from .assign_result import AssignResult
+from .atss_assigner import ATSSAssigner
+from .base_assigner import BaseAssigner
+from .center_region_assigner import CenterRegionAssigner
+from .grid_assigner import GridAssigner
+from .hungarian_assigner import HungarianAssigner
+from .max_iou_assigner import MaxIoUAssigner
+from .point_assigner import PointAssigner
+from .region_assigner import RegionAssigner
+
+__all__ = [
+ 'BaseAssigner', 'MaxIoUAssigner', 'ApproxMaxIoUAssigner', 'AssignResult',
+ 'PointAssigner', 'ATSSAssigner', 'CenterRegionAssigner', 'GridAssigner',
+ 'HungarianAssigner', 'RegionAssigner'
+]
diff --git a/annotator/uniformer/mmdet/core/bbox/assigners/approx_max_iou_assigner.py b/annotator/uniformer/mmdet/core/bbox/assigners/approx_max_iou_assigner.py
new file mode 100644
index 0000000000000000000000000000000000000000..6d07656d173744426795c81c14c6bcdb4e63a406
--- /dev/null
+++ b/annotator/uniformer/mmdet/core/bbox/assigners/approx_max_iou_assigner.py
@@ -0,0 +1,145 @@
+import torch
+
+from ..builder import BBOX_ASSIGNERS
+from ..iou_calculators import build_iou_calculator
+from .max_iou_assigner import MaxIoUAssigner
+
+
+@BBOX_ASSIGNERS.register_module()
+class ApproxMaxIoUAssigner(MaxIoUAssigner):
+ """Assign a corresponding gt bbox or background to each bbox.
+
+ Each proposal will be assigned an integer indicating the ground truth
+ index (semi-positive integer: index (0-based) of the assigned gt, -1: background).
+
+ - -1: negative sample, no assigned gt
+ - semi-positive integer: positive sample, index (0-based) of assigned gt
+
+ Args:
+ pos_iou_thr (float): IoU threshold for positive bboxes.
+ neg_iou_thr (float or tuple): IoU threshold for negative bboxes.
+ min_pos_iou (float): Minimum iou for a bbox to be considered as a
+ positive bbox. Positive samples can have smaller IoU than
+ pos_iou_thr due to the 4th step (assign max IoU sample to each gt).
+ gt_max_assign_all (bool): Whether to assign all bboxes with the same
+ highest overlap with some gt to that gt.
+ ignore_iof_thr (float): IoF threshold for ignoring bboxes (if
+ `gt_bboxes_ignore` is specified). Negative values mean not
+ ignoring any bboxes.
+ ignore_wrt_candidates (bool): Whether to compute the iof between
+ `bboxes` and `gt_bboxes_ignore`, or the contrary.
+ match_low_quality (bool): Whether to allow low quality matches. This is
+ usually allowed for RPN and single stage detectors, but not allowed
+ in the second stage.
+ gpu_assign_thr (int): The upper bound of the number of GT for GPU
+ assign. When the number of gt is above this threshold, will assign
+ on CPU device. Negative values mean not assign on CPU.
+ """
+
+ def __init__(self,
+ pos_iou_thr,
+ neg_iou_thr,
+ min_pos_iou=.0,
+ gt_max_assign_all=True,
+ ignore_iof_thr=-1,
+ ignore_wrt_candidates=True,
+ match_low_quality=True,
+ gpu_assign_thr=-1,
+ iou_calculator=dict(type='BboxOverlaps2D')):
+ self.pos_iou_thr = pos_iou_thr
+ self.neg_iou_thr = neg_iou_thr
+ self.min_pos_iou = min_pos_iou
+ self.gt_max_assign_all = gt_max_assign_all
+ self.ignore_iof_thr = ignore_iof_thr
+ self.ignore_wrt_candidates = ignore_wrt_candidates
+ self.gpu_assign_thr = gpu_assign_thr
+ self.match_low_quality = match_low_quality
+ self.iou_calculator = build_iou_calculator(iou_calculator)
+
+ def assign(self,
+ approxs,
+ squares,
+ approxs_per_octave,
+ gt_bboxes,
+ gt_bboxes_ignore=None,
+ gt_labels=None):
+ """Assign gt to approxs.
+
+ This method assigns a gt bbox to each group of approxs (bboxes);
+ each group of approxs is represented by a base approx (bbox) and
+ will be assigned -1 or a semi-positive number.
+ background_label (-1) means negative sample,
+ semi-positive number is the index (0-based) of assigned gt.
+ The assignment is done in the following steps, and the order matters.
+
+ 1. assign every bbox to background_label (-1)
+ 2. use the max IoU of each group of approxs to assign
+ 3. assign proposals whose iou with all gts < neg_iou_thr to background
+ 4. for each bbox, if the iou with its nearest gt >= pos_iou_thr,
+ assign it to that gt
+ 5. for each gt bbox, assign its nearest proposals (may be more than
+ one) to itself
+
+ Args:
+ approxs (Tensor): Bounding boxes to be assigned,
+ shape(approxs_per_octave*n, 4).
+ squares (Tensor): Base Bounding boxes to be assigned,
+ shape(n, 4).
+ approxs_per_octave (int): number of approxs per octave
+ gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4).
+ gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are
+ labelled as `ignored`, e.g., crowd boxes in COCO.
+ gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ).
+
+ Returns:
+ :obj:`AssignResult`: The assign result.
+ """
+ num_squares = squares.size(0)
+ num_gts = gt_bboxes.size(0)
+
+ if num_squares == 0 or num_gts == 0:
+ # No predictions and/or truth, return empty assignment
+ overlaps = approxs.new(num_gts, num_squares)
+ assign_result = self.assign_wrt_overlaps(overlaps, gt_labels)
+ return assign_result
+
+ # re-organize anchors by approxs_per_octave x num_squares
+ approxs = torch.transpose(
+ approxs.view(num_squares, approxs_per_octave, 4), 0,
+ 1).contiguous().view(-1, 4)
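+ # Note (illustrative): the resulting row order is octave-major, i.e.
+ # [approx 0 of every square, approx 1 of every square, ...], which is what
+ # lets `all_overlaps.view(approxs_per_octave, num_squares, num_gts).max(dim=0)`
+ # below pick the best IoU per square.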
+ assign_on_cpu = True if (self.gpu_assign_thr > 0) and (
+ num_gts > self.gpu_assign_thr) else False
+ # compute overlap and assign gt on CPU when number of GT is large
+ if assign_on_cpu:
+ device = approxs.device
+ approxs = approxs.cpu()
+ gt_bboxes = gt_bboxes.cpu()
+ if gt_bboxes_ignore is not None:
+ gt_bboxes_ignore = gt_bboxes_ignore.cpu()
+ if gt_labels is not None:
+ gt_labels = gt_labels.cpu()
+ all_overlaps = self.iou_calculator(approxs, gt_bboxes)
+
+ overlaps, _ = all_overlaps.view(approxs_per_octave, num_squares,
+ num_gts).max(dim=0)
+ overlaps = torch.transpose(overlaps, 0, 1)
+
+ if (self.ignore_iof_thr > 0 and gt_bboxes_ignore is not None
+ and gt_bboxes_ignore.numel() > 0 and squares.numel() > 0):
+ if self.ignore_wrt_candidates:
+ ignore_overlaps = self.iou_calculator(
+ squares, gt_bboxes_ignore, mode='iof')
+ ignore_max_overlaps, _ = ignore_overlaps.max(dim=1)
+ else:
+ ignore_overlaps = self.iou_calculator(
+ gt_bboxes_ignore, squares, mode='iof')
+ ignore_max_overlaps, _ = ignore_overlaps.max(dim=0)
+ overlaps[:, ignore_max_overlaps > self.ignore_iof_thr] = -1
+
+ assign_result = self.assign_wrt_overlaps(overlaps, gt_labels)
+ if assign_on_cpu:
+ assign_result.gt_inds = assign_result.gt_inds.to(device)
+ assign_result.max_overlaps = assign_result.max_overlaps.to(device)
+ if assign_result.labels is not None:
+ assign_result.labels = assign_result.labels.to(device)
+ return assign_result
diff --git a/annotator/uniformer/mmdet/core/bbox/assigners/assign_result.py b/annotator/uniformer/mmdet/core/bbox/assigners/assign_result.py
new file mode 100644
index 0000000000000000000000000000000000000000..4639fbdba0a5b92778e1ab87d61182e54bfb9b6f
--- /dev/null
+++ b/annotator/uniformer/mmdet/core/bbox/assigners/assign_result.py
@@ -0,0 +1,204 @@
+import torch
+
+from mmdet.utils import util_mixins
+
+
+class AssignResult(util_mixins.NiceRepr):
+ """Stores assignments between predicted and truth boxes.
+
+ Attributes:
+ num_gts (int): the number of truth boxes considered when computing this
+ assignment
+
+ gt_inds (LongTensor): for each predicted box indicates the 1-based
+ index of the assigned truth box. 0 means unassigned and -1 means
+ ignore.
+
+ max_overlaps (FloatTensor): the iou between the predicted box and its
+ assigned truth box.
+
+ labels (None | LongTensor): If specified, for each predicted box
+ indicates the category label of the assigned truth box.
+
+ Example:
+ >>> # An assign result between 4 predicted boxes and 9 true boxes
+ >>> # where only two boxes were assigned.
+ >>> num_gts = 9
+ >>> max_overlaps = torch.FloatTensor([0, .5, .9, 0])
+ >>> gt_inds = torch.LongTensor([-1, 1, 2, 0])
+ >>> labels = torch.LongTensor([0, 3, 4, 0])
+ >>> self = AssignResult(num_gts, gt_inds, max_overlaps, labels)
+ >>> print(str(self)) # xdoctest: +IGNORE_WANT
+
+ >>> # Force addition of gt labels (when adding gt as proposals)
+ >>> new_labels = torch.LongTensor([3, 4, 5])
+ >>> self.add_gt_(new_labels)
+ >>> print(str(self)) # xdoctest: +IGNORE_WANT
+
+ """
+
+ def __init__(self, num_gts, gt_inds, max_overlaps, labels=None):
+ self.num_gts = num_gts
+ self.gt_inds = gt_inds
+ self.max_overlaps = max_overlaps
+ self.labels = labels
+ # Interface for possible user-defined properties
+ self._extra_properties = {}
+
+ @property
+ def num_preds(self):
+ """int: the number of predictions in this assignment"""
+ return len(self.gt_inds)
+
+ def set_extra_property(self, key, value):
+ """Set user-defined new property."""
+ assert key not in self.info
+ self._extra_properties[key] = value
+
+ def get_extra_property(self, key):
+ """Get user-defined property."""
+ return self._extra_properties.get(key, None)
+
+ @property
+ def info(self):
+ """dict: a dictionary of info about the object"""
+ basic_info = {
+ 'num_gts': self.num_gts,
+ 'num_preds': self.num_preds,
+ 'gt_inds': self.gt_inds,
+ 'max_overlaps': self.max_overlaps,
+ 'labels': self.labels,
+ }
+ basic_info.update(self._extra_properties)
+ return basic_info
+
+ def __nice__(self):
+ """str: a "nice" summary string describing this assign result"""
+ parts = []
+ parts.append(f'num_gts={self.num_gts!r}')
+ if self.gt_inds is None:
+ parts.append(f'gt_inds={self.gt_inds!r}')
+ else:
+ parts.append(f'gt_inds.shape={tuple(self.gt_inds.shape)!r}')
+ if self.max_overlaps is None:
+ parts.append(f'max_overlaps={self.max_overlaps!r}')
+ else:
+ parts.append('max_overlaps.shape='
+ f'{tuple(self.max_overlaps.shape)!r}')
+ if self.labels is None:
+ parts.append(f'labels={self.labels!r}')
+ else:
+ parts.append(f'labels.shape={tuple(self.labels.shape)!r}')
+ return ', '.join(parts)
+
+ @classmethod
+ def random(cls, **kwargs):
+ """Create random AssignResult for tests or debugging.
+
+ Args:
+ num_preds: number of predicted boxes
+ num_gts: number of true boxes
+ p_ignore (float): probability of a predicted box assigned to an
+ ignored truth
+ p_assigned (float): probability of a predicted box not being
+ assigned
+ p_use_label (float | bool): with labels or not
+ rng (None | int | numpy.random.RandomState): seed or state
+
+ Returns:
+ :obj:`AssignResult`: Randomly generated assign results.
+
+ Example:
+ >>> from mmdet.core.bbox.assigners.assign_result import * # NOQA
+ >>> self = AssignResult.random()
+ >>> print(self.info)
+ """
+ from mmdet.core.bbox import demodata
+ rng = demodata.ensure_rng(kwargs.get('rng', None))
+
+ num_gts = kwargs.get('num_gts', None)
+ num_preds = kwargs.get('num_preds', None)
+ p_ignore = kwargs.get('p_ignore', 0.3)
+ p_assigned = kwargs.get('p_assigned', 0.7)
+ p_use_label = kwargs.get('p_use_label', 0.5)
+ num_classes = kwargs.get('num_classes', 3)
+
+ if num_gts is None:
+ num_gts = rng.randint(0, 8)
+ if num_preds is None:
+ num_preds = rng.randint(0, 16)
+
+ if num_gts == 0:
+ max_overlaps = torch.zeros(num_preds, dtype=torch.float32)
+ gt_inds = torch.zeros(num_preds, dtype=torch.int64)
+ if p_use_label is True or rng.rand() < p_use_label:
+ labels = torch.zeros(num_preds, dtype=torch.int64)
+ else:
+ labels = None
+ else:
+ import numpy as np
+ # Create an overlap for each predicted box
+ max_overlaps = torch.from_numpy(rng.rand(num_preds))
+
+ # Construct gt_inds for each predicted box
+ is_assigned = torch.from_numpy(rng.rand(num_preds) < p_assigned)
+ # maximum number of assignments constraints
+ n_assigned = min(num_preds, min(num_gts, is_assigned.sum()))
+
+ assigned_idxs = np.where(is_assigned)[0]
+ rng.shuffle(assigned_idxs)
+ assigned_idxs = assigned_idxs[0:n_assigned]
+ assigned_idxs.sort()
+
+ is_assigned[:] = 0
+ is_assigned[assigned_idxs] = True
+
+ is_ignore = torch.from_numpy(
+ rng.rand(num_preds) < p_ignore) & is_assigned
+
+ gt_inds = torch.zeros(num_preds, dtype=torch.int64)
+
+ true_idxs = np.arange(num_gts)
+ rng.shuffle(true_idxs)
+ true_idxs = torch.from_numpy(true_idxs)
+ gt_inds[is_assigned] = true_idxs[:n_assigned]
+
+ gt_inds = torch.from_numpy(
+ rng.randint(1, num_gts + 1, size=num_preds))
+ gt_inds[is_ignore] = -1
+ gt_inds[~is_assigned] = 0
+ max_overlaps[~is_assigned] = 0
+
+ if p_use_label is True or rng.rand() < p_use_label:
+ if num_classes == 0:
+ labels = torch.zeros(num_preds, dtype=torch.int64)
+ else:
+ labels = torch.from_numpy(
+ # remind that we set FG labels to [0, num_class-1]
+ # since mmdet v2.0
+ # BG cat_id: num_class
+ rng.randint(0, num_classes, size=num_preds))
+ labels[~is_assigned] = 0
+ else:
+ labels = None
+
+ self = cls(num_gts, gt_inds, max_overlaps, labels)
+ return self
+
+ def add_gt_(self, gt_labels):
+ """Add ground truth as assigned results.
+
+ Args:
+ gt_labels (torch.Tensor): Labels of gt boxes
+ """
+ self_inds = torch.arange(
+ 1, len(gt_labels) + 1, dtype=torch.long, device=gt_labels.device)
+ self.gt_inds = torch.cat([self_inds, self.gt_inds])
+
+ self.max_overlaps = torch.cat(
+ [self.max_overlaps.new_ones(len(gt_labels)), self.max_overlaps])
+
+ if self.labels is not None:
+ self.labels = torch.cat([gt_labels, self.labels])
diff --git a/annotator/uniformer/mmdet/core/bbox/assigners/atss_assigner.py b/annotator/uniformer/mmdet/core/bbox/assigners/atss_assigner.py
new file mode 100644
index 0000000000000000000000000000000000000000..d4fe9d0e3c8704bd780d493eff20a5505dbe9580
--- /dev/null
+++ b/annotator/uniformer/mmdet/core/bbox/assigners/atss_assigner.py
@@ -0,0 +1,178 @@
+import torch
+
+from ..builder import BBOX_ASSIGNERS
+from ..iou_calculators import build_iou_calculator
+from .assign_result import AssignResult
+from .base_assigner import BaseAssigner
+
+
+@BBOX_ASSIGNERS.register_module()
+class ATSSAssigner(BaseAssigner):
+ """Assign a corresponding gt bbox or background to each bbox.
+
+ Each proposal will be assigned `0` or a positive integer
+ indicating the ground truth index.
+
+ - 0: negative sample, no assigned gt
+ - positive integer: positive sample, index (1-based) of assigned gt
+
+ Args:
+ topk (int): number of bboxes selected on each level
+ """
+
+ def __init__(self,
+ topk,
+ iou_calculator=dict(type='BboxOverlaps2D'),
+ ignore_iof_thr=-1):
+ self.topk = topk
+ self.iou_calculator = build_iou_calculator(iou_calculator)
+ self.ignore_iof_thr = ignore_iof_thr
+
+ # https://github.com/sfzhang15/ATSS/blob/master/atss_core/modeling/rpn/atss/loss.py
+
+ def assign(self,
+ bboxes,
+ num_level_bboxes,
+ gt_bboxes,
+ gt_bboxes_ignore=None,
+ gt_labels=None):
+ """Assign gt to bboxes.
+
+ The assignment is done in following steps
+
+ 1. compute iou between all bbox (bbox of all pyramid levels) and gt
+ 2. compute center distance between all bbox and gt
+ 3. on each pyramid level, for each gt, select k bboxes whose centers
+ are closest to the gt center, so we select k*l bboxes in total as
+ candidates for each gt
+ 4. get the corresponding iou for these candidates, and compute the
+ mean and std, set mean + std as the iou threshold
+ 5. select these candidates whose iou are greater than or equal to
+ the threshold as positive
+ 6. limit the positive sample's center in gt
+
+
+ Args:
+ bboxes (Tensor): Bounding boxes to be assigned, shape(n, 4).
+ num_level_bboxes (List): num of bboxes in each level
+ gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4).
+ gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are
+ labelled as `ignored`, e.g., crowd boxes in COCO.
+ gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ).
+
+ Returns:
+ :obj:`AssignResult`: The assign result.
+ """
+ INF = 100000000
+ bboxes = bboxes[:, :4]
+ num_gt, num_bboxes = gt_bboxes.size(0), bboxes.size(0)
+
+ # compute iou between all bbox and gt
+ overlaps = self.iou_calculator(bboxes, gt_bboxes)
+
+ # assign 0 by default
+ assigned_gt_inds = overlaps.new_full((num_bboxes, ),
+ 0,
+ dtype=torch.long)
+
+ if num_gt == 0 or num_bboxes == 0:
+ # No ground truth or boxes, return empty assignment
+ max_overlaps = overlaps.new_zeros((num_bboxes, ))
+ if num_gt == 0:
+ # No truth, assign everything to background
+ assigned_gt_inds[:] = 0
+ if gt_labels is None:
+ assigned_labels = None
+ else:
+ assigned_labels = overlaps.new_full((num_bboxes, ),
+ -1,
+ dtype=torch.long)
+ return AssignResult(
+ num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels)
+
+ # compute center distance between all bbox and gt
+ gt_cx = (gt_bboxes[:, 0] + gt_bboxes[:, 2]) / 2.0
+ gt_cy = (gt_bboxes[:, 1] + gt_bboxes[:, 3]) / 2.0
+ gt_points = torch.stack((gt_cx, gt_cy), dim=1)
+
+ bboxes_cx = (bboxes[:, 0] + bboxes[:, 2]) / 2.0
+ bboxes_cy = (bboxes[:, 1] + bboxes[:, 3]) / 2.0
+ bboxes_points = torch.stack((bboxes_cx, bboxes_cy), dim=1)
+
+ distances = (bboxes_points[:, None, :] -
+ gt_points[None, :, :]).pow(2).sum(-1).sqrt()
+
+ if (self.ignore_iof_thr > 0 and gt_bboxes_ignore is not None
+ and gt_bboxes_ignore.numel() > 0 and bboxes.numel() > 0):
+ ignore_overlaps = self.iou_calculator(
+ bboxes, gt_bboxes_ignore, mode='iof')
+ ignore_max_overlaps, _ = ignore_overlaps.max(dim=1)
+ ignore_idxs = ignore_max_overlaps > self.ignore_iof_thr
+ distances[ignore_idxs, :] = INF
+ assigned_gt_inds[ignore_idxs] = -1
+
+ # Selecting candidates based on the center distance
+ candidate_idxs = []
+ start_idx = 0
+ for level, bboxes_per_level in enumerate(num_level_bboxes):
+ # on each pyramid level, for each gt,
+ # select k bbox whose center are closest to the gt center
+ end_idx = start_idx + bboxes_per_level
+ distances_per_level = distances[start_idx:end_idx, :]
+ selectable_k = min(self.topk, bboxes_per_level)
+ _, topk_idxs_per_level = distances_per_level.topk(
+ selectable_k, dim=0, largest=False)
+ candidate_idxs.append(topk_idxs_per_level + start_idx)
+ start_idx = end_idx
+ candidate_idxs = torch.cat(candidate_idxs, dim=0)
+
+ # get the corresponding iou for these candidates, and compute the
+ # mean and std, set mean + std as the iou threshold
+ candidate_overlaps = overlaps[candidate_idxs, torch.arange(num_gt)]
+ overlaps_mean_per_gt = candidate_overlaps.mean(0)
+ overlaps_std_per_gt = candidate_overlaps.std(0)
+ overlaps_thr_per_gt = overlaps_mean_per_gt + overlaps_std_per_gt
+
+ is_pos = candidate_overlaps >= overlaps_thr_per_gt[None, :]
+
+ # limit the positive sample's center in gt
+ for gt_idx in range(num_gt):
+ candidate_idxs[:, gt_idx] += gt_idx * num_bboxes
+ ep_bboxes_cx = bboxes_cx.view(1, -1).expand(
+ num_gt, num_bboxes).contiguous().view(-1)
+ ep_bboxes_cy = bboxes_cy.view(1, -1).expand(
+ num_gt, num_bboxes).contiguous().view(-1)
+ candidate_idxs = candidate_idxs.view(-1)
+
+ # calculate the left, top, right, bottom distance between positive
+ # bbox center and gt side
+ l_ = ep_bboxes_cx[candidate_idxs].view(-1, num_gt) - gt_bboxes[:, 0]
+ t_ = ep_bboxes_cy[candidate_idxs].view(-1, num_gt) - gt_bboxes[:, 1]
+ r_ = gt_bboxes[:, 2] - ep_bboxes_cx[candidate_idxs].view(-1, num_gt)
+ b_ = gt_bboxes[:, 3] - ep_bboxes_cy[candidate_idxs].view(-1, num_gt)
+ is_in_gts = torch.stack([l_, t_, r_, b_], dim=1).min(dim=1)[0] > 0.01
+ is_pos = is_pos & is_in_gts
+
+ # if an anchor box is assigned to multiple gts,
+ # the one with the highest IoU will be selected.
+ overlaps_inf = torch.full_like(overlaps,
+ -INF).t().contiguous().view(-1)
+ index = candidate_idxs.view(-1)[is_pos.view(-1)]
+ overlaps_inf[index] = overlaps.t().contiguous().view(-1)[index]
+ overlaps_inf = overlaps_inf.view(num_gt, -1).t()
+
+ max_overlaps, argmax_overlaps = overlaps_inf.max(dim=1)
+ assigned_gt_inds[
+ max_overlaps != -INF] = argmax_overlaps[max_overlaps != -INF] + 1
+
+ if gt_labels is not None:
+ assigned_labels = assigned_gt_inds.new_full((num_bboxes, ), -1)
+ pos_inds = torch.nonzero(
+ assigned_gt_inds > 0, as_tuple=False).squeeze()
+ if pos_inds.numel() > 0:
+ assigned_labels[pos_inds] = gt_labels[
+ assigned_gt_inds[pos_inds] - 1]
+ else:
+ assigned_labels = None
+ return AssignResult(
+ num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels)
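+
+ # Numerical sketch (illustrative, not part of upstream mmdet): if a gt's
+ # candidate IoUs across levels are [0.20, 0.30, 0.40, 0.50], the adaptive
+ # threshold is mean + std = 0.35 + ~0.13 = ~0.48, so only the 0.50 candidate
+ # is kept as positive (subject to the center-inside-gt check above).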
diff --git a/annotator/uniformer/mmdet/core/bbox/assigners/base_assigner.py b/annotator/uniformer/mmdet/core/bbox/assigners/base_assigner.py
new file mode 100644
index 0000000000000000000000000000000000000000..1ff0160dbb4bfbf53cb40d1d5cb29bcc3d197a59
--- /dev/null
+++ b/annotator/uniformer/mmdet/core/bbox/assigners/base_assigner.py
@@ -0,0 +1,9 @@
+from abc import ABCMeta, abstractmethod
+
+
+class BaseAssigner(metaclass=ABCMeta):
+ """Base assigner that assigns boxes to ground truth boxes."""
+
+ @abstractmethod
+ def assign(self, bboxes, gt_bboxes, gt_bboxes_ignore=None, gt_labels=None):
+ """Assign boxes to either a ground truth boxes or a negative boxes."""
diff --git a/annotator/uniformer/mmdet/core/bbox/assigners/center_region_assigner.py b/annotator/uniformer/mmdet/core/bbox/assigners/center_region_assigner.py
new file mode 100644
index 0000000000000000000000000000000000000000..488e3b615318787751cab3211e38dd9471c666be
--- /dev/null
+++ b/annotator/uniformer/mmdet/core/bbox/assigners/center_region_assigner.py
@@ -0,0 +1,335 @@
+import torch
+
+from ..builder import BBOX_ASSIGNERS
+from ..iou_calculators import build_iou_calculator
+from .assign_result import AssignResult
+from .base_assigner import BaseAssigner
+
+
+def scale_boxes(bboxes, scale):
+ """Expand an array of boxes by a given scale.
+
+ Args:
+ bboxes (Tensor): Shape (m, 4)
+ scale (float): The scale factor of bboxes
+
+ Returns:
+ (Tensor): Shape (m, 4). Scaled bboxes
+ """
+ assert bboxes.size(1) == 4
+ w_half = (bboxes[:, 2] - bboxes[:, 0]) * .5
+ h_half = (bboxes[:, 3] - bboxes[:, 1]) * .5
+ x_c = (bboxes[:, 2] + bboxes[:, 0]) * .5
+ y_c = (bboxes[:, 3] + bboxes[:, 1]) * .5
+
+ w_half *= scale
+ h_half *= scale
+
+ boxes_scaled = torch.zeros_like(bboxes)
+ boxes_scaled[:, 0] = x_c - w_half
+ boxes_scaled[:, 2] = x_c + w_half
+ boxes_scaled[:, 1] = y_c - h_half
+ boxes_scaled[:, 3] = y_c + h_half
+ return boxes_scaled
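+ # Worked example (illustrative, not part of upstream mmdet):
+ # scale_boxes(torch.tensor([[0., 0., 10., 10.]]), 0.2) keeps the center at
+ # (5, 5) and shrinks each half-size from 5 to 1, giving [[4., 4., 6., 6.]],
+ # the "core" region used by CenterRegionAssigner below.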
+
+
+def is_located_in(points, bboxes):
+ """Are points located in bboxes.
+
+ Args:
+ points (Tensor): Points, shape: (m, 2).
+ bboxes (Tensor): Bounding boxes, shape: (n, 4).
+
+ Returns:
+ Tensor: Flags indicating if points are located in bboxes, shape: (m, n).
+ """
+ assert points.size(1) == 2
+ assert bboxes.size(1) == 4
+ return (points[:, 0].unsqueeze(1) > bboxes[:, 0].unsqueeze(0)) & \
+ (points[:, 0].unsqueeze(1) < bboxes[:, 2].unsqueeze(0)) & \
+ (points[:, 1].unsqueeze(1) > bboxes[:, 1].unsqueeze(0)) & \
+ (points[:, 1].unsqueeze(1) < bboxes[:, 3].unsqueeze(0))
+
+
+def bboxes_area(bboxes):
+ """Compute the area of an array of bboxes.
+
+ Args:
+ bboxes (Tensor): The coordinates of bboxes. Shape: (m, 4)
+
+ Returns:
+ Tensor: Area of the bboxes. Shape: (m, )
+ """
+ assert bboxes.size(1) == 4
+ w = (bboxes[:, 2] - bboxes[:, 0])
+ h = (bboxes[:, 3] - bboxes[:, 1])
+ areas = w * h
+ return areas
+
+
+@BBOX_ASSIGNERS.register_module()
+class CenterRegionAssigner(BaseAssigner):
+ """Assign pixels at the center region of a bbox as positive.
+
+ Each proposal will be assigned `-1`, `0`, or a positive integer
+ indicating the ground truth index.
+ - -1: ignored sample
+ - 0: negative sample, no assigned gt
+ - positive integer: positive sample, index (1-based) of assigned gt
+
+ Args:
+ pos_scale (float): Scale of the center (core) region within which
+ pixels are labelled as positive.
+ neg_scale (float): Scale of the shadow region; pixels between the
+ core and shadow regions are shadowed, and pixels outside the
+ shadow region are labelled as negative.
+ min_pos_iof (float): Minimum iof of a pixel with a gt to be
+ labelled as positive. Default: 1e-2
+ ignore_gt_scale (float): Threshold within which the pixels
+ are ignored when the gt is labelled as shadowed. Default: 0.5
+ foreground_dominate (bool): If True, the bbox will be assigned as
+ positive when a gt's kernel region overlaps with another's shadowed
+ (ignored) region, otherwise it is set as ignored. Default to False.
+ """
+
+ def __init__(self,
+ pos_scale,
+ neg_scale,
+ min_pos_iof=1e-2,
+ ignore_gt_scale=0.5,
+ foreground_dominate=False,
+ iou_calculator=dict(type='BboxOverlaps2D')):
+ self.pos_scale = pos_scale
+ self.neg_scale = neg_scale
+ self.min_pos_iof = min_pos_iof
+ self.ignore_gt_scale = ignore_gt_scale
+ self.foreground_dominate = foreground_dominate
+ self.iou_calculator = build_iou_calculator(iou_calculator)
+
+ def get_gt_priorities(self, gt_bboxes):
+ """Get gt priorities according to their areas.
+
+ Smaller gts have higher priority.
+
+ Args:
+ gt_bboxes (Tensor): Ground truth boxes, shape (k, 4).
+
+ Returns:
+ Tensor: The priority of gts so that gts with larger priority are \
+ more likely to be assigned. Shape (k, )
+ """
+ gt_areas = bboxes_area(gt_bboxes)
+ # Rank all gt bbox areas. Smaller objects have larger priority
+ _, sort_idx = gt_areas.sort(descending=True)
+ sort_idx = sort_idx.argsort()
+ return sort_idx
+
+ def assign(self, bboxes, gt_bboxes, gt_bboxes_ignore=None, gt_labels=None):
+ """Assign gt to bboxes.
+
+ This method assigns gts to every bbox (proposal/anchor); each bbox \
+ will be assigned -1, 0, or a positive number. -1 means ignored, \
+ 0 means negative sample, and a positive number is the index \
+ (1-based) of the assigned gt.
+
+ Args:
+ bboxes (Tensor): Bounding boxes to be assigned, shape(n, 4).
+ gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4).
+ gt_bboxes_ignore (tensor, optional): Ground truth bboxes that are
+ labelled as `ignored`, e.g., crowd boxes in COCO.
+ gt_labels (tensor, optional): Label of gt_bboxes, shape (num_gts,).
+
+ Returns:
+ :obj:`AssignResult`: The assigned result. Note that \
+ shadowed_labels of shape (N, 2) is also added as an \
+ `assign_result` attribute. `shadowed_labels` is a tensor \
+ composed of N pairs of [anchor_ind, class_label], where N \
+ is the number of anchors that lie in the outer region of a \
+ gt, anchor_ind is the shadowed anchor index and class_label \
+ is the shadowed class label.
+
+ Example:
+ >>> self = CenterRegionAssigner(0.2, 0.2)
+ >>> bboxes = torch.Tensor([[0, 0, 10, 10], [10, 10, 20, 20]])
+ >>> gt_bboxes = torch.Tensor([[0, 0, 10, 10]])
+ >>> assign_result = self.assign(bboxes, gt_bboxes)
+ >>> expected_gt_inds = torch.LongTensor([1, 0])
+ >>> assert torch.all(assign_result.gt_inds == expected_gt_inds)
+ """
+ # There are in total 5 steps in the pixel assignment
+ # 1. Find core (the center region, say inner 0.2)
+ # and shadow (the relatively outer part, say inner 0.2-0.5)
+ # regions of every gt.
+ # 2. Find all prior bboxes that lie in gt_core and gt_shadow regions
+ # 3. Assign prior bboxes in gt_core with a one-hot id of the gt in
+ # the image.
+ # 3.1. For overlapping objects, the prior bboxes in gt_core is
+ # assigned with the object with smallest area
+ # 4. Assign prior bboxes with class label according to its gt id.
+ # 4.1. Assign -1 to prior bboxes lying in shadowed gts
+ # 4.2. Assign positive prior boxes with the corresponding label
+ # 5. Find pixels lying in the shadow of an object and assign them with
+ # background label, but set the loss weight of its corresponding
+ # gt to zero.
+ assert bboxes.size(1) == 4, 'bboxes must have size of 4'
+ # 1. Find core positive and shadow region of every gt
+ gt_core = scale_boxes(gt_bboxes, self.pos_scale)
+ gt_shadow = scale_boxes(gt_bboxes, self.neg_scale)
+
+ # 2. Find prior bboxes that lie in gt_core and gt_shadow regions
+ bbox_centers = (bboxes[:, 2:4] + bboxes[:, 0:2]) / 2
+ # The center points lie within the gt boxes
+ is_bbox_in_gt = is_located_in(bbox_centers, gt_bboxes)
+ # Only calculate bbox and gt_core IoF. This enables small prior bboxes
+ # to match large gts
+ bbox_and_gt_core_overlaps = self.iou_calculator(
+ bboxes, gt_core, mode='iof')
+ # The center point of effective priors should be within the gt box
+ is_bbox_in_gt_core = is_bbox_in_gt & (
+ bbox_and_gt_core_overlaps > self.min_pos_iof) # shape (n, k)
+
+ is_bbox_in_gt_shadow = (
+ self.iou_calculator(bboxes, gt_shadow, mode='iof') >
+ self.min_pos_iof)
+ # Rule out center effective positive pixels
+ is_bbox_in_gt_shadow &= (~is_bbox_in_gt_core)
+
+ num_gts, num_bboxes = gt_bboxes.size(0), bboxes.size(0)
+ if num_gts == 0 or num_bboxes == 0:
+ # If no gts exist, assign all pixels to negative
+ assigned_gt_ids = \
+ is_bbox_in_gt_core.new_zeros((num_bboxes,),
+ dtype=torch.long)
+ pixels_in_gt_shadow = assigned_gt_ids.new_empty((0, 2))
+ else:
+ # Step 3: assign a one-hot gt id to each pixel, and smaller objects
+ # have high priority to assign the pixel.
+ sort_idx = self.get_gt_priorities(gt_bboxes)
+ assigned_gt_ids, pixels_in_gt_shadow = \
+ self.assign_one_hot_gt_indices(is_bbox_in_gt_core,
+ is_bbox_in_gt_shadow,
+ gt_priority=sort_idx)
+
+ if gt_bboxes_ignore is not None and gt_bboxes_ignore.numel() > 0:
+ # No ground truth or boxes, return empty assignment
+ gt_bboxes_ignore = scale_boxes(
+ gt_bboxes_ignore, scale=self.ignore_gt_scale)
+ is_bbox_in_ignored_gts = is_located_in(bbox_centers,
+ gt_bboxes_ignore)
+ is_bbox_in_ignored_gts = is_bbox_in_ignored_gts.any(dim=1)
+ assigned_gt_ids[is_bbox_in_ignored_gts] = -1
+
+ # 4. Assign prior bboxes with class label according to its gt id.
+ assigned_labels = None
+ shadowed_pixel_labels = None
+ if gt_labels is not None:
+ # Default assigned label is the background (-1)
+ assigned_labels = assigned_gt_ids.new_full((num_bboxes, ), -1)
+ pos_inds = torch.nonzero(
+ assigned_gt_ids > 0, as_tuple=False).squeeze()
+ if pos_inds.numel() > 0:
+ assigned_labels[pos_inds] = gt_labels[assigned_gt_ids[pos_inds]
+ - 1]
+ # 5. Find pixels lying in the shadow of an object
+ shadowed_pixel_labels = pixels_in_gt_shadow.clone()
+ if pixels_in_gt_shadow.numel() > 0:
+ pixel_idx, gt_idx =\
+ pixels_in_gt_shadow[:, 0], pixels_in_gt_shadow[:, 1]
+ assert (assigned_gt_ids[pixel_idx] != gt_idx).all(), \
+ 'Some pixels are dually assigned to ignore and gt!'
+ shadowed_pixel_labels[:, 1] = gt_labels[gt_idx - 1]
+ override = (
+ assigned_labels[pixel_idx] == shadowed_pixel_labels[:, 1])
+ if self.foreground_dominate:
+ # When a pixel is both positive and shadowed, set it as pos
+ shadowed_pixel_labels = shadowed_pixel_labels[~override]
+ else:
+ # When a pixel is both pos and shadowed, set it as shadowed
+ assigned_labels[pixel_idx[override]] = -1
+ assigned_gt_ids[pixel_idx[override]] = 0
+
+ assign_result = AssignResult(
+ num_gts, assigned_gt_ids, None, labels=assigned_labels)
+ # Add shadowed_labels as assign_result property. Shape: (num_shadow, 2)
+ assign_result.set_extra_property('shadowed_labels',
+ shadowed_pixel_labels)
+ return assign_result
+
+ def assign_one_hot_gt_indices(self,
+ is_bbox_in_gt_core,
+ is_bbox_in_gt_shadow,
+ gt_priority=None):
+ """Assign only one gt index to each prior box.
+
+ Gts with large gt_priority are more likely to be assigned.
+
+ Args:
+ is_bbox_in_gt_core (Tensor): Bool tensor indicating the bbox center
+ is in the core area of a gt (e.g. 0-0.2).
+ Shape: (num_prior, num_gt).
+ is_bbox_in_gt_shadow (Tensor): Bool tensor indicating the bbox
+ center is in the shadowed area of a gt (e.g. 0.2-0.5).
+ Shape: (num_prior, num_gt).
+ gt_priority (Tensor): Priorities of gts. The gt with a higher
+ priority is more likely to be assigned to the bbox when the bbox
+ match with multiple gts. Shape: (num_gt, ).
+
+ Returns:
+ tuple: Returns (assigned_gt_inds, shadowed_gt_inds).
+
+ - assigned_gt_inds: The assigned gt index of each prior bbox \
+ (i.e. index from 1 to num_gts). Shape: (num_prior, ).
+ - shadowed_gt_inds: shadowed gt indices. It is a tensor of \
+ shape (num_ignore, 2) with first column being the \
+ shadowed prior bbox indices and the second column the \
+ shadowed gt indices (1-based).
+ """
+ num_bboxes, num_gts = is_bbox_in_gt_core.shape
+
+ if gt_priority is None:
+ gt_priority = torch.arange(
+ num_gts, device=is_bbox_in_gt_core.device)
+ assert gt_priority.size(0) == num_gts
+ # The bigger gt_priority, the more preferable to be assigned
+ # The assigned inds are by default 0 (background)
+ assigned_gt_inds = is_bbox_in_gt_core.new_zeros((num_bboxes, ),
+ dtype=torch.long)
+ # Shadowed bboxes are assigned to be background. But the corresponding
+ # label is ignored during loss calculation, which is done through
+ # shadowed_gt_inds
+ shadowed_gt_inds = torch.nonzero(is_bbox_in_gt_shadow, as_tuple=False)
+ if is_bbox_in_gt_core.sum() == 0: # No gt match
+ shadowed_gt_inds[:, 1] += 1 # 1-based. For consistency issue
+ return assigned_gt_inds, shadowed_gt_inds
+
+ # The priority of each prior box and gt pair. If one prior box is
+ # matched to multiple gts, only the pair with the highest priority
+ # is saved.
+ pair_priority = is_bbox_in_gt_core.new_full((num_bboxes, num_gts),
+ -1,
+ dtype=torch.long)
+
+ # Each bbox could match with multiple gts.
+ # The following codes deal with this situation
+ # Matched bboxes (to any gt). Shape: (num_pos_anchor, )
+ inds_of_match = torch.any(is_bbox_in_gt_core, dim=1)
+ # The matched gt index of each positive bbox. Length >= num_pos_anchor
+ # , since one bbox could match multiple gts
+ matched_bbox_gt_inds = torch.nonzero(
+ is_bbox_in_gt_core, as_tuple=False)[:, 1]
+ # Assign priority to each bbox-gt pair.
+ pair_priority[is_bbox_in_gt_core] = gt_priority[matched_bbox_gt_inds]
+ _, argmax_priority = pair_priority[inds_of_match].max(dim=1)
+ assigned_gt_inds[inds_of_match] = argmax_priority + 1 # 1-based
+ # Zero-out the assigned anchor box to filter the shadowed gt indices
+ is_bbox_in_gt_core[inds_of_match, argmax_priority] = 0
+ # Concat the shadowed indices due to overlapping with that out side of
+ # effective scale. shape: (total_num_ignore, 2)
+ shadowed_gt_inds = torch.cat(
+ (shadowed_gt_inds, torch.nonzero(
+ is_bbox_in_gt_core, as_tuple=False)),
+ dim=0)
+ # `is_bbox_in_gt_core` should be changed back to keep arguments intact.
+ is_bbox_in_gt_core[inds_of_match, argmax_priority] = 1
+ # 1-based shadowed gt indices, to be consistent with `assigned_gt_inds`
+ if shadowed_gt_inds.numel() > 0:
+ shadowed_gt_inds[:, 1] += 1
+ return assigned_gt_inds, shadowed_gt_inds
diff --git a/annotator/uniformer/mmdet/core/bbox/assigners/grid_assigner.py b/annotator/uniformer/mmdet/core/bbox/assigners/grid_assigner.py
new file mode 100644
index 0000000000000000000000000000000000000000..7390ea6370639c939d578c6ebf0f9268499161bc
--- /dev/null
+++ b/annotator/uniformer/mmdet/core/bbox/assigners/grid_assigner.py
@@ -0,0 +1,155 @@
+import torch
+
+from ..builder import BBOX_ASSIGNERS
+from ..iou_calculators import build_iou_calculator
+from .assign_result import AssignResult
+from .base_assigner import BaseAssigner
+
+
+@BBOX_ASSIGNERS.register_module()
+class GridAssigner(BaseAssigner):
+ """Assign a corresponding gt bbox or background to each bbox.
+
+ Each proposal will be assigned `-1`, `0`, or a positive integer
+ indicating the ground truth index.
+
+ - -1: don't care
+ - 0: negative sample, no assigned gt
+ - positive integer: positive sample, index (1-based) of assigned gt
+
+ Args:
+ pos_iou_thr (float): IoU threshold for positive bboxes.
+ neg_iou_thr (float or tuple): IoU threshold for negative bboxes.
+ min_pos_iou (float): Minimum iou for a bbox to be considered as a
+ positive bbox. Positive samples can have smaller IoU than
+ pos_iou_thr due to the 4th step (assign max IoU sample to each gt).
+ gt_max_assign_all (bool): Whether to assign all bboxes with the same
+ highest overlap with some gt to that gt.
+ """
+
+ def __init__(self,
+ pos_iou_thr,
+ neg_iou_thr,
+ min_pos_iou=.0,
+ gt_max_assign_all=True,
+ iou_calculator=dict(type='BboxOverlaps2D')):
+ self.pos_iou_thr = pos_iou_thr
+ self.neg_iou_thr = neg_iou_thr
+ self.min_pos_iou = min_pos_iou
+ self.gt_max_assign_all = gt_max_assign_all
+ self.iou_calculator = build_iou_calculator(iou_calculator)
+
+ def assign(self, bboxes, box_responsible_flags, gt_bboxes, gt_labels=None):
+ """Assign gt to bboxes. The process is very much like the max iou
+ assigner, except that positive samples are constrained within the cell
+ that the gt boxes fell in.
+
+ This method assign a gt bbox to every bbox (proposal/anchor), each bbox
+ will be assigned with -1, 0, or a positive number. -1 means don't care,
+ 0 means negative sample, positive number is the index (1-based) of
+ assigned gt.
+ The assignment is done in following steps, the order matters.
+
+ 1. assign every bbox to -1
+ 2. assign proposals whose iou with all gts <= neg_iou_thr to 0
+ 3. for each bbox within a cell, if the iou with its nearest gt >
+ pos_iou_thr and the center of that gt falls inside the cell,
+ assign it to that bbox
+ 4. for each gt bbox, assign its nearest proposals within the cell the
+ gt bbox falls in to itself.
+
+ Args:
+ bboxes (Tensor): Bounding boxes to be assigned, shape(n, 4).
+ box_responsible_flags (Tensor): flag to indicate whether box is
+ responsible for prediction, shape(n, )
+ gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4).
+ gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ).
+
+ Returns:
+ :obj:`AssignResult`: The assign result.
+ """
+ num_gts, num_bboxes = gt_bboxes.size(0), bboxes.size(0)
+
+ # compute iou between all gt and bboxes
+ overlaps = self.iou_calculator(gt_bboxes, bboxes)
+
+ # 1. assign -1 by default
+ assigned_gt_inds = overlaps.new_full((num_bboxes, ),
+ -1,
+ dtype=torch.long)
+
+ if num_gts == 0 or num_bboxes == 0:
+ # No ground truth or boxes, return empty assignment
+ max_overlaps = overlaps.new_zeros((num_bboxes, ))
+ if num_gts == 0:
+ # No truth, assign everything to background
+ assigned_gt_inds[:] = 0
+ if gt_labels is None:
+ assigned_labels = None
+ else:
+ assigned_labels = overlaps.new_full((num_bboxes, ),
+ -1,
+ dtype=torch.long)
+ return AssignResult(
+ num_gts,
+ assigned_gt_inds,
+ max_overlaps,
+ labels=assigned_labels)
+
+ # 2. assign negative: below
+ # for each anchor, which gt best overlaps with it
+ # for each anchor, the max iou of all gts
+ # shape of max_overlaps == argmax_overlaps == num_bboxes
+ max_overlaps, argmax_overlaps = overlaps.max(dim=0)
+
+ if isinstance(self.neg_iou_thr, float):
+ assigned_gt_inds[(max_overlaps >= 0)
+ & (max_overlaps <= self.neg_iou_thr)] = 0
+ elif isinstance(self.neg_iou_thr, (tuple, list)):
+ assert len(self.neg_iou_thr) == 2
+ assigned_gt_inds[(max_overlaps > self.neg_iou_thr[0])
+ & (max_overlaps <= self.neg_iou_thr[1])] = 0
+
+ # 3. assign positive: falls into responsible cell and above
+ # positive IOU threshold, the order matters.
+ # the prior condition of comparison is to filter out all
+ # unrelated anchors, i.e. not box_responsible_flags
+ overlaps[:, ~box_responsible_flags.type(torch.bool)] = -1.
+
+ # calculate max_overlaps again, but this time we only consider IOUs
+ # for anchors responsible for prediction
+ max_overlaps, argmax_overlaps = overlaps.max(dim=0)
+
+ # for each gt, which anchor best overlaps with it
+ # for each gt, the max iou of all proposals
+ # shape of gt_max_overlaps == gt_argmax_overlaps == num_gts
+ gt_max_overlaps, gt_argmax_overlaps = overlaps.max(dim=1)
+
+ pos_inds = (max_overlaps >
+ self.pos_iou_thr) & box_responsible_flags.type(torch.bool)
+ assigned_gt_inds[pos_inds] = argmax_overlaps[pos_inds] + 1
+
+ # 4. assign positive to max overlapped anchors within responsible cell
+ for i in range(num_gts):
+ if gt_max_overlaps[i] > self.min_pos_iou:
+ if self.gt_max_assign_all:
+ max_iou_inds = (overlaps[i, :] == gt_max_overlaps[i]) & \
+ box_responsible_flags.type(torch.bool)
+ assigned_gt_inds[max_iou_inds] = i + 1
+ elif box_responsible_flags[gt_argmax_overlaps[i]]:
+ assigned_gt_inds[gt_argmax_overlaps[i]] = i + 1
+
+ # assign labels of positive anchors
+ if gt_labels is not None:
+ assigned_labels = assigned_gt_inds.new_full((num_bboxes, ), -1)
+ pos_inds = torch.nonzero(
+ assigned_gt_inds > 0, as_tuple=False).squeeze()
+ if pos_inds.numel() > 0:
+ assigned_labels[pos_inds] = gt_labels[
+ assigned_gt_inds[pos_inds] - 1]
+
+ else:
+ assigned_labels = None
+
+ return AssignResult(
+ num_gts, assigned_gt_inds, max_overlaps, labels=assigned_labels)
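+
+ # Usage sketch (illustrative, not part of upstream mmdet): GridAssigner is
+ # typically fed the responsible flags produced by the YOLO-style anchor
+ # generator earlier in this diff, e.g.
+ #     assigner = GridAssigner(pos_iou_thr=0.5, neg_iou_thr=0.5)
+ #     result = assigner.assign(bboxes, responsible_flags, gt_bboxes)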
diff --git a/annotator/uniformer/mmdet/core/bbox/assigners/hungarian_assigner.py b/annotator/uniformer/mmdet/core/bbox/assigners/hungarian_assigner.py
new file mode 100644
index 0000000000000000000000000000000000000000..e10cc14afac4ddfcb9395c1a250ece1fbfe3263c
--- /dev/null
+++ b/annotator/uniformer/mmdet/core/bbox/assigners/hungarian_assigner.py
@@ -0,0 +1,145 @@
+import torch
+
+from ..builder import BBOX_ASSIGNERS
+from ..match_costs import build_match_cost
+from ..transforms import bbox_cxcywh_to_xyxy
+from .assign_result import AssignResult
+from .base_assigner import BaseAssigner
+
+try:
+ from scipy.optimize import linear_sum_assignment
+except ImportError:
+ linear_sum_assignment = None
+
+
+@BBOX_ASSIGNERS.register_module()
+class HungarianAssigner(BaseAssigner):
+ """Computes one-to-one matching between predictions and ground truth.
+
+ This class computes an assignment between the targets and the predictions
+ based on the costs. The costs are weighted sum of three components:
+ classification cost, regression L1 cost and regression iou cost. The
+ targets don't include the no_object, so generally there are more
+ predictions than targets. After the one-to-one matching, the un-matched
+ are treated as backgrounds. Thus each query prediction will be assigned
+ with `0` or a positive integer indicating the ground truth index:
+
+ - 0: negative sample, no assigned gt
+ - positive integer: positive sample, index (1-based) of assigned gt
+
+ Args:
+ cls_cost (dict, optional): Config of the classification match cost.
+ Default: dict(type='ClassificationCost', weight=1.).
+ reg_cost (dict, optional): Config of the regression L1 match cost.
+ Default: dict(type='BBoxL1Cost', weight=1.0).
+ iou_cost (dict, optional): Config of the regression iou match cost,
+ where `iou_mode` can be "iou" (intersection over union), "iof"
+ (intersection over foreground), or "giou" (generalized
+ intersection over union). Default: dict(type='IoUCost',
+ iou_mode='giou', weight=1.0).
+ """
+
+ def __init__(self,
+ cls_cost=dict(type='ClassificationCost', weight=1.),
+ reg_cost=dict(type='BBoxL1Cost', weight=1.0),
+ iou_cost=dict(type='IoUCost', iou_mode='giou', weight=1.0)):
+ self.cls_cost = build_match_cost(cls_cost)
+ self.reg_cost = build_match_cost(reg_cost)
+ self.iou_cost = build_match_cost(iou_cost)
+
+ def assign(self,
+ bbox_pred,
+ cls_pred,
+ gt_bboxes,
+ gt_labels,
+ img_meta,
+ gt_bboxes_ignore=None,
+ eps=1e-7):
+ """Computes one-to-one matching based on the weighted costs.
+
+ This method assign each query prediction to a ground truth or
+ background. The `assigned_gt_inds` with -1 means don't care,
+ 0 means negative sample, and positive number is the index (1-based)
+ of assigned gt.
+ The assignment is done in the following steps, the order matters.
+
+ 1. assign every prediction to -1
+ 2. compute the weighted costs
+ 3. do Hungarian matching on CPU based on the costs
+ 4. assign all to 0 (background) first, then for each matched pair
+ between predictions and gts, treat this prediction as foreground
+ and assign the corresponding gt index (plus 1) to it.
+
+ Args:
+ bbox_pred (Tensor): Predicted boxes with normalized coordinates
+ (cx, cy, w, h), which are all in range [0, 1]. Shape
+ [num_query, 4].
+ cls_pred (Tensor): Predicted classification logits, shape
+ [num_query, num_class].
+ gt_bboxes (Tensor): Ground truth boxes with unnormalized
+ coordinates (x1, y1, x2, y2). Shape [num_gt, 4].
+ gt_labels (Tensor): Label of `gt_bboxes`, shape (num_gt,).
+ img_meta (dict): Meta information for current image.
+ gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are
+ labelled as `ignored`. Default None.
+ eps (int | float, optional): A value added to the denominator for
+ numerical stability. Default 1e-7.
+
+ Returns:
+ :obj:`AssignResult`: The assigned result.
+ """
+ assert gt_bboxes_ignore is None, \
+ 'Only case when gt_bboxes_ignore is None is supported.'
+ num_gts, num_bboxes = gt_bboxes.size(0), bbox_pred.size(0)
+
+ # 1. assign -1 by default
+ assigned_gt_inds = bbox_pred.new_full((num_bboxes, ),
+ -1,
+ dtype=torch.long)
+ assigned_labels = bbox_pred.new_full((num_bboxes, ),
+ -1,
+ dtype=torch.long)
+ if num_gts == 0 or num_bboxes == 0:
+ # No ground truth or boxes, return empty assignment
+ if num_gts == 0:
+ # No ground truth, assign all to background
+ assigned_gt_inds[:] = 0
+ return AssignResult(
+ num_gts, assigned_gt_inds, None, labels=assigned_labels)
+ img_h, img_w, _ = img_meta['img_shape']
+ factor = gt_bboxes.new_tensor([img_w, img_h, img_w,
+ img_h]).unsqueeze(0)
+
+ # 2. compute the weighted costs
+ # classification and bboxcost.
+ cls_cost = self.cls_cost(cls_pred, gt_labels)
+ # regression L1 cost
+ normalize_gt_bboxes = gt_bboxes / factor
+ reg_cost = self.reg_cost(bbox_pred, normalize_gt_bboxes)
+ # regression iou cost; giou is used by default in official DETR.
+ bboxes = bbox_cxcywh_to_xyxy(bbox_pred) * factor
+ iou_cost = self.iou_cost(bboxes, gt_bboxes)
+ # weighted sum of above three costs
+ cost = cls_cost + reg_cost + iou_cost
+
+ # 3. do Hungarian matching on CPU using linear_sum_assignment
+ cost = cost.detach().cpu()
+ if linear_sum_assignment is None:
+ raise ImportError('Please run "pip install scipy" '
+ 'to install scipy first.')
+ matched_row_inds, matched_col_inds = linear_sum_assignment(cost)
+ matched_row_inds = torch.from_numpy(matched_row_inds).to(
+ bbox_pred.device)
+ matched_col_inds = torch.from_numpy(matched_col_inds).to(
+ bbox_pred.device)
+
+ # 4. assign backgrounds and foregrounds
+ # assign all indices to backgrounds first
+ assigned_gt_inds[:] = 0
+ # assign foregrounds based on matching results
+ assigned_gt_inds[matched_row_inds] = matched_col_inds + 1
+ assigned_labels[matched_row_inds] = gt_labels[matched_col_inds]
+ return AssignResult(
+ num_gts, assigned_gt_inds, None, labels=assigned_labels)
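+
+ # Matching sketch (illustrative, not part of upstream mmdet): for a 3-query,
+ # 2-gt cost matrix
+ #     [[0.1, 0.9],
+ #      [0.8, 0.2],
+ #      [0.5, 0.5]]
+ # linear_sum_assignment returns rows (0, 1) matched to columns (0, 1), so
+ # queries 0 and 1 receive gt indices 1 and 2 (1-based) and query 2 stays
+ # background (0).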
diff --git a/annotator/uniformer/mmdet/core/bbox/assigners/max_iou_assigner.py b/annotator/uniformer/mmdet/core/bbox/assigners/max_iou_assigner.py
new file mode 100644
index 0000000000000000000000000000000000000000..5cf4c4b4b450f87dfb99c3d33d8ed83d3e5cfcb3
--- /dev/null
+++ b/annotator/uniformer/mmdet/core/bbox/assigners/max_iou_assigner.py
@@ -0,0 +1,212 @@
+import torch
+
+from ..builder import BBOX_ASSIGNERS
+from ..iou_calculators import build_iou_calculator
+from .assign_result import AssignResult
+from .base_assigner import BaseAssigner
+
+
+@BBOX_ASSIGNERS.register_module()
+class MaxIoUAssigner(BaseAssigner):
+ """Assign a corresponding gt bbox or background to each bbox.
+
+ Each proposal will be assigned `-1` or a semi-positive integer
+ indicating the ground truth index.
+
+ - -1: negative sample, no assigned gt
+ - semi-positive integer: positive sample, index (0-based) of assigned gt
+
+ Args:
+ pos_iou_thr (float): IoU threshold for positive bboxes.
+ neg_iou_thr (float or tuple): IoU threshold for negative bboxes.
+ min_pos_iou (float): Minimum iou for a bbox to be considered as a
+ positive bbox. Positive samples can have smaller IoU than
+ pos_iou_thr due to the 4th step (assign max IoU sample to each gt).
+ gt_max_assign_all (bool): Whether to assign all bboxes with the same
+ highest overlap with some gt to that gt.
+ ignore_iof_thr (float): IoF threshold for ignoring bboxes (if
+ `gt_bboxes_ignore` is specified). Negative values mean not
+ ignoring any bboxes.
+ ignore_wrt_candidates (bool): Whether to compute the iof between
+ `bboxes` and `gt_bboxes_ignore`, or the contrary.
+ match_low_quality (bool): Whether to allow low quality matches. This is
+ usually allowed for RPN and single stage detectors, but not allowed
+ in the second stage. Details are demonstrated in Step 4.
+ gpu_assign_thr (int): The upper bound of the number of GT for GPU
+ assign. When the number of gt is above this threshold, will assign
+ on CPU device. Negative values mean not assign on CPU.
+ """
+
+ def __init__(self,
+ pos_iou_thr,
+ neg_iou_thr,
+ min_pos_iou=.0,
+ gt_max_assign_all=True,
+ ignore_iof_thr=-1,
+ ignore_wrt_candidates=True,
+ match_low_quality=True,
+ gpu_assign_thr=-1,
+ iou_calculator=dict(type='BboxOverlaps2D')):
+ self.pos_iou_thr = pos_iou_thr
+ self.neg_iou_thr = neg_iou_thr
+ self.min_pos_iou = min_pos_iou
+ self.gt_max_assign_all = gt_max_assign_all
+ self.ignore_iof_thr = ignore_iof_thr
+ self.ignore_wrt_candidates = ignore_wrt_candidates
+ self.gpu_assign_thr = gpu_assign_thr
+ self.match_low_quality = match_low_quality
+ self.iou_calculator = build_iou_calculator(iou_calculator)
+
+ def assign(self, bboxes, gt_bboxes, gt_bboxes_ignore=None, gt_labels=None):
+ """Assign gt to bboxes.
+
+ This method assigns a gt bbox to every bbox (proposal/anchor); each bbox
+ will be assigned -1 or a semi-positive number. -1 means negative
+ sample, semi-positive number is the index (0-based) of assigned gt.
+ The assignment is done in the following steps, and the order matters.
+
+ 1. assign every bbox to the background
+ 2. assign proposals whose iou with all gts < neg_iou_thr to 0
+ 3. for each bbox, if the iou with its nearest gt >= pos_iou_thr,
+ assign it to that gt
+ 4. for each gt bbox, assign its nearest proposals (may be more than
+ one) to itself
+
+ Args:
+ bboxes (Tensor): Bounding boxes to be assigned, shape(n, 4).
+ gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4).
+ gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are
+ labelled as `ignored`, e.g., crowd boxes in COCO.
+ gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ).
+
+ Returns:
+ :obj:`AssignResult`: The assign result.
+
+ Example:
+ >>> self = MaxIoUAssigner(0.5, 0.5)
+ >>> bboxes = torch.Tensor([[0, 0, 10, 10], [10, 10, 20, 20]])
+ >>> gt_bboxes = torch.Tensor([[0, 0, 10, 9]])
+ >>> assign_result = self.assign(bboxes, gt_bboxes)
+ >>> expected_gt_inds = torch.LongTensor([1, 0])
+ >>> assert torch.all(assign_result.gt_inds == expected_gt_inds)
+ """
+ assign_on_cpu = True if (self.gpu_assign_thr > 0) and (
+ gt_bboxes.shape[0] > self.gpu_assign_thr) else False
+ # compute overlap and assign gt on CPU when number of GT is large
+ if assign_on_cpu:
+ device = bboxes.device
+ bboxes = bboxes.cpu()
+ gt_bboxes = gt_bboxes.cpu()
+ if gt_bboxes_ignore is not None:
+ gt_bboxes_ignore = gt_bboxes_ignore.cpu()
+ if gt_labels is not None:
+ gt_labels = gt_labels.cpu()
+
+ overlaps = self.iou_calculator(gt_bboxes, bboxes)
+
+ if (self.ignore_iof_thr > 0 and gt_bboxes_ignore is not None
+ and gt_bboxes_ignore.numel() > 0 and bboxes.numel() > 0):
+ if self.ignore_wrt_candidates:
+ ignore_overlaps = self.iou_calculator(
+ bboxes, gt_bboxes_ignore, mode='iof')
+ ignore_max_overlaps, _ = ignore_overlaps.max(dim=1)
+ else:
+ ignore_overlaps = self.iou_calculator(
+ gt_bboxes_ignore, bboxes, mode='iof')
+ ignore_max_overlaps, _ = ignore_overlaps.max(dim=0)
+ overlaps[:, ignore_max_overlaps > self.ignore_iof_thr] = -1
+
+ assign_result = self.assign_wrt_overlaps(overlaps, gt_labels)
+ if assign_on_cpu:
+ assign_result.gt_inds = assign_result.gt_inds.to(device)
+ assign_result.max_overlaps = assign_result.max_overlaps.to(device)
+ if assign_result.labels is not None:
+ assign_result.labels = assign_result.labels.to(device)
+ return assign_result
+
+ def assign_wrt_overlaps(self, overlaps, gt_labels=None):
+ """Assign w.r.t. the overlaps of bboxes with gts.
+
+ Args:
+ overlaps (Tensor): Overlaps between k gt_bboxes and n bboxes,
+ shape(k, n).
+ gt_labels (Tensor, optional): Labels of k gt_bboxes, shape (k, ).
+
+ Returns:
+ :obj:`AssignResult`: The assign result.
+ """
+ num_gts, num_bboxes = overlaps.size(0), overlaps.size(1)
+
+ # 1. assign -1 by default
+ assigned_gt_inds = overlaps.new_full((num_bboxes, ),
+ -1,
+ dtype=torch.long)
+
+ if num_gts == 0 or num_bboxes == 0:
+ # No ground truth or boxes, return empty assignment
+ max_overlaps = overlaps.new_zeros((num_bboxes, ))
+ if num_gts == 0:
+ # No truth, assign everything to background
+ assigned_gt_inds[:] = 0
+ if gt_labels is None:
+ assigned_labels = None
+ else:
+ assigned_labels = overlaps.new_full((num_bboxes, ),
+ -1,
+ dtype=torch.long)
+ return AssignResult(
+ num_gts,
+ assigned_gt_inds,
+ max_overlaps,
+ labels=assigned_labels)
+
+ # for each anchor, which gt best overlaps with it
+ # for each anchor, the max iou of all gts
+ max_overlaps, argmax_overlaps = overlaps.max(dim=0)
+ # for each gt, which anchor best overlaps with it
+ # for each gt, the max iou of all proposals
+ gt_max_overlaps, gt_argmax_overlaps = overlaps.max(dim=1)
+
+ # 2. assign negatives: bboxes whose max IoU is below neg_iou_thr
+ # get index 0
+ if isinstance(self.neg_iou_thr, float):
+ assigned_gt_inds[(max_overlaps >= 0)
+ & (max_overlaps < self.neg_iou_thr)] = 0
+ elif isinstance(self.neg_iou_thr, tuple):
+ assert len(self.neg_iou_thr) == 2
+ assigned_gt_inds[(max_overlaps >= self.neg_iou_thr[0])
+ & (max_overlaps < self.neg_iou_thr[1])] = 0
+
+ # 3. assign positive: above positive IoU threshold
+ pos_inds = max_overlaps >= self.pos_iou_thr
+ assigned_gt_inds[pos_inds] = argmax_overlaps[pos_inds] + 1
+
+ if self.match_low_quality:
+ # Low-quality matching will overwrite the assigned_gt_inds assigned
+ # in Step 3. Thus, the assigned gt might not be the best one for
+ # prediction.
+ # For example, if bbox A has 0.9 and 0.8 iou with GT bbox 1 & 2,
+ # GT bbox 1 will be assigned as the best target for bbox A in step 3.
+ # However, if GT bbox 2's gt_argmax_overlaps = A, bbox A's
+ # assigned_gt_inds will be overwritten to be GT bbox 2.
+ # This might be the reason that it is not used in ROI Heads.
+ for i in range(num_gts):
+ if gt_max_overlaps[i] >= self.min_pos_iou:
+ if self.gt_max_assign_all:
+ max_iou_inds = overlaps[i, :] == gt_max_overlaps[i]
+ assigned_gt_inds[max_iou_inds] = i + 1
+ else:
+ assigned_gt_inds[gt_argmax_overlaps[i]] = i + 1
+
+ if gt_labels is not None:
+ assigned_labels = assigned_gt_inds.new_full((num_bboxes, ), -1)
+ pos_inds = torch.nonzero(
+ assigned_gt_inds > 0, as_tuple=False).squeeze()
+ if pos_inds.numel() > 0:
+ assigned_labels[pos_inds] = gt_labels[
+ assigned_gt_inds[pos_inds] - 1]
+ else:
+ assigned_labels = None
+
+ return AssignResult(
+ num_gts, assigned_gt_inds, max_overlaps, labels=assigned_labels)
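To make the four steps above concrete, here is a minimal pure-PyTorch sketch that re-enacts assign_wrt_overlaps on a toy 2x3 overlaps matrix; the thresholds (pos_iou_thr=0.5, neg_iou_thr=0.4, min_pos_iou=0.3) are illustrative, and step 4 uses the single-anchor branch rather than gt_max_assign_all:

import torch

# IoU of each gt (rows) with each bbox (columns)
overlaps = torch.tensor([[0.9, 0.2, 0.25],
                         [0.1, 0.45, 0.35]])
pos_iou_thr, neg_iou_thr, min_pos_iou = 0.5, 0.4, 0.3

assigned = overlaps.new_full((3,), -1, dtype=torch.long)           # 1. default: ignore
max_overlaps, argmax = overlaps.max(dim=0)                         # best gt per bbox
assigned[(max_overlaps >= 0) & (max_overlaps < neg_iou_thr)] = 0   # 2. negatives
pos = max_overlaps >= pos_iou_thr
assigned[pos] = argmax[pos] + 1                                    # 3. positives (1-based)
gt_max, gt_argmax = overlaps.max(dim=1)                            # best bbox per gt
for i in range(overlaps.size(0)):                                  # 4. low-quality matches
    if gt_max[i] >= min_pos_iou:
        assigned[gt_argmax[i]] = i + 1

print(assigned)  # tensor([1, 2, 0]): bbox0 -> gt 0, bbox1 -> gt 1, bbox2 negative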
diff --git a/annotator/uniformer/mmdet/core/bbox/assigners/point_assigner.py b/annotator/uniformer/mmdet/core/bbox/assigners/point_assigner.py
new file mode 100644
index 0000000000000000000000000000000000000000..fb8f5e4edc63f4851e2067034c5e67a3558f31bc
--- /dev/null
+++ b/annotator/uniformer/mmdet/core/bbox/assigners/point_assigner.py
@@ -0,0 +1,133 @@
+import torch
+
+from ..builder import BBOX_ASSIGNERS
+from .assign_result import AssignResult
+from .base_assigner import BaseAssigner
+
+
+@BBOX_ASSIGNERS.register_module()
+class PointAssigner(BaseAssigner):
+ """Assign a corresponding gt bbox or background to each point.
+
+ Each proposal will be assigned `0` or a positive integer
+ indicating the ground truth index.
+
+ - 0: negative sample, no assigned gt
+ - positive integer: positive sample, index (1-based) of assigned gt
+ """
+
+ def __init__(self, scale=4, pos_num=3):
+ self.scale = scale
+ self.pos_num = pos_num
+
+ def assign(self, points, gt_bboxes, gt_bboxes_ignore=None, gt_labels=None):
+ """Assign gt to points.
+
+ This method assigns a gt bbox to every point; each point will be
+ assigned 0 (background) or a positive number, where a positive
+ number is the index (1-based) of the assigned gt.
+ The assignment is done in the following steps, and the order matters.
+
+ 1. assign every point to the background (0)
+ 2. A point is assigned to some gt bbox if
+ (i) the point is within the k closest points to the gt bbox
+ (ii) the distance between this point and the gt is smaller than
+ its distance to any other candidate gt bbox
+
+ Args:
+ points (Tensor): Points to be assigned, shape (n, 3), where the last
+ dimension stands for (x, y, stride).
+ gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4).
+ gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are
+ labelled as `ignored`, e.g., crowd boxes in COCO.
+ NOTE: currently unused.
+ gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ).
+
+ Returns:
+ :obj:`AssignResult`: The assign result.
+ """
+ num_points = points.shape[0]
+ num_gts = gt_bboxes.shape[0]
+
+ if num_gts == 0 or num_points == 0:
+ # If no truth assign everything to the background
+ assigned_gt_inds = points.new_full((num_points, ),
+ 0,
+ dtype=torch.long)
+ if gt_labels is None:
+ assigned_labels = None
+ else:
+ assigned_labels = points.new_full((num_points, ),
+ -1,
+ dtype=torch.long)
+ return AssignResult(
+ num_gts, assigned_gt_inds, None, labels=assigned_labels)
+
+ points_xy = points[:, :2]
+ points_stride = points[:, 2]
+ points_lvl = torch.log2(
+ points_stride).int() # [3...,4...,5...,6...,7...]
+ lvl_min, lvl_max = points_lvl.min(), points_lvl.max()
+
+ # assign gt box
+ gt_bboxes_xy = (gt_bboxes[:, :2] + gt_bboxes[:, 2:]) / 2
+ gt_bboxes_wh = (gt_bboxes[:, 2:] - gt_bboxes[:, :2]).clamp(min=1e-6)
+ scale = self.scale
+ gt_bboxes_lvl = ((torch.log2(gt_bboxes_wh[:, 0] / scale) +
+ torch.log2(gt_bboxes_wh[:, 1] / scale)) / 2).int()
+ gt_bboxes_lvl = torch.clamp(gt_bboxes_lvl, min=lvl_min, max=lvl_max)
+
+ # stores the assigned gt index of each point
+ assigned_gt_inds = points.new_zeros((num_points, ), dtype=torch.long)
+ # stores the assigned gt dist (to this point) of each point
+ assigned_gt_dist = points.new_full((num_points, ), float('inf'))
+ points_range = torch.arange(points.shape[0])
+
+ for idx in range(num_gts):
+ gt_lvl = gt_bboxes_lvl[idx]
+ # get the index of points in this level
+ lvl_idx = gt_lvl == points_lvl
+ points_index = points_range[lvl_idx]
+ # get the points in this level
+ lvl_points = points_xy[lvl_idx, :]
+ # get the center point of gt
+ gt_point = gt_bboxes_xy[[idx], :]
+ # get width and height of gt
+ gt_wh = gt_bboxes_wh[[idx], :]
+ # compute the distance between gt center and
+ # all points in this level
+ points_gt_dist = ((lvl_points - gt_point) / gt_wh).norm(dim=1)
+ # find the nearest k points to gt center in this level
+ min_dist, min_dist_index = torch.topk(
+ points_gt_dist, self.pos_num, largest=False)
+ # the index of nearest k points to gt center in this level
+ min_dist_points_index = points_index[min_dist_index]
+ # less_than_recorded_index stores the indices of min_dist that are
+ # less than the current assigned_gt_dist, where assigned_gt_dist
+ # stores the dist from the previously assigned gt (if any) to each
+ # point.
+ less_than_recorded_index = min_dist < assigned_gt_dist[
+ min_dist_points_index]
+ # min_dist_points_index stores the indices of points that satisfy:
+ # (1) the point is among the k nearest to the current gt center in
+ # this level.
+ # (2) the point is closer to the current gt center than to any
+ # previously assigned gt center.
+ min_dist_points_index = min_dist_points_index[
+ less_than_recorded_index]
+ # assign the result
+ assigned_gt_inds[min_dist_points_index] = idx + 1
+ assigned_gt_dist[min_dist_points_index] = min_dist[
+ less_than_recorded_index]
+
+ if gt_labels is not None:
+ assigned_labels = assigned_gt_inds.new_full((num_points, ), -1)
+ pos_inds = torch.nonzero(
+ assigned_gt_inds > 0, as_tuple=False).squeeze()
+ if pos_inds.numel() > 0:
+ assigned_labels[pos_inds] = gt_labels[
+ assigned_gt_inds[pos_inds] - 1]
+ else:
+ assigned_labels = None
+
+ return AssignResult(
+ num_gts, assigned_gt_inds, None, labels=assigned_labels)
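A small illustrative sketch of the level matching performed above: points carry their stride in the third column, and a gt box is mapped to a level from its size (scale=4, the constructor default); only points on the gt's level take part in the k-nearest matching. The coordinates below are arbitrary toy values:

import torch

points = torch.tensor([[16., 16., 8.],     # (x, y, stride): level log2(8) = 3
                       [32., 32., 16.]])   # level log2(16) = 4
gt_bboxes = torch.tensor([[0., 0., 64., 64.]])
scale = 4  # default PointAssigner scale

points_lvl = torch.log2(points[:, 2]).int()                 # levels 3 and 4
gt_wh = (gt_bboxes[:, 2:] - gt_bboxes[:, :2]).clamp(min=1e-6)
gt_lvl = ((torch.log2(gt_wh[:, 0] / scale) +
           torch.log2(gt_wh[:, 1] / scale)) / 2).int()      # log2(64 / 4) = 4
print(points_lvl, gt_lvl)  # only the stride-16 point competes for this gt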
diff --git a/annotator/uniformer/mmdet/core/bbox/assigners/region_assigner.py b/annotator/uniformer/mmdet/core/bbox/assigners/region_assigner.py
new file mode 100644
index 0000000000000000000000000000000000000000..2e8464b97c8d8f44488d7bb781ca2e733a258e55
--- /dev/null
+++ b/annotator/uniformer/mmdet/core/bbox/assigners/region_assigner.py
@@ -0,0 +1,221 @@
+import torch
+
+from mmdet.core import anchor_inside_flags
+from ..builder import BBOX_ASSIGNERS
+from .assign_result import AssignResult
+from .base_assigner import BaseAssigner
+
+
+def calc_region(bbox, ratio, stride, featmap_size=None):
+ """Calculate region of the box defined by the ratio, the ratio is from the
+ center of the box to every edge."""
+ # project bbox on the feature
+ f_bbox = bbox / stride
+ x1 = torch.round((1 - ratio) * f_bbox[0] + ratio * f_bbox[2])
+ y1 = torch.round((1 - ratio) * f_bbox[1] + ratio * f_bbox[3])
+ x2 = torch.round(ratio * f_bbox[0] + (1 - ratio) * f_bbox[2])
+ y2 = torch.round(ratio * f_bbox[1] + (1 - ratio) * f_bbox[3])
+ if featmap_size is not None:
+ x1 = x1.clamp(min=0, max=featmap_size[1])
+ y1 = y1.clamp(min=0, max=featmap_size[0])
+ x2 = x2.clamp(min=0, max=featmap_size[1])
+ y2 = y2.clamp(min=0, max=featmap_size[0])
+ return (x1, y1, x2, y2)
+
+
+def anchor_ctr_inside_region_flags(anchors, stride, region):
+ """Get the flag indicate whether anchor centers are inside regions."""
+ x1, y1, x2, y2 = region
+ f_anchors = anchors / stride
+ x = (f_anchors[:, 0] + f_anchors[:, 2]) * 0.5
+ y = (f_anchors[:, 1] + f_anchors[:, 3]) * 0.5
+ flags = (x >= x1) & (x <= x2) & (y >= y1) & (y <= y2)
+ return flags
+
+
+@BBOX_ASSIGNERS.register_module()
+class RegionAssigner(BaseAssigner):
+ """Assign a corresponding gt bbox or background to each bbox.
+
+ Each proposal will be assigned `-1`, `0`, or a positive integer
+ indicating the ground truth index.
+
+ - -1: don't care
+ - 0: negative sample, no assigned gt
+ - positive integer: positive sample, index (1-based) of assigned gt
+
+ Args:
+ center_ratio (float): Ratio of the central region of the bbox used
+ to define positive samples. Defaults to 0.2.
+ ignore_ratio (float): Ratio of the region used to define ignored
+ samples. Defaults to 0.5.
+ """
+
+ def __init__(self, center_ratio=0.2, ignore_ratio=0.5):
+ self.center_ratio = center_ratio
+ self.ignore_ratio = ignore_ratio
+
+ def assign(self,
+ mlvl_anchors,
+ mlvl_valid_flags,
+ gt_bboxes,
+ img_meta,
+ featmap_sizes,
+ anchor_scale,
+ anchor_strides,
+ gt_bboxes_ignore=None,
+ gt_labels=None,
+ allowed_border=0):
+ """Assign gt to anchors.
+
+ This method assigns a gt bbox to every bbox (proposal/anchor); each
+ bbox will be assigned -1, 0, or a positive number. -1 means don't
+ care, 0 means negative sample, and a positive number is the index
+ (1-based) of the assigned gt.
+ The assignment is done in the following steps, and the order matters.
+
+ 1. Assign every anchor to 0 (negative)
+ For each gt_bboxes:
+ 2. Compute ignore flags based on ignore_region then
+ assign -1 to anchors w.r.t. ignore flags
+ 3. Compute pos flags based on center_region then
+ assign gt_bboxes to anchors w.r.t. pos flags
+ 4. Compute ignore flags based on adjacent anchor lvl then
+ assign -1 to anchors w.r.t. ignore flags
+ 5. Assign anchor outside of image to -1
+
+ Args:
+ mlvl_anchors (list[Tensor]): Multi level anchors.
+ mlvl_valid_flags (list[Tensor]): Multi level valid flags.
+ gt_bboxes (Tensor): Ground truth bboxes of the image, shape (k, 4).
+ img_meta (dict): Meta info of image.
+ featmap_sizes (list[Tensor]): Feature map size of each level.
+ anchor_scale (int): Scale of the anchor.
+ anchor_strides (list[int]): Stride of the anchor.
+ gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are
+ labelled as `ignored`, e.g., crowd boxes in COCO.
+ gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ).
+ allowed_border (int, optional): The border to allow the valid
+ anchor. Defaults to 0.
+
+ Returns:
+ :obj:`AssignResult`: The assign result.
+ """
+ if gt_bboxes_ignore is not None:
+ raise NotImplementedError
+
+ num_gts = gt_bboxes.shape[0]
+ num_bboxes = sum(x.shape[0] for x in mlvl_anchors)
+
+ if num_gts == 0 or num_bboxes == 0:
+ # No ground truth or boxes, return empty assignment
+ max_overlaps = gt_bboxes.new_zeros((num_bboxes, ))
+ assigned_gt_inds = gt_bboxes.new_zeros((num_bboxes, ),
+ dtype=torch.long)
+ if gt_labels is None:
+ assigned_labels = None
+ else:
+ assigned_labels = gt_bboxes.new_full((num_bboxes, ),
+ -1,
+ dtype=torch.long)
+ return AssignResult(
+ num_gts,
+ assigned_gt_inds,
+ max_overlaps,
+ labels=assigned_labels)
+
+ num_lvls = len(mlvl_anchors)
+ r1 = (1 - self.center_ratio) / 2
+ r2 = (1 - self.ignore_ratio) / 2
+
+ scale = torch.sqrt((gt_bboxes[:, 2] - gt_bboxes[:, 0]) *
+ (gt_bboxes[:, 3] - gt_bboxes[:, 1]))
+ min_anchor_size = scale.new_full(
+ (1, ), float(anchor_scale * anchor_strides[0]))
+ target_lvls = torch.floor(
+ torch.log2(scale) - torch.log2(min_anchor_size) + 0.5)
+ target_lvls = target_lvls.clamp(min=0, max=num_lvls - 1).long()
+
+ # 1. assign 0 (negative) by default
+ mlvl_assigned_gt_inds = []
+ mlvl_ignore_flags = []
+ for lvl in range(num_lvls):
+ h, w = featmap_sizes[lvl]
+ assert h * w == mlvl_anchors[lvl].shape[0]
+ assigned_gt_inds = gt_bboxes.new_full((h * w, ),
+ 0,
+ dtype=torch.long)
+ ignore_flags = torch.zeros_like(assigned_gt_inds)
+ mlvl_assigned_gt_inds.append(assigned_gt_inds)
+ mlvl_ignore_flags.append(ignore_flags)
+
+ for gt_id in range(num_gts):
+ lvl = target_lvls[gt_id].item()
+ featmap_size = featmap_sizes[lvl]
+ stride = anchor_strides[lvl]
+ anchors = mlvl_anchors[lvl]
+ gt_bbox = gt_bboxes[gt_id, :4]
+
+ # Compute regions
+ ignore_region = calc_region(gt_bbox, r2, stride, featmap_size)
+ ctr_region = calc_region(gt_bbox, r1, stride, featmap_size)
+
+ # 2. Assign -1 to ignore flags
+ ignore_flags = anchor_ctr_inside_region_flags(
+ anchors, stride, ignore_region)
+ mlvl_assigned_gt_inds[lvl][ignore_flags] = -1
+
+ # 3. Assign gt_bboxes to pos flags
+ pos_flags = anchor_ctr_inside_region_flags(anchors, stride,
+ ctr_region)
+ mlvl_assigned_gt_inds[lvl][pos_flags] = gt_id + 1
+
+ # 4. Assign -1 to ignore adjacent lvl
+ if lvl > 0:
+ d_lvl = lvl - 1
+ d_anchors = mlvl_anchors[d_lvl]
+ d_featmap_size = featmap_sizes[d_lvl]
+ d_stride = anchor_strides[d_lvl]
+ d_ignore_region = calc_region(gt_bbox, r2, d_stride,
+ d_featmap_size)
+ ignore_flags = anchor_ctr_inside_region_flags(
+ d_anchors, d_stride, d_ignore_region)
+ mlvl_ignore_flags[d_lvl][ignore_flags] = 1
+ if lvl < num_lvls - 1:
+ u_lvl = lvl + 1
+ u_anchors = mlvl_anchors[u_lvl]
+ u_featmap_size = featmap_sizes[u_lvl]
+ u_stride = anchor_strides[u_lvl]
+ u_ignore_region = calc_region(gt_bbox, r2, u_stride,
+ u_featmap_size)
+ ignore_flags = anchor_ctr_inside_region_flags(
+ u_anchors, u_stride, u_ignore_region)
+ mlvl_ignore_flags[u_lvl][ignore_flags] = 1
+
+ # 4. (cont.) Assign -1 to ignore adjacent lvl
+ for lvl in range(num_lvls):
+ ignore_flags = mlvl_ignore_flags[lvl]
+ mlvl_assigned_gt_inds[lvl][ignore_flags] = -1
+
+ # 5. Assign -1 to anchor outside of image
+ flat_assigned_gt_inds = torch.cat(mlvl_assigned_gt_inds)
+ flat_anchors = torch.cat(mlvl_anchors)
+ flat_valid_flags = torch.cat(mlvl_valid_flags)
+ assert (flat_assigned_gt_inds.shape[0] == flat_anchors.shape[0] ==
+ flat_valid_flags.shape[0])
+ inside_flags = anchor_inside_flags(flat_anchors, flat_valid_flags,
+ img_meta['img_shape'],
+ allowed_border)
+ outside_flags = ~inside_flags
+ flat_assigned_gt_inds[outside_flags] = -1
+
+ if gt_labels is not None:
+ assigned_labels = torch.zeros_like(flat_assigned_gt_inds)
+ pos_flags = flat_assigned_gt_inds > 0
+ assigned_labels[pos_flags] = gt_labels[
+ flat_assigned_gt_inds[pos_flags] - 1]
+ else:
+ assigned_labels = None
+
+ return AssignResult(
+ num_gts, flat_assigned_gt_inds, None, labels=assigned_labels)
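For intuition, the sketch below reproduces the two regions derived from one gt box at stride 8 with the default center_ratio=0.2 and ignore_ratio=0.5; anchors whose centers fall in the inner region become positives, while the surrounding ring is ignored. The box coordinates are toy values:

import torch

gt_bbox = torch.tensor([32., 32., 96., 96.])
stride, center_ratio, ignore_ratio = 8, 0.2, 0.5
r1 = (1 - center_ratio) / 2   # 0.4  -> positive (center) region
r2 = (1 - ignore_ratio) / 2   # 0.25 -> ignore region

f = gt_bbox / stride          # box projected on the feature map: [4., 4., 12., 12.]

def region(ratio):
    x1 = torch.round((1 - ratio) * f[0] + ratio * f[2])
    y1 = torch.round((1 - ratio) * f[1] + ratio * f[3])
    x2 = torch.round(ratio * f[0] + (1 - ratio) * f[2])
    y2 = torch.round(ratio * f[1] + (1 - ratio) * f[3])
    return x1, y1, x2, y2

print(region(r1))  # ~ (7., 7., 9., 9.)   anchors centered here become positives
print(region(r2))  # ~ (6., 6., 10., 10.) the surrounding ring is ignored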
diff --git a/annotator/uniformer/mmdet/core/bbox/builder.py b/annotator/uniformer/mmdet/core/bbox/builder.py
new file mode 100644
index 0000000000000000000000000000000000000000..682683b62ae55396f24e9f9eea0f8193e2e88de6
--- /dev/null
+++ b/annotator/uniformer/mmdet/core/bbox/builder.py
@@ -0,0 +1,20 @@
+from mmcv.utils import Registry, build_from_cfg
+
+BBOX_ASSIGNERS = Registry('bbox_assigner')
+BBOX_SAMPLERS = Registry('bbox_sampler')
+BBOX_CODERS = Registry('bbox_coder')
+
+
+def build_assigner(cfg, **default_args):
+ """Builder of box assigner."""
+ return build_from_cfg(cfg, BBOX_ASSIGNERS, default_args)
+
+
+def build_sampler(cfg, **default_args):
+ """Builder of box sampler."""
+ return build_from_cfg(cfg, BBOX_SAMPLERS, default_args)
+
+
+def build_bbox_coder(cfg, **default_args):
+ """Builder of box coder."""
+ return build_from_cfg(cfg, BBOX_CODERS, default_args)
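A hedged usage sketch of these builders: the config dicts name classes registered elsewhere in this patch (MaxIoUAssigner, DeltaXYWHBBoxCoder); the import path and the target_stds values are illustrative and depend on how the vendored package resolves and how the model is configured:

# Assumed import path; this vendored tree imports itself as `mmdet.*`,
# so adjust to however the package is exposed on sys.path.
from mmdet.core.bbox.builder import build_assigner, build_bbox_coder

# Registration happens when the modules defining the classes are imported
# (normally via the package __init__ files).
assigner = build_assigner(
    dict(type='MaxIoUAssigner', pos_iou_thr=0.7, neg_iou_thr=0.3))
bbox_coder = build_bbox_coder(
    dict(type='DeltaXYWHBBoxCoder',
         target_means=(0., 0., 0., 0.),
         target_stds=(0.1, 0.1, 0.2, 0.2)))  # stds are an illustrative choice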
diff --git a/annotator/uniformer/mmdet/core/bbox/coder/__init__.py b/annotator/uniformer/mmdet/core/bbox/coder/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..ae455ba8fc0e0727e2d581cdc8f20fceededf99a
--- /dev/null
+++ b/annotator/uniformer/mmdet/core/bbox/coder/__init__.py
@@ -0,0 +1,13 @@
+from .base_bbox_coder import BaseBBoxCoder
+from .bucketing_bbox_coder import BucketingBBoxCoder
+from .delta_xywh_bbox_coder import DeltaXYWHBBoxCoder
+from .legacy_delta_xywh_bbox_coder import LegacyDeltaXYWHBBoxCoder
+from .pseudo_bbox_coder import PseudoBBoxCoder
+from .tblr_bbox_coder import TBLRBBoxCoder
+from .yolo_bbox_coder import YOLOBBoxCoder
+
+__all__ = [
+ 'BaseBBoxCoder', 'PseudoBBoxCoder', 'DeltaXYWHBBoxCoder',
+ 'LegacyDeltaXYWHBBoxCoder', 'TBLRBBoxCoder', 'YOLOBBoxCoder',
+ 'BucketingBBoxCoder'
+]
diff --git a/annotator/uniformer/mmdet/core/bbox/coder/base_bbox_coder.py b/annotator/uniformer/mmdet/core/bbox/coder/base_bbox_coder.py
new file mode 100644
index 0000000000000000000000000000000000000000..cf0b34c7cc2fe561718b0c884990beb40a993643
--- /dev/null
+++ b/annotator/uniformer/mmdet/core/bbox/coder/base_bbox_coder.py
@@ -0,0 +1,17 @@
+from abc import ABCMeta, abstractmethod
+
+
+class BaseBBoxCoder(metaclass=ABCMeta):
+ """Base bounding box coder."""
+
+ def __init__(self, **kwargs):
+ pass
+
+ @abstractmethod
+ def encode(self, bboxes, gt_bboxes):
+ """Encode deltas between bboxes and ground truth boxes."""
+
+ @abstractmethod
+ def decode(self, bboxes, bboxes_pred):
+ """Decode the predicted bboxes according to prediction and base
+ boxes."""
diff --git a/annotator/uniformer/mmdet/core/bbox/coder/bucketing_bbox_coder.py b/annotator/uniformer/mmdet/core/bbox/coder/bucketing_bbox_coder.py
new file mode 100644
index 0000000000000000000000000000000000000000..92d24b4519edece7a4af8f5cfa9af025b25f2dad
--- /dev/null
+++ b/annotator/uniformer/mmdet/core/bbox/coder/bucketing_bbox_coder.py
@@ -0,0 +1,350 @@
+import mmcv
+import numpy as np
+import torch
+import torch.nn.functional as F
+
+from ..builder import BBOX_CODERS
+from ..transforms import bbox_rescale
+from .base_bbox_coder import BaseBBoxCoder
+
+
+@BBOX_CODERS.register_module()
+class BucketingBBoxCoder(BaseBBoxCoder):
+ """Bucketing BBox Coder for Side-Aware Boundary Localization (SABL).
+
+ Boundary Localization with Bucketing and Bucketing Guided Rescoring
+ are implemented here.
+
+ Please refer to https://arxiv.org/abs/1912.04260 for more details.
+
+ Args:
+ num_buckets (int): Number of buckets.
+ scale_factor (int): Scale factor of proposals to generate buckets.
+ offset_topk (int): Topk buckets are used to generate
+ bucket fine regression targets. Defaults to 2.
+ offset_upperbound (float): Offset upper bound used to generate
+ bucket fine regression targets, to avoid overly large offset
+ displacements. Defaults to 1.0.
+ cls_ignore_neighbor (bool): Whether to ignore the second-nearest
+ bucket. Defaults to True.
+ clip_border (bool, optional): Whether clip the objects outside the
+ border of the image. Defaults to True.
+ """
+
+ def __init__(self,
+ num_buckets,
+ scale_factor,
+ offset_topk=2,
+ offset_upperbound=1.0,
+ cls_ignore_neighbor=True,
+ clip_border=True):
+ super(BucketingBBoxCoder, self).__init__()
+ self.num_buckets = num_buckets
+ self.scale_factor = scale_factor
+ self.offset_topk = offset_topk
+ self.offset_upperbound = offset_upperbound
+ self.cls_ignore_neighbor = cls_ignore_neighbor
+ self.clip_border = clip_border
+
+ def encode(self, bboxes, gt_bboxes):
+ """Get bucketing estimation and fine regression targets during
+ training.
+
+ Args:
+ bboxes (torch.Tensor): source boxes, e.g., object proposals.
+ gt_bboxes (torch.Tensor): target of the transformation, e.g.,
+ ground truth boxes.
+
+ Returns:
+ encoded_bboxes (tuple[Tensor]): Bucketing estimation and fine
+ regression targets and weights.
+ """
+
+ assert bboxes.size(0) == gt_bboxes.size(0)
+ assert bboxes.size(-1) == gt_bboxes.size(-1) == 4
+ encoded_bboxes = bbox2bucket(bboxes, gt_bboxes, self.num_buckets,
+ self.scale_factor, self.offset_topk,
+ self.offset_upperbound,
+ self.cls_ignore_neighbor)
+ return encoded_bboxes
+
+ def decode(self, bboxes, pred_bboxes, max_shape=None):
+ """Apply transformation `pred_bboxes` to `boxes`.
+ Args:
+ bboxes (torch.Tensor): Basic boxes.
+ pred_bboxes (torch.Tensor): Predictions for bucketing estimation
+ and fine regression.
+ max_shape (tuple[int], optional): Maximum shape of boxes.
+ Defaults to None.
+
+ Returns:
+ torch.Tensor: Decoded boxes.
+ """
+ assert len(pred_bboxes) == 2
+ cls_preds, offset_preds = pred_bboxes
+ assert cls_preds.size(0) == bboxes.size(0) and offset_preds.size(
+ 0) == bboxes.size(0)
+ decoded_bboxes = bucket2bbox(bboxes, cls_preds, offset_preds,
+ self.num_buckets, self.scale_factor,
+ max_shape, self.clip_border)
+
+ return decoded_bboxes
+
+
+@mmcv.jit(coderize=True)
+def generat_buckets(proposals, num_buckets, scale_factor=1.0):
+ """Generate buckets w.r.t bucket number and scale factor of proposals.
+
+ Args:
+ proposals (Tensor): Shape (n, 4)
+ num_buckets (int): Number of buckets.
+ scale_factor (float): Scale factor to rescale proposals.
+
+ Returns:
+ tuple[Tensor]: (bucket_w, bucket_h, l_buckets, r_buckets,
+ t_buckets, d_buckets)
+
+ - bucket_w: Width of buckets on x-axis. Shape (n, ).
+ - bucket_h: Height of buckets on y-axis. Shape (n, ).
+ - l_buckets: Left buckets. Shape (n, ceil(side_num/2)).
+ - r_buckets: Right buckets. Shape (n, ceil(side_num/2)).
+ - t_buckets: Top buckets. Shape (n, ceil(side_num/2)).
+ - d_buckets: Down buckets. Shape (n, ceil(side_num/2)).
+ """
+ proposals = bbox_rescale(proposals, scale_factor)
+
+ # number of buckets in each side
+ side_num = int(np.ceil(num_buckets / 2.0))
+ pw = proposals[..., 2] - proposals[..., 0]
+ ph = proposals[..., 3] - proposals[..., 1]
+ px1 = proposals[..., 0]
+ py1 = proposals[..., 1]
+ px2 = proposals[..., 2]
+ py2 = proposals[..., 3]
+
+ bucket_w = pw / num_buckets
+ bucket_h = ph / num_buckets
+
+ # left buckets
+ l_buckets = px1[:, None] + (0.5 + torch.arange(
+ 0, side_num).to(proposals).float())[None, :] * bucket_w[:, None]
+ # right buckets
+ r_buckets = px2[:, None] - (0.5 + torch.arange(
+ 0, side_num).to(proposals).float())[None, :] * bucket_w[:, None]
+ # top buckets
+ t_buckets = py1[:, None] + (0.5 + torch.arange(
+ 0, side_num).to(proposals).float())[None, :] * bucket_h[:, None]
+ # down buckets
+ d_buckets = py2[:, None] - (0.5 + torch.arange(
+ 0, side_num).to(proposals).float())[None, :] * bucket_h[:, None]
+ return bucket_w, bucket_h, l_buckets, r_buckets, t_buckets, d_buckets
+
+
+@mmcv.jit(coderize=True)
+def bbox2bucket(proposals,
+ gt,
+ num_buckets,
+ scale_factor,
+ offset_topk=2,
+ offset_upperbound=1.0,
+ cls_ignore_neighbor=True):
+ """Generate buckets estimation and fine regression targets.
+
+ Args:
+ proposals (Tensor): Shape (n, 4)
+ gt (Tensor): Shape (n, 4)
+ num_buckets (int): Number of buckets.
+ scale_factor (float): Scale factor to rescale proposals.
+ offset_topk (int): Topk buckets are used to generate
+ bucket fine regression targets. Defaults to 2.
+ offset_upperbound (float): Offset allowance used to generate
+ bucket fine regression targets, to avoid overly large offset
+ displacements. Defaults to 1.0.
+ cls_ignore_neighbor (bool): Whether to ignore the second-nearest
+ bucket. Defaults to True.
+
+ Returns:
+ tuple[Tensor]: (offsets, offsets_weights, bucket_labels, cls_weights).
+
+ - offsets: Fine regression targets. \
+ Shape (n, num_buckets*2).
+ - offsets_weights: Fine regression weights. \
+ Shape (n, num_buckets*2).
+ - bucket_labels: Bucketing estimation labels. \
+ Shape (n, num_buckets*2).
+ - cls_weights: Bucketing estimation weights. \
+ Shape (n, num_buckets*2).
+ """
+ assert proposals.size() == gt.size()
+
+ # generate buckets
+ proposals = proposals.float()
+ gt = gt.float()
+ (bucket_w, bucket_h, l_buckets, r_buckets, t_buckets,
+ d_buckets) = generat_buckets(proposals, num_buckets, scale_factor)
+
+ gx1 = gt[..., 0]
+ gy1 = gt[..., 1]
+ gx2 = gt[..., 2]
+ gy2 = gt[..., 3]
+
+ # generate offset targets and weights
+ # offsets from buckets to gts
+ l_offsets = (l_buckets - gx1[:, None]) / bucket_w[:, None]
+ r_offsets = (r_buckets - gx2[:, None]) / bucket_w[:, None]
+ t_offsets = (t_buckets - gy1[:, None]) / bucket_h[:, None]
+ d_offsets = (d_buckets - gy2[:, None]) / bucket_h[:, None]
+
+ # select top-k nearest buckets
+ l_topk, l_label = l_offsets.abs().topk(
+ offset_topk, dim=1, largest=False, sorted=True)
+ r_topk, r_label = r_offsets.abs().topk(
+ offset_topk, dim=1, largest=False, sorted=True)
+ t_topk, t_label = t_offsets.abs().topk(
+ offset_topk, dim=1, largest=False, sorted=True)
+ d_topk, d_label = d_offsets.abs().topk(
+ offset_topk, dim=1, largest=False, sorted=True)
+
+ offset_l_weights = l_offsets.new_zeros(l_offsets.size())
+ offset_r_weights = r_offsets.new_zeros(r_offsets.size())
+ offset_t_weights = t_offsets.new_zeros(t_offsets.size())
+ offset_d_weights = d_offsets.new_zeros(d_offsets.size())
+ inds = torch.arange(0, proposals.size(0)).to(proposals).long()
+
+ # generate offset weights of top-k nearest buckets
+ for k in range(offset_topk):
+ if k >= 1:
+ offset_l_weights[inds, l_label[:,
+ k]] = (l_topk[:, k] <
+ offset_upperbound).float()
+ offset_r_weights[inds, r_label[:,
+ k]] = (r_topk[:, k] <
+ offset_upperbound).float()
+ offset_t_weights[inds, t_label[:,
+ k]] = (t_topk[:, k] <
+ offset_upperbound).float()
+ offset_d_weights[inds, d_label[:,
+ k]] = (d_topk[:, k] <
+ offset_upperbound).float()
+ else:
+ offset_l_weights[inds, l_label[:, k]] = 1.0
+ offset_r_weights[inds, r_label[:, k]] = 1.0
+ offset_t_weights[inds, t_label[:, k]] = 1.0
+ offset_d_weights[inds, d_label[:, k]] = 1.0
+
+ offsets = torch.cat([l_offsets, r_offsets, t_offsets, d_offsets], dim=-1)
+ offsets_weights = torch.cat([
+ offset_l_weights, offset_r_weights, offset_t_weights, offset_d_weights
+ ],
+ dim=-1)
+
+ # generate bucket labels and weight
+ side_num = int(np.ceil(num_buckets / 2.0))
+ labels = torch.stack(
+ [l_label[:, 0], r_label[:, 0], t_label[:, 0], d_label[:, 0]], dim=-1)
+
+ batch_size = labels.size(0)
+ bucket_labels = F.one_hot(labels.view(-1), side_num).view(batch_size,
+ -1).float()
+ bucket_cls_l_weights = (l_offsets.abs() < 1).float()
+ bucket_cls_r_weights = (r_offsets.abs() < 1).float()
+ bucket_cls_t_weights = (t_offsets.abs() < 1).float()
+ bucket_cls_d_weights = (d_offsets.abs() < 1).float()
+ bucket_cls_weights = torch.cat([
+ bucket_cls_l_weights, bucket_cls_r_weights, bucket_cls_t_weights,
+ bucket_cls_d_weights
+ ],
+ dim=-1)
+ # ignore second nearest buckets for cls if necessary
+ if cls_ignore_neighbor:
+ bucket_cls_weights = (~((bucket_cls_weights == 1) &
+ (bucket_labels == 0))).float()
+ else:
+ bucket_cls_weights[:] = 1.0
+ return offsets, offsets_weights, bucket_labels, bucket_cls_weights
+
+
+@mmcv.jit(coderize=True)
+def bucket2bbox(proposals,
+ cls_preds,
+ offset_preds,
+ num_buckets,
+ scale_factor=1.0,
+ max_shape=None,
+ clip_border=True):
+ """Apply bucketing estimation (cls preds) and fine regression (offset
+ preds) to generate det bboxes.
+
+ Args:
+ proposals (Tensor): Boxes to be transformed. Shape (n, 4)
+ cls_preds (Tensor): bucketing estimation. Shape (n, num_buckets*2).
+ offset_preds (Tensor): fine regression. Shape (n, num_buckets*2).
+ num_buckets (int): Number of buckets.
+ scale_factor (float): Scale factor to rescale proposals.
+ max_shape (tuple[int, int]): Maximum bounds for boxes, specifies (H, W).
+ clip_border (bool, optional): Whether clip the objects outside the
+ border of the image. Defaults to True.
+
+ Returns:
+ tuple[Tensor]: (bboxes, loc_confidence).
+
+ - bboxes: predicted bboxes. Shape (n, 4)
+ - loc_confidence: localization confidence of predicted bboxes.
+ Shape (n,).
+ """
+
+ side_num = int(np.ceil(num_buckets / 2.0))
+ cls_preds = cls_preds.view(-1, side_num)
+ offset_preds = offset_preds.view(-1, side_num)
+
+ scores = F.softmax(cls_preds, dim=1)
+ score_topk, score_label = scores.topk(2, dim=1, largest=True, sorted=True)
+
+ rescaled_proposals = bbox_rescale(proposals, scale_factor)
+
+ pw = rescaled_proposals[..., 2] - rescaled_proposals[..., 0]
+ ph = rescaled_proposals[..., 3] - rescaled_proposals[..., 1]
+ px1 = rescaled_proposals[..., 0]
+ py1 = rescaled_proposals[..., 1]
+ px2 = rescaled_proposals[..., 2]
+ py2 = rescaled_proposals[..., 3]
+
+ bucket_w = pw / num_buckets
+ bucket_h = ph / num_buckets
+
+ score_inds_l = score_label[0::4, 0]
+ score_inds_r = score_label[1::4, 0]
+ score_inds_t = score_label[2::4, 0]
+ score_inds_d = score_label[3::4, 0]
+ l_buckets = px1 + (0.5 + score_inds_l.float()) * bucket_w
+ r_buckets = px2 - (0.5 + score_inds_r.float()) * bucket_w
+ t_buckets = py1 + (0.5 + score_inds_t.float()) * bucket_h
+ d_buckets = py2 - (0.5 + score_inds_d.float()) * bucket_h
+
+ offsets = offset_preds.view(-1, 4, side_num)
+ inds = torch.arange(proposals.size(0)).to(proposals).long()
+ l_offsets = offsets[:, 0, :][inds, score_inds_l]
+ r_offsets = offsets[:, 1, :][inds, score_inds_r]
+ t_offsets = offsets[:, 2, :][inds, score_inds_t]
+ d_offsets = offsets[:, 3, :][inds, score_inds_d]
+
+ x1 = l_buckets - l_offsets * bucket_w
+ x2 = r_buckets - r_offsets * bucket_w
+ y1 = t_buckets - t_offsets * bucket_h
+ y2 = d_buckets - d_offsets * bucket_h
+
+ if clip_border and max_shape is not None:
+ x1 = x1.clamp(min=0, max=max_shape[1] - 1)
+ y1 = y1.clamp(min=0, max=max_shape[0] - 1)
+ x2 = x2.clamp(min=0, max=max_shape[1] - 1)
+ y2 = y2.clamp(min=0, max=max_shape[0] - 1)
+ bboxes = torch.cat([x1[:, None], y1[:, None], x2[:, None], y2[:, None]],
+ dim=-1)
+
+ # bucketing guided rescoring
+ loc_confidence = score_topk[:, 0]
+ top2_neighbor_inds = (score_label[:, 0] - score_label[:, 1]).abs() == 1
+ loc_confidence += score_topk[:, 1] * top2_neighbor_inds.float()
+ loc_confidence = loc_confidence.view(-1, 4).mean(dim=1)
+
+ return bboxes, loc_confidence
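To illustrate the bucket geometry used above, the sketch below computes the left- and right-side bucket centers for a single 80x40 proposal with num_buckets=8, assuming scale_factor=1.0 so that the rescaling step leaves the proposal unchanged; all numbers are toy values:

import numpy as np
import torch

proposal = torch.tensor([[0., 0., 80., 40.]])
num_buckets = 8
side_num = int(np.ceil(num_buckets / 2.0))           # 4 buckets per side
pw = proposal[:, 2] - proposal[:, 0]                 # width: 80
bucket_w = pw / num_buckets                          # 10 per bucket
offsets = 0.5 + torch.arange(side_num).float()       # [0.5, 1.5, 2.5, 3.5]
l_buckets = proposal[:, 0, None] + offsets[None, :] * bucket_w[:, None]
r_buckets = proposal[:, 2, None] - offsets[None, :] * bucket_w[:, None]
print(l_buckets)  # tensor([[ 5., 15., 25., 35.]])  left-side bucket centers
print(r_buckets)  # tensor([[75., 65., 55., 45.]])  right-side bucket centers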
diff --git a/annotator/uniformer/mmdet/core/bbox/coder/delta_xywh_bbox_coder.py b/annotator/uniformer/mmdet/core/bbox/coder/delta_xywh_bbox_coder.py
new file mode 100644
index 0000000000000000000000000000000000000000..da317184a6eb6f87b0b658e9ff8be289794a0cb2
--- /dev/null
+++ b/annotator/uniformer/mmdet/core/bbox/coder/delta_xywh_bbox_coder.py
@@ -0,0 +1,237 @@
+import mmcv
+import numpy as np
+import torch
+
+from ..builder import BBOX_CODERS
+from .base_bbox_coder import BaseBBoxCoder
+
+
+@BBOX_CODERS.register_module()
+class DeltaXYWHBBoxCoder(BaseBBoxCoder):
+ """Delta XYWH BBox coder.
+
+ Following the practice in `R-CNN <https://arxiv.org/abs/1311.2524>`_,
+ this coder encodes bbox (x1, y1, x2, y2) into delta (dx, dy, dw, dh) and
+ decodes delta (dx, dy, dw, dh) back to original bbox (x1, y1, x2, y2).
+
+ Args:
+ target_means (Sequence[float]): Denormalizing means of target for
+ delta coordinates
+ target_stds (Sequence[float]): Denormalizing standard deviation of
+ target for delta coordinates
+ clip_border (bool, optional): Whether clip the objects outside the
+ border of the image. Defaults to True.
+ """
+
+ def __init__(self,
+ target_means=(0., 0., 0., 0.),
+ target_stds=(1., 1., 1., 1.),
+ clip_border=True):
+ super(DeltaXYWHBBoxCoder, self).__init__()
+ self.means = target_means
+ self.stds = target_stds
+ self.clip_border = clip_border
+
+ def encode(self, bboxes, gt_bboxes):
+ """Get box regression transformation deltas that can be used to
+ transform the ``bboxes`` into the ``gt_bboxes``.
+
+ Args:
+ bboxes (torch.Tensor): Source boxes, e.g., object proposals.
+ gt_bboxes (torch.Tensor): Target of the transformation, e.g.,
+ ground-truth boxes.
+
+ Returns:
+ torch.Tensor: Box transformation deltas
+ """
+
+ assert bboxes.size(0) == gt_bboxes.size(0)
+ assert bboxes.size(-1) == gt_bboxes.size(-1) == 4
+ encoded_bboxes = bbox2delta(bboxes, gt_bboxes, self.means, self.stds)
+ return encoded_bboxes
+
+ def decode(self,
+ bboxes,
+ pred_bboxes,
+ max_shape=None,
+ wh_ratio_clip=16 / 1000):
+ """Apply transformation `pred_bboxes` to `boxes`.
+
+ Args:
+ bboxes (torch.Tensor): Basic boxes. Shape (B, N, 4) or (N, 4)
+ pred_bboxes (Tensor): Encoded offsets with respect to each roi.
+ Has shape (B, N, num_classes * 4) or (B, N, 4) or
+ (N, num_classes * 4) or (N, 4). Note N = num_anchors * W * H
+ when rois is a grid of anchors. Offset encoding follows [1]_.
+ max_shape (Sequence[int] or torch.Tensor or Sequence[
+ Sequence[int]],optional): Maximum bounds for boxes, specifies
+ (H, W, C) or (H, W). If bboxes shape is (B, N, 4), then
+ the max_shape should be a Sequence[Sequence[int]]
+ and the length of max_shape should also be B.
+ wh_ratio_clip (float, optional): The allowed ratio between
+ width and height.
+
+ Returns:
+ torch.Tensor: Decoded boxes.
+ """
+
+ assert pred_bboxes.size(0) == bboxes.size(0)
+ if pred_bboxes.ndim == 3:
+ assert pred_bboxes.size(1) == bboxes.size(1)
+ decoded_bboxes = delta2bbox(bboxes, pred_bboxes, self.means, self.stds,
+ max_shape, wh_ratio_clip, self.clip_border)
+
+ return decoded_bboxes
+
+
+@mmcv.jit(coderize=True)
+def bbox2delta(proposals, gt, means=(0., 0., 0., 0.), stds=(1., 1., 1., 1.)):
+ """Compute deltas of proposals w.r.t. gt.
+
+ We usually compute the deltas of x, y, w, h of proposals w.r.t ground
+ truth bboxes to get regression target.
+ This is the inverse function of :func:`delta2bbox`.
+
+ Args:
+ proposals (Tensor): Boxes to be transformed, shape (N, ..., 4)
+ gt (Tensor): Gt bboxes to be used as base, shape (N, ..., 4)
+ means (Sequence[float]): Denormalizing means for delta coordinates
+ stds (Sequence[float]): Denormalizing standard deviation for delta
+ coordinates
+
+ Returns:
+ Tensor: deltas with shape (N, 4), where columns represent dx, dy,
+ dw, dh.
+ """
+ assert proposals.size() == gt.size()
+
+ proposals = proposals.float()
+ gt = gt.float()
+ px = (proposals[..., 0] + proposals[..., 2]) * 0.5
+ py = (proposals[..., 1] + proposals[..., 3]) * 0.5
+ pw = proposals[..., 2] - proposals[..., 0]
+ ph = proposals[..., 3] - proposals[..., 1]
+
+ gx = (gt[..., 0] + gt[..., 2]) * 0.5
+ gy = (gt[..., 1] + gt[..., 3]) * 0.5
+ gw = gt[..., 2] - gt[..., 0]
+ gh = gt[..., 3] - gt[..., 1]
+
+ dx = (gx - px) / pw
+ dy = (gy - py) / ph
+ dw = torch.log(gw / pw)
+ dh = torch.log(gh / ph)
+ deltas = torch.stack([dx, dy, dw, dh], dim=-1)
+
+ means = deltas.new_tensor(means).unsqueeze(0)
+ stds = deltas.new_tensor(stds).unsqueeze(0)
+ deltas = deltas.sub_(means).div_(stds)
+
+ return deltas
+
+
+@mmcv.jit(coderize=True)
+def delta2bbox(rois,
+ deltas,
+ means=(0., 0., 0., 0.),
+ stds=(1., 1., 1., 1.),
+ max_shape=None,
+ wh_ratio_clip=16 / 1000,
+ clip_border=True):
+ """Apply deltas to shift/scale base boxes.
+
+ Typically the rois are anchor or proposed bounding boxes and the deltas are
+ network outputs used to shift/scale those boxes.
+ This is the inverse function of :func:`bbox2delta`.
+
+ Args:
+ rois (Tensor): Boxes to be transformed. Has shape (N, 4) or (B, N, 4)
+ deltas (Tensor): Encoded offsets with respect to each roi.
+ Has shape (B, N, num_classes * 4) or (B, N, 4) or
+ (N, num_classes * 4) or (N, 4). Note N = num_anchors * W * H
+ when rois is a grid of anchors. Offset encoding follows [1]_.
+ means (Sequence[float]): Denormalizing means for delta coordinates
+ stds (Sequence[float]): Denormalizing standard deviation for delta
+ coordinates
+ max_shape (Sequence[int] or torch.Tensor or Sequence[
+ Sequence[int]],optional): Maximum bounds for boxes, specifies
+ (H, W, C) or (H, W). If rois shape is (B, N, 4), then
+ the max_shape should be a Sequence[Sequence[int]]
+ and the length of max_shape should also be B.
+ wh_ratio_clip (float): Maximum aspect ratio for boxes.
+ clip_border (bool, optional): Whether clip the objects outside the
+ border of the image. Defaults to True.
+
+ Returns:
+ Tensor: Boxes with shape (B, N, num_classes * 4) or (B, N, 4) or
+ (N, num_classes * 4) or (N, 4), where 4 represent
+ tl_x, tl_y, br_x, br_y.
+
+ References:
+ .. [1] https://arxiv.org/abs/1311.2524
+
+ Example:
+ >>> rois = torch.Tensor([[ 0., 0., 1., 1.],
+ >>> [ 0., 0., 1., 1.],
+ >>> [ 0., 0., 1., 1.],
+ >>> [ 5., 5., 5., 5.]])
+ >>> deltas = torch.Tensor([[ 0., 0., 0., 0.],
+ >>> [ 1., 1., 1., 1.],
+ >>> [ 0., 0., 2., -1.],
+ >>> [ 0.7, -1.9, -0.5, 0.3]])
+ >>> delta2bbox(rois, deltas, max_shape=(32, 32, 3))
+ tensor([[0.0000, 0.0000, 1.0000, 1.0000],
+ [0.1409, 0.1409, 2.8591, 2.8591],
+ [0.0000, 0.3161, 4.1945, 0.6839],
+ [5.0000, 5.0000, 5.0000, 5.0000]])
+ """
+ means = deltas.new_tensor(means).view(1,
+ -1).repeat(1,
+ deltas.size(-1) // 4)
+ stds = deltas.new_tensor(stds).view(1, -1).repeat(1, deltas.size(-1) // 4)
+ denorm_deltas = deltas * stds + means
+ dx = denorm_deltas[..., 0::4]
+ dy = denorm_deltas[..., 1::4]
+ dw = denorm_deltas[..., 2::4]
+ dh = denorm_deltas[..., 3::4]
+ max_ratio = np.abs(np.log(wh_ratio_clip))
+ dw = dw.clamp(min=-max_ratio, max=max_ratio)
+ dh = dh.clamp(min=-max_ratio, max=max_ratio)
+ x1, y1 = rois[..., 0], rois[..., 1]
+ x2, y2 = rois[..., 2], rois[..., 3]
+ # Compute center of each roi
+ px = ((x1 + x2) * 0.5).unsqueeze(-1).expand_as(dx)
+ py = ((y1 + y2) * 0.5).unsqueeze(-1).expand_as(dy)
+ # Compute width/height of each roi
+ pw = (x2 - x1).unsqueeze(-1).expand_as(dw)
+ ph = (y2 - y1).unsqueeze(-1).expand_as(dh)
+ # Use exp(network energy) to enlarge/shrink each roi
+ gw = pw * dw.exp()
+ gh = ph * dh.exp()
+ # Use network energy to shift the center of each roi
+ gx = px + pw * dx
+ gy = py + ph * dy
+ # Convert center-xy/width/height to top-left, bottom-right
+ x1 = gx - gw * 0.5
+ y1 = gy - gh * 0.5
+ x2 = gx + gw * 0.5
+ y2 = gy + gh * 0.5
+
+ bboxes = torch.stack([x1, y1, x2, y2], dim=-1).view(deltas.size())
+
+ if clip_border and max_shape is not None:
+ if not isinstance(max_shape, torch.Tensor):
+ max_shape = x1.new_tensor(max_shape)
+ max_shape = max_shape[..., :2].type_as(x1)
+ if max_shape.ndim == 2:
+ assert bboxes.ndim == 3
+ assert max_shape.size(0) == bboxes.size(0)
+
+ min_xy = x1.new_tensor(0)
+ max_xy = torch.cat(
+ [max_shape] * (deltas.size(-1) // 2),
+ dim=-1).flip(-1).unsqueeze(-2)
+ bboxes = torch.where(bboxes < min_xy, min_xy, bboxes)
+ bboxes = torch.where(bboxes > max_xy, max_xy, bboxes)
+
+ return bboxes
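A minimal round-trip sketch of the (dx, dy, dw, dh) encoding defined by bbox2delta/delta2bbox above, written in plain PyTorch with unit means/stds so the normalization is a no-op; the box coordinates are toy values:

import torch

proposal = torch.tensor([0., 0., 10., 10.])
gt = torch.tensor([2., 2., 8., 12.])

px, py = (proposal[0] + proposal[2]) / 2, (proposal[1] + proposal[3]) / 2
pw, ph = proposal[2] - proposal[0], proposal[3] - proposal[1]
gx, gy = (gt[0] + gt[2]) / 2, (gt[1] + gt[3]) / 2
gw, gh = gt[2] - gt[0], gt[3] - gt[1]

# encode: normalized center shift plus log width/height ratios
deltas = torch.stack([(gx - px) / pw, (gy - py) / ph,
                      torch.log(gw / pw), torch.log(gh / ph)])

# decode: shift the center by pw*dx, ph*dy and rescale w/h by exp(dw), exp(dh)
nx, ny = px + pw * deltas[0], py + ph * deltas[1]
nw, nh = pw * deltas[2].exp(), ph * deltas[3].exp()
decoded = torch.stack([nx - nw / 2, ny - nh / 2, nx + nw / 2, ny + nh / 2])
print(decoded)  # tensor([ 2.,  2.,  8., 12.]) -- recovers the gt box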
diff --git a/annotator/uniformer/mmdet/core/bbox/coder/legacy_delta_xywh_bbox_coder.py b/annotator/uniformer/mmdet/core/bbox/coder/legacy_delta_xywh_bbox_coder.py
new file mode 100644
index 0000000000000000000000000000000000000000..190309fd42a1b76c12c82fc1acf0511494be5ac3
--- /dev/null
+++ b/annotator/uniformer/mmdet/core/bbox/coder/legacy_delta_xywh_bbox_coder.py
@@ -0,0 +1,215 @@
+import mmcv
+import numpy as np
+import torch
+
+from ..builder import BBOX_CODERS
+from .base_bbox_coder import BaseBBoxCoder
+
+
+@BBOX_CODERS.register_module()
+class LegacyDeltaXYWHBBoxCoder(BaseBBoxCoder):
+ """Legacy Delta XYWH BBox coder used in MMDet V1.x.
+
+ Following the practice in R-CNN [1]_, this coder encodes bbox (x1, y1, x2,
+ y2) into delta (dx, dy, dw, dh) and decodes delta (dx, dy, dw, dh)
+ back to original bbox (x1, y1, x2, y2).
+
+ Note:
+ The main difference between :class:`LegacyDeltaXYWHBBoxCoder` and
+ :class:`DeltaXYWHBBoxCoder` is whether ``+ 1`` is used during width and
+ height calculation. We suggest using this coder only when testing with
+ MMDet V1.x models.
+
+ References:
+ .. [1] https://arxiv.org/abs/1311.2524
+
+ Args:
+ target_means (Sequence[float]): denormalizing means of target for
+ delta coordinates
+ target_stds (Sequence[float]): denormalizing standard deviation of
+ target for delta coordinates
+ """
+
+ def __init__(self,
+ target_means=(0., 0., 0., 0.),
+ target_stds=(1., 1., 1., 1.)):
+ super(LegacyDeltaXYWHBBoxCoder, self).__init__()
+ self.means = target_means
+ self.stds = target_stds
+
+ def encode(self, bboxes, gt_bboxes):
+ """Get box regression transformation deltas that can be used to
+ transform the ``bboxes`` into the ``gt_bboxes``.
+
+ Args:
+ bboxes (torch.Tensor): source boxes, e.g., object proposals.
+ gt_bboxes (torch.Tensor): target of the transformation, e.g.,
+ ground-truth boxes.
+
+ Returns:
+ torch.Tensor: Box transformation deltas
+ """
+ assert bboxes.size(0) == gt_bboxes.size(0)
+ assert bboxes.size(-1) == gt_bboxes.size(-1) == 4
+ encoded_bboxes = legacy_bbox2delta(bboxes, gt_bboxes, self.means,
+ self.stds)
+ return encoded_bboxes
+
+ def decode(self,
+ bboxes,
+ pred_bboxes,
+ max_shape=None,
+ wh_ratio_clip=16 / 1000):
+ """Apply transformation `pred_bboxes` to `boxes`.
+
+ Args:
+ bboxes (torch.Tensor): Basic boxes.
+ pred_bboxes (torch.Tensor): Encoded boxes with shape (N, 4) or
+ (N, 4 * num_classes).
+ max_shape (tuple[int], optional): Maximum shape of boxes.
+ Defaults to None.
+ wh_ratio_clip (float, optional): The allowed ratio between
+ width and height.
+
+ Returns:
+ torch.Tensor: Decoded boxes.
+ """
+ assert pred_bboxes.size(0) == bboxes.size(0)
+ decoded_bboxes = legacy_delta2bbox(bboxes, pred_bboxes, self.means,
+ self.stds, max_shape, wh_ratio_clip)
+
+ return decoded_bboxes
+
+
+@mmcv.jit(coderize=True)
+def legacy_bbox2delta(proposals,
+ gt,
+ means=(0., 0., 0., 0.),
+ stds=(1., 1., 1., 1.)):
+ """Compute deltas of proposals w.r.t. gt in the MMDet V1.x manner.
+
+ We usually compute the deltas of x, y, w, h of proposals w.r.t ground
+ truth bboxes to get regression target.
+ This is the inverse function of `delta2bbox()`
+
+ Args:
+ proposals (Tensor): Boxes to be transformed, shape (N, ..., 4)
+ gt (Tensor): Gt bboxes to be used as base, shape (N, ..., 4)
+ means (Sequence[float]): Denormalizing means for delta coordinates
+ stds (Sequence[float]): Denormalizing standard deviation for delta
+ coordinates
+
+ Returns:
+ Tensor: deltas with shape (N, 4), where columns represent dx, dy,
+ dw, dh.
+ """
+ assert proposals.size() == gt.size()
+
+ proposals = proposals.float()
+ gt = gt.float()
+ px = (proposals[..., 0] + proposals[..., 2]) * 0.5
+ py = (proposals[..., 1] + proposals[..., 3]) * 0.5
+ pw = proposals[..., 2] - proposals[..., 0] + 1.0
+ ph = proposals[..., 3] - proposals[..., 1] + 1.0
+
+ gx = (gt[..., 0] + gt[..., 2]) * 0.5
+ gy = (gt[..., 1] + gt[..., 3]) * 0.5
+ gw = gt[..., 2] - gt[..., 0] + 1.0
+ gh = gt[..., 3] - gt[..., 1] + 1.0
+
+ dx = (gx - px) / pw
+ dy = (gy - py) / ph
+ dw = torch.log(gw / pw)
+ dh = torch.log(gh / ph)
+ deltas = torch.stack([dx, dy, dw, dh], dim=-1)
+
+ means = deltas.new_tensor(means).unsqueeze(0)
+ stds = deltas.new_tensor(stds).unsqueeze(0)
+ deltas = deltas.sub_(means).div_(stds)
+
+ return deltas
+
+
+@mmcv.jit(coderize=True)
+def legacy_delta2bbox(rois,
+ deltas,
+ means=(0., 0., 0., 0.),
+ stds=(1., 1., 1., 1.),
+ max_shape=None,
+ wh_ratio_clip=16 / 1000):
+ """Apply deltas to shift/scale base boxes in the MMDet V1.x manner.
+
+ Typically the rois are anchor or proposed bounding boxes and the deltas are
+ network outputs used to shift/scale those boxes.
+ This is the inverse function of `bbox2delta()`
+
+ Args:
+ rois (Tensor): Boxes to be transformed. Has shape (N, 4)
+ deltas (Tensor): Encoded offsets with respect to each roi.
+ Has shape (N, 4 * num_classes). Note N = num_anchors * W * H when
+ rois is a grid of anchors. Offset encoding follows [1]_.
+ means (Sequence[float]): Denormalizing means for delta coordinates
+ stds (Sequence[float]): Denormalizing standard deviation for delta
+ coordinates
+ max_shape (tuple[int, int]): Maximum bounds for boxes, specifies (H, W).
+ wh_ratio_clip (float): Maximum aspect ratio for boxes.
+
+ Returns:
+ Tensor: Boxes with shape (N, 4), where columns represent
+ tl_x, tl_y, br_x, br_y.
+
+ References:
+ .. [1] https://arxiv.org/abs/1311.2524
+
+ Example:
+ >>> rois = torch.Tensor([[ 0., 0., 1., 1.],
+ >>> [ 0., 0., 1., 1.],
+ >>> [ 0., 0., 1., 1.],
+ >>> [ 5., 5., 5., 5.]])
+ >>> deltas = torch.Tensor([[ 0., 0., 0., 0.],
+ >>> [ 1., 1., 1., 1.],
+ >>> [ 0., 0., 2., -1.],
+ >>> [ 0.7, -1.9, -0.5, 0.3]])
+ >>> legacy_delta2bbox(rois, deltas, max_shape=(32, 32))
+ tensor([[0.0000, 0.0000, 1.5000, 1.5000],
+ [0.0000, 0.0000, 5.2183, 5.2183],
+ [0.0000, 0.1321, 7.8891, 0.8679],
+ [5.3967, 2.4251, 6.0033, 3.7749]])
+ """
+ means = deltas.new_tensor(means).repeat(1, deltas.size(1) // 4)
+ stds = deltas.new_tensor(stds).repeat(1, deltas.size(1) // 4)
+ denorm_deltas = deltas * stds + means
+ dx = denorm_deltas[:, 0::4]
+ dy = denorm_deltas[:, 1::4]
+ dw = denorm_deltas[:, 2::4]
+ dh = denorm_deltas[:, 3::4]
+ max_ratio = np.abs(np.log(wh_ratio_clip))
+ dw = dw.clamp(min=-max_ratio, max=max_ratio)
+ dh = dh.clamp(min=-max_ratio, max=max_ratio)
+ # Compute center of each roi
+ px = ((rois[:, 0] + rois[:, 2]) * 0.5).unsqueeze(1).expand_as(dx)
+ py = ((rois[:, 1] + rois[:, 3]) * 0.5).unsqueeze(1).expand_as(dy)
+ # Compute width/height of each roi
+ pw = (rois[:, 2] - rois[:, 0] + 1.0).unsqueeze(1).expand_as(dw)
+ ph = (rois[:, 3] - rois[:, 1] + 1.0).unsqueeze(1).expand_as(dh)
+ # Use exp(network energy) to enlarge/shrink each roi
+ gw = pw * dw.exp()
+ gh = ph * dh.exp()
+ # Use network energy to shift the center of each roi
+ gx = px + pw * dx
+ gy = py + ph * dy
+ # Convert center-xy/width/height to top-left, bottom-right
+
+ # The true legacy box coder should +- 0.5 here.
+ # However, current implementation improves the performance when testing
+ # the models trained in MMDetection 1.X (~0.5 bbox AP, 0.2 mask AP)
+ x1 = gx - gw * 0.5
+ y1 = gy - gh * 0.5
+ x2 = gx + gw * 0.5
+ y2 = gy + gh * 0.5
+ if max_shape is not None:
+ x1 = x1.clamp(min=0, max=max_shape[1] - 1)
+ y1 = y1.clamp(min=0, max=max_shape[0] - 1)
+ x2 = x2.clamp(min=0, max=max_shape[1] - 1)
+ y2 = y2.clamp(min=0, max=max_shape[0] - 1)
+ bboxes = torch.stack([x1, y1, x2, y2], dim=-1).view_as(deltas)
+ return bboxes
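The sketch below shows the practical effect of the legacy ``+ 1`` convention: the same pixel boxes produce different dw targets under the two coders, so the encodings are not interchangeable (toy coordinates):

import torch

proposal = torch.tensor([0., 0., 9., 9.])
gt = torch.tensor([0., 0., 19., 19.])

# current convention: width = x2 - x1; legacy convention: width = x2 - x1 + 1
dw_new = torch.log((gt[2] - gt[0]) / (proposal[2] - proposal[0]))                  # log(19/9)
dw_legacy = torch.log((gt[2] - gt[0] + 1.0) / (proposal[2] - proposal[0] + 1.0))   # log(20/10)
print(dw_new, dw_legacy)  # ~0.747 vs ~0.693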
diff --git a/annotator/uniformer/mmdet/core/bbox/coder/pseudo_bbox_coder.py b/annotator/uniformer/mmdet/core/bbox/coder/pseudo_bbox_coder.py
new file mode 100644
index 0000000000000000000000000000000000000000..1c8346f4ae2c7db9719a70c7dc0244e088a9965b
--- /dev/null
+++ b/annotator/uniformer/mmdet/core/bbox/coder/pseudo_bbox_coder.py
@@ -0,0 +1,18 @@
+from ..builder import BBOX_CODERS
+from .base_bbox_coder import BaseBBoxCoder
+
+
+@BBOX_CODERS.register_module()
+class PseudoBBoxCoder(BaseBBoxCoder):
+ """Pseudo bounding box coder."""
+
+ def __init__(self, **kwargs):
+ super(PseudoBBoxCoder, self).__init__(**kwargs)
+
+ def encode(self, bboxes, gt_bboxes):
+ """torch.Tensor: return the given ``bboxes``"""
+ return gt_bboxes
+
+ def decode(self, bboxes, pred_bboxes):
+ """torch.Tensor: return the given ``pred_bboxes``"""
+ return pred_bboxes
diff --git a/annotator/uniformer/mmdet/core/bbox/coder/tblr_bbox_coder.py b/annotator/uniformer/mmdet/core/bbox/coder/tblr_bbox_coder.py
new file mode 100644
index 0000000000000000000000000000000000000000..edaffaf1fa252857e1a660ea14a613e2466fb52c
--- /dev/null
+++ b/annotator/uniformer/mmdet/core/bbox/coder/tblr_bbox_coder.py
@@ -0,0 +1,198 @@
+import mmcv
+import torch
+
+from ..builder import BBOX_CODERS
+from .base_bbox_coder import BaseBBoxCoder
+
+
+@BBOX_CODERS.register_module()
+class TBLRBBoxCoder(BaseBBoxCoder):
+ """TBLR BBox coder.
+
+ Following the practice in `FSAF <https://arxiv.org/abs/1903.00621>`_,
+ this coder encodes gt bboxes (x1, y1, x2, y2) into (top, bottom, left,
+ right) and decode it back to the original.
+
+ Args:
+ normalizer (list | float): Normalization factor by which the
+ coordinates are divided when encoding. If it is a list, it should
+ have a length of 4, indicating the normalization factors of the
+ tblr dims. Otherwise it is a unified float factor for all dims.
+ Default: 4.0
+ clip_border (bool, optional): Whether clip the objects outside the
+ border of the image. Defaults to True.
+ """
+
+ def __init__(self, normalizer=4.0, clip_border=True):
+ super(TBLRBBoxCoder, self).__init__()
+ self.normalizer = normalizer
+ self.clip_border = clip_border
+
+ def encode(self, bboxes, gt_bboxes):
+ """Get box regression transformation deltas that can be used to
+ transform the ``bboxes`` into the ``gt_bboxes`` in the (top, bottom,
+ left, right) order.
+
+ Args:
+ bboxes (torch.Tensor): source boxes, e.g., object proposals.
+ gt_bboxes (torch.Tensor): target of the transformation, e.g.,
+ ground truth boxes.
+
+ Returns:
+ torch.Tensor: Box transformation deltas
+ """
+ assert bboxes.size(0) == gt_bboxes.size(0)
+ assert bboxes.size(-1) == gt_bboxes.size(-1) == 4
+ encoded_bboxes = bboxes2tblr(
+ bboxes, gt_bboxes, normalizer=self.normalizer)
+ return encoded_bboxes
+
+ def decode(self, bboxes, pred_bboxes, max_shape=None):
+ """Apply transformation `pred_bboxes` to `boxes`.
+
+ Args:
+ bboxes (torch.Tensor): Basic boxes. Shape (B, N, 4) or (N, 4)
+ pred_bboxes (torch.Tensor): Encoded boxes with shape
+ (B, N, 4) or (N, 4)
+ max_shape (Sequence[int] or torch.Tensor or Sequence[
+ Sequence[int]],optional): Maximum bounds for boxes, specifies
+ (H, W, C) or (H, W). If bboxes shape is (B, N, 4), then
+ the max_shape should be a Sequence[Sequence[int]]
+ and the length of max_shape should also be B.
+
+ Returns:
+ torch.Tensor: Decoded boxes.
+ """
+ decoded_bboxes = tblr2bboxes(
+ bboxes,
+ pred_bboxes,
+ normalizer=self.normalizer,
+ max_shape=max_shape,
+ clip_border=self.clip_border)
+
+ return decoded_bboxes
+
+
+@mmcv.jit(coderize=True)
+def bboxes2tblr(priors, gts, normalizer=4.0, normalize_by_wh=True):
+ """Encode ground truth boxes to tblr coordinate.
+
+ It first converts the gt coordinates to the tblr format
+ (top, bottom, left, right), relative to prior box centers. The tblr
+ coordinates may be normalized by the side lengths of the prior bboxes
+ if `normalize_by_wh` is specified as True, and are then normalized by
+ the `normalizer` factor.
+
+ Args:
+ priors (Tensor): Prior boxes in point form
+ Shape: (num_proposals,4).
+ gts (Tensor): Coords of ground truth for each prior in point-form
+ Shape: (num_proposals, 4).
+ normalizer (Sequence[float] | float): normalization parameter of
+ encoded boxes. If it is a list, it has to have length = 4.
+ Default: 4.0
+ normalize_by_wh (bool): Whether to normalize tblr coordinate by the
+ side length (wh) of prior bboxes.
+
+ Return:
+ encoded boxes (Tensor), Shape: (num_proposals, 4)
+ """
+
+ # dist b/t match center and prior's center
+ if not isinstance(normalizer, float):
+ normalizer = torch.tensor(normalizer, device=priors.device)
+ assert len(normalizer) == 4, 'Normalizer must have length = 4'
+ assert priors.size(0) == gts.size(0)
+ prior_centers = (priors[:, 0:2] + priors[:, 2:4]) / 2
+ xmin, ymin, xmax, ymax = gts.split(1, dim=1)
+ top = prior_centers[:, 1].unsqueeze(1) - ymin
+ bottom = ymax - prior_centers[:, 1].unsqueeze(1)
+ left = prior_centers[:, 0].unsqueeze(1) - xmin
+ right = xmax - prior_centers[:, 0].unsqueeze(1)
+ loc = torch.cat((top, bottom, left, right), dim=1)
+ if normalize_by_wh:
+ # Normalize tblr by anchor width and height
+ wh = priors[:, 2:4] - priors[:, 0:2]
+ w, h = torch.split(wh, 1, dim=1)
+ loc[:, :2] /= h # tb is normalized by h
+ loc[:, 2:] /= w # lr is normalized by w
+ # Normalize tblr by the given normalization factor
+ return loc / normalizer
+
+
+@mmcv.jit(coderize=True)
+def tblr2bboxes(priors,
+ tblr,
+ normalizer=4.0,
+ normalize_by_wh=True,
+ max_shape=None,
+ clip_border=True):
+ """Decode tblr outputs to prediction boxes.
+
+ The process includes 3 steps: 1) De-normalize tblr coordinates by
+ multiplying it with `normalizer`; 2) De-normalize tblr coordinates by the
+ prior bbox width and height if `normalize_by_wh` is `True`; 3) Convert
+ tblr (top, bottom, left, right) pair relative to the center of priors back
+ to (xmin, ymin, xmax, ymax) coordinate.
+
+ Args:
+ priors (Tensor): Prior boxes in point form (x0, y0, x1, y1)
+ Shape: (N,4) or (B, N, 4).
+ tblr (Tensor): Coords of network output in tblr form
+ Shape: (N, 4) or (B, N, 4).
+ normalizer (Sequence[float] | float): Normalization parameter of
+ encoded boxes. By list, it represents the normalization factors at
+ tblr dims. By float, it is the unified normalization factor at all
+ dims. Default: 4.0
+ normalize_by_wh (bool): Whether the tblr coordinates have been
+ normalized by the side length (wh) of prior bboxes.
+ max_shape (Sequence[int] or torch.Tensor or Sequence[
+ Sequence[int]],optional): Maximum bounds for boxes, specifies
+ (H, W, C) or (H, W). If priors shape is (B, N, 4), then
+ the max_shape should be a Sequence[Sequence[int]]
+ and the length of max_shape should also be B.
+ clip_border (bool, optional): Whether clip the objects outside the
+ border of the image. Defaults to True.
+
+ Return:
+ decoded boxes (Tensor): Boxes with shape (N, 4) or (B, N, 4)
+ """
+ if not isinstance(normalizer, float):
+ normalizer = torch.tensor(normalizer, device=priors.device)
+ assert len(normalizer) == 4, 'Normalizer must have length = 4'
+ assert priors.size(0) == tblr.size(0)
+ if priors.ndim == 3:
+ assert priors.size(1) == tblr.size(1)
+
+ loc_decode = tblr * normalizer
+ prior_centers = (priors[..., 0:2] + priors[..., 2:4]) / 2
+ if normalize_by_wh:
+ wh = priors[..., 2:4] - priors[..., 0:2]
+ w, h = torch.split(wh, 1, dim=-1)
+ # In-place operations on slices would fail when exporting to ONNX
+ th = h * loc_decode[..., :2] # tb
+ tw = w * loc_decode[..., 2:] # lr
+ loc_decode = torch.cat([th, tw], dim=-1)
+ # loc_decode.split(1, dim=-1) cannot be exported with ONNX, hence the tuple split
+ top, bottom, left, right = loc_decode.split((1, 1, 1, 1), dim=-1)
+ xmin = prior_centers[..., 0].unsqueeze(-1) - left
+ xmax = prior_centers[..., 0].unsqueeze(-1) + right
+ ymin = prior_centers[..., 1].unsqueeze(-1) - top
+ ymax = prior_centers[..., 1].unsqueeze(-1) + bottom
+
+ bboxes = torch.cat((xmin, ymin, xmax, ymax), dim=-1)
+
+ if clip_border and max_shape is not None:
+ if not isinstance(max_shape, torch.Tensor):
+ max_shape = priors.new_tensor(max_shape)
+ max_shape = max_shape[..., :2].type_as(priors)
+ if max_shape.ndim == 2:
+ assert bboxes.ndim == 3
+ assert max_shape.size(0) == bboxes.size(0)
+
+ min_xy = priors.new_tensor(0)
+ max_xy = torch.cat([max_shape, max_shape],
+ dim=-1).flip(-1).unsqueeze(-2)
+ bboxes = torch.where(bboxes < min_xy, min_xy, bboxes)
+ bboxes = torch.where(bboxes > max_xy, max_xy, bboxes)
+
+ return bboxes
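A minimal round-trip sketch of the tblr conversion implemented by bboxes2tblr/tblr2bboxes, with normalizer=1.0 and width/height normalization left out so only the geometric part is exercised; the prior and gt coordinates are toy values:

import torch

prior = torch.tensor([[0., 0., 10., 10.]])
gt = torch.tensor([[2., 1., 9., 12.]])

ctr = (prior[:, :2] + prior[:, 2:]) / 2            # prior center (5, 5)
top = ctr[:, 1:2] - gt[:, 1:2]                     # 4
bottom = gt[:, 3:4] - ctr[:, 1:2]                  # 7
left = ctr[:, 0:1] - gt[:, 0:1]                    # 3
right = gt[:, 2:3] - ctr[:, 0:1]                   # 4
tblr = torch.cat([top, bottom, left, right], dim=1)

decoded = torch.cat([ctr[:, 0:1] - tblr[:, 2:3],   # xmin = cx - left
                     ctr[:, 1:2] - tblr[:, 0:1],   # ymin = cy - top
                     ctr[:, 0:1] + tblr[:, 3:4],   # xmax = cx + right
                     ctr[:, 1:2] + tblr[:, 1:2]],  # ymax = cy + bottom
                    dim=1)
print(decoded)  # tensor([[ 2.,  1.,  9., 12.]]) -- recovers the gt box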
diff --git a/annotator/uniformer/mmdet/core/bbox/coder/yolo_bbox_coder.py b/annotator/uniformer/mmdet/core/bbox/coder/yolo_bbox_coder.py
new file mode 100644
index 0000000000000000000000000000000000000000..d6d0e82ac780820952938d8751ac9776ea31588a
--- /dev/null
+++ b/annotator/uniformer/mmdet/core/bbox/coder/yolo_bbox_coder.py
@@ -0,0 +1,89 @@
+import mmcv
+import torch
+
+from ..builder import BBOX_CODERS
+from .base_bbox_coder import BaseBBoxCoder
+
+
+@BBOX_CODERS.register_module()
+class YOLOBBoxCoder(BaseBBoxCoder):
+ """YOLO BBox coder.
+
+ Following `YOLO <https://arxiv.org/abs/1506.02640>`_, this coder divides
+ the image into grids and encodes bbox (x1, y1, x2, y2) into
+ (cx, cy, dw, dh). cx, cy in [0., 1.] denote the relative center position
+ w.r.t. the center of bboxes. dw, dh are the same as
+ :obj:`DeltaXYWHBBoxCoder`.
+
+ Args:
+ eps (float): Min value of cx, cy when encoding.
+ """
+
+ def __init__(self, eps=1e-6):
+        super(YOLOBBoxCoder, self).__init__()
+ self.eps = eps
+
+ @mmcv.jit(coderize=True)
+ def encode(self, bboxes, gt_bboxes, stride):
+ """Get box regression transformation deltas that can be used to
+ transform the ``bboxes`` into the ``gt_bboxes``.
+
+ Args:
+ bboxes (torch.Tensor): Source boxes, e.g., anchors.
+ gt_bboxes (torch.Tensor): Target of the transformation, e.g.,
+ ground-truth boxes.
+ stride (torch.Tensor | int): Stride of bboxes.
+
+ Returns:
+ torch.Tensor: Box transformation deltas
+ """
+
+ assert bboxes.size(0) == gt_bboxes.size(0)
+ assert bboxes.size(-1) == gt_bboxes.size(-1) == 4
+ x_center_gt = (gt_bboxes[..., 0] + gt_bboxes[..., 2]) * 0.5
+ y_center_gt = (gt_bboxes[..., 1] + gt_bboxes[..., 3]) * 0.5
+ w_gt = gt_bboxes[..., 2] - gt_bboxes[..., 0]
+ h_gt = gt_bboxes[..., 3] - gt_bboxes[..., 1]
+ x_center = (bboxes[..., 0] + bboxes[..., 2]) * 0.5
+ y_center = (bboxes[..., 1] + bboxes[..., 3]) * 0.5
+ w = bboxes[..., 2] - bboxes[..., 0]
+ h = bboxes[..., 3] - bboxes[..., 1]
+ w_target = torch.log((w_gt / w).clamp(min=self.eps))
+ h_target = torch.log((h_gt / h).clamp(min=self.eps))
+ x_center_target = ((x_center_gt - x_center) / stride + 0.5).clamp(
+ self.eps, 1 - self.eps)
+ y_center_target = ((y_center_gt - y_center) / stride + 0.5).clamp(
+ self.eps, 1 - self.eps)
+ encoded_bboxes = torch.stack(
+ [x_center_target, y_center_target, w_target, h_target], dim=-1)
+ return encoded_bboxes
+
+ @mmcv.jit(coderize=True)
+ def decode(self, bboxes, pred_bboxes, stride):
+        """Apply transformation `pred_bboxes` to `bboxes`.
+
+        Args:
+            bboxes (torch.Tensor): Basic boxes, e.g. anchors.
+            pred_bboxes (torch.Tensor): Encoded boxes with shape (N, 4) or
+                (B, N, 4).
+            stride (torch.Tensor | int): Strides of bboxes.
+
+ Returns:
+ torch.Tensor: Decoded boxes.
+ """
+ assert pred_bboxes.size(0) == bboxes.size(0)
+ assert pred_bboxes.size(-1) == bboxes.size(-1) == 4
+ x_center = (bboxes[..., 0] + bboxes[..., 2]) * 0.5
+ y_center = (bboxes[..., 1] + bboxes[..., 3]) * 0.5
+ w = bboxes[..., 2] - bboxes[..., 0]
+ h = bboxes[..., 3] - bboxes[..., 1]
+ # Get outputs x, y
+ x_center_pred = (pred_bboxes[..., 0] - 0.5) * stride + x_center
+ y_center_pred = (pred_bboxes[..., 1] - 0.5) * stride + y_center
+ w_pred = torch.exp(pred_bboxes[..., 2]) * w
+ h_pred = torch.exp(pred_bboxes[..., 3]) * h
+
+ decoded_bboxes = torch.stack(
+ (x_center_pred - w_pred / 2, y_center_pred - h_pred / 2,
+ x_center_pred + w_pred / 2, y_center_pred + h_pred / 2),
+ dim=-1)
+
+ return decoded_bboxes
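+
+# Illustrative usage (added comment, not in upstream mmdet): encode/decode is a
+# round trip up to the eps clamp on the center offsets, e.g.
+#   coder = YOLOBBoxCoder()
+#   anchors = torch.tensor([[0., 0., 32., 32.]])
+#   gt = torch.tensor([[4., 6., 28., 26.]])
+#   decoded = coder.decode(anchors, coder.encode(anchors, gt, stride=32), stride=32)
+#   # decoded == gt here, i.e. tensor([[ 4.,  6., 28., 26.]])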
diff --git a/annotator/uniformer/mmdet/core/bbox/demodata.py b/annotator/uniformer/mmdet/core/bbox/demodata.py
new file mode 100644
index 0000000000000000000000000000000000000000..feecb693745a47d9f2bebd8af9a217ff4f5cc92b
--- /dev/null
+++ b/annotator/uniformer/mmdet/core/bbox/demodata.py
@@ -0,0 +1,41 @@
+import numpy as np
+import torch
+
+from mmdet.utils.util_random import ensure_rng
+
+
+def random_boxes(num=1, scale=1, rng=None):
+ """Simple version of ``kwimage.Boxes.random``
+
+ Returns:
+ Tensor: shape (n, 4) in x1, y1, x2, y2 format.
+
+ References:
+ https://gitlab.kitware.com/computer-vision/kwimage/blob/master/kwimage/structs/boxes.py#L1390
+
+ Example:
+ >>> num = 3
+ >>> scale = 512
+ >>> rng = 0
+ >>> boxes = random_boxes(num, scale, rng)
+ >>> print(boxes)
+ tensor([[280.9925, 278.9802, 308.6148, 366.1769],
+ [216.9113, 330.6978, 224.0446, 456.5878],
+ [405.3632, 196.3221, 493.3953, 270.7942]])
+ """
+ rng = ensure_rng(rng)
+
+ tlbr = rng.rand(num, 4).astype(np.float32)
+
+ tl_x = np.minimum(tlbr[:, 0], tlbr[:, 2])
+ tl_y = np.minimum(tlbr[:, 1], tlbr[:, 3])
+ br_x = np.maximum(tlbr[:, 0], tlbr[:, 2])
+ br_y = np.maximum(tlbr[:, 1], tlbr[:, 3])
+
+ tlbr[:, 0] = tl_x * scale
+ tlbr[:, 1] = tl_y * scale
+ tlbr[:, 2] = br_x * scale
+ tlbr[:, 3] = br_y * scale
+
+ boxes = torch.from_numpy(tlbr)
+ return boxes
diff --git a/annotator/uniformer/mmdet/core/bbox/iou_calculators/__init__.py b/annotator/uniformer/mmdet/core/bbox/iou_calculators/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e71369a58a05fa25e6a754300875fdbb87cb26a5
--- /dev/null
+++ b/annotator/uniformer/mmdet/core/bbox/iou_calculators/__init__.py
@@ -0,0 +1,4 @@
+from .builder import build_iou_calculator
+from .iou2d_calculator import BboxOverlaps2D, bbox_overlaps
+
+__all__ = ['build_iou_calculator', 'BboxOverlaps2D', 'bbox_overlaps']
diff --git a/annotator/uniformer/mmdet/core/bbox/iou_calculators/builder.py b/annotator/uniformer/mmdet/core/bbox/iou_calculators/builder.py
new file mode 100644
index 0000000000000000000000000000000000000000..09094d7ece46a9f18a28ed0960feac2afa9331bb
--- /dev/null
+++ b/annotator/uniformer/mmdet/core/bbox/iou_calculators/builder.py
@@ -0,0 +1,8 @@
+from mmcv.utils import Registry, build_from_cfg
+
+IOU_CALCULATORS = Registry('IoU calculator')
+
+
+def build_iou_calculator(cfg, default_args=None):
+ """Builder of IoU calculator."""
+ return build_from_cfg(cfg, IOU_CALCULATORS, default_args)
diff --git a/annotator/uniformer/mmdet/core/bbox/iou_calculators/iou2d_calculator.py b/annotator/uniformer/mmdet/core/bbox/iou_calculators/iou2d_calculator.py
new file mode 100644
index 0000000000000000000000000000000000000000..158b702c234f5c10c4f5f03e08e8794ac7b8dcad
--- /dev/null
+++ b/annotator/uniformer/mmdet/core/bbox/iou_calculators/iou2d_calculator.py
@@ -0,0 +1,159 @@
+import torch
+
+from .builder import IOU_CALCULATORS
+
+
+@IOU_CALCULATORS.register_module()
+class BboxOverlaps2D(object):
+ """2D Overlaps (e.g. IoUs, GIoUs) Calculator."""
+
+ def __call__(self, bboxes1, bboxes2, mode='iou', is_aligned=False):
+ """Calculate IoU between 2D bboxes.
+
+ Args:
+            bboxes1 (Tensor): bboxes have shape (m, 4) in <x1, y1, x2, y2>
+                format, or shape (m, 5) in <x1, y1, x2, y2, score> format.
+            bboxes2 (Tensor): bboxes have shape (n, 4) in <x1, y1, x2, y2>
+                format, shape (n, 5) in <x1, y1, x2, y2, score> format, or be
+                empty. If ``is_aligned`` is ``True``, then m and n must be
+                equal.
+ mode (str): "iou" (intersection over union), "iof" (intersection
+ over foreground), or "giou" (generalized intersection over
+ union).
+ is_aligned (bool, optional): If True, then m and n must be equal.
+ Default False.
+
+ Returns:
+            Tensor: shape (m, n) if ``is_aligned`` is False else shape (m,)
+ """
+ assert bboxes1.size(-1) in [0, 4, 5]
+ assert bboxes2.size(-1) in [0, 4, 5]
+ if bboxes2.size(-1) == 5:
+ bboxes2 = bboxes2[..., :4]
+ if bboxes1.size(-1) == 5:
+ bboxes1 = bboxes1[..., :4]
+ return bbox_overlaps(bboxes1, bboxes2, mode, is_aligned)
+
+ def __repr__(self):
+ """str: a string describing the module"""
+ repr_str = self.__class__.__name__ + '()'
+ return repr_str
+
+
+def bbox_overlaps(bboxes1, bboxes2, mode='iou', is_aligned=False, eps=1e-6):
+ """Calculate overlap between two set of bboxes.
+
+    If ``is_aligned`` is ``False``, then calculate the overlaps between each
+ bbox of bboxes1 and bboxes2, otherwise the overlaps between each aligned
+ pair of bboxes1 and bboxes2.
+
+ Args:
+        bboxes1 (Tensor): shape (B, m, 4) in <x1, y1, x2, y2> format or empty.
+        bboxes2 (Tensor): shape (B, n, 4) in <x1, y1, x2, y2> format or empty.
+            B indicates the batch dim, in shape (B1, B2, ..., Bn).
+            If ``is_aligned`` is ``True``, then m and n must be equal.
+ mode (str): "iou" (intersection over union), "iof" (intersection over
+ foreground) or "giou" (generalized intersection over union).
+ Default "iou".
+ is_aligned (bool, optional): If True, then m and n must be equal.
+ Default False.
+ eps (float, optional): A value added to the denominator for numerical
+ stability. Default 1e-6.
+
+ Returns:
+        Tensor: shape (m, n) if ``is_aligned`` is False else shape (m,)
+
+ Example:
+ >>> bboxes1 = torch.FloatTensor([
+ >>> [0, 0, 10, 10],
+ >>> [10, 10, 20, 20],
+ >>> [32, 32, 38, 42],
+ >>> ])
+ >>> bboxes2 = torch.FloatTensor([
+ >>> [0, 0, 10, 20],
+ >>> [0, 10, 10, 19],
+ >>> [10, 10, 20, 20],
+ >>> ])
+ >>> overlaps = bbox_overlaps(bboxes1, bboxes2)
+ >>> assert overlaps.shape == (3, 3)
+ >>> overlaps = bbox_overlaps(bboxes1, bboxes2, is_aligned=True)
+ >>> assert overlaps.shape == (3, )
+
+ Example:
+ >>> empty = torch.empty(0, 4)
+ >>> nonempty = torch.FloatTensor([[0, 0, 10, 9]])
+ >>> assert tuple(bbox_overlaps(empty, nonempty).shape) == (0, 1)
+ >>> assert tuple(bbox_overlaps(nonempty, empty).shape) == (1, 0)
+ >>> assert tuple(bbox_overlaps(empty, empty).shape) == (0, 0)
+ """
+
+ assert mode in ['iou', 'iof', 'giou'], f'Unsupported mode {mode}'
+ # Either the boxes are empty or the length of boxes' last dimension is 4
+ assert (bboxes1.size(-1) == 4 or bboxes1.size(0) == 0)
+ assert (bboxes2.size(-1) == 4 or bboxes2.size(0) == 0)
+
+ # Batch dim must be the same
+ # Batch dim: (B1, B2, ... Bn)
+ assert bboxes1.shape[:-2] == bboxes2.shape[:-2]
+ batch_shape = bboxes1.shape[:-2]
+
+ rows = bboxes1.size(-2)
+ cols = bboxes2.size(-2)
+ if is_aligned:
+ assert rows == cols
+
+ if rows * cols == 0:
+ if is_aligned:
+ return bboxes1.new(batch_shape + (rows, ))
+ else:
+ return bboxes1.new(batch_shape + (rows, cols))
+
+ area1 = (bboxes1[..., 2] - bboxes1[..., 0]) * (
+ bboxes1[..., 3] - bboxes1[..., 1])
+ area2 = (bboxes2[..., 2] - bboxes2[..., 0]) * (
+ bboxes2[..., 3] - bboxes2[..., 1])
+
+ if is_aligned:
+ lt = torch.max(bboxes1[..., :2], bboxes2[..., :2]) # [B, rows, 2]
+ rb = torch.min(bboxes1[..., 2:], bboxes2[..., 2:]) # [B, rows, 2]
+
+ wh = (rb - lt).clamp(min=0) # [B, rows, 2]
+ overlap = wh[..., 0] * wh[..., 1]
+
+ if mode in ['iou', 'giou']:
+ union = area1 + area2 - overlap
+ else:
+ union = area1
+ if mode == 'giou':
+ enclosed_lt = torch.min(bboxes1[..., :2], bboxes2[..., :2])
+ enclosed_rb = torch.max(bboxes1[..., 2:], bboxes2[..., 2:])
+ else:
+ lt = torch.max(bboxes1[..., :, None, :2],
+ bboxes2[..., None, :, :2]) # [B, rows, cols, 2]
+ rb = torch.min(bboxes1[..., :, None, 2:],
+ bboxes2[..., None, :, 2:]) # [B, rows, cols, 2]
+
+ wh = (rb - lt).clamp(min=0) # [B, rows, cols, 2]
+ overlap = wh[..., 0] * wh[..., 1]
+
+ if mode in ['iou', 'giou']:
+ union = area1[..., None] + area2[..., None, :] - overlap
+ else:
+ union = area1[..., None]
+ if mode == 'giou':
+ enclosed_lt = torch.min(bboxes1[..., :, None, :2],
+ bboxes2[..., None, :, :2])
+ enclosed_rb = torch.max(bboxes1[..., :, None, 2:],
+ bboxes2[..., None, :, 2:])
+
+ eps = union.new_tensor([eps])
+ union = torch.max(union, eps)
+ ious = overlap / union
+ if mode in ['iou', 'iof']:
+ return ious
+ # calculate gious
+ enclose_wh = (enclosed_rb - enclosed_lt).clamp(min=0)
+ enclose_area = enclose_wh[..., 0] * enclose_wh[..., 1]
+ enclose_area = torch.max(enclose_area, eps)
+ gious = ious - (enclose_area - union) / enclose_area
+ return gious
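+
+# Worked example (added comment, not in upstream mmdet): for two disjoint
+# 2x2 boxes a = (0, 0, 2, 2) and b = (3, 0, 5, 2), overlap = 0 and
+# union = 4 + 4 = 8, so IoU = 0. The enclosing box (0, 0, 5, 2) has area 10,
+# so GIoU = 0 - (10 - 8) / 10 = -0.2:
+#   bbox_overlaps(torch.tensor([[0., 0., 2., 2.]]),
+#                 torch.tensor([[3., 0., 5., 2.]]), mode='giou')
+#   # tensor([[-0.2000]])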
diff --git a/annotator/uniformer/mmdet/core/bbox/match_costs/__init__.py b/annotator/uniformer/mmdet/core/bbox/match_costs/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..add5e0d394034d89b2d47c314ff1938294deb6ea
--- /dev/null
+++ b/annotator/uniformer/mmdet/core/bbox/match_costs/__init__.py
@@ -0,0 +1,7 @@
+from .builder import build_match_cost
+from .match_cost import BBoxL1Cost, ClassificationCost, FocalLossCost, IoUCost
+
+__all__ = [
+ 'build_match_cost', 'ClassificationCost', 'BBoxL1Cost', 'IoUCost',
+ 'FocalLossCost'
+]
diff --git a/annotator/uniformer/mmdet/core/bbox/match_costs/builder.py b/annotator/uniformer/mmdet/core/bbox/match_costs/builder.py
new file mode 100644
index 0000000000000000000000000000000000000000..6894017d42eb16ee4a8ae3ed660a71cda3ad9940
--- /dev/null
+++ b/annotator/uniformer/mmdet/core/bbox/match_costs/builder.py
@@ -0,0 +1,8 @@
+from mmcv.utils import Registry, build_from_cfg
+
+MATCH_COST = Registry('Match Cost')
+
+
+def build_match_cost(cfg, default_args=None):
+    """Builder of Match Cost."""
+ return build_from_cfg(cfg, MATCH_COST, default_args)
diff --git a/annotator/uniformer/mmdet/core/bbox/match_costs/match_cost.py b/annotator/uniformer/mmdet/core/bbox/match_costs/match_cost.py
new file mode 100644
index 0000000000000000000000000000000000000000..38869737d66064ee5adea4b2c8ff26ae091e5f56
--- /dev/null
+++ b/annotator/uniformer/mmdet/core/bbox/match_costs/match_cost.py
@@ -0,0 +1,184 @@
+import torch
+
+from mmdet.core.bbox.iou_calculators import bbox_overlaps
+from mmdet.core.bbox.transforms import bbox_cxcywh_to_xyxy, bbox_xyxy_to_cxcywh
+from .builder import MATCH_COST
+
+
+@MATCH_COST.register_module()
+class BBoxL1Cost(object):
+ """BBoxL1Cost.
+
+ Args:
+ weight (int | float, optional): loss_weight
+ box_format (str, optional): 'xyxy' for DETR, 'xywh' for Sparse_RCNN
+
+ Examples:
+ >>> from mmdet.core.bbox.match_costs.match_cost import BBoxL1Cost
+ >>> import torch
+ >>> self = BBoxL1Cost()
+ >>> bbox_pred = torch.rand(1, 4)
+ >>> gt_bboxes= torch.FloatTensor([[0, 0, 2, 4], [1, 2, 3, 4]])
+ >>> factor = torch.tensor([10, 8, 10, 8])
+ >>> self(bbox_pred, gt_bboxes, factor)
+ tensor([[1.6172, 1.6422]])
+ """
+
+ def __init__(self, weight=1., box_format='xyxy'):
+ self.weight = weight
+ assert box_format in ['xyxy', 'xywh']
+ self.box_format = box_format
+
+ def __call__(self, bbox_pred, gt_bboxes):
+ """
+ Args:
+ bbox_pred (Tensor): Predicted boxes with normalized coordinates
+ (cx, cy, w, h), which are all in range [0, 1]. Shape
+ [num_query, 4].
+ gt_bboxes (Tensor): Ground truth boxes with normalized
+ coordinates (x1, y1, x2, y2). Shape [num_gt, 4].
+
+ Returns:
+ torch.Tensor: bbox_cost value with weight
+ """
+ if self.box_format == 'xywh':
+ gt_bboxes = bbox_xyxy_to_cxcywh(gt_bboxes)
+ elif self.box_format == 'xyxy':
+ bbox_pred = bbox_cxcywh_to_xyxy(bbox_pred)
+ bbox_cost = torch.cdist(bbox_pred, gt_bboxes, p=1)
+ return bbox_cost * self.weight
+
+
+@MATCH_COST.register_module()
+class FocalLossCost(object):
+ """FocalLossCost.
+
+ Args:
+ weight (int | float, optional): loss_weight
+ alpha (int | float, optional): focal_loss alpha
+ gamma (int | float, optional): focal_loss gamma
+ eps (float, optional): default 1e-12
+
+ Examples:
+ >>> from mmdet.core.bbox.match_costs.match_cost import FocalLossCost
+ >>> import torch
+ >>> self = FocalLossCost()
+ >>> cls_pred = torch.rand(4, 3)
+ >>> gt_labels = torch.tensor([0, 1, 2])
+ >>> factor = torch.tensor([10, 8, 10, 8])
+ >>> self(cls_pred, gt_labels)
+ tensor([[-0.3236, -0.3364, -0.2699],
+ [-0.3439, -0.3209, -0.4807],
+ [-0.4099, -0.3795, -0.2929],
+ [-0.1950, -0.1207, -0.2626]])
+ """
+
+ def __init__(self, weight=1., alpha=0.25, gamma=2, eps=1e-12):
+ self.weight = weight
+ self.alpha = alpha
+ self.gamma = gamma
+ self.eps = eps
+
+ def __call__(self, cls_pred, gt_labels):
+ """
+ Args:
+ cls_pred (Tensor): Predicted classification logits, shape
+ [num_query, num_class].
+ gt_labels (Tensor): Label of `gt_bboxes`, shape (num_gt,).
+
+ Returns:
+ torch.Tensor: cls_cost value with weight
+ """
+ cls_pred = cls_pred.sigmoid()
+ neg_cost = -(1 - cls_pred + self.eps).log() * (
+ 1 - self.alpha) * cls_pred.pow(self.gamma)
+ pos_cost = -(cls_pred + self.eps).log() * self.alpha * (
+ 1 - cls_pred).pow(self.gamma)
+ cls_cost = pos_cost[:, gt_labels] - neg_cost[:, gt_labels]
+ return cls_cost * self.weight
+
+
+@MATCH_COST.register_module()
+class ClassificationCost(object):
+ """ClsSoftmaxCost.
+
+ Args:
+ weight (int | float, optional): loss_weight
+
+ Examples:
+ >>> from mmdet.core.bbox.match_costs.match_cost import \
+ ... ClassificationCost
+ >>> import torch
+ >>> self = ClassificationCost()
+ >>> cls_pred = torch.rand(4, 3)
+ >>> gt_labels = torch.tensor([0, 1, 2])
+ >>> factor = torch.tensor([10, 8, 10, 8])
+ >>> self(cls_pred, gt_labels)
+ tensor([[-0.3430, -0.3525, -0.3045],
+ [-0.3077, -0.2931, -0.3992],
+ [-0.3664, -0.3455, -0.2881],
+ [-0.3343, -0.2701, -0.3956]])
+ """
+
+ def __init__(self, weight=1.):
+ self.weight = weight
+
+ def __call__(self, cls_pred, gt_labels):
+ """
+ Args:
+ cls_pred (Tensor): Predicted classification logits, shape
+ [num_query, num_class].
+ gt_labels (Tensor): Label of `gt_bboxes`, shape (num_gt,).
+
+ Returns:
+ torch.Tensor: cls_cost value with weight
+ """
+        # Following the official DETR repo, instead of the NLL used in the
+        # loss, the cost is approximated by 1 - cls_score[gt_label].
+        # The 1 is a constant that doesn't change the matching,
+        # so it can be omitted.
+ cls_score = cls_pred.softmax(-1)
+ cls_cost = -cls_score[:, gt_labels]
+ return cls_cost * self.weight
+
+
+@MATCH_COST.register_module()
+class IoUCost(object):
+ """IoUCost.
+
+ Args:
+ iou_mode (str, optional): iou mode such as 'iou' | 'giou'
+ weight (int | float, optional): loss weight
+
+ Examples:
+ >>> from mmdet.core.bbox.match_costs.match_cost import IoUCost
+ >>> import torch
+ >>> self = IoUCost()
+ >>> bboxes = torch.FloatTensor([[1,1, 2, 2], [2, 2, 3, 4]])
+ >>> gt_bboxes = torch.FloatTensor([[0, 0, 2, 4], [1, 2, 3, 4]])
+ >>> self(bboxes, gt_bboxes)
+ tensor([[-0.1250, 0.1667],
+ [ 0.1667, -0.5000]])
+ """
+
+ def __init__(self, iou_mode='giou', weight=1.):
+ self.weight = weight
+ self.iou_mode = iou_mode
+
+ def __call__(self, bboxes, gt_bboxes):
+ """
+ Args:
+ bboxes (Tensor): Predicted boxes with unnormalized coordinates
+ (x1, y1, x2, y2). Shape [num_query, 4].
+ gt_bboxes (Tensor): Ground truth boxes with unnormalized
+ coordinates (x1, y1, x2, y2). Shape [num_gt, 4].
+
+ Returns:
+ torch.Tensor: iou_cost value with weight
+ """
+ # overlaps: [num_bboxes, num_gt]
+ overlaps = bbox_overlaps(
+ bboxes, gt_bboxes, mode=self.iou_mode, is_aligned=False)
+ # The 1 is a constant that doesn't change the matching, so omitted.
+ iou_cost = -overlaps
+ return iou_cost * self.weight
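+
+# Illustrative note (added comment, not in upstream mmdet): in DETR-style
+# assigners these costs are typically summed into a single [num_query, num_gt]
+# cost matrix before Hungarian matching, e.g. (weights and tensor names below
+# are illustrative placeholders):
+#   cost = (ClassificationCost(weight=1.)(cls_pred, gt_labels)
+#           + BBoxL1Cost(weight=5.)(bbox_pred, normalized_gt_bboxes)
+#           + IoUCost(iou_mode='giou', weight=2.)(decoded_bboxes, gt_bboxes))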
diff --git a/annotator/uniformer/mmdet/core/bbox/samplers/__init__.py b/annotator/uniformer/mmdet/core/bbox/samplers/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..0b06303fe1000e11c5486c40c70606a34a5208e3
--- /dev/null
+++ b/annotator/uniformer/mmdet/core/bbox/samplers/__init__.py
@@ -0,0 +1,15 @@
+from .base_sampler import BaseSampler
+from .combined_sampler import CombinedSampler
+from .instance_balanced_pos_sampler import InstanceBalancedPosSampler
+from .iou_balanced_neg_sampler import IoUBalancedNegSampler
+from .ohem_sampler import OHEMSampler
+from .pseudo_sampler import PseudoSampler
+from .random_sampler import RandomSampler
+from .sampling_result import SamplingResult
+from .score_hlr_sampler import ScoreHLRSampler
+
+__all__ = [
+ 'BaseSampler', 'PseudoSampler', 'RandomSampler',
+ 'InstanceBalancedPosSampler', 'IoUBalancedNegSampler', 'CombinedSampler',
+ 'OHEMSampler', 'SamplingResult', 'ScoreHLRSampler'
+]
diff --git a/annotator/uniformer/mmdet/core/bbox/samplers/base_sampler.py b/annotator/uniformer/mmdet/core/bbox/samplers/base_sampler.py
new file mode 100644
index 0000000000000000000000000000000000000000..9ea35def115b49dfdad8a1f7c040ef3cd983b0d1
--- /dev/null
+++ b/annotator/uniformer/mmdet/core/bbox/samplers/base_sampler.py
@@ -0,0 +1,101 @@
+from abc import ABCMeta, abstractmethod
+
+import torch
+
+from .sampling_result import SamplingResult
+
+
+class BaseSampler(metaclass=ABCMeta):
+ """Base class of samplers."""
+
+ def __init__(self,
+ num,
+ pos_fraction,
+ neg_pos_ub=-1,
+ add_gt_as_proposals=True,
+ **kwargs):
+ self.num = num
+ self.pos_fraction = pos_fraction
+ self.neg_pos_ub = neg_pos_ub
+ self.add_gt_as_proposals = add_gt_as_proposals
+ self.pos_sampler = self
+ self.neg_sampler = self
+
+ @abstractmethod
+ def _sample_pos(self, assign_result, num_expected, **kwargs):
+ """Sample positive samples."""
+ pass
+
+ @abstractmethod
+ def _sample_neg(self, assign_result, num_expected, **kwargs):
+ """Sample negative samples."""
+ pass
+
+ def sample(self,
+ assign_result,
+ bboxes,
+ gt_bboxes,
+ gt_labels=None,
+ **kwargs):
+ """Sample positive and negative bboxes.
+
+ This is a simple implementation of bbox sampling given candidates,
+ assigning results and ground truth bboxes.
+
+ Args:
+ assign_result (:obj:`AssignResult`): Bbox assigning results.
+ bboxes (Tensor): Boxes to be sampled from.
+ gt_bboxes (Tensor): Ground truth bboxes.
+ gt_labels (Tensor, optional): Class labels of ground truth bboxes.
+
+ Returns:
+ :obj:`SamplingResult`: Sampling result.
+
+ Example:
+ >>> from mmdet.core.bbox import RandomSampler
+ >>> from mmdet.core.bbox import AssignResult
+ >>> from mmdet.core.bbox.demodata import ensure_rng, random_boxes
+ >>> rng = ensure_rng(None)
+ >>> assign_result = AssignResult.random(rng=rng)
+ >>> bboxes = random_boxes(assign_result.num_preds, rng=rng)
+ >>> gt_bboxes = random_boxes(assign_result.num_gts, rng=rng)
+ >>> gt_labels = None
+ >>> self = RandomSampler(num=32, pos_fraction=0.5, neg_pos_ub=-1,
+ >>> add_gt_as_proposals=False)
+ >>> self = self.sample(assign_result, bboxes, gt_bboxes, gt_labels)
+ """
+ if len(bboxes.shape) < 2:
+ bboxes = bboxes[None, :]
+
+ bboxes = bboxes[:, :4]
+
+ gt_flags = bboxes.new_zeros((bboxes.shape[0], ), dtype=torch.uint8)
+ if self.add_gt_as_proposals and len(gt_bboxes) > 0:
+ if gt_labels is None:
+ raise ValueError(
+ 'gt_labels must be given when add_gt_as_proposals is True')
+ bboxes = torch.cat([gt_bboxes, bboxes], dim=0)
+ assign_result.add_gt_(gt_labels)
+ gt_ones = bboxes.new_ones(gt_bboxes.shape[0], dtype=torch.uint8)
+ gt_flags = torch.cat([gt_ones, gt_flags])
+
+ num_expected_pos = int(self.num * self.pos_fraction)
+ pos_inds = self.pos_sampler._sample_pos(
+ assign_result, num_expected_pos, bboxes=bboxes, **kwargs)
+ # We found that sampled indices have duplicated items occasionally.
+ # (may be a bug of PyTorch)
+ pos_inds = pos_inds.unique()
+ num_sampled_pos = pos_inds.numel()
+ num_expected_neg = self.num - num_sampled_pos
+ if self.neg_pos_ub >= 0:
+ _pos = max(1, num_sampled_pos)
+ neg_upper_bound = int(self.neg_pos_ub * _pos)
+ if num_expected_neg > neg_upper_bound:
+ num_expected_neg = neg_upper_bound
+ neg_inds = self.neg_sampler._sample_neg(
+ assign_result, num_expected_neg, bboxes=bboxes, **kwargs)
+ neg_inds = neg_inds.unique()
+
+ sampling_result = SamplingResult(pos_inds, neg_inds, bboxes, gt_bboxes,
+ assign_result, gt_flags)
+ return sampling_result
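+
+# Worked example (added comment, not in upstream mmdet): with num=512 and
+# pos_fraction=0.25, at most int(512 * 0.25) = 128 positives are sampled; the
+# remainder (512 - num_sampled_pos) is filled with negatives, optionally capped
+# at neg_pos_ub * num_sampled_pos when neg_pos_ub >= 0.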
diff --git a/annotator/uniformer/mmdet/core/bbox/samplers/combined_sampler.py b/annotator/uniformer/mmdet/core/bbox/samplers/combined_sampler.py
new file mode 100644
index 0000000000000000000000000000000000000000..564729f0895b1863d94c479a67202438af45f996
--- /dev/null
+++ b/annotator/uniformer/mmdet/core/bbox/samplers/combined_sampler.py
@@ -0,0 +1,20 @@
+from ..builder import BBOX_SAMPLERS, build_sampler
+from .base_sampler import BaseSampler
+
+
+@BBOX_SAMPLERS.register_module()
+class CombinedSampler(BaseSampler):
+ """A sampler that combines positive sampler and negative sampler."""
+
+ def __init__(self, pos_sampler, neg_sampler, **kwargs):
+ super(CombinedSampler, self).__init__(**kwargs)
+ self.pos_sampler = build_sampler(pos_sampler, **kwargs)
+ self.neg_sampler = build_sampler(neg_sampler, **kwargs)
+
+ def _sample_pos(self, **kwargs):
+ """Sample positive samples."""
+ raise NotImplementedError
+
+ def _sample_neg(self, **kwargs):
+ """Sample negative samples."""
+ raise NotImplementedError
diff --git a/annotator/uniformer/mmdet/core/bbox/samplers/instance_balanced_pos_sampler.py b/annotator/uniformer/mmdet/core/bbox/samplers/instance_balanced_pos_sampler.py
new file mode 100644
index 0000000000000000000000000000000000000000..c735298487e14e4a0ec42913f25673cccb98a8a0
--- /dev/null
+++ b/annotator/uniformer/mmdet/core/bbox/samplers/instance_balanced_pos_sampler.py
@@ -0,0 +1,55 @@
+import numpy as np
+import torch
+
+from ..builder import BBOX_SAMPLERS
+from .random_sampler import RandomSampler
+
+
+@BBOX_SAMPLERS.register_module()
+class InstanceBalancedPosSampler(RandomSampler):
+    """Instance balanced sampler that samples an equal number of positive
+    samples for each instance."""
+
+ def _sample_pos(self, assign_result, num_expected, **kwargs):
+ """Sample positive boxes.
+
+ Args:
+ assign_result (:obj:`AssignResult`): The assigned results of boxes.
+ num_expected (int): The number of expected positive samples
+
+ Returns:
+ Tensor or ndarray: sampled indices.
+ """
+ pos_inds = torch.nonzero(assign_result.gt_inds > 0, as_tuple=False)
+ if pos_inds.numel() != 0:
+ pos_inds = pos_inds.squeeze(1)
+ if pos_inds.numel() <= num_expected:
+ return pos_inds
+ else:
+ unique_gt_inds = assign_result.gt_inds[pos_inds].unique()
+ num_gts = len(unique_gt_inds)
+ num_per_gt = int(round(num_expected / float(num_gts)) + 1)
+ sampled_inds = []
+ for i in unique_gt_inds:
+ inds = torch.nonzero(
+ assign_result.gt_inds == i.item(), as_tuple=False)
+ if inds.numel() != 0:
+ inds = inds.squeeze(1)
+ else:
+ continue
+ if len(inds) > num_per_gt:
+ inds = self.random_choice(inds, num_per_gt)
+ sampled_inds.append(inds)
+ sampled_inds = torch.cat(sampled_inds)
+ if len(sampled_inds) < num_expected:
+ num_extra = num_expected - len(sampled_inds)
+ extra_inds = np.array(
+ list(set(pos_inds.cpu()) - set(sampled_inds.cpu())))
+ if len(extra_inds) > num_extra:
+ extra_inds = self.random_choice(extra_inds, num_extra)
+ extra_inds = torch.from_numpy(extra_inds).to(
+ assign_result.gt_inds.device).long()
+ sampled_inds = torch.cat([sampled_inds, extra_inds])
+ elif len(sampled_inds) > num_expected:
+ sampled_inds = self.random_choice(sampled_inds, num_expected)
+ return sampled_inds
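+
+# Worked example (added comment, not in upstream mmdet): with num_expected=16
+# positives spread over num_gts=5 instances, num_per_gt = round(16 / 5) + 1 = 4,
+# so up to 4 positives are drawn per ground-truth box; the pool is then padded
+# with leftover positives or randomly trimmed back to exactly 16.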
diff --git a/annotator/uniformer/mmdet/core/bbox/samplers/iou_balanced_neg_sampler.py b/annotator/uniformer/mmdet/core/bbox/samplers/iou_balanced_neg_sampler.py
new file mode 100644
index 0000000000000000000000000000000000000000..f275e430d1b57c4d9df57387b8f3ae6f0ff68cf1
--- /dev/null
+++ b/annotator/uniformer/mmdet/core/bbox/samplers/iou_balanced_neg_sampler.py
@@ -0,0 +1,157 @@
+import numpy as np
+import torch
+
+from ..builder import BBOX_SAMPLERS
+from .random_sampler import RandomSampler
+
+
+@BBOX_SAMPLERS.register_module()
+class IoUBalancedNegSampler(RandomSampler):
+ """IoU Balanced Sampling.
+
+ arXiv: https://arxiv.org/pdf/1904.02701.pdf (CVPR 2019)
+
+    Negative proposals are sampled according to their IoU with the ground
+    truth. A `floor_fraction` of the required RoIs is sampled randomly from
+    proposals whose IoU is lower than `floor_thr`; the rest are sampled from
+    proposals whose IoU is at least `floor_thr`, drawn evenly from `num_bins`
+    IoU bins.
+
+ Args:
+ num (int): number of proposals.
+ pos_fraction (float): fraction of positive proposals.
+ floor_thr (float): threshold (minimum) IoU for IoU balanced sampling,
+ set to -1 if all using IoU balanced sampling.
+ floor_fraction (float): sampling fraction of proposals under floor_thr.
+ num_bins (int): number of bins in IoU balanced sampling.
+ """
+
+ def __init__(self,
+ num,
+ pos_fraction,
+ floor_thr=-1,
+ floor_fraction=0,
+ num_bins=3,
+ **kwargs):
+ super(IoUBalancedNegSampler, self).__init__(num, pos_fraction,
+ **kwargs)
+ assert floor_thr >= 0 or floor_thr == -1
+ assert 0 <= floor_fraction <= 1
+ assert num_bins >= 1
+
+ self.floor_thr = floor_thr
+ self.floor_fraction = floor_fraction
+ self.num_bins = num_bins
+
+ def sample_via_interval(self, max_overlaps, full_set, num_expected):
+ """Sample according to the iou interval.
+
+ Args:
+ max_overlaps (torch.Tensor): IoU between bounding boxes and ground
+ truth boxes.
+            full_set (set(int)): A full set of indices of boxes.
+            num_expected (int): Number of expected samples.
+
+ Returns:
+ np.ndarray: Indices of samples
+ """
+ max_iou = max_overlaps.max()
+ iou_interval = (max_iou - self.floor_thr) / self.num_bins
+ per_num_expected = int(num_expected / self.num_bins)
+
+ sampled_inds = []
+ for i in range(self.num_bins):
+ start_iou = self.floor_thr + i * iou_interval
+ end_iou = self.floor_thr + (i + 1) * iou_interval
+ tmp_set = set(
+ np.where(
+ np.logical_and(max_overlaps >= start_iou,
+ max_overlaps < end_iou))[0])
+ tmp_inds = list(tmp_set & full_set)
+ if len(tmp_inds) > per_num_expected:
+ tmp_sampled_set = self.random_choice(tmp_inds,
+ per_num_expected)
+ else:
+                tmp_sampled_set = np.array(tmp_inds, dtype=np.int64)
+ sampled_inds.append(tmp_sampled_set)
+
+ sampled_inds = np.concatenate(sampled_inds)
+ if len(sampled_inds) < num_expected:
+ num_extra = num_expected - len(sampled_inds)
+ extra_inds = np.array(list(full_set - set(sampled_inds)))
+ if len(extra_inds) > num_extra:
+ extra_inds = self.random_choice(extra_inds, num_extra)
+ sampled_inds = np.concatenate([sampled_inds, extra_inds])
+
+ return sampled_inds
+
+ def _sample_neg(self, assign_result, num_expected, **kwargs):
+ """Sample negative boxes.
+
+ Args:
+ assign_result (:obj:`AssignResult`): The assigned results of boxes.
+ num_expected (int): The number of expected negative samples
+
+ Returns:
+ Tensor or ndarray: sampled indices.
+ """
+ neg_inds = torch.nonzero(assign_result.gt_inds == 0, as_tuple=False)
+ if neg_inds.numel() != 0:
+ neg_inds = neg_inds.squeeze(1)
+ if len(neg_inds) <= num_expected:
+ return neg_inds
+ else:
+ max_overlaps = assign_result.max_overlaps.cpu().numpy()
+ # balance sampling for negative samples
+ neg_set = set(neg_inds.cpu().numpy())
+
+ if self.floor_thr > 0:
+ floor_set = set(
+ np.where(
+ np.logical_and(max_overlaps >= 0,
+ max_overlaps < self.floor_thr))[0])
+ iou_sampling_set = set(
+ np.where(max_overlaps >= self.floor_thr)[0])
+ elif self.floor_thr == 0:
+ floor_set = set(np.where(max_overlaps == 0)[0])
+ iou_sampling_set = set(
+ np.where(max_overlaps > self.floor_thr)[0])
+ else:
+ floor_set = set()
+ iou_sampling_set = set(
+ np.where(max_overlaps > self.floor_thr)[0])
+ # for sampling interval calculation
+ self.floor_thr = 0
+
+ floor_neg_inds = list(floor_set & neg_set)
+ iou_sampling_neg_inds = list(iou_sampling_set & neg_set)
+ num_expected_iou_sampling = int(num_expected *
+ (1 - self.floor_fraction))
+ if len(iou_sampling_neg_inds) > num_expected_iou_sampling:
+ if self.num_bins >= 2:
+ iou_sampled_inds = self.sample_via_interval(
+ max_overlaps, set(iou_sampling_neg_inds),
+ num_expected_iou_sampling)
+ else:
+ iou_sampled_inds = self.random_choice(
+ iou_sampling_neg_inds, num_expected_iou_sampling)
+ else:
+                iou_sampled_inds = np.array(
+                    iou_sampling_neg_inds, dtype=np.int64)
+ num_expected_floor = num_expected - len(iou_sampled_inds)
+ if len(floor_neg_inds) > num_expected_floor:
+ sampled_floor_inds = self.random_choice(
+ floor_neg_inds, num_expected_floor)
+ else:
+                sampled_floor_inds = np.array(floor_neg_inds, dtype=np.int64)
+ sampled_inds = np.concatenate(
+ (sampled_floor_inds, iou_sampled_inds))
+ if len(sampled_inds) < num_expected:
+ num_extra = num_expected - len(sampled_inds)
+ extra_inds = np.array(list(neg_set - set(sampled_inds)))
+ if len(extra_inds) > num_extra:
+ extra_inds = self.random_choice(extra_inds, num_extra)
+ sampled_inds = np.concatenate((sampled_inds, extra_inds))
+ sampled_inds = torch.from_numpy(sampled_inds).long().to(
+ assign_result.gt_inds.device)
+ return sampled_inds
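+
+# Worked example (added comment, not in upstream mmdet): with floor_thr=0.1,
+# num_bins=3 and a maximum negative IoU of 0.4, iou_interval = (0.4 - 0.1) / 3
+# = 0.1, so the "hard" negatives (IoU >= 0.1) are drawn evenly from the bins
+# [0.1, 0.2), [0.2, 0.3) and [0.3, 0.4), while floor_fraction of the budget is
+# sampled randomly from proposals with IoU < 0.1.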
diff --git a/annotator/uniformer/mmdet/core/bbox/samplers/ohem_sampler.py b/annotator/uniformer/mmdet/core/bbox/samplers/ohem_sampler.py
new file mode 100644
index 0000000000000000000000000000000000000000..8b99f60ef0176f1b7a56665fb0f59272f65b84cd
--- /dev/null
+++ b/annotator/uniformer/mmdet/core/bbox/samplers/ohem_sampler.py
@@ -0,0 +1,107 @@
+import torch
+
+from ..builder import BBOX_SAMPLERS
+from ..transforms import bbox2roi
+from .base_sampler import BaseSampler
+
+
+@BBOX_SAMPLERS.register_module()
+class OHEMSampler(BaseSampler):
+    r"""Online Hard Example Mining Sampler described in `Training Region-based
+    Object Detectors with Online Hard Example Mining
+    <https://arxiv.org/abs/1604.03540>`_.
+ """
+
+ def __init__(self,
+ num,
+ pos_fraction,
+ context,
+ neg_pos_ub=-1,
+ add_gt_as_proposals=True,
+ **kwargs):
+ super(OHEMSampler, self).__init__(num, pos_fraction, neg_pos_ub,
+ add_gt_as_proposals)
+ self.context = context
+ if not hasattr(self.context, 'num_stages'):
+ self.bbox_head = self.context.bbox_head
+ else:
+ self.bbox_head = self.context.bbox_head[self.context.current_stage]
+
+ def hard_mining(self, inds, num_expected, bboxes, labels, feats):
+ with torch.no_grad():
+ rois = bbox2roi([bboxes])
+ if not hasattr(self.context, 'num_stages'):
+ bbox_results = self.context._bbox_forward(feats, rois)
+ else:
+ bbox_results = self.context._bbox_forward(
+ self.context.current_stage, feats, rois)
+ cls_score = bbox_results['cls_score']
+ loss = self.bbox_head.loss(
+ cls_score=cls_score,
+ bbox_pred=None,
+ rois=rois,
+ labels=labels,
+ label_weights=cls_score.new_ones(cls_score.size(0)),
+ bbox_targets=None,
+ bbox_weights=None,
+ reduction_override='none')['loss_cls']
+ _, topk_loss_inds = loss.topk(num_expected)
+ return inds[topk_loss_inds]
+
+ def _sample_pos(self,
+ assign_result,
+ num_expected,
+ bboxes=None,
+ feats=None,
+ **kwargs):
+ """Sample positive boxes.
+
+ Args:
+ assign_result (:obj:`AssignResult`): Assigned results
+ num_expected (int): Number of expected positive samples
+ bboxes (torch.Tensor, optional): Boxes. Defaults to None.
+ feats (list[torch.Tensor], optional): Multi-level features.
+ Defaults to None.
+
+ Returns:
+ torch.Tensor: Indices of positive samples
+ """
+ # Sample some hard positive samples
+ pos_inds = torch.nonzero(assign_result.gt_inds > 0, as_tuple=False)
+ if pos_inds.numel() != 0:
+ pos_inds = pos_inds.squeeze(1)
+ if pos_inds.numel() <= num_expected:
+ return pos_inds
+ else:
+ return self.hard_mining(pos_inds, num_expected, bboxes[pos_inds],
+ assign_result.labels[pos_inds], feats)
+
+ def _sample_neg(self,
+ assign_result,
+ num_expected,
+ bboxes=None,
+ feats=None,
+ **kwargs):
+ """Sample negative boxes.
+
+ Args:
+ assign_result (:obj:`AssignResult`): Assigned results
+ num_expected (int): Number of expected negative samples
+ bboxes (torch.Tensor, optional): Boxes. Defaults to None.
+ feats (list[torch.Tensor], optional): Multi-level features.
+ Defaults to None.
+
+ Returns:
+ torch.Tensor: Indices of negative samples
+ """
+ # Sample some hard negative samples
+ neg_inds = torch.nonzero(assign_result.gt_inds == 0, as_tuple=False)
+ if neg_inds.numel() != 0:
+ neg_inds = neg_inds.squeeze(1)
+ if len(neg_inds) <= num_expected:
+ return neg_inds
+ else:
+ neg_labels = assign_result.labels.new_empty(
+ neg_inds.size(0)).fill_(self.bbox_head.num_classes)
+ return self.hard_mining(neg_inds, num_expected, bboxes[neg_inds],
+ neg_labels, feats)
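+
+# Added note (not in upstream mmdet): hard_mining() runs a no-grad forward pass
+# on the candidate RoIs, scores each one with the unreduced classification loss
+# and keeps the num_expected candidates with the largest loss, i.e. the hardest
+# examples for the current detector.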
diff --git a/annotator/uniformer/mmdet/core/bbox/samplers/pseudo_sampler.py b/annotator/uniformer/mmdet/core/bbox/samplers/pseudo_sampler.py
new file mode 100644
index 0000000000000000000000000000000000000000..2bd81abcdc62debc14772659d7a171f20bf33364
--- /dev/null
+++ b/annotator/uniformer/mmdet/core/bbox/samplers/pseudo_sampler.py
@@ -0,0 +1,41 @@
+import torch
+
+from ..builder import BBOX_SAMPLERS
+from .base_sampler import BaseSampler
+from .sampling_result import SamplingResult
+
+
+@BBOX_SAMPLERS.register_module()
+class PseudoSampler(BaseSampler):
+    """A pseudo sampler that does not actually do any sampling."""
+
+ def __init__(self, **kwargs):
+ pass
+
+ def _sample_pos(self, **kwargs):
+ """Sample positive samples."""
+ raise NotImplementedError
+
+ def _sample_neg(self, **kwargs):
+ """Sample negative samples."""
+ raise NotImplementedError
+
+ def sample(self, assign_result, bboxes, gt_bboxes, **kwargs):
+ """Directly returns the positive and negative indices of samples.
+
+ Args:
+ assign_result (:obj:`AssignResult`): Assigned results
+ bboxes (torch.Tensor): Bounding boxes
+ gt_bboxes (torch.Tensor): Ground truth boxes
+
+ Returns:
+ :obj:`SamplingResult`: sampler results
+ """
+ pos_inds = torch.nonzero(
+ assign_result.gt_inds > 0, as_tuple=False).squeeze(-1).unique()
+ neg_inds = torch.nonzero(
+ assign_result.gt_inds == 0, as_tuple=False).squeeze(-1).unique()
+ gt_flags = bboxes.new_zeros(bboxes.shape[0], dtype=torch.uint8)
+ sampling_result = SamplingResult(pos_inds, neg_inds, bboxes, gt_bboxes,
+ assign_result, gt_flags)
+ return sampling_result
diff --git a/annotator/uniformer/mmdet/core/bbox/samplers/random_sampler.py b/annotator/uniformer/mmdet/core/bbox/samplers/random_sampler.py
new file mode 100644
index 0000000000000000000000000000000000000000..f34b006e8bb0b55c74aa1c3b792f3664ada93162
--- /dev/null
+++ b/annotator/uniformer/mmdet/core/bbox/samplers/random_sampler.py
@@ -0,0 +1,78 @@
+import torch
+
+from ..builder import BBOX_SAMPLERS
+from .base_sampler import BaseSampler
+
+
+@BBOX_SAMPLERS.register_module()
+class RandomSampler(BaseSampler):
+ """Random sampler.
+
+ Args:
+ num (int): Number of samples
+ pos_fraction (float): Fraction of positive samples
+        neg_pos_ub (int, optional): Upper bound on the number of negative
+            samples as a multiple of the number of positive samples.
+            Defaults to -1 (no upper bound).
+ add_gt_as_proposals (bool, optional): Whether to add ground truth
+ boxes as proposals. Defaults to True.
+ """
+
+ def __init__(self,
+ num,
+ pos_fraction,
+ neg_pos_ub=-1,
+ add_gt_as_proposals=True,
+ **kwargs):
+ from mmdet.core.bbox import demodata
+ super(RandomSampler, self).__init__(num, pos_fraction, neg_pos_ub,
+ add_gt_as_proposals)
+ self.rng = demodata.ensure_rng(kwargs.get('rng', None))
+
+ def random_choice(self, gallery, num):
+ """Random select some elements from the gallery.
+
+ If `gallery` is a Tensor, the returned indices will be a Tensor;
+ If `gallery` is a ndarray or list, the returned indices will be a
+ ndarray.
+
+ Args:
+ gallery (Tensor | ndarray | list): indices pool.
+ num (int): expected sample num.
+
+ Returns:
+ Tensor or ndarray: sampled indices.
+ """
+ assert len(gallery) >= num
+
+ is_tensor = isinstance(gallery, torch.Tensor)
+ if not is_tensor:
+ if torch.cuda.is_available():
+ device = torch.cuda.current_device()
+ else:
+ device = 'cpu'
+ gallery = torch.tensor(gallery, dtype=torch.long, device=device)
+ perm = torch.randperm(gallery.numel(), device=gallery.device)[:num]
+ rand_inds = gallery[perm]
+ if not is_tensor:
+ rand_inds = rand_inds.cpu().numpy()
+ return rand_inds
+
+ def _sample_pos(self, assign_result, num_expected, **kwargs):
+ """Randomly sample some positive samples."""
+ pos_inds = torch.nonzero(assign_result.gt_inds > 0, as_tuple=False)
+ if pos_inds.numel() != 0:
+ pos_inds = pos_inds.squeeze(1)
+ if pos_inds.numel() <= num_expected:
+ return pos_inds
+ else:
+ return self.random_choice(pos_inds, num_expected)
+
+ def _sample_neg(self, assign_result, num_expected, **kwargs):
+ """Randomly sample some negative samples."""
+ neg_inds = torch.nonzero(assign_result.gt_inds == 0, as_tuple=False)
+ if neg_inds.numel() != 0:
+ neg_inds = neg_inds.squeeze(1)
+ if len(neg_inds) <= num_expected:
+ return neg_inds
+ else:
+ return self.random_choice(neg_inds, num_expected)
diff --git a/annotator/uniformer/mmdet/core/bbox/samplers/sampling_result.py b/annotator/uniformer/mmdet/core/bbox/samplers/sampling_result.py
new file mode 100644
index 0000000000000000000000000000000000000000..419a8e39a3c307a7cd9cfd0565a20037ded0d646
--- /dev/null
+++ b/annotator/uniformer/mmdet/core/bbox/samplers/sampling_result.py
@@ -0,0 +1,152 @@
+import torch
+
+from mmdet.utils import util_mixins
+
+
+class SamplingResult(util_mixins.NiceRepr):
+ """Bbox sampling result.
+
+ Example:
+ >>> # xdoctest: +IGNORE_WANT
+ >>> from mmdet.core.bbox.samplers.sampling_result import * # NOQA
+ >>> self = SamplingResult.random(rng=10)
+ >>> print(f'self = {self}')
+        self = <SamplingResult({...})>
+ """
+
+ def __init__(self, pos_inds, neg_inds, bboxes, gt_bboxes, assign_result,
+ gt_flags):
+ self.pos_inds = pos_inds
+ self.neg_inds = neg_inds
+ self.pos_bboxes = bboxes[pos_inds]
+ self.neg_bboxes = bboxes[neg_inds]
+ self.pos_is_gt = gt_flags[pos_inds]
+
+ self.num_gts = gt_bboxes.shape[0]
+ self.pos_assigned_gt_inds = assign_result.gt_inds[pos_inds] - 1
+
+ if gt_bboxes.numel() == 0:
+ # hack for index error case
+ assert self.pos_assigned_gt_inds.numel() == 0
+ self.pos_gt_bboxes = torch.empty_like(gt_bboxes).view(-1, 4)
+ else:
+ if len(gt_bboxes.shape) < 2:
+ gt_bboxes = gt_bboxes.view(-1, 4)
+
+ self.pos_gt_bboxes = gt_bboxes[self.pos_assigned_gt_inds, :]
+
+ if assign_result.labels is not None:
+ self.pos_gt_labels = assign_result.labels[pos_inds]
+ else:
+ self.pos_gt_labels = None
+
+ @property
+ def bboxes(self):
+ """torch.Tensor: concatenated positive and negative boxes"""
+ return torch.cat([self.pos_bboxes, self.neg_bboxes])
+
+ def to(self, device):
+ """Change the device of the data inplace.
+
+ Example:
+ >>> self = SamplingResult.random()
+ >>> print(f'self = {self.to(None)}')
+ >>> # xdoctest: +REQUIRES(--gpu)
+ >>> print(f'self = {self.to(0)}')
+ """
+ _dict = self.__dict__
+ for key, value in _dict.items():
+ if isinstance(value, torch.Tensor):
+ _dict[key] = value.to(device)
+ return self
+
+ def __nice__(self):
+ data = self.info.copy()
+ data['pos_bboxes'] = data.pop('pos_bboxes').shape
+ data['neg_bboxes'] = data.pop('neg_bboxes').shape
+ parts = [f"'{k}': {v!r}" for k, v in sorted(data.items())]
+ body = ' ' + ',\n '.join(parts)
+ return '{\n' + body + '\n}'
+
+ @property
+ def info(self):
+ """Returns a dictionary of info about the object."""
+ return {
+ 'pos_inds': self.pos_inds,
+ 'neg_inds': self.neg_inds,
+ 'pos_bboxes': self.pos_bboxes,
+ 'neg_bboxes': self.neg_bboxes,
+ 'pos_is_gt': self.pos_is_gt,
+ 'num_gts': self.num_gts,
+ 'pos_assigned_gt_inds': self.pos_assigned_gt_inds,
+ }
+
+ @classmethod
+ def random(cls, rng=None, **kwargs):
+ """
+ Args:
+ rng (None | int | numpy.random.RandomState): seed or state.
+ kwargs (keyword arguments):
+ - num_preds: number of predicted boxes
+ - num_gts: number of true boxes
+            - p_ignore (float): probability of a predicted box assigned to \
+ an ignored truth.
+ - p_assigned (float): probability of a predicted box not being \
+ assigned.
+ - p_use_label (float | bool): with labels or not.
+
+ Returns:
+ :obj:`SamplingResult`: Randomly generated sampling result.
+
+ Example:
+ >>> from mmdet.core.bbox.samplers.sampling_result import * # NOQA
+ >>> self = SamplingResult.random()
+ >>> print(self.__dict__)
+ """
+ from mmdet.core.bbox.samplers.random_sampler import RandomSampler
+ from mmdet.core.bbox.assigners.assign_result import AssignResult
+ from mmdet.core.bbox import demodata
+ rng = demodata.ensure_rng(rng)
+
+        # make probabilistic?
+ num = 32
+ pos_fraction = 0.5
+ neg_pos_ub = -1
+
+ assign_result = AssignResult.random(rng=rng, **kwargs)
+
+ # Note we could just compute an assignment
+ bboxes = demodata.random_boxes(assign_result.num_preds, rng=rng)
+ gt_bboxes = demodata.random_boxes(assign_result.num_gts, rng=rng)
+
+ if rng.rand() > 0.2:
+ # sometimes algorithms squeeze their data, be robust to that
+ gt_bboxes = gt_bboxes.squeeze()
+ bboxes = bboxes.squeeze()
+
+ if assign_result.labels is None:
+ gt_labels = None
+ else:
+ gt_labels = None # todo
+
+ if gt_labels is None:
+ add_gt_as_proposals = False
+ else:
+            add_gt_as_proposals = True  # make probabilistic?
+
+ sampler = RandomSampler(
+ num,
+ pos_fraction,
+ neg_pos_ub=neg_pos_ub,
+ add_gt_as_proposals=add_gt_as_proposals,
+ rng=rng)
+ self = sampler.sample(assign_result, bboxes, gt_bboxes, gt_labels)
+ return self
diff --git a/annotator/uniformer/mmdet/core/bbox/samplers/score_hlr_sampler.py b/annotator/uniformer/mmdet/core/bbox/samplers/score_hlr_sampler.py
new file mode 100644
index 0000000000000000000000000000000000000000..11d46b97705db60fb6a4eb5fa7da10ac78acb8bc
--- /dev/null
+++ b/annotator/uniformer/mmdet/core/bbox/samplers/score_hlr_sampler.py
@@ -0,0 +1,264 @@
+import torch
+from mmcv.ops import nms_match
+
+from ..builder import BBOX_SAMPLERS
+from ..transforms import bbox2roi
+from .base_sampler import BaseSampler
+from .sampling_result import SamplingResult
+
+
+@BBOX_SAMPLERS.register_module()
+class ScoreHLRSampler(BaseSampler):
+    r"""Importance-based Sample Reweighting (ISR_N), described in `Prime Sample
+    Attention in Object Detection <https://arxiv.org/abs/1904.04821>`_.
+
+    Score hierarchical local rank (HLR) differs from RandomSampler in the
+    negative part. It first computes the Score-HLR in a two-step way, then
+    linearly maps the Score-HLR to the loss weights.
+
+ Args:
+ num (int): Total number of sampled RoIs.
+ pos_fraction (float): Fraction of positive samples.
+ context (:class:`BaseRoIHead`): RoI head that the sampler belongs to.
+ neg_pos_ub (int): Upper bound of the ratio of num negative to num
+ positive, -1 means no upper bound.
+ add_gt_as_proposals (bool): Whether to add ground truth as proposals.
+ k (float): Power of the non-linear mapping.
+ bias (float): Shift of the non-linear mapping.
+        score_thr (float): Minimum score for a negative sample to be
+            considered as a valid bbox.
+        iou_thr (float): IoU threshold used by NMS-Match when grouping
+            valid negative samples.
+    """
+
+ def __init__(self,
+ num,
+ pos_fraction,
+ context,
+ neg_pos_ub=-1,
+ add_gt_as_proposals=True,
+ k=0.5,
+ bias=0,
+ score_thr=0.05,
+ iou_thr=0.5,
+ **kwargs):
+ super().__init__(num, pos_fraction, neg_pos_ub, add_gt_as_proposals)
+ self.k = k
+ self.bias = bias
+ self.score_thr = score_thr
+ self.iou_thr = iou_thr
+ self.context = context
+ # context of cascade detectors is a list, so distinguish them here.
+ if not hasattr(context, 'num_stages'):
+ self.bbox_roi_extractor = context.bbox_roi_extractor
+ self.bbox_head = context.bbox_head
+ self.with_shared_head = context.with_shared_head
+ if self.with_shared_head:
+ self.shared_head = context.shared_head
+ else:
+ self.bbox_roi_extractor = context.bbox_roi_extractor[
+ context.current_stage]
+ self.bbox_head = context.bbox_head[context.current_stage]
+
+ @staticmethod
+ def random_choice(gallery, num):
+ """Randomly select some elements from the gallery.
+
+ If `gallery` is a Tensor, the returned indices will be a Tensor;
+ If `gallery` is a ndarray or list, the returned indices will be a
+ ndarray.
+
+ Args:
+ gallery (Tensor | ndarray | list): indices pool.
+ num (int): expected sample num.
+
+ Returns:
+ Tensor or ndarray: sampled indices.
+ """
+ assert len(gallery) >= num
+
+ is_tensor = isinstance(gallery, torch.Tensor)
+ if not is_tensor:
+ if torch.cuda.is_available():
+ device = torch.cuda.current_device()
+ else:
+ device = 'cpu'
+ gallery = torch.tensor(gallery, dtype=torch.long, device=device)
+ perm = torch.randperm(gallery.numel(), device=gallery.device)[:num]
+ rand_inds = gallery[perm]
+ if not is_tensor:
+ rand_inds = rand_inds.cpu().numpy()
+ return rand_inds
+
+ def _sample_pos(self, assign_result, num_expected, **kwargs):
+ """Randomly sample some positive samples."""
+ pos_inds = torch.nonzero(assign_result.gt_inds > 0).flatten()
+ if pos_inds.numel() <= num_expected:
+ return pos_inds
+ else:
+ return self.random_choice(pos_inds, num_expected)
+
+ def _sample_neg(self,
+ assign_result,
+ num_expected,
+ bboxes,
+ feats=None,
+ img_meta=None,
+ **kwargs):
+ """Sample negative samples.
+
+        Score-HLR sampling is done in the following steps:
+        1. Take the maximum positive score prediction of each negative sample
+            as s_i.
+        2. Filter out negative samples whose s_i <= score_thr; the remaining
+            samples are called valid samples.
+        3. Use NMS-Match to divide valid samples into different groups;
+            samples in the same group greatly overlap with each other.
+        4. Rank the matched samples in two steps to get the Score-HLR:
+            (1) within the same group, rank samples by their scores;
+            (2) across groups, rank samples with the same in-group rank by
+                their scores again.
+        5. Linearly map the Score-HLR to the final label weights.
+
+ Args:
+ assign_result (:obj:`AssignResult`): result of assigner.
+ num_expected (int): Expected number of samples.
+            bboxes (Tensor): bboxes to be sampled from.
+            feats (Tensor): Features coming from the FPN.
+ img_meta (dict): Meta information dictionary.
+ """
+ neg_inds = torch.nonzero(assign_result.gt_inds == 0).flatten()
+ num_neg = neg_inds.size(0)
+ if num_neg == 0:
+ return neg_inds, None
+ with torch.no_grad():
+ neg_bboxes = bboxes[neg_inds]
+ neg_rois = bbox2roi([neg_bboxes])
+ bbox_result = self.context._bbox_forward(feats, neg_rois)
+ cls_score, bbox_pred = bbox_result['cls_score'], bbox_result[
+ 'bbox_pred']
+
+ ori_loss = self.bbox_head.loss(
+ cls_score=cls_score,
+ bbox_pred=None,
+ rois=None,
+ labels=neg_inds.new_full((num_neg, ),
+ self.bbox_head.num_classes),
+ label_weights=cls_score.new_ones(num_neg),
+ bbox_targets=None,
+ bbox_weights=None,
+ reduction_override='none')['loss_cls']
+
+ # filter out samples with the max score lower than score_thr
+ max_score, argmax_score = cls_score.softmax(-1)[:, :-1].max(-1)
+ valid_inds = (max_score > self.score_thr).nonzero().view(-1)
+ invalid_inds = (max_score <= self.score_thr).nonzero().view(-1)
+ num_valid = valid_inds.size(0)
+ num_invalid = invalid_inds.size(0)
+
+ num_expected = min(num_neg, num_expected)
+ num_hlr = min(num_valid, num_expected)
+ num_rand = num_expected - num_hlr
+ if num_valid > 0:
+ valid_rois = neg_rois[valid_inds]
+ valid_max_score = max_score[valid_inds]
+ valid_argmax_score = argmax_score[valid_inds]
+ valid_bbox_pred = bbox_pred[valid_inds]
+
+ # valid_bbox_pred shape: [num_valid, #num_classes, 4]
+ valid_bbox_pred = valid_bbox_pred.view(
+ valid_bbox_pred.size(0), -1, 4)
+ selected_bbox_pred = valid_bbox_pred[range(num_valid),
+ valid_argmax_score]
+ pred_bboxes = self.bbox_head.bbox_coder.decode(
+ valid_rois[:, 1:], selected_bbox_pred)
+ pred_bboxes_with_score = torch.cat(
+ [pred_bboxes, valid_max_score[:, None]], -1)
+ group = nms_match(pred_bboxes_with_score, self.iou_thr)
+
+ # imp: importance
+ imp = cls_score.new_zeros(num_valid)
+ for g in group:
+ g_score = valid_max_score[g]
+                    # g_score is already sorted
+ rank = g_score.new_tensor(range(g_score.size(0)))
+ imp[g] = num_valid - rank + g_score
+ _, imp_rank_inds = imp.sort(descending=True)
+ _, imp_rank = imp_rank_inds.sort()
+ hlr_inds = imp_rank_inds[:num_expected]
+
+ if num_rand > 0:
+ rand_inds = torch.randperm(num_invalid)[:num_rand]
+ select_inds = torch.cat(
+ [valid_inds[hlr_inds], invalid_inds[rand_inds]])
+ else:
+ select_inds = valid_inds[hlr_inds]
+
+ neg_label_weights = cls_score.new_ones(num_expected)
+
+ up_bound = max(num_expected, num_valid)
+ imp_weights = (up_bound -
+ imp_rank[hlr_inds].float()) / up_bound
+ neg_label_weights[:num_hlr] = imp_weights
+ neg_label_weights[num_hlr:] = imp_weights.min()
+ neg_label_weights = (self.bias +
+ (1 - self.bias) * neg_label_weights).pow(
+ self.k)
+ ori_selected_loss = ori_loss[select_inds]
+ new_loss = ori_selected_loss * neg_label_weights
+ norm_ratio = ori_selected_loss.sum() / new_loss.sum()
+ neg_label_weights *= norm_ratio
+ else:
+ neg_label_weights = cls_score.new_ones(num_expected)
+ select_inds = torch.randperm(num_neg)[:num_expected]
+
+ return neg_inds[select_inds], neg_label_weights
+
+ def sample(self,
+ assign_result,
+ bboxes,
+ gt_bboxes,
+ gt_labels=None,
+ img_meta=None,
+ **kwargs):
+ """Sample positive and negative bboxes.
+
+ This is a simple implementation of bbox sampling given candidates,
+ assigning results and ground truth bboxes.
+
+ Args:
+ assign_result (:obj:`AssignResult`): Bbox assigning results.
+ bboxes (Tensor): Boxes to be sampled from.
+ gt_bboxes (Tensor): Ground truth bboxes.
+ gt_labels (Tensor, optional): Class labels of ground truth bboxes.
+
+ Returns:
+            tuple[:obj:`SamplingResult`, Tensor]: Sampling result and negative
+ label weights.
+ """
+ bboxes = bboxes[:, :4]
+
+ gt_flags = bboxes.new_zeros((bboxes.shape[0], ), dtype=torch.uint8)
+ if self.add_gt_as_proposals:
+ bboxes = torch.cat([gt_bboxes, bboxes], dim=0)
+ assign_result.add_gt_(gt_labels)
+ gt_ones = bboxes.new_ones(gt_bboxes.shape[0], dtype=torch.uint8)
+ gt_flags = torch.cat([gt_ones, gt_flags])
+
+ num_expected_pos = int(self.num * self.pos_fraction)
+ pos_inds = self.pos_sampler._sample_pos(
+ assign_result, num_expected_pos, bboxes=bboxes, **kwargs)
+ num_sampled_pos = pos_inds.numel()
+ num_expected_neg = self.num - num_sampled_pos
+ if self.neg_pos_ub >= 0:
+ _pos = max(1, num_sampled_pos)
+ neg_upper_bound = int(self.neg_pos_ub * _pos)
+ if num_expected_neg > neg_upper_bound:
+ num_expected_neg = neg_upper_bound
+ neg_inds, neg_label_weights = self.neg_sampler._sample_neg(
+ assign_result,
+ num_expected_neg,
+ bboxes,
+ img_meta=img_meta,
+ **kwargs)
+
+ return SamplingResult(pos_inds, neg_inds, bboxes, gt_bboxes,
+ assign_result, gt_flags), neg_label_weights
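+
+# Worked example (added comment, not in upstream mmdet): with bias=0 and k=0.5,
+# a valid negative whose Score-HLR rank is r (out of up_bound candidates) gets
+# label weight ((up_bound - r) / up_bound) ** 0.5, so top-ranked (hardest)
+# negatives keep a weight close to 1 while low-ranked ones are down-weighted;
+# the weights are then rescaled by norm_ratio so the total classification loss
+# is unchanged.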
diff --git a/annotator/uniformer/mmdet/core/bbox/transforms.py b/annotator/uniformer/mmdet/core/bbox/transforms.py
new file mode 100644
index 0000000000000000000000000000000000000000..df55b0a496516bf7373fe96cf746c561dd713c3b
--- /dev/null
+++ b/annotator/uniformer/mmdet/core/bbox/transforms.py
@@ -0,0 +1,240 @@
+import numpy as np
+import torch
+
+
+def bbox_flip(bboxes, img_shape, direction='horizontal'):
+ """Flip bboxes horizontally or vertically.
+
+ Args:
+ bboxes (Tensor): Shape (..., 4*k)
+ img_shape (tuple): Image shape.
+ direction (str): Flip direction, options are "horizontal", "vertical",
+ "diagonal". Default: "horizontal"
+
+ Returns:
+ Tensor: Flipped bboxes.
+ """
+ assert bboxes.shape[-1] % 4 == 0
+ assert direction in ['horizontal', 'vertical', 'diagonal']
+ flipped = bboxes.clone()
+ if direction == 'horizontal':
+ flipped[..., 0::4] = img_shape[1] - bboxes[..., 2::4]
+ flipped[..., 2::4] = img_shape[1] - bboxes[..., 0::4]
+ elif direction == 'vertical':
+ flipped[..., 1::4] = img_shape[0] - bboxes[..., 3::4]
+ flipped[..., 3::4] = img_shape[0] - bboxes[..., 1::4]
+ else:
+ flipped[..., 0::4] = img_shape[1] - bboxes[..., 2::4]
+ flipped[..., 1::4] = img_shape[0] - bboxes[..., 3::4]
+ flipped[..., 2::4] = img_shape[1] - bboxes[..., 0::4]
+ flipped[..., 3::4] = img_shape[0] - bboxes[..., 1::4]
+ return flipped
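+
+# Worked example (added comment, not in upstream mmdet): flipping
+# (x1, y1, x2, y2) = (10, 20, 50, 60) horizontally in an image with
+# img_shape = (100, 200), i.e. W = 200, gives
+# (200 - 50, 20, 200 - 10, 60) = (150, 20, 190, 60).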
+
+
+def bbox_mapping(bboxes,
+ img_shape,
+ scale_factor,
+ flip,
+ flip_direction='horizontal'):
+ """Map bboxes from the original image scale to testing scale."""
+ new_bboxes = bboxes * bboxes.new_tensor(scale_factor)
+ if flip:
+ new_bboxes = bbox_flip(new_bboxes, img_shape, flip_direction)
+ return new_bboxes
+
+
+def bbox_mapping_back(bboxes,
+ img_shape,
+ scale_factor,
+ flip,
+ flip_direction='horizontal'):
+ """Map bboxes from testing scale to original image scale."""
+ new_bboxes = bbox_flip(bboxes, img_shape,
+ flip_direction) if flip else bboxes
+ new_bboxes = new_bboxes.view(-1, 4) / new_bboxes.new_tensor(scale_factor)
+ return new_bboxes.view(bboxes.shape)
+
+
+def bbox2roi(bbox_list):
+ """Convert a list of bboxes to roi format.
+
+ Args:
+ bbox_list (list[Tensor]): a list of bboxes corresponding to a batch
+ of images.
+
+ Returns:
+ Tensor: shape (n, 5), [batch_ind, x1, y1, x2, y2]
+ """
+ rois_list = []
+ for img_id, bboxes in enumerate(bbox_list):
+ if bboxes.size(0) > 0:
+ img_inds = bboxes.new_full((bboxes.size(0), 1), img_id)
+ rois = torch.cat([img_inds, bboxes[:, :4]], dim=-1)
+ else:
+ rois = bboxes.new_zeros((0, 5))
+ rois_list.append(rois)
+ rois = torch.cat(rois_list, 0)
+ return rois
+
+
+def roi2bbox(rois):
+ """Convert rois to bounding box format.
+
+ Args:
+ rois (torch.Tensor): RoIs with the shape (n, 5) where the first
+ column indicates batch id of each RoI.
+
+ Returns:
+ list[torch.Tensor]: Converted boxes of corresponding rois.
+ """
+ bbox_list = []
+ img_ids = torch.unique(rois[:, 0].cpu(), sorted=True)
+ for img_id in img_ids:
+ inds = (rois[:, 0] == img_id.item())
+ bbox = rois[inds, 1:]
+ bbox_list.append(bbox)
+ return bbox_list
+
+
+def bbox2result(bboxes, labels, num_classes):
+ """Convert detection results to a list of numpy arrays.
+
+ Args:
+ bboxes (torch.Tensor | np.ndarray): shape (n, 5)
+ labels (torch.Tensor | np.ndarray): shape (n, )
+ num_classes (int): class number, including background class
+
+ Returns:
+ list(ndarray): bbox results of each class
+ """
+ if bboxes.shape[0] == 0:
+ return [np.zeros((0, 5), dtype=np.float32) for i in range(num_classes)]
+ else:
+ if isinstance(bboxes, torch.Tensor):
+ bboxes = bboxes.detach().cpu().numpy()
+ labels = labels.detach().cpu().numpy()
+ return [bboxes[labels == i, :] for i in range(num_classes)]
+
+
+def distance2bbox(points, distance, max_shape=None):
+ """Decode distance prediction to bounding box.
+
+ Args:
+ points (Tensor): Shape (B, N, 2) or (N, 2).
+ distance (Tensor): Distance from the given point to 4
+ boundaries (left, top, right, bottom). Shape (B, N, 4) or (N, 4)
+        max_shape (Sequence[int] or torch.Tensor or Sequence[
+            Sequence[int]], optional): Maximum bounds for boxes, specifies
+            (H, W, C) or (H, W). If points shape is (B, N, 2), then
+            the max_shape should be a Sequence[Sequence[int]]
+            and the length of max_shape should also be B.
+
+ Returns:
+ Tensor: Boxes with shape (N, 4) or (B, N, 4)
+ """
+ x1 = points[..., 0] - distance[..., 0]
+ y1 = points[..., 1] - distance[..., 1]
+ x2 = points[..., 0] + distance[..., 2]
+ y2 = points[..., 1] + distance[..., 3]
+
+ bboxes = torch.stack([x1, y1, x2, y2], -1)
+
+ if max_shape is not None:
+ if not isinstance(max_shape, torch.Tensor):
+ max_shape = x1.new_tensor(max_shape)
+ max_shape = max_shape[..., :2].type_as(x1)
+ if max_shape.ndim == 2:
+ assert bboxes.ndim == 3
+ assert max_shape.size(0) == bboxes.size(0)
+
+ min_xy = x1.new_tensor(0)
+ max_xy = torch.cat([max_shape, max_shape],
+ dim=-1).flip(-1).unsqueeze(-2)
+ bboxes = torch.where(bboxes < min_xy, min_xy, bboxes)
+ bboxes = torch.where(bboxes > max_xy, max_xy, bboxes)
+
+ return bboxes
+
+
+def bbox2distance(points, bbox, max_dis=None, eps=0.1):
+ """Decode bounding box based on distances.
+
+ Args:
+ points (Tensor): Shape (n, 2), [x, y].
+ bbox (Tensor): Shape (n, 4), "xyxy" format
+ max_dis (float): Upper bound of the distance.
+ eps (float): a small value to ensure target < max_dis, instead <=
+
+ Returns:
+ Tensor: Decoded distances.
+ """
+ left = points[:, 0] - bbox[:, 0]
+ top = points[:, 1] - bbox[:, 1]
+ right = bbox[:, 2] - points[:, 0]
+ bottom = bbox[:, 3] - points[:, 1]
+ if max_dis is not None:
+ left = left.clamp(min=0, max=max_dis - eps)
+ top = top.clamp(min=0, max=max_dis - eps)
+ right = right.clamp(min=0, max=max_dis - eps)
+ bottom = bottom.clamp(min=0, max=max_dis - eps)
+ return torch.stack([left, top, right, bottom], -1)
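+
+
+# A minimal round-trip sketch (not part of the original mmdet source):
+# `bbox2distance` encodes boxes as point-to-boundary distances and
+# `distance2bbox` decodes them again (exactly, as long as the points lie
+# inside their boxes and no clamping is applied).
+def _demo_distance_roundtrip():  # pragma: no cover - documentation only
+ points = torch.tensor([[8., 8.], [20., 20.]])
+ bboxes = torch.tensor([[4., 6., 12., 10.], [16., 14., 24., 26.]])
+ distances = bbox2distance(points, bboxes)  # (left, top, right, bottom)
+ decoded = distance2bbox(points, distances)  # back to "xyxy"
+ assert torch.allclose(decoded, bboxes)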
+
+
+def bbox_rescale(bboxes, scale_factor=1.0):
+ """Rescale bounding box w.r.t. scale_factor.
+
+ Args:
+ bboxes (Tensor): Shape (n, 4) for bboxes or (n, 5) for rois
+ scale_factor (float): rescale factor
+
+ Returns:
+ Tensor: Rescaled bboxes.
+ """
+ if bboxes.size(1) == 5:
+ bboxes_ = bboxes[:, 1:]
+ inds_ = bboxes[:, 0]
+ else:
+ bboxes_ = bboxes
+ cx = (bboxes_[:, 0] + bboxes_[:, 2]) * 0.5
+ cy = (bboxes_[:, 1] + bboxes_[:, 3]) * 0.5
+ w = bboxes_[:, 2] - bboxes_[:, 0]
+ h = bboxes_[:, 3] - bboxes_[:, 1]
+ w = w * scale_factor
+ h = h * scale_factor
+ x1 = cx - 0.5 * w
+ x2 = cx + 0.5 * w
+ y1 = cy - 0.5 * h
+ y2 = cy + 0.5 * h
+ if bboxes.size(1) == 5:
+ rescaled_bboxes = torch.stack([inds_, x1, y1, x2, y2], dim=-1)
+ else:
+ rescaled_bboxes = torch.stack([x1, y1, x2, y2], dim=-1)
+ return rescaled_bboxes
+
+
+def bbox_cxcywh_to_xyxy(bbox):
+ """Convert bbox coordinates from (cx, cy, w, h) to (x1, y1, x2, y2).
+
+ Args:
+ bbox (Tensor): Shape (n, 4) for bboxes.
+
+ Returns:
+ Tensor: Converted bboxes.
+ """
+ cx, cy, w, h = bbox.split((1, 1, 1, 1), dim=-1)
+ bbox_new = [(cx - 0.5 * w), (cy - 0.5 * h), (cx + 0.5 * w), (cy + 0.5 * h)]
+ return torch.cat(bbox_new, dim=-1)
+
+
+def bbox_xyxy_to_cxcywh(bbox):
+ """Convert bbox coordinates from (x1, y1, x2, y2) to (cx, cy, w, h).
+
+ Args:
+ bbox (Tensor): Shape (n, 4) for bboxes.
+
+ Returns:
+ Tensor: Converted bboxes.
+ """
+ x1, y1, x2, y2 = bbox.split((1, 1, 1, 1), dim=-1)
+ bbox_new = [(x1 + x2) / 2, (y1 + y2) / 2, (x2 - x1), (y2 - y1)]
+ return torch.cat(bbox_new, dim=-1)
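+
+
+# A minimal sketch (not part of the original mmdet source): the two
+# conversions above are exact inverses of each other.
+def _demo_cxcywh_roundtrip():  # pragma: no cover - documentation only
+ boxes_xyxy = torch.tensor([[8., 7., 12., 13.]])
+ boxes_cxcywh = bbox_xyxy_to_cxcywh(boxes_xyxy)  # [[10., 10., 4., 6.]]
+ assert torch.allclose(bbox_cxcywh_to_xyxy(boxes_cxcywh), boxes_xyxy)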
diff --git a/annotator/uniformer/mmdet/core/evaluation/__init__.py b/annotator/uniformer/mmdet/core/evaluation/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..d11ef15b9db95166b4427ad4d08debbd0630a741
--- /dev/null
+++ b/annotator/uniformer/mmdet/core/evaluation/__init__.py
@@ -0,0 +1,15 @@
+from .class_names import (cityscapes_classes, coco_classes, dataset_aliases,
+ get_classes, imagenet_det_classes,
+ imagenet_vid_classes, voc_classes)
+from .eval_hooks import DistEvalHook, EvalHook
+from .mean_ap import average_precision, eval_map, print_map_summary
+from .recall import (eval_recalls, plot_iou_recall, plot_num_recall,
+ print_recall_summary)
+
+__all__ = [
+ 'voc_classes', 'imagenet_det_classes', 'imagenet_vid_classes',
+ 'coco_classes', 'cityscapes_classes', 'dataset_aliases', 'get_classes',
+ 'DistEvalHook', 'EvalHook', 'average_precision', 'eval_map',
+ 'print_map_summary', 'eval_recalls', 'print_recall_summary',
+ 'plot_num_recall', 'plot_iou_recall'
+]
diff --git a/annotator/uniformer/mmdet/core/evaluation/bbox_overlaps.py b/annotator/uniformer/mmdet/core/evaluation/bbox_overlaps.py
new file mode 100644
index 0000000000000000000000000000000000000000..93559ea0f25369d552a5365312fa32b9ffec9226
--- /dev/null
+++ b/annotator/uniformer/mmdet/core/evaluation/bbox_overlaps.py
@@ -0,0 +1,48 @@
+import numpy as np
+
+
+def bbox_overlaps(bboxes1, bboxes2, mode='iou', eps=1e-6):
+ """Calculate the ious between each bbox of bboxes1 and bboxes2.
+
+ Args:
+ bboxes1(ndarray): shape (n, 4)
+ bboxes2(ndarray): shape (k, 4)
+ mode(str): iou (intersection over union) or iof (intersection
+ over foreground)
+
+ Returns:
+ ious(ndarray): shape (n, k)
+ """
+
+ assert mode in ['iou', 'iof']
+
+ bboxes1 = bboxes1.astype(np.float32)
+ bboxes2 = bboxes2.astype(np.float32)
+ rows = bboxes1.shape[0]
+ cols = bboxes2.shape[0]
+ ious = np.zeros((rows, cols), dtype=np.float32)
+ if rows * cols == 0:
+ return ious
+ exchange = False
+ if bboxes1.shape[0] > bboxes2.shape[0]:
+ bboxes1, bboxes2 = bboxes2, bboxes1
+ ious = np.zeros((cols, rows), dtype=np.float32)
+ exchange = True
+ area1 = (bboxes1[:, 2] - bboxes1[:, 0]) * (bboxes1[:, 3] - bboxes1[:, 1])
+ area2 = (bboxes2[:, 2] - bboxes2[:, 0]) * (bboxes2[:, 3] - bboxes2[:, 1])
+ for i in range(bboxes1.shape[0]):
+ x_start = np.maximum(bboxes1[i, 0], bboxes2[:, 0])
+ y_start = np.maximum(bboxes1[i, 1], bboxes2[:, 1])
+ x_end = np.minimum(bboxes1[i, 2], bboxes2[:, 2])
+ y_end = np.minimum(bboxes1[i, 3], bboxes2[:, 3])
+ overlap = np.maximum(x_end - x_start, 0) * np.maximum(
+ y_end - y_start, 0)
+ if mode == 'iou':
+ union = area1[i] + area2 - overlap
+ else:
+ union = area1[i] if not exchange else area2
+ union = np.maximum(union, eps)
+ ious[i, :] = overlap / union
+ if exchange:
+ ious = ious.T
+ return ious
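+
+
+# A minimal sketch (not part of the original mmdet source): a box compared
+# against itself gives IoU 1.0 and against a disjoint box gives IoU 0.0.
+def _demo_bbox_overlaps():  # pragma: no cover - documentation only
+ bboxes1 = np.array([[0., 0., 10., 10.]], dtype=np.float32)
+ bboxes2 = np.array([[0., 0., 10., 10.], [20., 20., 30., 30.]],
+ dtype=np.float32)
+ ious = bbox_overlaps(bboxes1, bboxes2)  # shape (1, 2)
+ assert np.allclose(ious, [[1.0, 0.0]])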
diff --git a/annotator/uniformer/mmdet/core/evaluation/class_names.py b/annotator/uniformer/mmdet/core/evaluation/class_names.py
new file mode 100644
index 0000000000000000000000000000000000000000..c2487c2ee2d010c40db0e1c2b51c91b194e84dc7
--- /dev/null
+++ b/annotator/uniformer/mmdet/core/evaluation/class_names.py
@@ -0,0 +1,116 @@
+import mmcv
+
+
+def wider_face_classes():
+ return ['face']
+
+
+def voc_classes():
+ return [
+ 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat',
+ 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person',
+ 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor'
+ ]
+
+
+def imagenet_det_classes():
+ return [
+ 'accordion', 'airplane', 'ant', 'antelope', 'apple', 'armadillo',
+ 'artichoke', 'axe', 'baby_bed', 'backpack', 'bagel', 'balance_beam',
+ 'banana', 'band_aid', 'banjo', 'baseball', 'basketball', 'bathing_cap',
+ 'beaker', 'bear', 'bee', 'bell_pepper', 'bench', 'bicycle', 'binder',
+ 'bird', 'bookshelf', 'bow_tie', 'bow', 'bowl', 'brassiere', 'burrito',
+ 'bus', 'butterfly', 'camel', 'can_opener', 'car', 'cart', 'cattle',
+ 'cello', 'centipede', 'chain_saw', 'chair', 'chime', 'cocktail_shaker',
+ 'coffee_maker', 'computer_keyboard', 'computer_mouse', 'corkscrew',
+ 'cream', 'croquet_ball', 'crutch', 'cucumber', 'cup_or_mug', 'diaper',
+ 'digital_clock', 'dishwasher', 'dog', 'domestic_cat', 'dragonfly',
+ 'drum', 'dumbbell', 'electric_fan', 'elephant', 'face_powder', 'fig',
+ 'filing_cabinet', 'flower_pot', 'flute', 'fox', 'french_horn', 'frog',
+ 'frying_pan', 'giant_panda', 'goldfish', 'golf_ball', 'golfcart',
+ 'guacamole', 'guitar', 'hair_dryer', 'hair_spray', 'hamburger',
+ 'hammer', 'hamster', 'harmonica', 'harp', 'hat_with_a_wide_brim',
+ 'head_cabbage', 'helmet', 'hippopotamus', 'horizontal_bar', 'horse',
+ 'hotdog', 'iPod', 'isopod', 'jellyfish', 'koala_bear', 'ladle',
+ 'ladybug', 'lamp', 'laptop', 'lemon', 'lion', 'lipstick', 'lizard',
+ 'lobster', 'maillot', 'maraca', 'microphone', 'microwave', 'milk_can',
+ 'miniskirt', 'monkey', 'motorcycle', 'mushroom', 'nail', 'neck_brace',
+ 'oboe', 'orange', 'otter', 'pencil_box', 'pencil_sharpener', 'perfume',
+ 'person', 'piano', 'pineapple', 'ping-pong_ball', 'pitcher', 'pizza',
+ 'plastic_bag', 'plate_rack', 'pomegranate', 'popsicle', 'porcupine',
+ 'power_drill', 'pretzel', 'printer', 'puck', 'punching_bag', 'purse',
+ 'rabbit', 'racket', 'ray', 'red_panda', 'refrigerator',
+ 'remote_control', 'rubber_eraser', 'rugby_ball', 'ruler',
+ 'salt_or_pepper_shaker', 'saxophone', 'scorpion', 'screwdriver',
+ 'seal', 'sheep', 'ski', 'skunk', 'snail', 'snake', 'snowmobile',
+ 'snowplow', 'soap_dispenser', 'soccer_ball', 'sofa', 'spatula',
+ 'squirrel', 'starfish', 'stethoscope', 'stove', 'strainer',
+ 'strawberry', 'stretcher', 'sunglasses', 'swimming_trunks', 'swine',
+ 'syringe', 'table', 'tape_player', 'tennis_ball', 'tick', 'tie',
+ 'tiger', 'toaster', 'traffic_light', 'train', 'trombone', 'trumpet',
+ 'turtle', 'tv_or_monitor', 'unicycle', 'vacuum', 'violin',
+ 'volleyball', 'waffle_iron', 'washer', 'water_bottle', 'watercraft',
+ 'whale', 'wine_bottle', 'zebra'
+ ]
+
+
+def imagenet_vid_classes():
+ return [
+ 'airplane', 'antelope', 'bear', 'bicycle', 'bird', 'bus', 'car',
+ 'cattle', 'dog', 'domestic_cat', 'elephant', 'fox', 'giant_panda',
+ 'hamster', 'horse', 'lion', 'lizard', 'monkey', 'motorcycle', 'rabbit',
+ 'red_panda', 'sheep', 'snake', 'squirrel', 'tiger', 'train', 'turtle',
+ 'watercraft', 'whale', 'zebra'
+ ]
+
+
+def coco_classes():
+ return [
+ 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
+ 'truck', 'boat', 'traffic_light', 'fire_hydrant', 'stop_sign',
+ 'parking_meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep',
+ 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella',
+ 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard',
+ 'sports_ball', 'kite', 'baseball_bat', 'baseball_glove', 'skateboard',
+ 'surfboard', 'tennis_racket', 'bottle', 'wine_glass', 'cup', 'fork',
+ 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange',
+ 'broccoli', 'carrot', 'hot_dog', 'pizza', 'donut', 'cake', 'chair',
+ 'couch', 'potted_plant', 'bed', 'dining_table', 'toilet', 'tv',
+ 'laptop', 'mouse', 'remote', 'keyboard', 'cell_phone', 'microwave',
+ 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',
+ 'scissors', 'teddy_bear', 'hair_drier', 'toothbrush'
+ ]
+
+
+def cityscapes_classes():
+ return [
+ 'person', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle',
+ 'bicycle'
+ ]
+
+
+dataset_aliases = {
+ 'voc': ['voc', 'pascal_voc', 'voc07', 'voc12'],
+ 'imagenet_det': ['det', 'imagenet_det', 'ilsvrc_det'],
+ 'imagenet_vid': ['vid', 'imagenet_vid', 'ilsvrc_vid'],
+ 'coco': ['coco', 'mscoco', 'ms_coco'],
+ 'wider_face': ['WIDERFaceDataset', 'wider_face', 'WIDERFace'],
+ 'cityscapes': ['cityscapes']
+}
+
+
+def get_classes(dataset):
+ """Get class names of a dataset."""
+ alias2name = {}
+ for name, aliases in dataset_aliases.items():
+ for alias in aliases:
+ alias2name[alias] = name
+
+ if mmcv.is_str(dataset):
+ if dataset in alias2name:
+ labels = eval(alias2name[dataset] + '_classes()')
+ else:
+ raise ValueError(f'Unrecognized dataset: {dataset}')
+ else:
+ raise TypeError(f'dataset must be a str, but got {type(dataset)}')
+ return labels
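+
+
+# A minimal sketch (not part of the original mmdet source): the aliases in
+# `dataset_aliases` resolve to the same class lists.
+def _demo_get_classes():  # pragma: no cover - documentation only
+ assert len(get_classes('coco')) == 80
+ assert get_classes('mscoco') == get_classes('coco')
+ assert get_classes('voc07') == voc_classes()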
diff --git a/annotator/uniformer/mmdet/core/evaluation/eval_hooks.py b/annotator/uniformer/mmdet/core/evaluation/eval_hooks.py
new file mode 100644
index 0000000000000000000000000000000000000000..6fb932eae1ccb23a2b687a05a6cb9525200de718
--- /dev/null
+++ b/annotator/uniformer/mmdet/core/evaluation/eval_hooks.py
@@ -0,0 +1,303 @@
+import os.path as osp
+import warnings
+from math import inf
+
+import mmcv
+import torch.distributed as dist
+from mmcv.runner import Hook
+from torch.nn.modules.batchnorm import _BatchNorm
+from torch.utils.data import DataLoader
+
+from mmdet.utils import get_root_logger
+
+
+class EvalHook(Hook):
+ """Evaluation hook.
+
+ Notes:
+ If new arguments are added for EvalHook, tools/test.py and
+ tools/analysis_tools/eval_metric.py may be affected.
+
+ Attributes:
+ dataloader (DataLoader): A PyTorch dataloader.
+ start (int, optional): Evaluation starting epoch. It enables evaluation
+ before the training starts if ``start`` <= the resuming epoch.
+ If None, whether to evaluate is merely decided by ``interval``.
+ Default: None.
+ interval (int): Evaluation interval (by epochs). Default: 1.
+ save_best (str, optional): If a metric is specified, it would measure
+ the best checkpoint during evaluation. The information about the best
+ checkpoint would be saved in best.json.
+ Options are the evaluation metrics to the test dataset. e.g.,
+ ``bbox_mAP``, ``segm_mAP`` for bbox detection and instance
+ segmentation. ``AR@100`` for proposal recall. If ``save_best`` is
+ ``auto``, the first key will be used. The interval of
+ ``CheckpointHook`` should divide that of ``EvalHook``. Default: None.
+ rule (str, optional): Comparison rule for best score. If set to None,
+ it will infer a reasonable rule. Keys such as 'mAP' or 'AR' will be
+ inferred by the 'greater' rule. Keys containing 'loss' will be
+ inferred by the 'less' rule. Options are 'greater', 'less'. Default: None.
+ **eval_kwargs: Evaluation arguments fed into the evaluate function of
+ the dataset.
+ """
+
+ rule_map = {'greater': lambda x, y: x > y, 'less': lambda x, y: x < y}
+ init_value_map = {'greater': -inf, 'less': inf}
+ greater_keys = ['mAP', 'AR']
+ less_keys = ['loss']
+
+ def __init__(self,
+ dataloader,
+ start=None,
+ interval=1,
+ by_epoch=True,
+ save_best=None,
+ rule=None,
+ **eval_kwargs):
+ if not isinstance(dataloader, DataLoader):
+ raise TypeError('dataloader must be a pytorch DataLoader, but got'
+ f' {type(dataloader)}')
+ if not interval > 0:
+ raise ValueError(f'interval must be positive, but got {interval}')
+ if start is not None and start < 0:
+ warnings.warn(
+ f'The evaluation start epoch {start} is smaller than 0, '
+ f'use 0 instead', UserWarning)
+ start = 0
+ self.dataloader = dataloader
+ self.interval = interval
+ self.by_epoch = by_epoch
+ self.start = start
+ assert isinstance(save_best, str) or save_best is None
+ self.save_best = save_best
+ self.eval_kwargs = eval_kwargs
+ self.initial_epoch_flag = True
+
+ self.logger = get_root_logger()
+
+ if self.save_best is not None:
+ self._init_rule(rule, self.save_best)
+
+ def _init_rule(self, rule, key_indicator):
+ """Initialize rule, key_indicator, comparison_func, and best score.
+
+ Args:
+ rule (str | None): Comparison rule for best score.
+ key_indicator (str | None): Key indicator to determine the
+ comparison rule.
+ """
+ if rule not in self.rule_map and rule is not None:
+ raise KeyError(f'rule must be greater, less or None, '
+ f'but got {rule}.')
+
+ if rule is None:
+ if key_indicator != 'auto':
+ if any(key in key_indicator for key in self.greater_keys):
+ rule = 'greater'
+ elif any(key in key_indicator for key in self.less_keys):
+ rule = 'less'
+ else:
+ raise ValueError(f'Cannot infer the rule for key '
+ f'{key_indicator}, thus a specific rule '
+ f'must be specified.')
+ self.rule = rule
+ self.key_indicator = key_indicator
+ if self.rule is not None:
+ self.compare_func = self.rule_map[self.rule]
+
+ def before_run(self, runner):
+ if self.save_best is not None:
+ if runner.meta is None:
+ warnings.warn('runner.meta is None. Creating an empty one.')
+ runner.meta = dict()
+ runner.meta.setdefault('hook_msgs', dict())
+
+ def before_train_epoch(self, runner):
+ """Evaluate the model only at the start of training."""
+ if not self.initial_epoch_flag:
+ return
+ if self.start is not None and runner.epoch >= self.start:
+ self.after_train_epoch(runner)
+ self.initial_epoch_flag = False
+
+ def evaluation_flag(self, runner):
+ """Judge whether to perform_evaluation after this epoch.
+
+ Returns:
+ bool: The flag indicating whether to perform evaluation.
+ """
+ if self.start is None:
+ if not self.every_n_epochs(runner, self.interval):
+ # No evaluation during the interval epochs.
+ return False
+ elif (runner.epoch + 1) < self.start:
+ # No evaluation if start is larger than the current epoch.
+ return False
+ else:
+ # Evaluation only at epochs 3, 5, 7... if start==3 and interval==2
+ if (runner.epoch + 1 - self.start) % self.interval:
+ return False
+ return True
+
+ def after_train_epoch(self, runner):
+ if not self.by_epoch or not self.evaluation_flag(runner):
+ return
+ from mmdet.apis import single_gpu_test
+ results = single_gpu_test(runner.model, self.dataloader, show=False)
+ key_score = self.evaluate(runner, results)
+ if self.save_best:
+ self.save_best_checkpoint(runner, key_score)
+
+ def after_train_iter(self, runner):
+ if self.by_epoch or not self.every_n_iters(runner, self.interval):
+ return
+ from mmdet.apis import single_gpu_test
+ results = single_gpu_test(runner.model, self.dataloader, show=False)
+ key_score = self.evaluate(runner, results)
+ if self.save_best:
+ self.save_best_checkpoint(runner, key_score)
+
+ def save_best_checkpoint(self, runner, key_score):
+ best_score = runner.meta['hook_msgs'].get(
+ 'best_score', self.init_value_map[self.rule])
+ if self.compare_func(key_score, best_score):
+ best_score = key_score
+ runner.meta['hook_msgs']['best_score'] = best_score
+ last_ckpt = runner.meta['hook_msgs']['last_ckpt']
+ runner.meta['hook_msgs']['best_ckpt'] = last_ckpt
+ mmcv.symlink(
+ last_ckpt,
+ osp.join(runner.work_dir, f'best_{self.key_indicator}.pth'))
+ time_stamp = runner.epoch + 1 if self.by_epoch else runner.iter + 1
+ self.logger.info(f'Now best checkpoint is epoch_{time_stamp}.pth. '
+ f'Best {self.key_indicator} is {best_score:0.4f}')
+
+ def evaluate(self, runner, results):
+ eval_res = self.dataloader.dataset.evaluate(
+ results, logger=runner.logger, **self.eval_kwargs)
+ for name, val in eval_res.items():
+ runner.log_buffer.output[name] = val
+ runner.log_buffer.ready = True
+ if self.save_best is not None:
+ if self.key_indicator == 'auto':
+ # infer from eval_results
+ self._init_rule(self.rule, list(eval_res.keys())[0])
+ return eval_res[self.key_indicator]
+ else:
+ return None
+
+
+class DistEvalHook(EvalHook):
+ """Distributed evaluation hook.
+
+ Notes:
+ If new arguments are added, tools/test.py may be affected.
+
+ Attributes:
+ dataloader (DataLoader): A PyTorch dataloader.
+ start (int, optional): Evaluation starting epoch. It enables evaluation
+ before the training starts if ``start`` <= the resuming epoch.
+ If None, whether to evaluate is merely decided by ``interval``.
+ Default: None.
+ interval (int): Evaluation interval (by epochs). Default: 1.
+ tmpdir (str | None): Temporary directory to save the results of all
+ processes. Default: None.
+ gpu_collect (bool): Whether to use gpu or cpu to collect results.
+ Default: False.
+ save_best (str, optional): If a metric is specified, it would measure
+ the best checkpoint during evaluation. The information about the best
+ checkpoint would be saved in best.json.
+ Options are the evaluation metrics to the test dataset. e.g.,
+ ``bbox_mAP``, ``segm_mAP`` for bbox detection and instance
+ segmentation. ``AR@100`` for proposal recall. If ``save_best`` is
+ ``auto``, the first key will be used. The interval of
+ ``CheckpointHook`` should divide that of ``EvalHook``. Default: None.
+ rule (str | None): Comparison rule for best score. If set to None,
+ it will infer a reasonable rule. Default: None.
+ broadcast_bn_buffer (bool): Whether to broadcast the
+ buffer(running_mean and running_var) of rank 0 to other rank
+ before evaluation. Default: True.
+ **eval_kwargs: Evaluation arguments fed into the evaluate function of
+ the dataset.
+ """
+
+ def __init__(self,
+ dataloader,
+ start=None,
+ interval=1,
+ by_epoch=True,
+ tmpdir=None,
+ gpu_collect=False,
+ save_best=None,
+ rule=None,
+ broadcast_bn_buffer=True,
+ **eval_kwargs):
+ super().__init__(
+ dataloader,
+ start=start,
+ interval=interval,
+ by_epoch=by_epoch,
+ save_best=save_best,
+ rule=rule,
+ **eval_kwargs)
+ self.broadcast_bn_buffer = broadcast_bn_buffer
+ self.tmpdir = tmpdir
+ self.gpu_collect = gpu_collect
+
+ def _broadcast_bn_buffer(self, runner):
+ # Synchronization of BatchNorm's buffers (running_mean
+ # and running_var) is not supported in the DDP of PyTorch,
+ # which may cause inconsistent performance of models on
+ # different ranks, so we broadcast the BatchNorm buffers
+ # of rank 0 to the other ranks to avoid this.
+ if self.broadcast_bn_buffer:
+ model = runner.model
+ for name, module in model.named_modules():
+ if isinstance(module,
+ _BatchNorm) and module.track_running_stats:
+ dist.broadcast(module.running_var, 0)
+ dist.broadcast(module.running_mean, 0)
+
+ def after_train_epoch(self, runner):
+ if not self.by_epoch or not self.evaluation_flag(runner):
+ return
+
+ if self.broadcast_bn_buffer:
+ self._broadcast_bn_buffer(runner)
+
+ from mmdet.apis import multi_gpu_test
+ tmpdir = self.tmpdir
+ if tmpdir is None:
+ tmpdir = osp.join(runner.work_dir, '.eval_hook')
+ results = multi_gpu_test(
+ runner.model,
+ self.dataloader,
+ tmpdir=tmpdir,
+ gpu_collect=self.gpu_collect)
+ if runner.rank == 0:
+ print('\n')
+ key_score = self.evaluate(runner, results)
+ if self.save_best:
+ self.save_best_checkpoint(runner, key_score)
+
+ def after_train_iter(self, runner):
+ if self.by_epoch or not self.every_n_iters(runner, self.interval):
+ return
+
+ if self.broadcast_bn_buffer:
+ self._broadcast_bn_buffer(runner)
+
+ from mmdet.apis import multi_gpu_test
+ tmpdir = self.tmpdir
+ if tmpdir is None:
+ tmpdir = osp.join(runner.work_dir, '.eval_hook')
+ results = multi_gpu_test(
+ runner.model,
+ self.dataloader,
+ tmpdir=tmpdir,
+ gpu_collect=self.gpu_collect)
+ if runner.rank == 0:
+ print('\n')
+ key_score = self.evaluate(runner, results)
+ if self.save_best:
+ self.save_best_checkpoint(runner, key_score)
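+
+
+# A minimal usage sketch (not part of the original mmdet source): how a
+# training script would typically register these hooks on an mmcv runner.
+# `runner` and `val_dataloader` are assumed to already exist, and the
+# `save_best` metric name is only an example.
+def _demo_register_eval_hook(runner, val_dataloader, distributed=False):
+ # pragma: no cover - documentation only
+ hook_cls = DistEvalHook if distributed else EvalHook
+ eval_hook = hook_cls(val_dataloader, interval=1, save_best='bbox_mAP')
+ runner.register_hook(eval_hook)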
diff --git a/annotator/uniformer/mmdet/core/evaluation/mean_ap.py b/annotator/uniformer/mmdet/core/evaluation/mean_ap.py
new file mode 100644
index 0000000000000000000000000000000000000000..1d653a35497f6a0135c4374a09eb7c11399e3244
--- /dev/null
+++ b/annotator/uniformer/mmdet/core/evaluation/mean_ap.py
@@ -0,0 +1,469 @@
+from multiprocessing import Pool
+
+import mmcv
+import numpy as np
+from mmcv.utils import print_log
+from terminaltables import AsciiTable
+
+from .bbox_overlaps import bbox_overlaps
+from .class_names import get_classes
+
+
+def average_precision(recalls, precisions, mode='area'):
+ """Calculate average precision (for single or multiple scales).
+
+ Args:
+ recalls (ndarray): shape (num_scales, num_dets) or (num_dets, )
+ precisions (ndarray): shape (num_scales, num_dets) or (num_dets, )
+ mode (str): 'area' or '11points', 'area' means calculating the area
+ under precision-recall curve, '11points' means calculating
+ the average precision of recalls at [0, 0.1, ..., 1]
+
+ Returns:
+ float or ndarray: calculated average precision
+ """
+ no_scale = False
+ if recalls.ndim == 1:
+ no_scale = True
+ recalls = recalls[np.newaxis, :]
+ precisions = precisions[np.newaxis, :]
+ assert recalls.shape == precisions.shape and recalls.ndim == 2
+ num_scales = recalls.shape[0]
+ ap = np.zeros(num_scales, dtype=np.float32)
+ if mode == 'area':
+ zeros = np.zeros((num_scales, 1), dtype=recalls.dtype)
+ ones = np.ones((num_scales, 1), dtype=recalls.dtype)
+ mrec = np.hstack((zeros, recalls, ones))
+ mpre = np.hstack((zeros, precisions, zeros))
+ for i in range(mpre.shape[1] - 1, 0, -1):
+ mpre[:, i - 1] = np.maximum(mpre[:, i - 1], mpre[:, i])
+ for i in range(num_scales):
+ ind = np.where(mrec[i, 1:] != mrec[i, :-1])[0]
+ ap[i] = np.sum(
+ (mrec[i, ind + 1] - mrec[i, ind]) * mpre[i, ind + 1])
+ elif mode == '11points':
+ for i in range(num_scales):
+ for thr in np.arange(0, 1 + 1e-3, 0.1):
+ precs = precisions[i, recalls[i, :] >= thr]
+ prec = precs.max() if precs.size > 0 else 0
+ ap[i] += prec
+ ap /= 11
+ else:
+ raise ValueError(
+ 'Unrecognized mode, only "area" and "11points" are supported')
+ if no_scale:
+ ap = ap[0]
+ return ap
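+
+
+# A minimal sketch (not part of the original mmdet source): a detector whose
+# precision stays at 1.0 over all recall levels gets an AP of 1.0.
+def _demo_average_precision():  # pragma: no cover - documentation only
+ recalls = np.array([0.5, 1.0])
+ precisions = np.array([1.0, 1.0])
+ assert np.isclose(average_precision(recalls, precisions, mode='area'), 1.0)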
+
+
+def tpfp_imagenet(det_bboxes,
+ gt_bboxes,
+ gt_bboxes_ignore=None,
+ default_iou_thr=0.5,
+ area_ranges=None):
+ """Check if detected bboxes are true positive or false positive.
+
+ Args:
+ det_bboxes (ndarray): Detected bboxes of this image, of shape (m, 5).
+ gt_bboxes (ndarray): GT bboxes of this image, of shape (n, 4).
+ gt_bboxes_ignore (ndarray): Ignored gt bboxes of this image,
+ of shape (k, 4). Default: None
+ default_iou_thr (float): IoU threshold to be considered as matched for
+ medium and large bboxes (small ones have special rules).
+ Default: 0.5.
+ area_ranges (list[tuple] | None): Range of bbox areas to be evaluated,
+ in the format [(min1, max1), (min2, max2), ...]. Default: None.
+
+ Returns:
+ tuple[np.ndarray]: (tp, fp) whose elements are 0 and 1. The shape of
+ each array is (num_scales, m).
+ """
+ # an indicator of ignored gts
+ gt_ignore_inds = np.concatenate(
+ (np.zeros(gt_bboxes.shape[0], dtype=bool),
+ np.ones(gt_bboxes_ignore.shape[0], dtype=bool)))
+ # stack gt_bboxes and gt_bboxes_ignore for convenience
+ gt_bboxes = np.vstack((gt_bboxes, gt_bboxes_ignore))
+
+ num_dets = det_bboxes.shape[0]
+ num_gts = gt_bboxes.shape[0]
+ if area_ranges is None:
+ area_ranges = [(None, None)]
+ num_scales = len(area_ranges)
+ # tp and fp are of shape (num_scales, num_dets), each row is tp or fp
+ # of a certain scale.
+ tp = np.zeros((num_scales, num_dets), dtype=np.float32)
+ fp = np.zeros((num_scales, num_dets), dtype=np.float32)
+ if gt_bboxes.shape[0] == 0:
+ if area_ranges == [(None, None)]:
+ fp[...] = 1
+ else:
+ det_areas = (det_bboxes[:, 2] - det_bboxes[:, 0]) * (
+ det_bboxes[:, 3] - det_bboxes[:, 1])
+ for i, (min_area, max_area) in enumerate(area_ranges):
+ fp[i, (det_areas >= min_area) & (det_areas < max_area)] = 1
+ return tp, fp
+ ious = bbox_overlaps(det_bboxes, gt_bboxes - 1)
+ gt_w = gt_bboxes[:, 2] - gt_bboxes[:, 0]
+ gt_h = gt_bboxes[:, 3] - gt_bboxes[:, 1]
+ iou_thrs = np.minimum((gt_w * gt_h) / ((gt_w + 10.0) * (gt_h + 10.0)),
+ default_iou_thr)
+ # sort all detections by scores in descending order
+ sort_inds = np.argsort(-det_bboxes[:, -1])
+ for k, (min_area, max_area) in enumerate(area_ranges):
+ gt_covered = np.zeros(num_gts, dtype=bool)
+ # if no area range is specified, gt_area_ignore is all False
+ if min_area is None:
+ gt_area_ignore = np.zeros_like(gt_ignore_inds, dtype=bool)
+ else:
+ gt_areas = gt_w * gt_h
+ gt_area_ignore = (gt_areas < min_area) | (gt_areas >= max_area)
+ for i in sort_inds:
+ max_iou = -1
+ matched_gt = -1
+ # find best overlapped available gt
+ for j in range(num_gts):
+ # different from PASCAL VOC: allow finding other gts if the
+ # best overlapped ones are already matched by other det bboxes
+ if gt_covered[j]:
+ continue
+ elif ious[i, j] >= iou_thrs[j] and ious[i, j] > max_iou:
+ max_iou = ious[i, j]
+ matched_gt = j
+ # there are 4 cases for a det bbox:
+ # 1. it matches a gt, tp = 1, fp = 0
+ # 2. it matches an ignored gt, tp = 0, fp = 0
+ # 3. it matches no gt and within area range, tp = 0, fp = 1
+ # 4. it matches no gt but is beyond area range, tp = 0, fp = 0
+ if matched_gt >= 0:
+ gt_covered[matched_gt] = 1
+ if not (gt_ignore_inds[matched_gt]
+ or gt_area_ignore[matched_gt]):
+ tp[k, i] = 1
+ elif min_area is None:
+ fp[k, i] = 1
+ else:
+ bbox = det_bboxes[i, :4]
+ area = (bbox[2] - bbox[0]) * (bbox[3] - bbox[1])
+ if area >= min_area and area < max_area:
+ fp[k, i] = 1
+ return tp, fp
+
+
+def tpfp_default(det_bboxes,
+ gt_bboxes,
+ gt_bboxes_ignore=None,
+ iou_thr=0.5,
+ area_ranges=None):
+ """Check if detected bboxes are true positive or false positive.
+
+ Args:
+ det_bboxes (ndarray): Detected bboxes of this image, of shape (m, 5).
+ gt_bboxes (ndarray): GT bboxes of this image, of shape (n, 4).
+ gt_bboxes_ignore (ndarray): Ignored gt bboxes of this image,
+ of shape (k, 4). Default: None
+ iou_thr (float): IoU threshold to be considered as matched.
+ Default: 0.5.
+ area_ranges (list[tuple] | None): Range of bbox areas to be evaluated,
+ in the format [(min1, max1), (min2, max2), ...]. Default: None.
+
+ Returns:
+ tuple[np.ndarray]: (tp, fp) whose elements are 0 and 1. The shape of
+ each array is (num_scales, m).
+ """
+ # an indicator of ignored gts
+ gt_ignore_inds = np.concatenate(
+ (np.zeros(gt_bboxes.shape[0], dtype=bool),
+ np.ones(gt_bboxes_ignore.shape[0], dtype=bool)))
+ # stack gt_bboxes and gt_bboxes_ignore for convenience
+ gt_bboxes = np.vstack((gt_bboxes, gt_bboxes_ignore))
+
+ num_dets = det_bboxes.shape[0]
+ num_gts = gt_bboxes.shape[0]
+ if area_ranges is None:
+ area_ranges = [(None, None)]
+ num_scales = len(area_ranges)
+ # tp and fp are of shape (num_scales, num_dets), each row is tp or fp of
+ # a certain scale
+ tp = np.zeros((num_scales, num_dets), dtype=np.float32)
+ fp = np.zeros((num_scales, num_dets), dtype=np.float32)
+
+ # if there is no gt bboxes in this image, then all det bboxes
+ # within area range are false positives
+ if gt_bboxes.shape[0] == 0:
+ if area_ranges == [(None, None)]:
+ fp[...] = 1
+ else:
+ det_areas = (det_bboxes[:, 2] - det_bboxes[:, 0]) * (
+ det_bboxes[:, 3] - det_bboxes[:, 1])
+ for i, (min_area, max_area) in enumerate(area_ranges):
+ fp[i, (det_areas >= min_area) & (det_areas < max_area)] = 1
+ return tp, fp
+
+ ious = bbox_overlaps(det_bboxes, gt_bboxes)
+ # for each det, the max iou with all gts
+ ious_max = ious.max(axis=1)
+ # for each det, which gt overlaps most with it
+ ious_argmax = ious.argmax(axis=1)
+ # sort all dets in descending order by scores
+ sort_inds = np.argsort(-det_bboxes[:, -1])
+ for k, (min_area, max_area) in enumerate(area_ranges):
+ gt_covered = np.zeros(num_gts, dtype=bool)
+ # if no area range is specified, gt_area_ignore is all False
+ if min_area is None:
+ gt_area_ignore = np.zeros_like(gt_ignore_inds, dtype=bool)
+ else:
+ gt_areas = (gt_bboxes[:, 2] - gt_bboxes[:, 0]) * (
+ gt_bboxes[:, 3] - gt_bboxes[:, 1])
+ gt_area_ignore = (gt_areas < min_area) | (gt_areas >= max_area)
+ for i in sort_inds:
+ if ious_max[i] >= iou_thr:
+ matched_gt = ious_argmax[i]
+ if not (gt_ignore_inds[matched_gt]
+ or gt_area_ignore[matched_gt]):
+ if not gt_covered[matched_gt]:
+ gt_covered[matched_gt] = True
+ tp[k, i] = 1
+ else:
+ fp[k, i] = 1
+ # otherwise ignore this detected bbox, tp = 0, fp = 0
+ elif min_area is None:
+ fp[k, i] = 1
+ else:
+ bbox = det_bboxes[i, :4]
+ area = (bbox[2] - bbox[0]) * (bbox[3] - bbox[1])
+ if area >= min_area and area < max_area:
+ fp[k, i] = 1
+ return tp, fp
+
+
+def get_cls_results(det_results, annotations, class_id):
+ """Get det results and gt information of a certain class.
+
+ Args:
+ det_results (list[list]): Same as `eval_map()`.
+ annotations (list[dict]): Same as `eval_map()`.
+ class_id (int): ID of a specific class.
+
+ Returns:
+ tuple[list[np.ndarray]]: detected bboxes, gt bboxes, ignored gt bboxes
+ """
+ cls_dets = [img_res[class_id] for img_res in det_results]
+ cls_gts = []
+ cls_gts_ignore = []
+ for ann in annotations:
+ gt_inds = ann['labels'] == class_id
+ cls_gts.append(ann['bboxes'][gt_inds, :])
+
+ if ann.get('labels_ignore', None) is not None:
+ ignore_inds = ann['labels_ignore'] == class_id
+ cls_gts_ignore.append(ann['bboxes_ignore'][ignore_inds, :])
+ else:
+ cls_gts_ignore.append(np.empty((0, 4), dtype=np.float32))
+
+ return cls_dets, cls_gts, cls_gts_ignore
+
+
+def eval_map(det_results,
+ annotations,
+ scale_ranges=None,
+ iou_thr=0.5,
+ dataset=None,
+ logger=None,
+ tpfp_fn=None,
+ nproc=4):
+ """Evaluate mAP of a dataset.
+
+ Args:
+ det_results (list[list]): [[cls1_det, cls2_det, ...], ...].
+ The outer list indicates images, and the inner list indicates
+ per-class detected bboxes.
+ annotations (list[dict]): Ground truth annotations where each item of
+ the list indicates an image. Keys of annotations are:
+
+ - `bboxes`: numpy array of shape (n, 4)
+ - `labels`: numpy array of shape (n, )
+ - `bboxes_ignore` (optional): numpy array of shape (k, 4)
+ - `labels_ignore` (optional): numpy array of shape (k, )
+ scale_ranges (list[tuple] | None): Range of scales to be evaluated,
+ in the format [(min1, max1), (min2, max2), ...]. A range of
+ (32, 64) means the area range between (32**2, 64**2).
+ Default: None.
+ iou_thr (float): IoU threshold to be considered as matched.
+ Default: 0.5.
+ dataset (list[str] | str | None): Dataset name or dataset classes,
+ there are minor differences in metrics for different datasets, e.g.
+ "voc07", "imagenet_det", etc. Default: None.
+ logger (logging.Logger | str | None): The way to print the mAP
+ summary. See `mmcv.utils.print_log()` for details. Default: None.
+ tpfp_fn (callable | None): The function used to determine true/
+ false positives. If None, :func:`tpfp_default` is used as default
+ unless dataset is 'det' or 'vid' (:func:`tpfp_imagenet` in this
+ case). If it is given as a function, then this function is used
+ to evaluate tp & fp. Default None.
+ nproc (int): Processes used for computing TP and FP.
+ Default: 4.
+
+ Returns:
+ tuple: (mAP, [dict, dict, ...])
+ """
+ assert len(det_results) == len(annotations)
+
+ num_imgs = len(det_results)
+ num_scales = len(scale_ranges) if scale_ranges is not None else 1
+ num_classes = len(det_results[0]) # positive class num
+ area_ranges = ([(rg[0]**2, rg[1]**2) for rg in scale_ranges]
+ if scale_ranges is not None else None)
+
+ pool = Pool(nproc)
+ eval_results = []
+ for i in range(num_classes):
+ # get gt and det bboxes of this class
+ cls_dets, cls_gts, cls_gts_ignore = get_cls_results(
+ det_results, annotations, i)
+ # choose proper function according to datasets to compute tp and fp
+ if tpfp_fn is None:
+ if dataset in ['det', 'vid']:
+ tpfp_fn = tpfp_imagenet
+ else:
+ tpfp_fn = tpfp_default
+ if not callable(tpfp_fn):
+ raise ValueError(
+ f'tpfp_fn has to be a function or None, but got {tpfp_fn}')
+
+ # compute tp and fp for each image with multiple processes
+ tpfp = pool.starmap(
+ tpfp_fn,
+ zip(cls_dets, cls_gts, cls_gts_ignore,
+ [iou_thr for _ in range(num_imgs)],
+ [area_ranges for _ in range(num_imgs)]))
+ tp, fp = tuple(zip(*tpfp))
+ # calculate gt number of each scale
+ # ignored gts or gts beyond the specific scale are not counted
+ num_gts = np.zeros(num_scales, dtype=int)
+ for j, bbox in enumerate(cls_gts):
+ if area_ranges is None:
+ num_gts[0] += bbox.shape[0]
+ else:
+ gt_areas = (bbox[:, 2] - bbox[:, 0]) * (
+ bbox[:, 3] - bbox[:, 1])
+ for k, (min_area, max_area) in enumerate(area_ranges):
+ num_gts[k] += np.sum((gt_areas >= min_area)
+ & (gt_areas < max_area))
+ # sort all det bboxes by score, also sort tp and fp
+ cls_dets = np.vstack(cls_dets)
+ num_dets = cls_dets.shape[0]
+ sort_inds = np.argsort(-cls_dets[:, -1])
+ tp = np.hstack(tp)[:, sort_inds]
+ fp = np.hstack(fp)[:, sort_inds]
+ # calculate recall and precision with tp and fp
+ tp = np.cumsum(tp, axis=1)
+ fp = np.cumsum(fp, axis=1)
+ eps = np.finfo(np.float32).eps
+ recalls = tp / np.maximum(num_gts[:, np.newaxis], eps)
+ precisions = tp / np.maximum((tp + fp), eps)
+ # calculate AP
+ if scale_ranges is None:
+ recalls = recalls[0, :]
+ precisions = precisions[0, :]
+ num_gts = num_gts.item()
+ mode = 'area' if dataset != 'voc07' else '11points'
+ ap = average_precision(recalls, precisions, mode)
+ eval_results.append({
+ 'num_gts': num_gts,
+ 'num_dets': num_dets,
+ 'recall': recalls,
+ 'precision': precisions,
+ 'ap': ap
+ })
+ pool.close()
+ if scale_ranges is not None:
+ # shape (num_classes, num_scales)
+ all_ap = np.vstack([cls_result['ap'] for cls_result in eval_results])
+ all_num_gts = np.vstack(
+ [cls_result['num_gts'] for cls_result in eval_results])
+ mean_ap = []
+ for i in range(num_scales):
+ if np.any(all_num_gts[:, i] > 0):
+ mean_ap.append(all_ap[all_num_gts[:, i] > 0, i].mean())
+ else:
+ mean_ap.append(0.0)
+ else:
+ aps = []
+ for cls_result in eval_results:
+ if cls_result['num_gts'] > 0:
+ aps.append(cls_result['ap'])
+ mean_ap = np.array(aps).mean().item() if aps else 0.0
+
+ print_map_summary(
+ mean_ap, eval_results, dataset, area_ranges, logger=logger)
+
+ return mean_ap, eval_results
+
+
+def print_map_summary(mean_ap,
+ results,
+ dataset=None,
+ scale_ranges=None,
+ logger=None):
+ """Print mAP and results of each class.
+
+ A table will be printed to show the gts/dets/recall/AP of each class and
+ the mAP.
+
+ Args:
+ mean_ap (float): Calculated from `eval_map()`.
+ results (list[dict]): Calculated from `eval_map()`.
+ dataset (list[str] | str | None): Dataset name or dataset classes.
+ scale_ranges (list[tuple] | None): Range of scales to be evaluated.
+ logger (logging.Logger | str | None): The way to print the mAP
+ summary. See `mmcv.utils.print_log()` for details. Default: None.
+ """
+
+ if logger == 'silent':
+ return
+
+ if isinstance(results[0]['ap'], np.ndarray):
+ num_scales = len(results[0]['ap'])
+ else:
+ num_scales = 1
+
+ if scale_ranges is not None:
+ assert len(scale_ranges) == num_scales
+
+ num_classes = len(results)
+
+ recalls = np.zeros((num_scales, num_classes), dtype=np.float32)
+ aps = np.zeros((num_scales, num_classes), dtype=np.float32)
+ num_gts = np.zeros((num_scales, num_classes), dtype=int)
+ for i, cls_result in enumerate(results):
+ if cls_result['recall'].size > 0:
+ recalls[:, i] = np.array(cls_result['recall'], ndmin=2)[:, -1]
+ aps[:, i] = cls_result['ap']
+ num_gts[:, i] = cls_result['num_gts']
+
+ if dataset is None:
+ label_names = [str(i) for i in range(num_classes)]
+ elif mmcv.is_str(dataset):
+ label_names = get_classes(dataset)
+ else:
+ label_names = dataset
+
+ if not isinstance(mean_ap, list):
+ mean_ap = [mean_ap]
+
+ header = ['class', 'gts', 'dets', 'recall', 'ap']
+ for i in range(num_scales):
+ if scale_ranges is not None:
+ print_log(f'Scale range {scale_ranges[i]}', logger=logger)
+ table_data = [header]
+ for j in range(num_classes):
+ row_data = [
+ label_names[j], num_gts[i, j], results[j]['num_dets'],
+ f'{recalls[i, j]:.3f}', f'{aps[i, j]:.3f}'
+ ]
+ table_data.append(row_data)
+ table_data.append(['mAP', '', '', '', f'{mean_ap[i]:.3f}'])
+ table = AsciiTable(table_data)
+ table.inner_footing_row_border = True
+ print_log('\n' + table.table, logger=logger)
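+
+
+# A minimal end-to-end sketch (not part of the original mmdet source):
+# `eval_map` on one synthetic image with a single class whose only detection
+# matches the only ground truth yields mAP = 1.0. The boxes and score are
+# made-up example values.
+def _demo_eval_map():  # pragma: no cover - documentation only
+ det_results = [  # one image, one class: (x1, y1, x2, y2, score) rows
+ [np.array([[0., 0., 10., 10., 0.9]], dtype=np.float32)]
+ ]
+ annotations = [
+ dict(
+ bboxes=np.array([[0., 0., 10., 10.]], dtype=np.float32),
+ labels=np.array([0], dtype=np.int64))
+ ]
+ mean_ap, _ = eval_map(det_results, annotations, iou_thr=0.5, nproc=1)
+ assert np.isclose(mean_ap, 1.0)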
diff --git a/annotator/uniformer/mmdet/core/evaluation/recall.py b/annotator/uniformer/mmdet/core/evaluation/recall.py
new file mode 100644
index 0000000000000000000000000000000000000000..23ec744f552db1a4a76bfa63b7cc8b357deb3140
--- /dev/null
+++ b/annotator/uniformer/mmdet/core/evaluation/recall.py
@@ -0,0 +1,189 @@
+from collections.abc import Sequence
+
+import numpy as np
+from mmcv.utils import print_log
+from terminaltables import AsciiTable
+
+from .bbox_overlaps import bbox_overlaps
+
+
+def _recalls(all_ious, proposal_nums, thrs):
+
+ img_num = all_ious.shape[0]
+ total_gt_num = sum([ious.shape[0] for ious in all_ious])
+
+ _ious = np.zeros((proposal_nums.size, total_gt_num), dtype=np.float32)
+ for k, proposal_num in enumerate(proposal_nums):
+ tmp_ious = np.zeros(0)
+ for i in range(img_num):
+ ious = all_ious[i][:, :proposal_num].copy()
+ gt_ious = np.zeros((ious.shape[0]))
+ if ious.size == 0:
+ tmp_ious = np.hstack((tmp_ious, gt_ious))
+ continue
+ for j in range(ious.shape[0]):
+ gt_max_overlaps = ious.argmax(axis=1)
+ max_ious = ious[np.arange(0, ious.shape[0]), gt_max_overlaps]
+ gt_idx = max_ious.argmax()
+ gt_ious[j] = max_ious[gt_idx]
+ box_idx = gt_max_overlaps[gt_idx]
+ ious[gt_idx, :] = -1
+ ious[:, box_idx] = -1
+ tmp_ious = np.hstack((tmp_ious, gt_ious))
+ _ious[k, :] = tmp_ious
+
+ _ious = np.fliplr(np.sort(_ious, axis=1))
+ recalls = np.zeros((proposal_nums.size, thrs.size))
+ for i, thr in enumerate(thrs):
+ recalls[:, i] = (_ious >= thr).sum(axis=1) / float(total_gt_num)
+
+ return recalls
+
+
+def set_recall_param(proposal_nums, iou_thrs):
+ """Check proposal_nums and iou_thrs and set correct format."""
+ if isinstance(proposal_nums, Sequence):
+ _proposal_nums = np.array(proposal_nums)
+ elif isinstance(proposal_nums, int):
+ _proposal_nums = np.array([proposal_nums])
+ else:
+ _proposal_nums = proposal_nums
+
+ if iou_thrs is None:
+ _iou_thrs = np.array([0.5])
+ elif isinstance(iou_thrs, Sequence):
+ _iou_thrs = np.array(iou_thrs)
+ elif isinstance(iou_thrs, float):
+ _iou_thrs = np.array([iou_thrs])
+ else:
+ _iou_thrs = iou_thrs
+
+ return _proposal_nums, _iou_thrs
+
+
+def eval_recalls(gts,
+ proposals,
+ proposal_nums=None,
+ iou_thrs=0.5,
+ logger=None):
+ """Calculate recalls.
+
+ Args:
+ gts (list[ndarray]): a list of arrays of shape (n, 4)
+ proposals (list[ndarray]): a list of arrays of shape (k, 4) or (k, 5)
+ proposal_nums (int | Sequence[int]): Top N proposals to be evaluated.
+ iou_thrs (float | Sequence[float]): IoU thresholds. Default: 0.5.
+ logger (logging.Logger | str | None): The way to print the recall
+ summary. See `mmcv.utils.print_log()` for details. Default: None.
+
+ Returns:
+ ndarray: recalls of different ious and proposal nums
+ """
+
+ img_num = len(gts)
+ assert img_num == len(proposals)
+
+ proposal_nums, iou_thrs = set_recall_param(proposal_nums, iou_thrs)
+
+ all_ious = []
+ for i in range(img_num):
+ if proposals[i].ndim == 2 and proposals[i].shape[1] == 5:
+ scores = proposals[i][:, 4]
+ sort_idx = np.argsort(scores)[::-1]
+ img_proposal = proposals[i][sort_idx, :]
+ else:
+ img_proposal = proposals[i]
+ prop_num = min(img_proposal.shape[0], proposal_nums[-1])
+ if gts[i] is None or gts[i].shape[0] == 0:
+ ious = np.zeros((0, img_proposal.shape[0]), dtype=np.float32)
+ else:
+ ious = bbox_overlaps(gts[i], img_proposal[:prop_num, :4])
+ all_ious.append(ious)
+ all_ious = np.array(all_ious)
+ recalls = _recalls(all_ious, proposal_nums, iou_thrs)
+
+ print_recall_summary(recalls, proposal_nums, iou_thrs, logger=logger)
+ return recalls
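+
+
+# A minimal sketch (not part of the original mmdet source): a single ground
+# truth that is covered by the top-scoring proposal gives a recall of 1.0
+# for every proposal budget. `logger='silent'` suppresses the summary table.
+def _demo_eval_recalls():  # pragma: no cover - documentation only
+ gts = [np.array([[0., 0., 10., 10.]], dtype=np.float32)]
+ proposals = [np.array([[0., 0., 10., 10., 0.9],
+ [20., 20., 30., 30., 0.4]], dtype=np.float32)]
+ recalls = eval_recalls(gts, proposals, proposal_nums=(1, 2),
+ iou_thrs=0.5, logger='silent')
+ assert recalls.shape == (2, 1)
+ assert np.allclose(recalls, 1.0)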
+
+
+def print_recall_summary(recalls,
+ proposal_nums,
+ iou_thrs,
+ row_idxs=None,
+ col_idxs=None,
+ logger=None):
+ """Print recalls in a table.
+
+ Args:
+ recalls (ndarray): calculated from `eval_recalls`
+ proposal_nums (ndarray or list): top N proposals
+ iou_thrs (ndarray or list): iou thresholds
+ row_idxs (ndarray): which rows (proposal nums) to print
+ col_idxs (ndarray): which cols (iou thresholds) to print
+ logger (logging.Logger | str | None): The way to print the recall
+ summary. See `mmcv.utils.print_log()` for details. Default: None.
+ """
+ proposal_nums = np.array(proposal_nums, dtype=np.int32)
+ iou_thrs = np.array(iou_thrs)
+ if row_idxs is None:
+ row_idxs = np.arange(proposal_nums.size)
+ if col_idxs is None:
+ col_idxs = np.arange(iou_thrs.size)
+ row_header = [''] + iou_thrs[col_idxs].tolist()
+ table_data = [row_header]
+ for i, num in enumerate(proposal_nums[row_idxs]):
+ row = [f'{val:.3f}' for val in recalls[row_idxs[i], col_idxs].tolist()]
+ row.insert(0, num)
+ table_data.append(row)
+ table = AsciiTable(table_data)
+ print_log('\n' + table.table, logger=logger)
+
+
+def plot_num_recall(recalls, proposal_nums):
+ """Plot Proposal_num-Recalls curve.
+
+ Args:
+ recalls(ndarray or list): shape (k,)
+ proposal_nums(ndarray or list): same shape as `recalls`
+ """
+ if isinstance(proposal_nums, np.ndarray):
+ _proposal_nums = proposal_nums.tolist()
+ else:
+ _proposal_nums = proposal_nums
+ if isinstance(recalls, np.ndarray):
+ _recalls = recalls.tolist()
+ else:
+ _recalls = recalls
+
+ import matplotlib.pyplot as plt
+ f = plt.figure()
+ plt.plot([0] + _proposal_nums, [0] + _recalls)
+ plt.xlabel('Proposal num')
+ plt.ylabel('Recall')
+ plt.axis([0, proposal_nums.max(), 0, 1])
+ f.show()
+
+
+def plot_iou_recall(recalls, iou_thrs):
+ """Plot IoU-Recalls curve.
+
+ Args:
+ recalls(ndarray or list): shape (k,)
+ iou_thrs(ndarray or list): same shape as `recalls`
+ """
+ if isinstance(iou_thrs, np.ndarray):
+ _iou_thrs = iou_thrs.tolist()
+ else:
+ _iou_thrs = iou_thrs
+ if isinstance(recalls, np.ndarray):
+ _recalls = recalls.tolist()
+ else:
+ _recalls = recalls
+
+ import matplotlib.pyplot as plt
+ f = plt.figure()
+ plt.plot(_iou_thrs + [1.0], _recalls + [0.])
+ plt.xlabel('IoU')
+ plt.ylabel('Recall')
+ plt.axis([iou_thrs.min(), 1, 0, 1])
+ f.show()
diff --git a/annotator/uniformer/mmdet/core/export/__init__.py b/annotator/uniformer/mmdet/core/export/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..76589b1f279a71a59a5515d1b78cea0865f83131
--- /dev/null
+++ b/annotator/uniformer/mmdet/core/export/__init__.py
@@ -0,0 +1,8 @@
+from .pytorch2onnx import (build_model_from_cfg,
+ generate_inputs_and_wrap_model,
+ preprocess_example_input)
+
+__all__ = [
+ 'build_model_from_cfg', 'generate_inputs_and_wrap_model',
+ 'preprocess_example_input'
+]
diff --git a/annotator/uniformer/mmdet/core/export/pytorch2onnx.py b/annotator/uniformer/mmdet/core/export/pytorch2onnx.py
new file mode 100644
index 0000000000000000000000000000000000000000..809a817e67446b3c0c7894dcefb3c4bbc29afb7e
--- /dev/null
+++ b/annotator/uniformer/mmdet/core/export/pytorch2onnx.py
@@ -0,0 +1,154 @@
+from functools import partial
+
+import mmcv
+import numpy as np
+import torch
+from mmcv.runner import load_checkpoint
+
+
+def generate_inputs_and_wrap_model(config_path,
+ checkpoint_path,
+ input_config,
+ cfg_options=None):
+ """Prepare sample input and wrap model for ONNX export.
+
+ The ONNX export API only accepts positional args, and all inputs should
+ be torch.Tensor or corresponding types (such as a tuple of tensors).
+ So we should call this function before exporting. This function will:
+
+ 1. Generate corresponding inputs which are used to execute the model.
+ 2. Wrap the model's forward function.
+
+ For example, the MMDet models' forward function has a parameter
+ ``return_loss: bool``. Since we want to set it to False while the export
+ API supports neither bool type nor kwargs, we have to replace the forward
+ function like: ``model.forward = partial(model.forward, return_loss=False)``
+
+ Args:
+ config_path (str): the OpenMMLab config for the model we want to
+ export to ONNX
+ checkpoint_path (str): Path to the corresponding checkpoint
+ input_config (dict): the exact data in this dict depends on the
+ framework. For MMSeg, we can just declare the input shape
+ and generate the dummy data accordingly. However, for MMDet,
+ we may need to pass a real image path, otherwise NMS will
+ return None as there is no legal bbox.
+
+ Returns:
+ tuple: (model, tensor_data) wrapped model which can be called by \
+ model(*tensor_data) and a list of inputs which are used to execute \
+ the model while exporting.
+ """
+
+ model = build_model_from_cfg(
+ config_path, checkpoint_path, cfg_options=cfg_options)
+ one_img, one_meta = preprocess_example_input(input_config)
+ tensor_data = [one_img]
+ model.forward = partial(
+ model.forward, img_metas=[[one_meta]], return_loss=False)
+
+ # PyTorch 1.3 has some bugs in ONNX export, so we fix them
+ # by replacing the existing ops
+ opset_version = 11
+ # put the import within the function so that it will not cause an import
+ # error when this function is not used
+ try:
+ from mmcv.onnx.symbolic import register_extra_symbolics
+ except ModuleNotFoundError:
+ raise NotImplementedError('please update mmcv to version>=v1.0.4')
+ register_extra_symbolics(opset_version)
+
+ return model, tensor_data
+
+
+def build_model_from_cfg(config_path, checkpoint_path, cfg_options=None):
+ """Build a model from config and load the given checkpoint.
+
+ Args:
+ config_path (str): the OpenMMLab config for the model we want to
+ export to ONNX
+ checkpoint_path (str): Path to the corresponding checkpoint
+
+ Returns:
+ torch.nn.Module: the built model
+ """
+ from mmdet.models import build_detector
+
+ cfg = mmcv.Config.fromfile(config_path)
+ if cfg_options is not None:
+ cfg.merge_from_dict(cfg_options)
+ # import modules from string list.
+ if cfg.get('custom_imports', None):
+ from mmcv.utils import import_modules_from_strings
+ import_modules_from_strings(**cfg['custom_imports'])
+ # set cudnn_benchmark
+ if cfg.get('cudnn_benchmark', False):
+ torch.backends.cudnn.benchmark = True
+ cfg.model.pretrained = None
+ cfg.data.test.test_mode = True
+
+ # build the model
+ cfg.model.train_cfg = None
+ model = build_detector(cfg.model, test_cfg=cfg.get('test_cfg'))
+ load_checkpoint(model, checkpoint_path, map_location='cpu')
+ model.cpu().eval()
+ return model
+
+
+def preprocess_example_input(input_config):
+ """Prepare an example input image for ``generate_inputs_and_wrap_model``.
+
+ Args:
+ input_config (dict): customized config describing the example input.
+
+ Returns:
+ tuple: (one_img, one_meta), tensor of the example input image and \
+ meta information for the example input image.
+
+ Examples:
+ >>> from mmdet.core.export import preprocess_example_input
+ >>> input_config = {
+ >>> 'input_shape': (1,3,224,224),
+ >>> 'input_path': 'demo/demo.jpg',
+ >>> 'normalize_cfg': {
+ >>> 'mean': (123.675, 116.28, 103.53),
+ >>> 'std': (58.395, 57.12, 57.375)
+ >>> }
+ >>> }
+ >>> one_img, one_meta = preprocess_example_input(input_config)
+ >>> print(one_img.shape)
+ torch.Size([1, 3, 224, 224])
+ >>> print(one_meta)
+ {'img_shape': (224, 224, 3),
+ 'ori_shape': (224, 224, 3),
+ 'pad_shape': (224, 224, 3),
+ 'filename': '.png',
+ 'scale_factor': 1.0,
+ 'flip': False}
+ """
+ input_path = input_config['input_path']
+ input_shape = input_config['input_shape']
+ one_img = mmcv.imread(input_path)
+ one_img = mmcv.imresize(one_img, input_shape[2:][::-1])
+ show_img = one_img.copy()
+ if 'normalize_cfg' in input_config.keys():
+ normalize_cfg = input_config['normalize_cfg']
+ mean = np.array(normalize_cfg['mean'], dtype=np.float32)
+ std = np.array(normalize_cfg['std'], dtype=np.float32)
+ to_rgb = normalize_cfg.get('to_rgb', True)
+ one_img = mmcv.imnormalize(one_img, mean, std, to_rgb=to_rgb)
+ one_img = one_img.transpose(2, 0, 1)
+ one_img = torch.from_numpy(one_img).unsqueeze(0).float().requires_grad_(
+ True)
+ (_, C, H, W) = input_shape
+ one_meta = {
+ 'img_shape': (H, W, C),
+ 'ori_shape': (H, W, C),
+ 'pad_shape': (H, W, C),
+ 'filename': '.png',
+ 'scale_factor': 1.0,
+ 'flip': False,
+ 'show_img': show_img,
+ }
+
+ return one_img, one_meta
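+
+
+# An illustrative export sketch (not part of the original mmdet source):
+# wrap a detector with the helpers above and hand it to ``torch.onnx.export``.
+# The config path, checkpoint path, image path and output filename are
+# placeholders supplied by the caller; the input shape and normalization
+# values are just typical examples.
+def _demo_export_to_onnx(config_path, checkpoint_path, image_path,
+ output_file='tmp.onnx'):
+ # pragma: no cover - documentation only
+ input_config = {
+ 'input_shape': (1, 3, 800, 1216),
+ 'input_path': image_path,
+ 'normalize_cfg': {
+ 'mean': (123.675, 116.28, 103.53),
+ 'std': (58.395, 57.12, 57.375)
+ }
+ }
+ model, tensor_data = generate_inputs_and_wrap_model(
+ config_path, checkpoint_path, input_config)
+ torch.onnx.export(
+ model,
+ tuple(tensor_data),
+ output_file,
+ export_params=True,
+ opset_version=11)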
diff --git a/annotator/uniformer/mmdet/core/mask/__init__.py b/annotator/uniformer/mmdet/core/mask/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..ab1e88bc686d5c2fe72b3114cb2b3e372e73a0f8
--- /dev/null
+++ b/annotator/uniformer/mmdet/core/mask/__init__.py
@@ -0,0 +1,8 @@
+from .mask_target import mask_target
+from .structures import BaseInstanceMasks, BitmapMasks, PolygonMasks
+from .utils import encode_mask_results, split_combined_polys
+
+__all__ = [
+ 'split_combined_polys', 'mask_target', 'BaseInstanceMasks', 'BitmapMasks',
+ 'PolygonMasks', 'encode_mask_results'
+]
diff --git a/annotator/uniformer/mmdet/core/mask/mask_target.py b/annotator/uniformer/mmdet/core/mask/mask_target.py
new file mode 100644
index 0000000000000000000000000000000000000000..15d26a88bbf3710bd92813335918407db8c4e053
--- /dev/null
+++ b/annotator/uniformer/mmdet/core/mask/mask_target.py
@@ -0,0 +1,122 @@
+import numpy as np
+import torch
+from torch.nn.modules.utils import _pair
+
+
+def mask_target(pos_proposals_list, pos_assigned_gt_inds_list, gt_masks_list,
+ cfg):
+ """Compute mask target for positive proposals in multiple images.
+
+ Args:
+ pos_proposals_list (list[Tensor]): Positive proposals in multiple
+ images.
+ pos_assigned_gt_inds_list (list[Tensor]): Assigned GT indices for each
+ positive proposal.
+ gt_masks_list (list[:obj:`BaseInstanceMasks`]): Ground truth masks of
+ each image.
+ cfg (dict): Config dict that specifies the mask size.
+
+ Returns:
+ list[Tensor]: Mask target of each image.
+
+ Example:
+ >>> import mmcv
+ >>> import mmdet
+ >>> from mmdet.core.mask import BitmapMasks
+ >>> from mmdet.core.mask.mask_target import *
+ >>> H, W = 17, 18
+ >>> cfg = mmcv.Config({'mask_size': (13, 14)})
+ >>> rng = np.random.RandomState(0)
+ >>> # Positive proposals (tl_x, tl_y, br_x, br_y) for each image
+ >>> pos_proposals_list = [
+ >>> torch.Tensor([
+ >>> [ 7.2425, 5.5929, 13.9414, 14.9541],
+ >>> [ 7.3241, 3.6170, 16.3850, 15.3102],
+ >>> ]),
+ >>> torch.Tensor([
+ >>> [ 4.8448, 6.4010, 7.0314, 9.7681],
+ >>> [ 5.9790, 2.6989, 7.4416, 4.8580],
+ >>> [ 0.0000, 0.0000, 0.1398, 9.8232],
+ >>> ]),
+ >>> ]
+ >>> # Corresponding class index for each proposal for each image
+ >>> pos_assigned_gt_inds_list = [
+ >>> torch.LongTensor([7, 0]),
+ >>> torch.LongTensor([5, 4, 1]),
+ >>> ]
+ >>> # Ground truth mask for each true object for each image
+ >>> gt_masks_list = [
+ >>> BitmapMasks(rng.rand(8, H, W), height=H, width=W),
+ >>> BitmapMasks(rng.rand(6, H, W), height=H, width=W),
+ >>> ]
+ >>> mask_targets = mask_target(
+ >>> pos_proposals_list, pos_assigned_gt_inds_list,
+ >>> gt_masks_list, cfg)
+ >>> assert mask_targets.shape == (5,) + cfg['mask_size']
+ """
+ cfg_list = [cfg for _ in range(len(pos_proposals_list))]
+ mask_targets = map(mask_target_single, pos_proposals_list,
+ pos_assigned_gt_inds_list, gt_masks_list, cfg_list)
+ mask_targets = list(mask_targets)
+ if len(mask_targets) > 0:
+ mask_targets = torch.cat(mask_targets)
+ return mask_targets
+
+
+def mask_target_single(pos_proposals, pos_assigned_gt_inds, gt_masks, cfg):
+ """Compute mask target for each positive proposal in the image.
+
+ Args:
+ pos_proposals (Tensor): Positive proposals.
+ pos_assigned_gt_inds (Tensor): Assigned GT inds of positive proposals.
+ gt_masks (:obj:`BaseInstanceMasks`): GT masks in the format of Bitmap
+ or Polygon.
+ cfg (dict): Config dict that indicates the mask size.
+
+ Returns:
+ Tensor: Mask target of each positive proposal in the image.
+
+ Example:
+ >>> import mmcv
+ >>> import mmdet
+ >>> from mmdet.core.mask import BitmapMasks
+ >>> from mmdet.core.mask.mask_target import * # NOQA
+ >>> H, W = 32, 32
+ >>> cfg = mmcv.Config({'mask_size': (7, 11)})
+ >>> rng = np.random.RandomState(0)
+ >>> # Masks for each ground truth box (relative to the image)
+ >>> gt_masks_data = rng.rand(3, H, W)
+ >>> gt_masks = BitmapMasks(gt_masks_data, height=H, width=W)
+ >>> # Predicted positive boxes in one image
+ >>> pos_proposals = torch.FloatTensor([
+ >>> [ 16.2, 5.5, 19.9, 20.9],
+ >>> [ 17.3, 13.6, 19.3, 19.3],
+ >>> [ 14.8, 16.4, 17.0, 23.7],
+ >>> [ 0.0, 0.0, 16.0, 16.0],
+ >>> [ 4.0, 0.0, 20.0, 16.0],
+ >>> ])
+ >>> # For each predicted proposal, its assignment to a gt mask
+ >>> pos_assigned_gt_inds = torch.LongTensor([0, 1, 2, 1, 1])
+ >>> mask_targets = mask_target_single(
+ >>> pos_proposals, pos_assigned_gt_inds, gt_masks, cfg)
+ >>> assert mask_targets.shape == (5,) + cfg['mask_size']
+ """
+ device = pos_proposals.device
+ mask_size = _pair(cfg.mask_size)
+ num_pos = pos_proposals.size(0)
+ if num_pos > 0:
+ proposals_np = pos_proposals.cpu().numpy()
+ maxh, maxw = gt_masks.height, gt_masks.width
+ proposals_np[:, [0, 2]] = np.clip(proposals_np[:, [0, 2]], 0, maxw)
+ proposals_np[:, [1, 3]] = np.clip(proposals_np[:, [1, 3]], 0, maxh)
+ pos_assigned_gt_inds = pos_assigned_gt_inds.cpu().numpy()
+
+ mask_targets = gt_masks.crop_and_resize(
+ proposals_np, mask_size, device=device,
+ inds=pos_assigned_gt_inds).to_ndarray()
+
+ mask_targets = torch.from_numpy(mask_targets).float().to(device)
+ else:
+ mask_targets = pos_proposals.new_zeros((0, ) + mask_size)
+
+ return mask_targets
diff --git a/annotator/uniformer/mmdet/core/mask/structures.py b/annotator/uniformer/mmdet/core/mask/structures.py
new file mode 100644
index 0000000000000000000000000000000000000000..d9ec5775f281ab8b76cb873e71a4edd9969ab905
--- /dev/null
+++ b/annotator/uniformer/mmdet/core/mask/structures.py
@@ -0,0 +1,1024 @@
+from abc import ABCMeta, abstractmethod
+
+import cv2
+import mmcv
+import numpy as np
+import pycocotools.mask as maskUtils
+import torch
+from mmcv.ops.roi_align import roi_align
+
+
+class BaseInstanceMasks(metaclass=ABCMeta):
+ """Base class for instance masks."""
+
+ @abstractmethod
+ def rescale(self, scale, interpolation='nearest'):
+ """Rescale masks as large as possible while keeping the aspect ratio.
+ For details, refer to `mmcv.imrescale`.
+
+ Args:
+ scale (tuple[int]): The maximum size (h, w) of rescaled mask.
+ interpolation (str): Same as :func:`mmcv.imrescale`.
+
+ Returns:
+ BaseInstanceMasks: The rescaled masks.
+ """
+
+ @abstractmethod
+ def resize(self, out_shape, interpolation='nearest'):
+ """Resize masks to the given out_shape.
+
+ Args:
+ out_shape: Target (h, w) of resized mask.
+ interpolation (str): See :func:`mmcv.imresize`.
+
+ Returns:
+ BaseInstanceMasks: The resized masks.
+ """
+
+ @abstractmethod
+ def flip(self, flip_direction='horizontal'):
+ """Flip masks alone the given direction.
+
+ Args:
+ flip_direction (str): Either 'horizontal' or 'vertical'.
+
+ Returns:
+ BaseInstanceMasks: The flipped masks.
+ """
+
+ @abstractmethod
+ def pad(self, out_shape, pad_val):
+ """Pad masks to the given size of (h, w).
+
+ Args:
+ out_shape (tuple[int]): Target (h, w) of padded mask.
+ pad_val (int): The padded value.
+
+ Returns:
+ BaseInstanceMasks: The padded masks.
+ """
+
+ @abstractmethod
+ def crop(self, bbox):
+ """Crop each mask by the given bbox.
+
+ Args:
+ bbox (ndarray): Bbox in format [x1, y1, x2, y2], shape (4, ).
+
+ Return:
+ BaseInstanceMasks: The cropped masks.
+ """
+
+ @abstractmethod
+ def crop_and_resize(self,
+ bboxes,
+ out_shape,
+ inds,
+ device,
+ interpolation='bilinear'):
+ """Crop and resize masks by the given bboxes.
+
+ This function is mainly used in mask targets computation.
+ It first aligns masks to bboxes by assigned_inds, then crops masks by
+ the assigned bboxes and resizes them to the size of (mask_h, mask_w).
+
+ Args:
+ bboxes (Tensor): Bboxes in format [x1, y1, x2, y2], shape (N, 4)
+ out_shape (tuple[int]): Target (h, w) of resized mask
+ inds (ndarray): Indexes to assign masks to each bbox,
+                shape (N,), with values in the range [0, num_masks - 1].
+ device (str): Device of bboxes
+ interpolation (str): See `mmcv.imresize`
+
+ Return:
+ BaseInstanceMasks: the cropped and resized masks.
+ """
+
+ @abstractmethod
+ def expand(self, expanded_h, expanded_w, top, left):
+ """see :class:`Expand`."""
+
+ @property
+ @abstractmethod
+ def areas(self):
+ """ndarray: areas of each instance."""
+
+ @abstractmethod
+ def to_ndarray(self):
+ """Convert masks to the format of ndarray.
+
+ Return:
+ ndarray: Converted masks in the format of ndarray.
+ """
+
+ @abstractmethod
+ def to_tensor(self, dtype, device):
+ """Convert masks to the format of Tensor.
+
+ Args:
+ dtype (str): Dtype of converted mask.
+ device (torch.device): Device of converted masks.
+
+ Returns:
+ Tensor: Converted masks in the format of Tensor.
+ """
+
+ @abstractmethod
+ def translate(self,
+ out_shape,
+ offset,
+ direction='horizontal',
+ fill_val=0,
+ interpolation='bilinear'):
+ """Translate the masks.
+
+ Args:
+ out_shape (tuple[int]): Shape for output mask, format (h, w).
+ offset (int | float): The offset for translate.
+ direction (str): The translate direction, either "horizontal"
+ or "vertical".
+ fill_val (int | float): Border value. Default 0.
+ interpolation (str): Same as :func:`mmcv.imtranslate`.
+
+ Returns:
+ Translated masks.
+ """
+
+ def shear(self,
+ out_shape,
+ magnitude,
+ direction='horizontal',
+ border_value=0,
+ interpolation='bilinear'):
+ """Shear the masks.
+
+ Args:
+ out_shape (tuple[int]): Shape for output mask, format (h, w).
+ magnitude (int | float): The magnitude used for shear.
+ direction (str): The shear direction, either "horizontal"
+ or "vertical".
+ border_value (int | tuple[int]): Value used in case of a
+ constant border. Default 0.
+ interpolation (str): Same as in :func:`mmcv.imshear`.
+
+ Returns:
+ ndarray: Sheared masks.
+ """
+
+ @abstractmethod
+ def rotate(self, out_shape, angle, center=None, scale=1.0, fill_val=0):
+ """Rotate the masks.
+
+ Args:
+ out_shape (tuple[int]): Shape for output mask, format (h, w).
+ angle (int | float): Rotation angle in degrees. Positive values
+ mean counter-clockwise rotation.
+ center (tuple[float], optional): Center point (w, h) of the
+ rotation in source image. If not specified, the center of
+ the image will be used.
+ scale (int | float): Isotropic scale factor.
+ fill_val (int | float): Border value. Default 0 for masks.
+
+ Returns:
+ Rotated masks.
+ """
+
+
+class BitmapMasks(BaseInstanceMasks):
+ """This class represents masks in the form of bitmaps.
+
+ Args:
+ masks (ndarray): ndarray of masks in shape (N, H, W), where N is
+ the number of objects.
+ height (int): height of masks
+ width (int): width of masks
+
+ Example:
+ >>> from mmdet.core.mask.structures import * # NOQA
+ >>> num_masks, H, W = 3, 32, 32
+ >>> rng = np.random.RandomState(0)
+        >>> masks = (rng.rand(num_masks, H, W) > 0.1).astype(np.uint8)
+ >>> self = BitmapMasks(masks, height=H, width=W)
+
+ >>> # demo crop_and_resize
+ >>> num_boxes = 5
+ >>> bboxes = np.array([[0, 0, 30, 10.0]] * num_boxes)
+ >>> out_shape = (14, 14)
+ >>> inds = torch.randint(0, len(self), size=(num_boxes,))
+ >>> device = 'cpu'
+ >>> interpolation = 'bilinear'
+ >>> new = self.crop_and_resize(
+ ... bboxes, out_shape, inds, device, interpolation)
+ >>> assert len(new) == num_boxes
+        >>> assert (new.height, new.width) == out_shape
+ """
+
+ def __init__(self, masks, height, width):
+ self.height = height
+ self.width = width
+ if len(masks) == 0:
+ self.masks = np.empty((0, self.height, self.width), dtype=np.uint8)
+ else:
+ assert isinstance(masks, (list, np.ndarray))
+ if isinstance(masks, list):
+ assert isinstance(masks[0], np.ndarray)
+ assert masks[0].ndim == 2 # (H, W)
+ else:
+ assert masks.ndim == 3 # (N, H, W)
+
+ self.masks = np.stack(masks).reshape(-1, height, width)
+ assert self.masks.shape[1] == self.height
+ assert self.masks.shape[2] == self.width
+
+ def __getitem__(self, index):
+ """Index the BitmapMask.
+
+ Args:
+ index (int | ndarray): Indices in the format of integer or ndarray.
+
+ Returns:
+ :obj:`BitmapMasks`: Indexed bitmap masks.
+ """
+ masks = self.masks[index].reshape(-1, self.height, self.width)
+ return BitmapMasks(masks, self.height, self.width)
+
+ def __iter__(self):
+ return iter(self.masks)
+
+ def __repr__(self):
+ s = self.__class__.__name__ + '('
+ s += f'num_masks={len(self.masks)}, '
+ s += f'height={self.height}, '
+ s += f'width={self.width})'
+ return s
+
+ def __len__(self):
+ """Number of masks."""
+ return len(self.masks)
+
+ def rescale(self, scale, interpolation='nearest'):
+ """See :func:`BaseInstanceMasks.rescale`."""
+ if len(self.masks) == 0:
+ new_w, new_h = mmcv.rescale_size((self.width, self.height), scale)
+ rescaled_masks = np.empty((0, new_h, new_w), dtype=np.uint8)
+ else:
+ rescaled_masks = np.stack([
+ mmcv.imrescale(mask, scale, interpolation=interpolation)
+ for mask in self.masks
+ ])
+ height, width = rescaled_masks.shape[1:]
+ return BitmapMasks(rescaled_masks, height, width)
+
+ def resize(self, out_shape, interpolation='nearest'):
+ """See :func:`BaseInstanceMasks.resize`."""
+ if len(self.masks) == 0:
+ resized_masks = np.empty((0, *out_shape), dtype=np.uint8)
+ else:
+ resized_masks = np.stack([
+ mmcv.imresize(
+ mask, out_shape[::-1], interpolation=interpolation)
+ for mask in self.masks
+ ])
+ return BitmapMasks(resized_masks, *out_shape)
+
+ def flip(self, flip_direction='horizontal'):
+ """See :func:`BaseInstanceMasks.flip`."""
+ assert flip_direction in ('horizontal', 'vertical', 'diagonal')
+
+ if len(self.masks) == 0:
+ flipped_masks = self.masks
+ else:
+ flipped_masks = np.stack([
+ mmcv.imflip(mask, direction=flip_direction)
+ for mask in self.masks
+ ])
+ return BitmapMasks(flipped_masks, self.height, self.width)
+
+ def pad(self, out_shape, pad_val=0):
+ """See :func:`BaseInstanceMasks.pad`."""
+ if len(self.masks) == 0:
+ padded_masks = np.empty((0, *out_shape), dtype=np.uint8)
+ else:
+ padded_masks = np.stack([
+ mmcv.impad(mask, shape=out_shape, pad_val=pad_val)
+ for mask in self.masks
+ ])
+ return BitmapMasks(padded_masks, *out_shape)
+
+ def crop(self, bbox):
+ """See :func:`BaseInstanceMasks.crop`."""
+ assert isinstance(bbox, np.ndarray)
+ assert bbox.ndim == 1
+
+ # clip the boundary
+ bbox = bbox.copy()
+ bbox[0::2] = np.clip(bbox[0::2], 0, self.width)
+ bbox[1::2] = np.clip(bbox[1::2], 0, self.height)
+ x1, y1, x2, y2 = bbox
+ w = np.maximum(x2 - x1, 1)
+ h = np.maximum(y2 - y1, 1)
+
+ if len(self.masks) == 0:
+ cropped_masks = np.empty((0, h, w), dtype=np.uint8)
+ else:
+ cropped_masks = self.masks[:, y1:y1 + h, x1:x1 + w]
+ return BitmapMasks(cropped_masks, h, w)
+
+ def crop_and_resize(self,
+ bboxes,
+ out_shape,
+ inds,
+ device='cpu',
+ interpolation='bilinear'):
+ """See :func:`BaseInstanceMasks.crop_and_resize`."""
+ if len(self.masks) == 0:
+ empty_masks = np.empty((0, *out_shape), dtype=np.uint8)
+ return BitmapMasks(empty_masks, *out_shape)
+
+ # convert bboxes to tensor
+ if isinstance(bboxes, np.ndarray):
+ bboxes = torch.from_numpy(bboxes).to(device=device)
+ if isinstance(inds, np.ndarray):
+ inds = torch.from_numpy(inds).to(device=device)
+
+ num_bbox = bboxes.shape[0]
+ fake_inds = torch.arange(
+ num_bbox, device=device).to(dtype=bboxes.dtype)[:, None]
+ rois = torch.cat([fake_inds, bboxes], dim=1) # Nx5
+ rois = rois.to(device=device)
+ if num_bbox > 0:
+ gt_masks_th = torch.from_numpy(self.masks).to(device).index_select(
+ 0, inds).to(dtype=rois.dtype)
+ targets = roi_align(gt_masks_th[:, None, :, :], rois, out_shape,
+ 1.0, 0, 'avg', True).squeeze(1)
+ resized_masks = (targets >= 0.5).cpu().numpy()
+ else:
+ resized_masks = []
+ return BitmapMasks(resized_masks, *out_shape)
+
+ def expand(self, expanded_h, expanded_w, top, left):
+ """See :func:`BaseInstanceMasks.expand`."""
+ if len(self.masks) == 0:
+ expanded_mask = np.empty((0, expanded_h, expanded_w),
+ dtype=np.uint8)
+ else:
+ expanded_mask = np.zeros((len(self), expanded_h, expanded_w),
+ dtype=np.uint8)
+ expanded_mask[:, top:top + self.height,
+ left:left + self.width] = self.masks
+ return BitmapMasks(expanded_mask, expanded_h, expanded_w)
+
+ def translate(self,
+ out_shape,
+ offset,
+ direction='horizontal',
+ fill_val=0,
+ interpolation='bilinear'):
+ """Translate the BitmapMasks.
+
+ Args:
+ out_shape (tuple[int]): Shape for output mask, format (h, w).
+ offset (int | float): The offset for translate.
+ direction (str): The translate direction, either "horizontal"
+ or "vertical".
+ fill_val (int | float): Border value. Default 0 for masks.
+ interpolation (str): Same as :func:`mmcv.imtranslate`.
+
+ Returns:
+ BitmapMasks: Translated BitmapMasks.
+
+ Example:
+ >>> from mmdet.core.mask.structures import BitmapMasks
+ >>> self = BitmapMasks.random(dtype=np.uint8)
+ >>> out_shape = (32, 32)
+ >>> offset = 4
+ >>> direction = 'horizontal'
+ >>> fill_val = 0
+ >>> interpolation = 'bilinear'
+ >>> # Note, There seem to be issues when:
+ >>> # * out_shape is different than self's shape
+ >>> # * the mask dtype is not supported by cv2.AffineWarp
+ >>> new = self.translate(out_shape, offset, direction, fill_val,
+ >>> interpolation)
+ >>> assert len(new) == len(self)
+            >>> assert (new.height, new.width) == out_shape
+ """
+ if len(self.masks) == 0:
+ translated_masks = np.empty((0, *out_shape), dtype=np.uint8)
+ else:
+ translated_masks = mmcv.imtranslate(
+ self.masks.transpose((1, 2, 0)),
+ offset,
+ direction,
+ border_value=fill_val,
+ interpolation=interpolation)
+ if translated_masks.ndim == 2:
+ translated_masks = translated_masks[:, :, None]
+ translated_masks = translated_masks.transpose(
+ (2, 0, 1)).astype(self.masks.dtype)
+ return BitmapMasks(translated_masks, *out_shape)
+
+ def shear(self,
+ out_shape,
+ magnitude,
+ direction='horizontal',
+ border_value=0,
+ interpolation='bilinear'):
+ """Shear the BitmapMasks.
+
+ Args:
+ out_shape (tuple[int]): Shape for output mask, format (h, w).
+ magnitude (int | float): The magnitude used for shear.
+ direction (str): The shear direction, either "horizontal"
+ or "vertical".
+ border_value (int | tuple[int]): Value used in case of a
+ constant border.
+ interpolation (str): Same as in :func:`mmcv.imshear`.
+
+ Returns:
+ BitmapMasks: The sheared masks.
+ """
+ if len(self.masks) == 0:
+ sheared_masks = np.empty((0, *out_shape), dtype=np.uint8)
+ else:
+ sheared_masks = mmcv.imshear(
+ self.masks.transpose((1, 2, 0)),
+ magnitude,
+ direction,
+ border_value=border_value,
+ interpolation=interpolation)
+ if sheared_masks.ndim == 2:
+ sheared_masks = sheared_masks[:, :, None]
+ sheared_masks = sheared_masks.transpose(
+ (2, 0, 1)).astype(self.masks.dtype)
+ return BitmapMasks(sheared_masks, *out_shape)
+
+ def rotate(self, out_shape, angle, center=None, scale=1.0, fill_val=0):
+ """Rotate the BitmapMasks.
+
+ Args:
+ out_shape (tuple[int]): Shape for output mask, format (h, w).
+ angle (int | float): Rotation angle in degrees. Positive values
+ mean counter-clockwise rotation.
+ center (tuple[float], optional): Center point (w, h) of the
+ rotation in source image. If not specified, the center of
+ the image will be used.
+ scale (int | float): Isotropic scale factor.
+ fill_val (int | float): Border value. Default 0 for masks.
+
+ Returns:
+ BitmapMasks: Rotated BitmapMasks.
+ """
+ if len(self.masks) == 0:
+ rotated_masks = np.empty((0, *out_shape), dtype=self.masks.dtype)
+ else:
+ rotated_masks = mmcv.imrotate(
+ self.masks.transpose((1, 2, 0)),
+ angle,
+ center=center,
+ scale=scale,
+ border_value=fill_val)
+ if rotated_masks.ndim == 2:
+ # case when only one mask, (h, w)
+ rotated_masks = rotated_masks[:, :, None] # (h, w, 1)
+ rotated_masks = rotated_masks.transpose(
+ (2, 0, 1)).astype(self.masks.dtype)
+ return BitmapMasks(rotated_masks, *out_shape)
+
+ @property
+ def areas(self):
+ """See :py:attr:`BaseInstanceMasks.areas`."""
+ return self.masks.sum((1, 2))
+
+ def to_ndarray(self):
+ """See :func:`BaseInstanceMasks.to_ndarray`."""
+ return self.masks
+
+ def to_tensor(self, dtype, device):
+ """See :func:`BaseInstanceMasks.to_tensor`."""
+ return torch.tensor(self.masks, dtype=dtype, device=device)
+
+ @classmethod
+ def random(cls,
+ num_masks=3,
+ height=32,
+ width=32,
+ dtype=np.uint8,
+ rng=None):
+ """Generate random bitmap masks for demo / testing purposes.
+
+ Example:
+ >>> from mmdet.core.mask.structures import BitmapMasks
+ >>> self = BitmapMasks.random()
+ >>> print('self = {}'.format(self))
+ self = BitmapMasks(num_masks=3, height=32, width=32)
+ """
+ from mmdet.utils.util_random import ensure_rng
+ rng = ensure_rng(rng)
+ masks = (rng.rand(num_masks, height, width) > 0.1).astype(dtype)
+ self = cls(masks, height=height, width=width)
+ return self
+
+
+class PolygonMasks(BaseInstanceMasks):
+ """This class represents masks in the form of polygons.
+
+    Polygons are stored as a list with three levels. The first level of the
+    list corresponds to objects, the second level to the polys that compose
+    each object, and the third level to the poly coordinates.
+
+ Args:
+ masks (list[list[ndarray]]): The first level of the list
+ corresponds to objects, the second level to the polys that
+ compose the object, the third level to the poly coordinates
+ height (int): height of masks
+ width (int): width of masks
+
+ Example:
+ >>> from mmdet.core.mask.structures import * # NOQA
+ >>> masks = [
+ >>> [ np.array([0, 0, 10, 0, 10, 10., 0, 10, 0, 0]) ]
+ >>> ]
+ >>> height, width = 16, 16
+ >>> self = PolygonMasks(masks, height, width)
+
+ >>> # demo translate
+ >>> new = self.translate((16, 16), 4., direction='horizontal')
+ >>> assert np.all(new.masks[0][0][1::2] == masks[0][0][1::2])
+ >>> assert np.all(new.masks[0][0][0::2] == masks[0][0][0::2] + 4)
+
+ >>> # demo crop_and_resize
+ >>> num_boxes = 3
+ >>> bboxes = np.array([[0, 0, 30, 10.0]] * num_boxes)
+ >>> out_shape = (16, 16)
+ >>> inds = torch.randint(0, len(self), size=(num_boxes,))
+ >>> device = 'cpu'
+ >>> interpolation = 'bilinear'
+ >>> new = self.crop_and_resize(
+ ... bboxes, out_shape, inds, device, interpolation)
+ >>> assert len(new) == num_boxes
+        >>> assert (new.height, new.width) == out_shape
+ """
+
+ def __init__(self, masks, height, width):
+ assert isinstance(masks, list)
+ if len(masks) > 0:
+ assert isinstance(masks[0], list)
+ assert isinstance(masks[0][0], np.ndarray)
+
+ self.height = height
+ self.width = width
+ self.masks = masks
+
+ def __getitem__(self, index):
+ """Index the polygon masks.
+
+ Args:
+ index (ndarray | List): The indices.
+
+ Returns:
+ :obj:`PolygonMasks`: The indexed polygon masks.
+ """
+ if isinstance(index, np.ndarray):
+ index = index.tolist()
+ if isinstance(index, list):
+ masks = [self.masks[i] for i in index]
+ else:
+ try:
+ masks = self.masks[index]
+ except Exception:
+ raise ValueError(
+ f'Unsupported input of type {type(index)} for indexing!')
+ if len(masks) and isinstance(masks[0], np.ndarray):
+ masks = [masks] # ensure a list of three levels
+ return PolygonMasks(masks, self.height, self.width)
+
+ def __iter__(self):
+ return iter(self.masks)
+
+ def __repr__(self):
+ s = self.__class__.__name__ + '('
+ s += f'num_masks={len(self.masks)}, '
+ s += f'height={self.height}, '
+ s += f'width={self.width})'
+ return s
+
+ def __len__(self):
+ """Number of masks."""
+ return len(self.masks)
+
+ def rescale(self, scale, interpolation=None):
+ """see :func:`BaseInstanceMasks.rescale`"""
+ new_w, new_h = mmcv.rescale_size((self.width, self.height), scale)
+ if len(self.masks) == 0:
+ rescaled_masks = PolygonMasks([], new_h, new_w)
+ else:
+ rescaled_masks = self.resize((new_h, new_w))
+ return rescaled_masks
+
+ def resize(self, out_shape, interpolation=None):
+ """see :func:`BaseInstanceMasks.resize`"""
+ if len(self.masks) == 0:
+ resized_masks = PolygonMasks([], *out_shape)
+ else:
+ h_scale = out_shape[0] / self.height
+ w_scale = out_shape[1] / self.width
+ resized_masks = []
+ for poly_per_obj in self.masks:
+ resized_poly = []
+ for p in poly_per_obj:
+ p = p.copy()
+ p[0::2] *= w_scale
+ p[1::2] *= h_scale
+ resized_poly.append(p)
+ resized_masks.append(resized_poly)
+ resized_masks = PolygonMasks(resized_masks, *out_shape)
+ return resized_masks
+
+ def flip(self, flip_direction='horizontal'):
+ """see :func:`BaseInstanceMasks.flip`"""
+ assert flip_direction in ('horizontal', 'vertical', 'diagonal')
+ if len(self.masks) == 0:
+ flipped_masks = PolygonMasks([], self.height, self.width)
+ else:
+ flipped_masks = []
+ for poly_per_obj in self.masks:
+ flipped_poly_per_obj = []
+ for p in poly_per_obj:
+ p = p.copy()
+ if flip_direction == 'horizontal':
+ p[0::2] = self.width - p[0::2]
+ elif flip_direction == 'vertical':
+ p[1::2] = self.height - p[1::2]
+ else:
+ p[0::2] = self.width - p[0::2]
+ p[1::2] = self.height - p[1::2]
+ flipped_poly_per_obj.append(p)
+ flipped_masks.append(flipped_poly_per_obj)
+ flipped_masks = PolygonMasks(flipped_masks, self.height,
+ self.width)
+ return flipped_masks
+
+ def crop(self, bbox):
+ """see :func:`BaseInstanceMasks.crop`"""
+ assert isinstance(bbox, np.ndarray)
+ assert bbox.ndim == 1
+
+ # clip the boundary
+ bbox = bbox.copy()
+ bbox[0::2] = np.clip(bbox[0::2], 0, self.width)
+ bbox[1::2] = np.clip(bbox[1::2], 0, self.height)
+ x1, y1, x2, y2 = bbox
+ w = np.maximum(x2 - x1, 1)
+ h = np.maximum(y2 - y1, 1)
+
+ if len(self.masks) == 0:
+ cropped_masks = PolygonMasks([], h, w)
+ else:
+ cropped_masks = []
+ for poly_per_obj in self.masks:
+ cropped_poly_per_obj = []
+ for p in poly_per_obj:
+ # pycocotools will clip the boundary
+ p = p.copy()
+ p[0::2] -= bbox[0]
+ p[1::2] -= bbox[1]
+ cropped_poly_per_obj.append(p)
+ cropped_masks.append(cropped_poly_per_obj)
+ cropped_masks = PolygonMasks(cropped_masks, h, w)
+ return cropped_masks
+
+ def pad(self, out_shape, pad_val=0):
+ """padding has no effect on polygons`"""
+ return PolygonMasks(self.masks, *out_shape)
+
+ def expand(self, *args, **kwargs):
+ """TODO: Add expand for polygon"""
+ raise NotImplementedError
+
+ def crop_and_resize(self,
+ bboxes,
+ out_shape,
+ inds,
+ device='cpu',
+ interpolation='bilinear'):
+ """see :func:`BaseInstanceMasks.crop_and_resize`"""
+ out_h, out_w = out_shape
+ if len(self.masks) == 0:
+ return PolygonMasks([], out_h, out_w)
+
+ resized_masks = []
+ for i in range(len(bboxes)):
+ mask = self.masks[inds[i]]
+ bbox = bboxes[i, :]
+ x1, y1, x2, y2 = bbox
+ w = np.maximum(x2 - x1, 1)
+ h = np.maximum(y2 - y1, 1)
+ h_scale = out_h / max(h, 0.1) # avoid too large scale
+ w_scale = out_w / max(w, 0.1)
+
+ resized_mask = []
+ for p in mask:
+ p = p.copy()
+ # crop
+ # pycocotools will clip the boundary
+ p[0::2] -= bbox[0]
+ p[1::2] -= bbox[1]
+
+ # resize
+ p[0::2] *= w_scale
+ p[1::2] *= h_scale
+ resized_mask.append(p)
+ resized_masks.append(resized_mask)
+ return PolygonMasks(resized_masks, *out_shape)
+
+ def translate(self,
+ out_shape,
+ offset,
+ direction='horizontal',
+ fill_val=None,
+ interpolation=None):
+ """Translate the PolygonMasks.
+
+ Example:
+            >>> self = PolygonMasks.random(dtype=np.float32)
+ >>> out_shape = (self.height, self.width)
+ >>> new = self.translate(out_shape, 4., direction='horizontal')
+ >>> assert np.all(new.masks[0][0][1::2] == self.masks[0][0][1::2])
+ >>> assert np.all(new.masks[0][0][0::2] == self.masks[0][0][0::2] + 4) # noqa: E501
+ """
+        assert fill_val is None or fill_val == 0, 'Here fill_val is not '\
+            f'used, and should be None or 0 by default. Got {fill_val}.'
+ if len(self.masks) == 0:
+ translated_masks = PolygonMasks([], *out_shape)
+ else:
+ translated_masks = []
+ for poly_per_obj in self.masks:
+ translated_poly_per_obj = []
+ for p in poly_per_obj:
+ p = p.copy()
+ if direction == 'horizontal':
+ p[0::2] = np.clip(p[0::2] + offset, 0, out_shape[1])
+ elif direction == 'vertical':
+ p[1::2] = np.clip(p[1::2] + offset, 0, out_shape[0])
+ translated_poly_per_obj.append(p)
+ translated_masks.append(translated_poly_per_obj)
+ translated_masks = PolygonMasks(translated_masks, *out_shape)
+ return translated_masks
+
+ def shear(self,
+ out_shape,
+ magnitude,
+ direction='horizontal',
+ border_value=0,
+ interpolation='bilinear'):
+ """See :func:`BaseInstanceMasks.shear`."""
+ if len(self.masks) == 0:
+ sheared_masks = PolygonMasks([], *out_shape)
+ else:
+ sheared_masks = []
+ if direction == 'horizontal':
+ shear_matrix = np.stack([[1, magnitude],
+ [0, 1]]).astype(np.float32)
+ elif direction == 'vertical':
+ shear_matrix = np.stack([[1, 0], [magnitude,
+ 1]]).astype(np.float32)
+ for poly_per_obj in self.masks:
+ sheared_poly = []
+ for p in poly_per_obj:
+ p = np.stack([p[0::2], p[1::2]], axis=0) # [2, n]
+ new_coords = np.matmul(shear_matrix, p) # [2, n]
+ new_coords[0, :] = np.clip(new_coords[0, :], 0,
+ out_shape[1])
+ new_coords[1, :] = np.clip(new_coords[1, :], 0,
+ out_shape[0])
+ sheared_poly.append(
+ new_coords.transpose((1, 0)).reshape(-1))
+ sheared_masks.append(sheared_poly)
+ sheared_masks = PolygonMasks(sheared_masks, *out_shape)
+ return sheared_masks
+
+ def rotate(self, out_shape, angle, center=None, scale=1.0, fill_val=0):
+ """See :func:`BaseInstanceMasks.rotate`."""
+ if len(self.masks) == 0:
+ rotated_masks = PolygonMasks([], *out_shape)
+ else:
+ rotated_masks = []
+ rotate_matrix = cv2.getRotationMatrix2D(center, -angle, scale)
+ for poly_per_obj in self.masks:
+ rotated_poly = []
+ for p in poly_per_obj:
+ p = p.copy()
+ coords = np.stack([p[0::2], p[1::2]], axis=1) # [n, 2]
+ # pad 1 to convert from format [x, y] to homogeneous
+ # coordinates format [x, y, 1]
+ coords = np.concatenate(
+ (coords, np.ones((coords.shape[0], 1), coords.dtype)),
+ axis=1) # [n, 3]
+ rotated_coords = np.matmul(
+ rotate_matrix[None, :, :],
+ coords[:, :, None])[..., 0] # [n, 2, 1] -> [n, 2]
+ rotated_coords[:, 0] = np.clip(rotated_coords[:, 0], 0,
+ out_shape[1])
+ rotated_coords[:, 1] = np.clip(rotated_coords[:, 1], 0,
+ out_shape[0])
+ rotated_poly.append(rotated_coords.reshape(-1))
+ rotated_masks.append(rotated_poly)
+ rotated_masks = PolygonMasks(rotated_masks, *out_shape)
+ return rotated_masks
+
+ def to_bitmap(self):
+ """convert polygon masks to bitmap masks."""
+ bitmap_masks = self.to_ndarray()
+ return BitmapMasks(bitmap_masks, self.height, self.width)
+
+ @property
+ def areas(self):
+ """Compute areas of masks.
+
+        This function is modified from the implementation in detectron2.
+ The function only works with Polygons using the shoelace formula.
+
+ Return:
+ ndarray: areas of each instance
+ """ # noqa: W501
+ area = []
+ for polygons_per_obj in self.masks:
+ area_per_obj = 0
+ for p in polygons_per_obj:
+ area_per_obj += self._polygon_area(p[0::2], p[1::2])
+ area.append(area_per_obj)
+ return np.asarray(area)
+
+ def _polygon_area(self, x, y):
+ """Compute the area of a component of a polygon.
+
+ Using the shoelace formula:
+ https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates
+
+ Args:
+ x (ndarray): x coordinates of the component
+ y (ndarray): y coordinates of the component
+
+ Return:
+            float: the area of the component
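+
+        Example:
+            A quick sanity check of the shoelace formula on a unit square
+            traversed counter-clockwise (illustrative only):
+
+            >>> x = np.array([0., 1., 1., 0.])
+            >>> y = np.array([0., 0., 1., 1.])
+            >>> float(PolygonMasks([], 1, 1)._polygon_area(x, y))
+            1.0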
+ """ # noqa: 501
+ return 0.5 * np.abs(
+ np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))
+
+ def to_ndarray(self):
+ """Convert masks to the format of ndarray."""
+ if len(self.masks) == 0:
+ return np.empty((0, self.height, self.width), dtype=np.uint8)
+ bitmap_masks = []
+ for poly_per_obj in self.masks:
+ bitmap_masks.append(
+ polygon_to_bitmap(poly_per_obj, self.height, self.width))
+ return np.stack(bitmap_masks)
+
+ def to_tensor(self, dtype, device):
+ """See :func:`BaseInstanceMasks.to_tensor`."""
+ if len(self.masks) == 0:
+ return torch.empty((0, self.height, self.width),
+ dtype=dtype,
+ device=device)
+ ndarray_masks = self.to_ndarray()
+ return torch.tensor(ndarray_masks, dtype=dtype, device=device)
+
+ @classmethod
+ def random(cls,
+ num_masks=3,
+ height=32,
+ width=32,
+ n_verts=5,
+ dtype=np.float32,
+ rng=None):
+ """Generate random polygon masks for demo / testing purposes.
+
+ Adapted from [1]_
+
+ References:
+ .. [1] https://gitlab.kitware.com/computer-vision/kwimage/-/blob/928cae35ca8/kwimage/structs/polygon.py#L379 # noqa: E501
+
+ Example:
+ >>> from mmdet.core.mask.structures import PolygonMasks
+ >>> self = PolygonMasks.random()
+ >>> print('self = {}'.format(self))
+ """
+ from mmdet.utils.util_random import ensure_rng
+ rng = ensure_rng(rng)
+
+ def _gen_polygon(n, irregularity, spikeyness):
+ """Creates the polygon by sampling points on a circle around the
+ centre. Random noise is added by varying the angular spacing
+ between sequential points, and by varying the radial distance of
+ each point from the centre.
+
+ Based on original code by Mike Ounsworth
+
+ Args:
+ n (int): number of vertices
+ irregularity (float): [0,1] indicating how much variance there
+ is in the angular spacing of vertices. [0,1] will map to
+ [0, 2pi/numberOfVerts]
+ spikeyness (float): [0,1] indicating how much variance there is
+ in each vertex from the circle of radius aveRadius. [0,1]
+ will map to [0, aveRadius]
+
+ Returns:
+ a list of vertices, in CCW order.
+ """
+ from scipy.stats import truncnorm
+ # Generate around the unit circle
+ cx, cy = (0.0, 0.0)
+ radius = 1
+
+ tau = np.pi * 2
+
+ irregularity = np.clip(irregularity, 0, 1) * 2 * np.pi / n
+ spikeyness = np.clip(spikeyness, 1e-9, 1)
+
+ # generate n angle steps
+ lower = (tau / n) - irregularity
+ upper = (tau / n) + irregularity
+ angle_steps = rng.uniform(lower, upper, n)
+
+ # normalize the steps so that point 0 and point n+1 are the same
+ k = angle_steps.sum() / (2 * np.pi)
+ angles = (angle_steps / k).cumsum() + rng.uniform(0, tau)
+
+ # Convert high and low values to be wrt the standard normal range
+ # https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.truncnorm.html
+ low = 0
+ high = 2 * radius
+ mean = radius
+ std = spikeyness
+ a = (low - mean) / std
+ b = (high - mean) / std
+ tnorm = truncnorm(a=a, b=b, loc=mean, scale=std)
+
+ # now generate the points
+ radii = tnorm.rvs(n, random_state=rng)
+ x_pts = cx + radii * np.cos(angles)
+ y_pts = cy + radii * np.sin(angles)
+
+ points = np.hstack([x_pts[:, None], y_pts[:, None]])
+
+ # Scale to 0-1 space
+ points = points - points.min(axis=0)
+ points = points / points.max(axis=0)
+
+ # Randomly place within 0-1 space
+ points = points * (rng.rand() * .8 + .2)
+ min_pt = points.min(axis=0)
+ max_pt = points.max(axis=0)
+
+ high = (1 - max_pt)
+ low = (0 - min_pt)
+ offset = (rng.rand(2) * (high - low)) + low
+ points = points + offset
+ return points
+
+ def _order_vertices(verts):
+ """
+ References:
+ https://stackoverflow.com/questions/1709283/how-can-i-sort-a-coordinate-list-for-a-rectangle-counterclockwise
+ """
+ mlat = verts.T[0].sum() / len(verts)
+ mlng = verts.T[1].sum() / len(verts)
+
+ tau = np.pi * 2
+ angle = (np.arctan2(mlat - verts.T[0], verts.T[1] - mlng) +
+ tau) % tau
+ sortx = angle.argsort()
+ verts = verts.take(sortx, axis=0)
+ return verts
+
+ # Generate a random exterior for each requested mask
+ masks = []
+ for _ in range(num_masks):
+ exterior = _order_vertices(_gen_polygon(n_verts, 0.9, 0.9))
+ exterior = (exterior * [(width, height)]).astype(dtype)
+ masks.append([exterior.ravel()])
+
+ self = cls(masks, height, width)
+ return self
+
+
+def polygon_to_bitmap(polygons, height, width):
+ """Convert masks from the form of polygons to bitmaps.
+
+ Args:
+ polygons (list[ndarray]): masks in polygon representation
+ height (int): mask height
+ width (int): mask width
+
+ Return:
+ ndarray: the converted masks in bitmap representation
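+
+    Example:
+        A minimal illustrative conversion of a single axis-aligned square
+        (assumes pycocotools is available, as imported above):
+
+        >>> polygons = [np.array([2., 2., 12., 2., 12., 12., 2., 12.])]
+        >>> bitmap = polygon_to_bitmap(polygons, 16, 16)
+        >>> assert bitmap.shape == (16, 16) and bitmap.dtype == bool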
+ """
+ rles = maskUtils.frPyObjects(polygons, height, width)
+ rle = maskUtils.merge(rles)
+    bitmap_mask = maskUtils.decode(rle).astype(bool)
+ return bitmap_mask
diff --git a/annotator/uniformer/mmdet/core/mask/utils.py b/annotator/uniformer/mmdet/core/mask/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..c88208291ab2a605bee9fe6c1a28a443b74c6372
--- /dev/null
+++ b/annotator/uniformer/mmdet/core/mask/utils.py
@@ -0,0 +1,63 @@
+import mmcv
+import numpy as np
+import pycocotools.mask as mask_util
+
+
+def split_combined_polys(polys, poly_lens, polys_per_mask):
+ """Split the combined 1-D polys into masks.
+
+ A mask is represented as a list of polys, and a poly is represented as
+    a 1-D array. In the dataset, all masks are concatenated into a single
+    1-D tensor. Here we need to split the tensor back into its original
+    representation.
+
+ Args:
+ polys (list): a list (length = image num) of 1-D tensors
+ poly_lens (list): a list (length = image num) of poly length
+ polys_per_mask (list): a list (length = image num) of poly number
+ of each mask
+
+ Returns:
+ list: a list (length = image num) of list (length = mask num) of \
+ list (length = poly num) of numpy array.
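+
+    Example:
+        An illustrative split of a single image whose two masks consist of
+        one and two polys (ndarrays stand in for the 1-D tensors here):
+
+        >>> polys = [np.arange(20.)]
+        >>> poly_lens = [np.array([6, 6, 8])]
+        >>> polys_per_mask = [np.array([1, 2])]
+        >>> out = split_combined_polys(polys, poly_lens, polys_per_mask)
+        >>> assert len(out) == 1 and len(out[0]) == 2 and len(out[0][1]) == 2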
+ """
+ mask_polys_list = []
+ for img_id in range(len(polys)):
+ polys_single = polys[img_id]
+ polys_lens_single = poly_lens[img_id].tolist()
+ polys_per_mask_single = polys_per_mask[img_id].tolist()
+
+ split_polys = mmcv.slice_list(polys_single, polys_lens_single)
+ mask_polys = mmcv.slice_list(split_polys, polys_per_mask_single)
+ mask_polys_list.append(mask_polys)
+ return mask_polys_list
+
+
+# TODO: move this function to more proper place
+def encode_mask_results(mask_results):
+ """Encode bitmap mask to RLE code.
+
+ Args:
+ mask_results (list | tuple[list]): bitmap mask results.
+ In mask scoring rcnn, mask_results is a tuple of (segm_results,
+ segm_cls_score).
+
+ Returns:
+ list | tuple: RLE encoded mask.
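+
+    Example:
+        A minimal illustrative call with one class and a single empty mask:
+
+        >>> mask_results = [[np.zeros((4, 4), dtype=np.bool_)]]
+        >>> encoded = encode_mask_results(mask_results)
+        >>> assert len(encoded) == 1 and isinstance(encoded[0][0], dict)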
+ """
+ if isinstance(mask_results, tuple): # mask scoring
+ cls_segms, cls_mask_scores = mask_results
+ else:
+ cls_segms = mask_results
+ num_classes = len(cls_segms)
+ encoded_mask_results = [[] for _ in range(num_classes)]
+ for i in range(len(cls_segms)):
+ for cls_segm in cls_segms[i]:
+ encoded_mask_results[i].append(
+ mask_util.encode(
+ np.array(
+ cls_segm[:, :, np.newaxis], order='F',
+ dtype='uint8'))[0]) # encoded with RLE
+ if isinstance(mask_results, tuple):
+ return encoded_mask_results, cls_mask_scores
+ else:
+ return encoded_mask_results
diff --git a/annotator/uniformer/mmdet/core/post_processing/__init__.py b/annotator/uniformer/mmdet/core/post_processing/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..880b3f06609b050aae163b2e38088c1ee4aa0998
--- /dev/null
+++ b/annotator/uniformer/mmdet/core/post_processing/__init__.py
@@ -0,0 +1,8 @@
+from .bbox_nms import fast_nms, multiclass_nms
+from .merge_augs import (merge_aug_bboxes, merge_aug_masks,
+ merge_aug_proposals, merge_aug_scores)
+
+__all__ = [
+ 'multiclass_nms', 'merge_aug_proposals', 'merge_aug_bboxes',
+ 'merge_aug_scores', 'merge_aug_masks', 'fast_nms'
+]
diff --git a/annotator/uniformer/mmdet/core/post_processing/bbox_nms.py b/annotator/uniformer/mmdet/core/post_processing/bbox_nms.py
new file mode 100644
index 0000000000000000000000000000000000000000..966d3a6ac86637a6be90edc3aab9b6863fb87764
--- /dev/null
+++ b/annotator/uniformer/mmdet/core/post_processing/bbox_nms.py
@@ -0,0 +1,168 @@
+import torch
+from mmcv.ops.nms import batched_nms
+
+from mmdet.core.bbox.iou_calculators import bbox_overlaps
+
+
+def multiclass_nms(multi_bboxes,
+ multi_scores,
+ score_thr,
+ nms_cfg,
+ max_num=-1,
+ score_factors=None,
+ return_inds=False):
+ """NMS for multi-class bboxes.
+
+ Args:
+ multi_bboxes (Tensor): shape (n, #class*4) or (n, 4)
+        multi_scores (Tensor): shape (n, #class+1), where the last column
+ contains scores of the background class, but this will be ignored.
+ score_thr (float): bbox threshold, bboxes with scores lower than it
+ will not be considered.
+        nms_cfg (dict): NMS config, e.g.
+            dict(type='nms', iou_threshold=0.5).
+ max_num (int, optional): if there are more than max_num bboxes after
+ NMS, only top max_num will be kept. Default to -1.
+ score_factors (Tensor, optional): The factors multiplied to scores
+ before applying NMS. Default to None.
+        return_inds (bool, optional): Whether to return the indices of kept
+ bboxes. Default to False.
+
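+    Example:
+        A minimal illustrative call with one foreground class and two heavily
+        overlapping boxes (assumes mmcv is built with its NMS ops):
+
+        >>> multi_bboxes = torch.Tensor([[10., 10., 20., 20.],
+        ...                              [11., 11., 21., 21.]])
+        >>> multi_scores = torch.Tensor([[0.9, 0.1], [0.8, 0.2]])
+        >>> nms_cfg = dict(type='nms', iou_threshold=0.5)
+        >>> dets, labels = multiclass_nms(multi_bboxes, multi_scores,
+        ...                               0.05, nms_cfg)
+        >>> assert dets.shape == (1, 5) and labels.shape == (1,)
+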
+ Returns:
+ tuple: (bboxes, labels, indices (optional)), tensors of shape (k, 5),
+ (k), and (k). Labels are 0-based.
+ """
+ num_classes = multi_scores.size(1) - 1
+ # exclude background category
+ if multi_bboxes.shape[1] > 4:
+ bboxes = multi_bboxes.view(multi_scores.size(0), -1, 4)
+ else:
+ bboxes = multi_bboxes[:, None].expand(
+ multi_scores.size(0), num_classes, 4)
+
+ scores = multi_scores[:, :-1]
+
+ labels = torch.arange(num_classes, dtype=torch.long)
+ labels = labels.view(1, -1).expand_as(scores)
+
+ bboxes = bboxes.reshape(-1, 4)
+ scores = scores.reshape(-1)
+ labels = labels.reshape(-1)
+
+ if not torch.onnx.is_in_onnx_export():
+ # NonZero not supported in TensorRT
+ # remove low scoring boxes
+ valid_mask = scores > score_thr
+ # multiply score_factor after threshold to preserve more bboxes, improve
+ # mAP by 1% for YOLOv3
+ if score_factors is not None:
+ # expand the shape to match original shape of score
+ score_factors = score_factors.view(-1, 1).expand(
+ multi_scores.size(0), num_classes)
+ score_factors = score_factors.reshape(-1)
+ scores = scores * score_factors
+
+ if not torch.onnx.is_in_onnx_export():
+ # NonZero not supported in TensorRT
+ inds = valid_mask.nonzero(as_tuple=False).squeeze(1)
+ bboxes, scores, labels = bboxes[inds], scores[inds], labels[inds]
+ else:
+ # TensorRT NMS plugin has invalid output filled with -1
+ # add dummy data to make detection output correct.
+ bboxes = torch.cat([bboxes, bboxes.new_zeros(1, 4)], dim=0)
+ scores = torch.cat([scores, scores.new_zeros(1)], dim=0)
+ labels = torch.cat([labels, labels.new_zeros(1)], dim=0)
+
+ if bboxes.numel() == 0:
+ if torch.onnx.is_in_onnx_export():
+ raise RuntimeError('[ONNX Error] Can not record NMS '
+ 'as it has not been executed this time')
+ if return_inds:
+ return bboxes, labels, inds
+ else:
+ return bboxes, labels
+
+ dets, keep = batched_nms(bboxes, scores, labels, nms_cfg)
+
+ if max_num > 0:
+ dets = dets[:max_num]
+ keep = keep[:max_num]
+
+ if return_inds:
+ return dets, labels[keep], keep
+ else:
+ return dets, labels[keep]
+
+
+def fast_nms(multi_bboxes,
+ multi_scores,
+ multi_coeffs,
+ score_thr,
+ iou_thr,
+ top_k,
+ max_num=-1):
+ """Fast NMS in `YOLACT `_.
+
+ Fast NMS allows already-removed detections to suppress other detections so
+ that every instance can be decided to be kept or discarded in parallel,
+ which is not possible in traditional NMS. This relaxation allows us to
+ implement Fast NMS entirely in standard GPU-accelerated matrix operations.
+
+ Args:
+ multi_bboxes (Tensor): shape (n, #class*4) or (n, 4)
+ multi_scores (Tensor): shape (n, #class+1), where the last column
+ contains scores of the background class, but this will be ignored.
+ multi_coeffs (Tensor): shape (n, #class*coeffs_dim).
+ score_thr (float): bbox threshold, bboxes with scores lower than it
+ will not be considered.
+ iou_thr (float): IoU threshold to be considered as conflicted.
+ top_k (int): if there are more than top_k bboxes before NMS,
+ only top top_k will be kept.
+ max_num (int): if there are more than max_num bboxes after NMS,
+ only top max_num will be kept. If -1, keep all the bboxes.
+ Default: -1.
+
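+    Example:
+        A minimal illustrative call with a single box and one foreground
+        class (the coefficients are random placeholders):
+
+        >>> multi_bboxes = torch.Tensor([[10., 10., 20., 20.]])
+        >>> multi_scores = torch.Tensor([[0.9, 0.1]])
+        >>> multi_coeffs = torch.rand(1, 4)
+        >>> dets, labels, coeffs = fast_nms(multi_bboxes, multi_scores,
+        ...                                 multi_coeffs, 0.1, 0.5, top_k=5)
+        >>> assert dets.shape == (1, 5) and coeffs.shape == (1, 4)
+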
+ Returns:
+ tuple: (bboxes, labels, coefficients), tensors of shape (k, 5), (k, 1),
+ and (k, coeffs_dim). Labels are 0-based.
+ """
+
+ scores = multi_scores[:, :-1].t() # [#class, n]
+ scores, idx = scores.sort(1, descending=True)
+
+ idx = idx[:, :top_k].contiguous()
+ scores = scores[:, :top_k] # [#class, topk]
+ num_classes, num_dets = idx.size()
+ boxes = multi_bboxes[idx.view(-1), :].view(num_classes, num_dets, 4)
+ coeffs = multi_coeffs[idx.view(-1), :].view(num_classes, num_dets, -1)
+
+ iou = bbox_overlaps(boxes, boxes) # [#class, topk, topk]
+ iou.triu_(diagonal=1)
+ iou_max, _ = iou.max(dim=1)
+
+ # Now just filter out the ones higher than the threshold
+ keep = iou_max <= iou_thr
+
+ # Second thresholding introduces 0.2 mAP gain at negligible time cost
+ keep *= scores > score_thr
+
+ # Assign each kept detection to its corresponding class
+ classes = torch.arange(
+ num_classes, device=boxes.device)[:, None].expand_as(keep)
+ classes = classes[keep]
+
+ boxes = boxes[keep]
+ coeffs = coeffs[keep]
+ scores = scores[keep]
+
+ # Only keep the top max_num highest scores across all classes
+ scores, idx = scores.sort(0, descending=True)
+ if max_num > 0:
+ idx = idx[:max_num]
+ scores = scores[:max_num]
+
+ classes = classes[idx]
+ boxes = boxes[idx]
+ coeffs = coeffs[idx]
+
+ cls_dets = torch.cat([boxes, scores[:, None]], dim=1)
+ return cls_dets, classes, coeffs
diff --git a/annotator/uniformer/mmdet/core/post_processing/merge_augs.py b/annotator/uniformer/mmdet/core/post_processing/merge_augs.py
new file mode 100644
index 0000000000000000000000000000000000000000..dbcf79d1ac20ddc32cb1605e06d253803250c855
--- /dev/null
+++ b/annotator/uniformer/mmdet/core/post_processing/merge_augs.py
@@ -0,0 +1,150 @@
+import copy
+import warnings
+
+import numpy as np
+import torch
+from mmcv import ConfigDict
+from mmcv.ops import nms
+
+from ..bbox import bbox_mapping_back
+
+
+def merge_aug_proposals(aug_proposals, img_metas, cfg):
+ """Merge augmented proposals (multiscale, flip, etc.)
+
+ Args:
+ aug_proposals (list[Tensor]): proposals from different testing
+ schemes, shape (n, 5). Note that they are not rescaled to the
+ original image size.
+
+ img_metas (list[dict]): list of image info dict where each dict has:
+ 'img_shape', 'scale_factor', 'flip', and may also contain
+ 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
+ For details on the values of these keys see
+ `mmdet/datasets/pipelines/formatting.py:Collect`.
+
+ cfg (dict): rpn test config.
+
+ Returns:
+ Tensor: shape (n, 4), proposals corresponding to original image scale.
+ """
+
+ cfg = copy.deepcopy(cfg)
+
+ # deprecate arguments warning
+ if 'nms' not in cfg or 'max_num' in cfg or 'nms_thr' in cfg:
+ warnings.warn(
+ 'In rpn_proposal or test_cfg, '
+ 'nms_thr has been moved to a dict named nms as '
+ 'iou_threshold, max_num has been renamed as max_per_img, '
+ 'name of original arguments and the way to specify '
+ 'iou_threshold of NMS will be deprecated.')
+ if 'nms' not in cfg:
+ cfg.nms = ConfigDict(dict(type='nms', iou_threshold=cfg.nms_thr))
+ if 'max_num' in cfg:
+ if 'max_per_img' in cfg:
+            assert cfg.max_num == cfg.max_per_img, f'You set max_num and ' \
+                f'max_per_img at the same time, but got {cfg.max_num} ' \
+                f'and {cfg.max_per_img} respectively. ' \
+                f'Please delete max_num, which will be deprecated.'
+ else:
+ cfg.max_per_img = cfg.max_num
+ if 'nms_thr' in cfg:
+ assert cfg.nms.iou_threshold == cfg.nms_thr, f'You set ' \
+ f'iou_threshold in nms and ' \
+ f'nms_thr at the same time, but get ' \
+ f'{cfg.nms.iou_threshold} and {cfg.nms_thr}' \
+ f' respectively. Please delete the nms_thr ' \
+ f'which will be deprecated.'
+
+ recovered_proposals = []
+ for proposals, img_info in zip(aug_proposals, img_metas):
+ img_shape = img_info['img_shape']
+ scale_factor = img_info['scale_factor']
+ flip = img_info['flip']
+ flip_direction = img_info['flip_direction']
+ _proposals = proposals.clone()
+ _proposals[:, :4] = bbox_mapping_back(_proposals[:, :4], img_shape,
+ scale_factor, flip,
+ flip_direction)
+ recovered_proposals.append(_proposals)
+ aug_proposals = torch.cat(recovered_proposals, dim=0)
+ merged_proposals, _ = nms(aug_proposals[:, :4].contiguous(),
+ aug_proposals[:, -1].contiguous(),
+ cfg.nms.iou_threshold)
+ scores = merged_proposals[:, 4]
+ _, order = scores.sort(0, descending=True)
+ num = min(cfg.max_per_img, merged_proposals.shape[0])
+ order = order[:num]
+ merged_proposals = merged_proposals[order, :]
+ return merged_proposals
+
+
+def merge_aug_bboxes(aug_bboxes, aug_scores, img_metas, rcnn_test_cfg):
+ """Merge augmented detection bboxes and scores.
+
+ Args:
+ aug_bboxes (list[Tensor]): shape (n, 4*#class)
+ aug_scores (list[Tensor] or None): shape (n, #class)
+        img_metas (list[list[dict]]): image info dicts that contain
+            'img_shape', 'scale_factor', 'flip' and 'flip_direction'.
+ rcnn_test_cfg (dict): rcnn test config.
+
+ Returns:
+ tuple: (bboxes, scores)
+ """
+ recovered_bboxes = []
+ for bboxes, img_info in zip(aug_bboxes, img_metas):
+ img_shape = img_info[0]['img_shape']
+ scale_factor = img_info[0]['scale_factor']
+ flip = img_info[0]['flip']
+ flip_direction = img_info[0]['flip_direction']
+ bboxes = bbox_mapping_back(bboxes, img_shape, scale_factor, flip,
+ flip_direction)
+ recovered_bboxes.append(bboxes)
+ bboxes = torch.stack(recovered_bboxes).mean(dim=0)
+ if aug_scores is None:
+ return bboxes
+ else:
+ scores = torch.stack(aug_scores).mean(dim=0)
+ return bboxes, scores
+
+
+def merge_aug_scores(aug_scores):
+ """Merge augmented bbox scores."""
+ if isinstance(aug_scores[0], torch.Tensor):
+ return torch.mean(torch.stack(aug_scores), dim=0)
+ else:
+ return np.mean(aug_scores, axis=0)
+
+
+def merge_aug_masks(aug_masks, img_metas, rcnn_test_cfg, weights=None):
+ """Merge augmented mask prediction.
+
+ Args:
+ aug_masks (list[ndarray]): shape (n, #class, h, w)
+        img_metas (list[list[dict]]): image info dicts that contain
+            'flip' and 'flip_direction'.
+ rcnn_test_cfg (dict): rcnn test config.
+
+ Returns:
+        ndarray: the merged masks.
+ """
+ recovered_masks = []
+ for mask, img_info in zip(aug_masks, img_metas):
+ flip = img_info[0]['flip']
+ flip_direction = img_info[0]['flip_direction']
+ if flip:
+ if flip_direction == 'horizontal':
+ mask = mask[:, :, :, ::-1]
+ elif flip_direction == 'vertical':
+ mask = mask[:, :, ::-1, :]
+ else:
+ raise ValueError(
+ f"Invalid flipping direction '{flip_direction}'")
+ recovered_masks.append(mask)
+
+ if weights is None:
+ merged_masks = np.mean(recovered_masks, axis=0)
+ else:
+ merged_masks = np.average(
+ np.array(recovered_masks), axis=0, weights=np.array(weights))
+ return merged_masks
diff --git a/annotator/uniformer/mmdet/core/utils/__init__.py b/annotator/uniformer/mmdet/core/utils/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..5c51dac6d648f41d5c5f46dbf703f19469a7bb6c
--- /dev/null
+++ b/annotator/uniformer/mmdet/core/utils/__init__.py
@@ -0,0 +1,7 @@
+from .dist_utils import DistOptimizerHook, allreduce_grads, reduce_mean
+from .misc import mask2ndarray, multi_apply, unmap
+
+__all__ = [
+ 'allreduce_grads', 'DistOptimizerHook', 'reduce_mean', 'multi_apply',
+ 'unmap', 'mask2ndarray'
+]
diff --git a/annotator/uniformer/mmdet/core/utils/dist_utils.py b/annotator/uniformer/mmdet/core/utils/dist_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..5fe77753313783f95bd7111038ef8b58ee4e4bc5
--- /dev/null
+++ b/annotator/uniformer/mmdet/core/utils/dist_utils.py
@@ -0,0 +1,69 @@
+import warnings
+from collections import OrderedDict
+
+import torch.distributed as dist
+from mmcv.runner import OptimizerHook
+from torch._utils import (_flatten_dense_tensors, _take_tensors,
+ _unflatten_dense_tensors)
+
+
+def _allreduce_coalesced(tensors, world_size, bucket_size_mb=-1):
+ if bucket_size_mb > 0:
+ bucket_size_bytes = bucket_size_mb * 1024 * 1024
+ buckets = _take_tensors(tensors, bucket_size_bytes)
+ else:
+ buckets = OrderedDict()
+ for tensor in tensors:
+ tp = tensor.type()
+ if tp not in buckets:
+ buckets[tp] = []
+ buckets[tp].append(tensor)
+ buckets = buckets.values()
+
+ for bucket in buckets:
+ flat_tensors = _flatten_dense_tensors(bucket)
+ dist.all_reduce(flat_tensors)
+ flat_tensors.div_(world_size)
+ for tensor, synced in zip(
+ bucket, _unflatten_dense_tensors(flat_tensors, bucket)):
+ tensor.copy_(synced)
+
+
+def allreduce_grads(params, coalesce=True, bucket_size_mb=-1):
+ """Allreduce gradients.
+
+ Args:
+        params (list[torch.nn.Parameter]): List of parameters of a model.
+        coalesce (bool, optional): Whether to allreduce parameters as a whole.
+ Defaults to True.
+ bucket_size_mb (int, optional): Size of bucket, the unit is MB.
+ Defaults to -1.
+ """
+ grads = [
+ param.grad.data for param in params
+ if param.requires_grad and param.grad is not None
+ ]
+ world_size = dist.get_world_size()
+ if coalesce:
+ _allreduce_coalesced(grads, world_size, bucket_size_mb)
+ else:
+ for tensor in grads:
+ dist.all_reduce(tensor.div_(world_size))
+
+
+class DistOptimizerHook(OptimizerHook):
+ """Deprecated optimizer hook for distributed training."""
+
+ def __init__(self, *args, **kwargs):
+ warnings.warn('"DistOptimizerHook" is deprecated, please switch to'
+ '"mmcv.runner.OptimizerHook".')
+ super().__init__(*args, **kwargs)
+
+
+def reduce_mean(tensor):
+ """"Obtain the mean of tensor on different GPUs."""
+ if not (dist.is_available() and dist.is_initialized()):
+ return tensor
+ tensor = tensor.clone()
+ dist.all_reduce(tensor.div_(dist.get_world_size()), op=dist.ReduceOp.SUM)
+ return tensor
diff --git a/annotator/uniformer/mmdet/core/utils/misc.py b/annotator/uniformer/mmdet/core/utils/misc.py
new file mode 100644
index 0000000000000000000000000000000000000000..3e22c7b9085317b61a25c67d361f7e70df65bed1
--- /dev/null
+++ b/annotator/uniformer/mmdet/core/utils/misc.py
@@ -0,0 +1,61 @@
+from functools import partial
+
+import numpy as np
+import torch
+from six.moves import map, zip
+
+from ..mask.structures import BitmapMasks, PolygonMasks
+
+
+def multi_apply(func, *args, **kwargs):
+ """Apply function to a list of arguments.
+
+ Note:
+ This function applies the ``func`` to multiple inputs and
+ map the multiple outputs of the ``func`` into different
+ list. Each list contains the same type of outputs corresponding
+ to different inputs.
+
+ Args:
+ func (Function): A function that will be applied to a list of
+ arguments
+
+ Returns:
+        tuple(list): A tuple containing multiple lists; each list contains \
+            one kind of result returned by the function.
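+
+    Example:
+        A minimal illustrative use with a function returning two values:
+
+        >>> def square_and_cube(x):
+        ...     return x ** 2, x ** 3
+        >>> squares, cubes = multi_apply(square_and_cube, [1, 2, 3])
+        >>> assert squares == [1, 4, 9] and cubes == [1, 8, 27]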
+ """
+ pfunc = partial(func, **kwargs) if kwargs else func
+ map_results = map(pfunc, *args)
+ return tuple(map(list, zip(*map_results)))
+
+
+def unmap(data, count, inds, fill=0):
+ """Unmap a subset of item (data) back to the original set of items (of size
+ count)"""
+ if data.dim() == 1:
+ ret = data.new_full((count, ), fill)
+ ret[inds.type(torch.bool)] = data
+ else:
+ new_size = (count, ) + data.size()[1:]
+ ret = data.new_full(new_size, fill)
+ ret[inds.type(torch.bool), :] = data
+ return ret
+
+
+def mask2ndarray(mask):
+ """Convert Mask to ndarray..
+
+ Args:
+ mask (:obj:`BitmapMasks` or :obj:`PolygonMasks` or
+ torch.Tensor or np.ndarray): The mask to be converted.
+
+ Returns:
+        np.ndarray: The converted mask as an ndarray of shape (n, h, w).
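+
+    Example:
+        An illustrative conversion of a :obj:`BitmapMasks` instance:
+
+        >>> masks = BitmapMasks(np.zeros((2, 4, 4), dtype=np.uint8), 4, 4)
+        >>> assert mask2ndarray(masks).shape == (2, 4, 4)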
+ """
+ if isinstance(mask, (BitmapMasks, PolygonMasks)):
+ mask = mask.to_ndarray()
+ elif isinstance(mask, torch.Tensor):
+ mask = mask.detach().cpu().numpy()
+ elif not isinstance(mask, np.ndarray):
+ raise TypeError(f'Unsupported {type(mask)} data type')
+ return mask
diff --git a/annotator/uniformer/mmdet/core/visualization/__init__.py b/annotator/uniformer/mmdet/core/visualization/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..4ff995c0861490941f8cfc19ebbd41a2ee7e2d65
--- /dev/null
+++ b/annotator/uniformer/mmdet/core/visualization/__init__.py
@@ -0,0 +1,4 @@
+from .image import (color_val_matplotlib, imshow_det_bboxes,
+ imshow_gt_det_bboxes)
+
+__all__ = ['imshow_det_bboxes', 'imshow_gt_det_bboxes', 'color_val_matplotlib']
diff --git a/annotator/uniformer/mmdet/core/visualization/image.py b/annotator/uniformer/mmdet/core/visualization/image.py
new file mode 100644
index 0000000000000000000000000000000000000000..5a148384d7a77c4d9849c54570e85740eaff8235
--- /dev/null
+++ b/annotator/uniformer/mmdet/core/visualization/image.py
@@ -0,0 +1,303 @@
+import matplotlib.pyplot as plt
+import mmcv
+import numpy as np
+import pycocotools.mask as mask_util
+from matplotlib.collections import PatchCollection
+from matplotlib.patches import Polygon
+
+from ..utils import mask2ndarray
+
+EPS = 1e-2
+
+
+def color_val_matplotlib(color):
+ """Convert various input in BGR order to normalized RGB matplotlib color
+ tuples,
+
+ Args:
+ color (:obj:`Color`/str/tuple/int/ndarray): Color inputs
+
+ Returns:
+ tuple[float]: A tuple of 3 normalized floats indicating RGB channels.
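+
+    Example:
+        An illustrative conversion of an explicit BGR tuple (pure red):
+
+        >>> color_val_matplotlib((0, 0, 255))
+        (1.0, 0.0, 0.0)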
+ """
+ color = mmcv.color_val(color)
+ color = [color / 255 for color in color[::-1]]
+ return tuple(color)
+
+
+def imshow_det_bboxes(img,
+ bboxes,
+ labels,
+ segms=None,
+ class_names=None,
+ score_thr=0,
+ bbox_color='green',
+ text_color='green',
+ mask_color=None,
+ thickness=2,
+ font_size=13,
+ win_name='',
+ show=True,
+ wait_time=0,
+ out_file=None):
+ """Draw bboxes and class labels (with scores) on an image.
+
+ Args:
+ img (str or ndarray): The image to be displayed.
+ bboxes (ndarray): Bounding boxes (with scores), shaped (n, 4) or
+ (n, 5).
+ labels (ndarray): Labels of bboxes.
+ segms (ndarray or None): Masks, shaped (n,h,w) or None
+        class_names (list[str]): Names of each class.
+ score_thr (float): Minimum score of bboxes to be shown. Default: 0
+ bbox_color (str or tuple(int) or :obj:`Color`):Color of bbox lines.
+ The tuple of color should be in BGR order. Default: 'green'
+ text_color (str or tuple(int) or :obj:`Color`):Color of texts.
+ The tuple of color should be in BGR order. Default: 'green'
+ mask_color (str or tuple(int) or :obj:`Color`, optional):
+ Color of masks. The tuple of color should be in BGR order.
+ Default: None
+ thickness (int): Thickness of lines. Default: 2
+ font_size (int): Font size of texts. Default: 13
+ show (bool): Whether to show the image. Default: True
+ win_name (str): The window name. Default: ''
+ wait_time (float): Value of waitKey param. Default: 0.
+ out_file (str, optional): The filename to write the image.
+ Default: None
+
+ Returns:
+ ndarray: The image with bboxes drawn on it.
+ """
+ assert bboxes.ndim == 2, \
+ f' bboxes ndim should be 2, but its ndim is {bboxes.ndim}.'
+ assert labels.ndim == 1, \
+ f' labels ndim should be 1, but its ndim is {labels.ndim}.'
+ assert bboxes.shape[0] == labels.shape[0], \
+ 'bboxes.shape[0] and labels.shape[0] should have the same length.'
+ assert bboxes.shape[1] == 4 or bboxes.shape[1] == 5, \
+ f' bboxes.shape[1] should be 4 or 5, but its {bboxes.shape[1]}.'
+ img = mmcv.imread(img).astype(np.uint8)
+
+ if score_thr > 0:
+ assert bboxes.shape[1] == 5
+ scores = bboxes[:, -1]
+ inds = scores > score_thr
+ bboxes = bboxes[inds, :]
+ labels = labels[inds]
+ if segms is not None:
+ segms = segms[inds, ...]
+
+ mask_colors = []
+ if labels.shape[0] > 0:
+ if mask_color is None:
+ # random color
+ np.random.seed(42)
+ mask_colors = [
+ np.random.randint(0, 256, (1, 3), dtype=np.uint8)
+ for _ in range(max(labels) + 1)
+ ]
+ else:
+ # specify color
+ mask_colors = [
+ np.array(mmcv.color_val(mask_color)[::-1], dtype=np.uint8)
+ ] * (
+ max(labels) + 1)
+
+ bbox_color = color_val_matplotlib(bbox_color)
+ text_color = color_val_matplotlib(text_color)
+
+ img = mmcv.bgr2rgb(img)
+ width, height = img.shape[1], img.shape[0]
+ img = np.ascontiguousarray(img)
+
+ fig = plt.figure(win_name, frameon=False)
+ plt.title(win_name)
+ canvas = fig.canvas
+ dpi = fig.get_dpi()
+    # add a small EPS to avoid precision loss due to matplotlib's truncation
+ # (https://github.com/matplotlib/matplotlib/issues/15363)
+ fig.set_size_inches((width + EPS) / dpi, (height + EPS) / dpi)
+
+    # remove white edges by setting subplot margins
+ plt.subplots_adjust(left=0, right=1, bottom=0, top=1)
+ ax = plt.gca()
+ ax.axis('off')
+
+ polygons = []
+ color = []
+ for i, (bbox, label) in enumerate(zip(bboxes, labels)):
+ bbox_int = bbox.astype(np.int32)
+ poly = [[bbox_int[0], bbox_int[1]], [bbox_int[0], bbox_int[3]],
+ [bbox_int[2], bbox_int[3]], [bbox_int[2], bbox_int[1]]]
+ np_poly = np.array(poly).reshape((4, 2))
+ polygons.append(Polygon(np_poly))
+ color.append(bbox_color)
+ label_text = class_names[
+ label] if class_names is not None else f'class {label}'
+ if len(bbox) > 4:
+ label_text += f'|{bbox[-1]:.02f}'
+ ax.text(
+ bbox_int[0],
+ bbox_int[1],
+ f'{label_text}',
+ bbox={
+ 'facecolor': 'black',
+ 'alpha': 0.8,
+ 'pad': 0.7,
+ 'edgecolor': 'none'
+ },
+ color=text_color,
+ fontsize=font_size,
+ verticalalignment='top',
+ horizontalalignment='left')
+ if segms is not None:
+ color_mask = mask_colors[labels[i]]
+ mask = segms[i].astype(bool)
+ img[mask] = img[mask] * 0.5 + color_mask * 0.5
+
+ plt.imshow(img)
+
+ p = PatchCollection(
+ polygons, facecolor='none', edgecolors=color, linewidths=thickness)
+ ax.add_collection(p)
+
+ stream, _ = canvas.print_to_buffer()
+ buffer = np.frombuffer(stream, dtype='uint8')
+ img_rgba = buffer.reshape(height, width, 4)
+ rgb, alpha = np.split(img_rgba, [3], axis=2)
+ img = rgb.astype('uint8')
+ img = mmcv.rgb2bgr(img)
+
+ if show:
+ # We do not use cv2 for display because in some cases, opencv will
+ # conflict with Qt, it will output a warning: Current thread
+ # is not the object's thread. You can refer to
+ # https://github.com/opencv/opencv-python/issues/46 for details
+ if wait_time == 0:
+ plt.show()
+ else:
+ plt.show(block=False)
+ plt.pause(wait_time)
+ if out_file is not None:
+ mmcv.imwrite(img, out_file)
+
+ plt.close()
+
+ return img
+
+
+def imshow_gt_det_bboxes(img,
+ annotation,
+ result,
+ class_names=None,
+ score_thr=0,
+ gt_bbox_color=(255, 102, 61),
+ gt_text_color=(255, 102, 61),
+ gt_mask_color=(255, 102, 61),
+ det_bbox_color=(72, 101, 241),
+ det_text_color=(72, 101, 241),
+ det_mask_color=(72, 101, 241),
+ thickness=2,
+ font_size=13,
+ win_name='',
+ show=True,
+ wait_time=0,
+ out_file=None):
+ """General visualization GT and result function.
+
+ Args:
+        img (str or ndarray): The image to be displayed.
+        annotation (dict): Ground truth annotations, which must contain the
+            keys 'gt_bboxes' and 'gt_labels', and may contain 'gt_masks'.
+ result (tuple[list] or list): The detection result, can be either
+ (bbox, segm) or just bbox.
+        class_names (list[str]): Names of each class.
+ score_thr (float): Minimum score of bboxes to be shown. Default: 0
+ gt_bbox_color (str or tuple(int) or :obj:`Color`):Color of bbox lines.
+ The tuple of color should be in BGR order. Default: (255, 102, 61)
+ gt_text_color (str or tuple(int) or :obj:`Color`):Color of texts.
+ The tuple of color should be in BGR order. Default: (255, 102, 61)
+ gt_mask_color (str or tuple(int) or :obj:`Color`, optional):
+ Color of masks. The tuple of color should be in BGR order.
+ Default: (255, 102, 61)
+ det_bbox_color (str or tuple(int) or :obj:`Color`):Color of bbox lines.
+ The tuple of color should be in BGR order. Default: (72, 101, 241)
+ det_text_color (str or tuple(int) or :obj:`Color`):Color of texts.
+ The tuple of color should be in BGR order. Default: (72, 101, 241)
+ det_mask_color (str or tuple(int) or :obj:`Color`, optional):
+ Color of masks. The tuple of color should be in BGR order.
+ Default: (72, 101, 241)
+ thickness (int): Thickness of lines. Default: 2
+ font_size (int): Font size of texts. Default: 13
+ win_name (str): The window name. Default: ''
+ show (bool): Whether to show the image. Default: True
+ wait_time (float): Value of waitKey param. Default: 0.
+ out_file (str, optional): The filename to write the image.
+ Default: None
+
+ Returns:
+ ndarray: The image with bboxes or masks drawn on it.
+ """
+ assert 'gt_bboxes' in annotation
+ assert 'gt_labels' in annotation
+ assert isinstance(
+ result,
+        (tuple, list)), f'Expected tuple or list, but got {type(result)}'
+
+ gt_masks = annotation.get('gt_masks', None)
+ if gt_masks is not None:
+ gt_masks = mask2ndarray(gt_masks)
+
+ img = mmcv.imread(img)
+
+ img = imshow_det_bboxes(
+ img,
+ annotation['gt_bboxes'],
+ annotation['gt_labels'],
+ gt_masks,
+ class_names=class_names,
+ bbox_color=gt_bbox_color,
+ text_color=gt_text_color,
+ mask_color=gt_mask_color,
+ thickness=thickness,
+ font_size=font_size,
+ win_name=win_name,
+ show=False)
+
+ if isinstance(result, tuple):
+ bbox_result, segm_result = result
+ if isinstance(segm_result, tuple):
+ segm_result = segm_result[0] # ms rcnn
+ else:
+ bbox_result, segm_result = result, None
+
+ bboxes = np.vstack(bbox_result)
+ labels = [
+ np.full(bbox.shape[0], i, dtype=np.int32)
+ for i, bbox in enumerate(bbox_result)
+ ]
+ labels = np.concatenate(labels)
+
+ segms = None
+ if segm_result is not None and len(labels) > 0: # non empty
+ segms = mmcv.concat_list(segm_result)
+ segms = mask_util.decode(segms)
+ segms = segms.transpose(2, 0, 1)
+
+ img = imshow_det_bboxes(
+ img,
+ bboxes,
+ labels,
+ segms=segms,
+ class_names=class_names,
+ score_thr=score_thr,
+ bbox_color=det_bbox_color,
+ text_color=det_text_color,
+ mask_color=det_mask_color,
+ thickness=thickness,
+ font_size=font_size,
+ win_name=win_name,
+ show=show,
+ wait_time=wait_time,
+ out_file=out_file)
+ return img
diff --git a/annotator/uniformer/mmdet/datasets/__init__.py b/annotator/uniformer/mmdet/datasets/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..9b18b30a258c32283cbfc03ba01781a19fd993c1
--- /dev/null
+++ b/annotator/uniformer/mmdet/datasets/__init__.py
@@ -0,0 +1,24 @@
+from .builder import DATASETS, PIPELINES, build_dataloader, build_dataset
+from .cityscapes import CityscapesDataset
+from .coco import CocoDataset
+from .custom import CustomDataset
+from .dataset_wrappers import (ClassBalancedDataset, ConcatDataset,
+ RepeatDataset)
+from .deepfashion import DeepFashionDataset
+from .lvis import LVISDataset, LVISV1Dataset, LVISV05Dataset
+from .samplers import DistributedGroupSampler, DistributedSampler, GroupSampler
+from .utils import (NumClassCheckHook, get_loading_pipeline,
+ replace_ImageToTensor)
+from .voc import VOCDataset
+from .wider_face import WIDERFaceDataset
+from .xml_style import XMLDataset
+
+__all__ = [
+ 'CustomDataset', 'XMLDataset', 'CocoDataset', 'DeepFashionDataset',
+ 'VOCDataset', 'CityscapesDataset', 'LVISDataset', 'LVISV05Dataset',
+ 'LVISV1Dataset', 'GroupSampler', 'DistributedGroupSampler',
+ 'DistributedSampler', 'build_dataloader', 'ConcatDataset', 'RepeatDataset',
+ 'ClassBalancedDataset', 'WIDERFaceDataset', 'DATASETS', 'PIPELINES',
+ 'build_dataset', 'replace_ImageToTensor', 'get_loading_pipeline',
+ 'NumClassCheckHook'
+]
diff --git a/annotator/uniformer/mmdet/datasets/builder.py b/annotator/uniformer/mmdet/datasets/builder.py
new file mode 100644
index 0000000000000000000000000000000000000000..c9466a517dee746a6677b27a19713f2e89ed7194
--- /dev/null
+++ b/annotator/uniformer/mmdet/datasets/builder.py
@@ -0,0 +1,143 @@
+import copy
+import platform
+import random
+from functools import partial
+
+import numpy as np
+from mmcv.parallel import collate
+from mmcv.runner import get_dist_info
+from mmcv.utils import Registry, build_from_cfg
+from torch.utils.data import DataLoader
+
+from .samplers import DistributedGroupSampler, DistributedSampler, GroupSampler
+
+if platform.system() != 'Windows':
+ # https://github.com/pytorch/pytorch/issues/973
+ import resource
+ rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
+ hard_limit = rlimit[1]
+ soft_limit = min(4096, hard_limit)
+ resource.setrlimit(resource.RLIMIT_NOFILE, (soft_limit, hard_limit))
+
+DATASETS = Registry('dataset')
+PIPELINES = Registry('pipeline')
+
+
+def _concat_dataset(cfg, default_args=None):
+ from .dataset_wrappers import ConcatDataset
+ ann_files = cfg['ann_file']
+ img_prefixes = cfg.get('img_prefix', None)
+ seg_prefixes = cfg.get('seg_prefix', None)
+ proposal_files = cfg.get('proposal_file', None)
+ separate_eval = cfg.get('separate_eval', True)
+
+ datasets = []
+ num_dset = len(ann_files)
+ for i in range(num_dset):
+ data_cfg = copy.deepcopy(cfg)
+ # pop 'separate_eval' since it is not a valid key for common datasets.
+ if 'separate_eval' in data_cfg:
+ data_cfg.pop('separate_eval')
+ data_cfg['ann_file'] = ann_files[i]
+ if isinstance(img_prefixes, (list, tuple)):
+ data_cfg['img_prefix'] = img_prefixes[i]
+ if isinstance(seg_prefixes, (list, tuple)):
+ data_cfg['seg_prefix'] = seg_prefixes[i]
+ if isinstance(proposal_files, (list, tuple)):
+ data_cfg['proposal_file'] = proposal_files[i]
+ datasets.append(build_dataset(data_cfg, default_args))
+
+ return ConcatDataset(datasets, separate_eval)
+
+
+def build_dataset(cfg, default_args=None):
+ from .dataset_wrappers import (ConcatDataset, RepeatDataset,
+ ClassBalancedDataset)
+ if isinstance(cfg, (list, tuple)):
+ dataset = ConcatDataset([build_dataset(c, default_args) for c in cfg])
+ elif cfg['type'] == 'ConcatDataset':
+ dataset = ConcatDataset(
+ [build_dataset(c, default_args) for c in cfg['datasets']],
+ cfg.get('separate_eval', True))
+ elif cfg['type'] == 'RepeatDataset':
+ dataset = RepeatDataset(
+ build_dataset(cfg['dataset'], default_args), cfg['times'])
+ elif cfg['type'] == 'ClassBalancedDataset':
+ dataset = ClassBalancedDataset(
+ build_dataset(cfg['dataset'], default_args), cfg['oversample_thr'])
+ elif isinstance(cfg.get('ann_file'), (list, tuple)):
+ dataset = _concat_dataset(cfg, default_args)
+ else:
+ dataset = build_from_cfg(cfg, DATASETS, default_args)
+
+ return dataset
+
+
+def build_dataloader(dataset,
+ samples_per_gpu,
+ workers_per_gpu,
+ num_gpus=1,
+ dist=True,
+ shuffle=True,
+ seed=None,
+ **kwargs):
+ """Build PyTorch DataLoader.
+
+ In distributed training, each GPU/process has a dataloader.
+ In non-distributed training, there is only one dataloader for all GPUs.
+
+ Args:
+ dataset (Dataset): A PyTorch dataset.
+ samples_per_gpu (int): Number of training samples on each GPU, i.e.,
+ batch size of each GPU.
+ workers_per_gpu (int): How many subprocesses to use for data loading
+ for each GPU.
+ num_gpus (int): Number of GPUs. Only used in non-distributed training.
+ dist (bool): Distributed training/test or not. Default: True.
+ shuffle (bool): Whether to shuffle the data at every epoch.
+ Default: True.
+ kwargs: any keyword argument to be used to initialize DataLoader
+
+ Returns:
+ DataLoader: A PyTorch dataloader.
+ """
+ rank, world_size = get_dist_info()
+ if dist:
+        # DistributedGroupSampler always shuffles the data while keeping the
+        # images on each GPU within the same group
+ if shuffle:
+ sampler = DistributedGroupSampler(
+ dataset, samples_per_gpu, world_size, rank, seed=seed)
+ else:
+ sampler = DistributedSampler(
+ dataset, world_size, rank, shuffle=False, seed=seed)
+ batch_size = samples_per_gpu
+ num_workers = workers_per_gpu
+ else:
+ sampler = GroupSampler(dataset, samples_per_gpu) if shuffle else None
+ batch_size = num_gpus * samples_per_gpu
+ num_workers = num_gpus * workers_per_gpu
+
+ init_fn = partial(
+ worker_init_fn, num_workers=num_workers, rank=rank,
+ seed=seed) if seed is not None else None
+
+ data_loader = DataLoader(
+ dataset,
+ batch_size=batch_size,
+ sampler=sampler,
+ num_workers=num_workers,
+ collate_fn=partial(collate, samples_per_gpu=samples_per_gpu),
+ pin_memory=False,
+ worker_init_fn=init_fn,
+ **kwargs)
+
+ return data_loader
+
+
+def worker_init_fn(worker_id, num_workers, rank, seed):
+ # The seed of each worker equals to
+ # num_worker * rank + worker_id + user_seed
+ worker_seed = num_workers * rank + worker_id + seed
+ np.random.seed(worker_seed)
+ random.seed(worker_seed)
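+
+# Illustrative usage sketch (comments only). The config values below are
+# made-up placeholders, not a config shipped with this repo; they assume a
+# COCO-style dataset on disk.
+#
+#   cfg = dict(type='CocoDataset',
+#              ann_file='data/coco/annotations/instances_val2017.json',
+#              img_prefix='data/coco/val2017/',
+#              pipeline=[])
+#   dataset = build_dataset(cfg)
+#   loader = build_dataloader(dataset, samples_per_gpu=2, workers_per_gpu=2,
+#                             dist=False, shuffle=False)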
diff --git a/annotator/uniformer/mmdet/datasets/cityscapes.py b/annotator/uniformer/mmdet/datasets/cityscapes.py
new file mode 100644
index 0000000000000000000000000000000000000000..71eead87e7f4e511c0cb59e69c3a599832ada0e4
--- /dev/null
+++ b/annotator/uniformer/mmdet/datasets/cityscapes.py
@@ -0,0 +1,334 @@
+# Modified from https://github.com/facebookresearch/detectron2/blob/master/detectron2/data/datasets/cityscapes.py # noqa
+# and https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/evaluation/evalInstanceLevelSemanticLabeling.py # noqa
+
+import glob
+import os
+import os.path as osp
+import tempfile
+from collections import OrderedDict
+
+import mmcv
+import numpy as np
+import pycocotools.mask as maskUtils
+from mmcv.utils import print_log
+
+from .builder import DATASETS
+from .coco import CocoDataset
+
+
+@DATASETS.register_module()
+class CityscapesDataset(CocoDataset):
+
+ CLASSES = ('person', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle',
+ 'bicycle')
+
+ def _filter_imgs(self, min_size=32):
+ """Filter images too small or without ground truths."""
+ valid_inds = []
+ # obtain images that contain annotation
+ ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values())
+ # obtain images that contain annotations of the required categories
+ ids_in_cat = set()
+ for i, class_id in enumerate(self.cat_ids):
+ ids_in_cat |= set(self.coco.cat_img_map[class_id])
+ # merge the image id sets of the two conditions and use the merged set
+ # to filter out images if self.filter_empty_gt=True
+ ids_in_cat &= ids_with_ann
+
+ valid_img_ids = []
+ for i, img_info in enumerate(self.data_infos):
+ img_id = img_info['id']
+ ann_ids = self.coco.getAnnIds(imgIds=[img_id])
+ ann_info = self.coco.loadAnns(ann_ids)
+ all_iscrowd = all([_['iscrowd'] for _ in ann_info])
+ if self.filter_empty_gt and (self.img_ids[i] not in ids_in_cat
+ or all_iscrowd):
+ continue
+ if min(img_info['width'], img_info['height']) >= min_size:
+ valid_inds.append(i)
+ valid_img_ids.append(img_id)
+ self.img_ids = valid_img_ids
+ return valid_inds
+
+ def _parse_ann_info(self, img_info, ann_info):
+ """Parse bbox and mask annotation.
+
+ Args:
+ img_info (dict): Image info of an image.
+ ann_info (list[dict]): Annotation info of an image.
+
+ Returns:
+ dict: A dict containing the following keys: bboxes, \
+ bboxes_ignore, labels, masks, seg_map. \
+ "masks" are already decoded into binary masks.
+ """
+ gt_bboxes = []
+ gt_labels = []
+ gt_bboxes_ignore = []
+ gt_masks_ann = []
+
+ for i, ann in enumerate(ann_info):
+ if ann.get('ignore', False):
+ continue
+ x1, y1, w, h = ann['bbox']
+ if ann['area'] <= 0 or w < 1 or h < 1:
+ continue
+ if ann['category_id'] not in self.cat_ids:
+ continue
+ bbox = [x1, y1, x1 + w, y1 + h]
+ if ann.get('iscrowd', False):
+ gt_bboxes_ignore.append(bbox)
+ else:
+ gt_bboxes.append(bbox)
+ gt_labels.append(self.cat2label[ann['category_id']])
+ gt_masks_ann.append(ann['segmentation'])
+
+ if gt_bboxes:
+ gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
+ gt_labels = np.array(gt_labels, dtype=np.int64)
+ else:
+ gt_bboxes = np.zeros((0, 4), dtype=np.float32)
+ gt_labels = np.array([], dtype=np.int64)
+
+ if gt_bboxes_ignore:
+ gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)
+ else:
+ gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)
+
+ ann = dict(
+ bboxes=gt_bboxes,
+ labels=gt_labels,
+ bboxes_ignore=gt_bboxes_ignore,
+ masks=gt_masks_ann,
+ seg_map=img_info['segm_file'])
+
+ return ann
+
+ def results2txt(self, results, outfile_prefix):
+ """Dump the detection results to a txt file.
+
+ Args:
+ results (list[list | tuple]): Testing results of the
+ dataset.
+            outfile_prefix (str): The filename prefix of the txt files.
+                If the prefix is "somepath/xxx",
+                the txt files will be named "somepath/xxx.txt".
+
+        Returns:
+            list[str]: Result txt files which contain corresponding \
+                instance segmentation images.
+ """
+ try:
+ import cityscapesscripts.helpers.labels as CSLabels
+ except ImportError:
+            raise ImportError('Please run "pip install cityscapesscripts" to '
+                              'install cityscapesscripts first.')
+ result_files = []
+ os.makedirs(outfile_prefix, exist_ok=True)
+ prog_bar = mmcv.ProgressBar(len(self))
+ for idx in range(len(self)):
+ result = results[idx]
+ filename = self.data_infos[idx]['filename']
+ basename = osp.splitext(osp.basename(filename))[0]
+ pred_txt = osp.join(outfile_prefix, basename + '_pred.txt')
+
+ bbox_result, segm_result = result
+ bboxes = np.vstack(bbox_result)
+ # segm results
+ if isinstance(segm_result, tuple):
+ # Some detectors use different scores for bbox and mask,
+ # like Mask Scoring R-CNN. Score of segm will be used instead
+ # of bbox score.
+ segms = mmcv.concat_list(segm_result[0])
+ mask_score = segm_result[1]
+ else:
+ # use bbox score for mask score
+ segms = mmcv.concat_list(segm_result)
+ mask_score = [bbox[-1] for bbox in bboxes]
+ labels = [
+ np.full(bbox.shape[0], i, dtype=np.int32)
+ for i, bbox in enumerate(bbox_result)
+ ]
+ labels = np.concatenate(labels)
+
+ assert len(bboxes) == len(segms) == len(labels)
+ num_instances = len(bboxes)
+ prog_bar.update()
+ with open(pred_txt, 'w') as fout:
+ for i in range(num_instances):
+ pred_class = labels[i]
+ classes = self.CLASSES[pred_class]
+ class_id = CSLabels.name2label[classes].id
+ score = mask_score[i]
+ mask = maskUtils.decode(segms[i]).astype(np.uint8)
+ png_filename = osp.join(outfile_prefix,
+ basename + f'_{i}_{classes}.png')
+ mmcv.imwrite(mask, png_filename)
+ fout.write(f'{osp.basename(png_filename)} {class_id} '
+ f'{score}\n')
+ result_files.append(pred_txt)
+
+ return result_files
+
+ def format_results(self, results, txtfile_prefix=None):
+ """Format the results to txt (standard format for Cityscapes
+ evaluation).
+
+ Args:
+ results (list): Testing results of the dataset.
+ txtfile_prefix (str | None): The prefix of txt files. It includes
+ the file path and the prefix of filename, e.g., "a/b/prefix".
+ If not specified, a temp file will be created. Default: None.
+
+ Returns:
+            tuple: (result_files, tmp_dir), result_files is a list of txt \
+                filepaths, tmp_dir is the temporary directory created \
+                for saving txt/png files when txtfile_prefix is not specified.
+ """
+ assert isinstance(results, list), 'results must be a list'
+ assert len(results) == len(self), (
+ 'The length of results is not equal to the dataset len: {} != {}'.
+ format(len(results), len(self)))
+
+ if txtfile_prefix is None:
+ tmp_dir = tempfile.TemporaryDirectory()
+ txtfile_prefix = osp.join(tmp_dir.name, 'results')
+ else:
+ tmp_dir = None
+ result_files = self.results2txt(results, txtfile_prefix)
+
+ return result_files, tmp_dir
+
+ def evaluate(self,
+ results,
+ metric='bbox',
+ logger=None,
+ outfile_prefix=None,
+ classwise=False,
+ proposal_nums=(100, 300, 1000),
+ iou_thrs=np.arange(0.5, 0.96, 0.05)):
+ """Evaluation in Cityscapes/COCO protocol.
+
+ Args:
+ results (list[list | tuple]): Testing results of the dataset.
+ metric (str | list[str]): Metrics to be evaluated. Options are
+ 'bbox', 'segm', 'proposal', 'proposal_fast'.
+ logger (logging.Logger | str | None): Logger used for printing
+ related information during evaluation. Default: None.
+ outfile_prefix (str | None): The prefix of output file. It includes
+ the file path and the prefix of filename, e.g., "a/b/prefix".
+ If results are evaluated with COCO protocol, it would be the
+ prefix of output json file. For example, the metric is 'bbox'
+ and 'segm', then json files would be "a/b/prefix.bbox.json" and
+ "a/b/prefix.segm.json".
+ If results are evaluated with cityscapes protocol, it would be
+ the prefix of output txt/png files. The output files would be
+ png images under folder "a/b/prefix/xxx/" and the file name of
+ images would be written into a txt file
+ "a/b/prefix/xxx_pred.txt", where "xxx" is the video name of
+ cityscapes. If not specified, a temp file will be created.
+ Default: None.
+            classwise (bool): Whether to evaluate the AP for each class.
+ proposal_nums (Sequence[int]): Proposal number used for evaluating
+ recalls, such as recall@100, recall@1000.
+ Default: (100, 300, 1000).
+            iou_thrs (Sequence[float]): IoU thresholds used for evaluating
+                recalls. If set to a list, the average recall of all IoUs will
+                also be computed. Default: np.arange(0.5, 0.96, 0.05).
+
+ Returns:
+ dict[str, float]: COCO style evaluation metric or cityscapes mAP \
+ and AP@50.
+ """
+ eval_results = dict()
+
+ metrics = metric.copy() if isinstance(metric, list) else [metric]
+
+ if 'cityscapes' in metrics:
+ eval_results.update(
+ self._evaluate_cityscapes(results, outfile_prefix, logger))
+ metrics.remove('cityscapes')
+
+ # left metrics are all coco metric
+ if len(metrics) > 0:
+ # create CocoDataset with CityscapesDataset annotation
+ self_coco = CocoDataset(self.ann_file, self.pipeline.transforms,
+ None, self.data_root, self.img_prefix,
+ self.seg_prefix, self.proposal_file,
+ self.test_mode, self.filter_empty_gt)
+ # TODO: remove this in the future
+ # reload annotations of correct class
+ self_coco.CLASSES = self.CLASSES
+ self_coco.data_infos = self_coco.load_annotations(self.ann_file)
+ eval_results.update(
+ self_coco.evaluate(results, metrics, logger, outfile_prefix,
+ classwise, proposal_nums, iou_thrs))
+
+ return eval_results
+
+ def _evaluate_cityscapes(self, results, txtfile_prefix, logger):
+ """Evaluation in Cityscapes protocol.
+
+ Args:
+ results (list): Testing results of the dataset.
+ txtfile_prefix (str | None): The prefix of output txt file
+ logger (logging.Logger | str | None): Logger used for printing
+ related information during evaluation. Default: None.
+
+ Returns:
+ dict[str: float]: Cityscapes evaluation results, contains 'mAP' \
+ and 'AP@50'.
+ """
+
+ try:
+ import cityscapesscripts.evaluation.evalInstanceLevelSemanticLabeling as CSEval # noqa
+ except ImportError:
+            raise ImportError('Please run "pip install cityscapesscripts" to '
+                              'install cityscapesscripts first.')
+ msg = 'Evaluating in Cityscapes style'
+ if logger is None:
+ msg = '\n' + msg
+ print_log(msg, logger=logger)
+
+ result_files, tmp_dir = self.format_results(results, txtfile_prefix)
+
+ if tmp_dir is None:
+ result_dir = osp.join(txtfile_prefix, 'results')
+ else:
+ result_dir = osp.join(tmp_dir.name, 'results')
+
+ eval_results = OrderedDict()
+ print_log(f'Evaluating results under {result_dir} ...', logger=logger)
+
+ # set global states in cityscapes evaluation API
+ CSEval.args.cityscapesPath = os.path.join(self.img_prefix, '../..')
+ CSEval.args.predictionPath = os.path.abspath(result_dir)
+ CSEval.args.predictionWalk = None
+ CSEval.args.JSONOutput = False
+ CSEval.args.colorized = False
+ CSEval.args.gtInstancesFile = os.path.join(result_dir,
+ 'gtInstances.json')
+ CSEval.args.groundTruthSearch = os.path.join(
+ self.img_prefix.replace('leftImg8bit', 'gtFine'),
+ '*/*_gtFine_instanceIds.png')
+
+ groundTruthImgList = glob.glob(CSEval.args.groundTruthSearch)
+ assert len(groundTruthImgList), 'Cannot find ground truth images' \
+ f' in {CSEval.args.groundTruthSearch}.'
+ predictionImgList = []
+ for gt in groundTruthImgList:
+ predictionImgList.append(CSEval.getPrediction(gt, CSEval.args))
+ CSEval_results = CSEval.evaluateImgLists(predictionImgList,
+ groundTruthImgList,
+ CSEval.args)['averages']
+
+ eval_results['mAP'] = CSEval_results['allAp']
+ eval_results['AP@50'] = CSEval_results['allAp50%']
+ if tmp_dir is not None:
+ tmp_dir.cleanup()
+ return eval_results
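+
+# Illustrative usage sketch (comments only). The paths are placeholders and
+# assume the standard leftImg8bit/gtFine layout that _evaluate_cityscapes
+# expects; `outputs` stands for per-image results from a test run.
+#
+#   dataset = CityscapesDataset(
+#       ann_file='data/cityscapes/annotations/instancesonly_filtered_gtFine_val.json',
+#       pipeline=[],
+#       img_prefix='data/cityscapes/leftImg8bit/val/')
+#   dataset.evaluate(outputs, metric=['bbox', 'segm'])
+#   # metric='cityscapes' additionally requires cityscapesscripts to be installed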
diff --git a/annotator/uniformer/mmdet/datasets/coco.py b/annotator/uniformer/mmdet/datasets/coco.py
new file mode 100644
index 0000000000000000000000000000000000000000..3a8e1bcfdd7f2854ca381d4f87788e3a63eb568c
--- /dev/null
+++ b/annotator/uniformer/mmdet/datasets/coco.py
@@ -0,0 +1,546 @@
+import itertools
+import logging
+import os.path as osp
+import tempfile
+from collections import OrderedDict
+
+import mmcv
+import numpy as np
+import pycocotools
+from mmcv.utils import print_log
+from pycocotools.coco import COCO
+from pycocotools.cocoeval import COCOeval
+from terminaltables import AsciiTable
+
+from mmdet.core import eval_recalls
+from .builder import DATASETS
+from .custom import CustomDataset
+
+
+@DATASETS.register_module()
+class CocoDataset(CustomDataset):
+
+ CLASSES = ('person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
+ 'train', 'truck', 'boat', 'traffic light', 'fire hydrant',
+ 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog',
+ 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe',
+ 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
+ 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat',
+ 'baseball glove', 'skateboard', 'surfboard', 'tennis racket',
+ 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl',
+ 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
+ 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
+ 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop',
+ 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave',
+ 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock',
+ 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush')
+
+ def load_annotations(self, ann_file):
+ """Load annotation from COCO style annotation file.
+
+ Args:
+ ann_file (str): Path of annotation file.
+
+ Returns:
+ list[dict]: Annotation info from COCO api.
+ """
+ if not getattr(pycocotools, '__version__', '0') >= '12.0.2':
+ raise AssertionError(
+ 'Incompatible version of pycocotools is installed. '
+ 'Run pip uninstall pycocotools first. Then run pip '
+ 'install mmpycocotools to install open-mmlab forked '
+ 'pycocotools.')
+
+ self.coco = COCO(ann_file)
+ self.cat_ids = self.coco.get_cat_ids(cat_names=self.CLASSES)
+ self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}
+ self.img_ids = self.coco.get_img_ids()
+ data_infos = []
+ total_ann_ids = []
+ for i in self.img_ids:
+ info = self.coco.load_imgs([i])[0]
+ info['filename'] = info['file_name']
+ data_infos.append(info)
+ ann_ids = self.coco.get_ann_ids(img_ids=[i])
+ total_ann_ids.extend(ann_ids)
+ assert len(set(total_ann_ids)) == len(
+ total_ann_ids), f"Annotation ids in '{ann_file}' are not unique!"
+ return data_infos
+
+ def get_ann_info(self, idx):
+ """Get COCO annotation by index.
+
+ Args:
+ idx (int): Index of data.
+
+ Returns:
+ dict: Annotation info of specified index.
+ """
+
+ img_id = self.data_infos[idx]['id']
+ ann_ids = self.coco.get_ann_ids(img_ids=[img_id])
+ ann_info = self.coco.load_anns(ann_ids)
+ return self._parse_ann_info(self.data_infos[idx], ann_info)
+
+ def get_cat_ids(self, idx):
+ """Get COCO category ids by index.
+
+ Args:
+ idx (int): Index of data.
+
+ Returns:
+ list[int]: All categories in the image of specified index.
+ """
+
+ img_id = self.data_infos[idx]['id']
+ ann_ids = self.coco.get_ann_ids(img_ids=[img_id])
+ ann_info = self.coco.load_anns(ann_ids)
+ return [ann['category_id'] for ann in ann_info]
+
+ def _filter_imgs(self, min_size=32):
+ """Filter images too small or without ground truths."""
+ valid_inds = []
+ # obtain images that contain annotation
+ ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values())
+ # obtain images that contain annotations of the required categories
+ ids_in_cat = set()
+ for i, class_id in enumerate(self.cat_ids):
+ ids_in_cat |= set(self.coco.cat_img_map[class_id])
+ # merge the image id sets of the two conditions and use the merged set
+ # to filter out images if self.filter_empty_gt=True
+ ids_in_cat &= ids_with_ann
+
+ valid_img_ids = []
+ for i, img_info in enumerate(self.data_infos):
+ img_id = self.img_ids[i]
+ if self.filter_empty_gt and img_id not in ids_in_cat:
+ continue
+ if min(img_info['width'], img_info['height']) >= min_size:
+ valid_inds.append(i)
+ valid_img_ids.append(img_id)
+ self.img_ids = valid_img_ids
+ return valid_inds
+
+ def _parse_ann_info(self, img_info, ann_info):
+ """Parse bbox and mask annotation.
+
+ Args:
+            img_info (dict): Image info of an image.
+            ann_info (list[dict]): Annotation info of an image.
+
+ Returns:
+ dict: A dict containing the following keys: bboxes, bboxes_ignore,\
+ labels, masks, seg_map. "masks" are raw annotations and not \
+ decoded into binary masks.
+ """
+ gt_bboxes = []
+ gt_labels = []
+ gt_bboxes_ignore = []
+ gt_masks_ann = []
+ for i, ann in enumerate(ann_info):
+ if ann.get('ignore', False):
+ continue
+ x1, y1, w, h = ann['bbox']
+ inter_w = max(0, min(x1 + w, img_info['width']) - max(x1, 0))
+ inter_h = max(0, min(y1 + h, img_info['height']) - max(y1, 0))
+ if inter_w * inter_h == 0:
+ continue
+ if ann['area'] <= 0 or w < 1 or h < 1:
+ continue
+ if ann['category_id'] not in self.cat_ids:
+ continue
+ bbox = [x1, y1, x1 + w, y1 + h]
+ if ann.get('iscrowd', False):
+ gt_bboxes_ignore.append(bbox)
+ else:
+ gt_bboxes.append(bbox)
+ gt_labels.append(self.cat2label[ann['category_id']])
+ gt_masks_ann.append(ann.get('segmentation', None))
+
+ if gt_bboxes:
+ gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
+ gt_labels = np.array(gt_labels, dtype=np.int64)
+ else:
+ gt_bboxes = np.zeros((0, 4), dtype=np.float32)
+ gt_labels = np.array([], dtype=np.int64)
+
+ if gt_bboxes_ignore:
+ gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)
+ else:
+ gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)
+
+ seg_map = img_info['filename'].replace('jpg', 'png')
+
+ ann = dict(
+ bboxes=gt_bboxes,
+ labels=gt_labels,
+ bboxes_ignore=gt_bboxes_ignore,
+ masks=gt_masks_ann,
+ seg_map=seg_map)
+
+ return ann
+
+ def xyxy2xywh(self, bbox):
+ """Convert ``xyxy`` style bounding boxes to ``xywh`` style for COCO
+ evaluation.
+
+ Args:
+ bbox (numpy.ndarray): The bounding boxes, shape (4, ), in
+ ``xyxy`` order.
+
+ Returns:
+ list[float]: The converted bounding boxes, in ``xywh`` order.
+ """
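+        # Worked example (illustrative): the xyxy box [10., 20., 50., 80.]
+        # becomes [10., 20., 40., 60.] in xywh, since w = x2 - x1 and
+        # h = y2 - y1.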
+
+ _bbox = bbox.tolist()
+ return [
+ _bbox[0],
+ _bbox[1],
+ _bbox[2] - _bbox[0],
+ _bbox[3] - _bbox[1],
+ ]
+
+ def _proposal2json(self, results):
+ """Convert proposal results to COCO json style."""
+ json_results = []
+ for idx in range(len(self)):
+ img_id = self.img_ids[idx]
+ bboxes = results[idx]
+ for i in range(bboxes.shape[0]):
+ data = dict()
+ data['image_id'] = img_id
+ data['bbox'] = self.xyxy2xywh(bboxes[i])
+ data['score'] = float(bboxes[i][4])
+ data['category_id'] = 1
+ json_results.append(data)
+ return json_results
+
+ def _det2json(self, results):
+ """Convert detection results to COCO json style."""
+ json_results = []
+ for idx in range(len(self)):
+ img_id = self.img_ids[idx]
+ result = results[idx]
+ for label in range(len(result)):
+ bboxes = result[label]
+ for i in range(bboxes.shape[0]):
+ data = dict()
+ data['image_id'] = img_id
+ data['bbox'] = self.xyxy2xywh(bboxes[i])
+ data['score'] = float(bboxes[i][4])
+ data['category_id'] = self.cat_ids[label]
+ json_results.append(data)
+ return json_results
+
+ def _segm2json(self, results):
+ """Convert instance segmentation results to COCO json style."""
+ bbox_json_results = []
+ segm_json_results = []
+ for idx in range(len(self)):
+ img_id = self.img_ids[idx]
+ det, seg = results[idx]
+ for label in range(len(det)):
+ # bbox results
+ bboxes = det[label]
+ for i in range(bboxes.shape[0]):
+ data = dict()
+ data['image_id'] = img_id
+ data['bbox'] = self.xyxy2xywh(bboxes[i])
+ data['score'] = float(bboxes[i][4])
+ data['category_id'] = self.cat_ids[label]
+ bbox_json_results.append(data)
+
+ # segm results
+ # some detectors use different scores for bbox and mask
+ if isinstance(seg, tuple):
+ segms = seg[0][label]
+ mask_score = seg[1][label]
+ else:
+ segms = seg[label]
+ mask_score = [bbox[4] for bbox in bboxes]
+ for i in range(bboxes.shape[0]):
+ data = dict()
+ data['image_id'] = img_id
+ data['bbox'] = self.xyxy2xywh(bboxes[i])
+ data['score'] = float(mask_score[i])
+ data['category_id'] = self.cat_ids[label]
+ if isinstance(segms[i]['counts'], bytes):
+ segms[i]['counts'] = segms[i]['counts'].decode()
+ data['segmentation'] = segms[i]
+ segm_json_results.append(data)
+ return bbox_json_results, segm_json_results
+
+ def results2json(self, results, outfile_prefix):
+ """Dump the detection results to a COCO style json file.
+
+ There are 3 types of results: proposals, bbox predictions, mask
+ predictions, and they have different data types. This method will
+ automatically recognize the type, and dump them to json files.
+
+ Args:
+ results (list[list | tuple | ndarray]): Testing results of the
+ dataset.
+ outfile_prefix (str): The filename prefix of the json files. If the
+ prefix is "somepath/xxx", the json files will be named
+ "somepath/xxx.bbox.json", "somepath/xxx.segm.json",
+ "somepath/xxx.proposal.json".
+
+ Returns:
+ dict[str: str]: Possible keys are "bbox", "segm", "proposal", and \
+ values are corresponding filenames.
+ """
+ result_files = dict()
+ if isinstance(results[0], list):
+ json_results = self._det2json(results)
+ result_files['bbox'] = f'{outfile_prefix}.bbox.json'
+ result_files['proposal'] = f'{outfile_prefix}.bbox.json'
+ mmcv.dump(json_results, result_files['bbox'])
+ elif isinstance(results[0], tuple):
+ json_results = self._segm2json(results)
+ result_files['bbox'] = f'{outfile_prefix}.bbox.json'
+ result_files['proposal'] = f'{outfile_prefix}.bbox.json'
+ result_files['segm'] = f'{outfile_prefix}.segm.json'
+ mmcv.dump(json_results[0], result_files['bbox'])
+ mmcv.dump(json_results[1], result_files['segm'])
+ elif isinstance(results[0], np.ndarray):
+ json_results = self._proposal2json(results)
+ result_files['proposal'] = f'{outfile_prefix}.proposal.json'
+ mmcv.dump(json_results, result_files['proposal'])
+ else:
+ raise TypeError('invalid type of results')
+ return result_files
+
+ def fast_eval_recall(self, results, proposal_nums, iou_thrs, logger=None):
+ gt_bboxes = []
+ for i in range(len(self.img_ids)):
+ ann_ids = self.coco.get_ann_ids(img_ids=self.img_ids[i])
+ ann_info = self.coco.load_anns(ann_ids)
+ if len(ann_info) == 0:
+ gt_bboxes.append(np.zeros((0, 4)))
+ continue
+ bboxes = []
+ for ann in ann_info:
+ if ann.get('ignore', False) or ann['iscrowd']:
+ continue
+ x1, y1, w, h = ann['bbox']
+ bboxes.append([x1, y1, x1 + w, y1 + h])
+ bboxes = np.array(bboxes, dtype=np.float32)
+ if bboxes.shape[0] == 0:
+ bboxes = np.zeros((0, 4))
+ gt_bboxes.append(bboxes)
+
+ recalls = eval_recalls(
+ gt_bboxes, results, proposal_nums, iou_thrs, logger=logger)
+ ar = recalls.mean(axis=1)
+ return ar
+
+ def format_results(self, results, jsonfile_prefix=None, **kwargs):
+ """Format the results to json (standard format for COCO evaluation).
+
+ Args:
+ results (list[tuple | numpy.ndarray]): Testing results of the
+ dataset.
+ jsonfile_prefix (str | None): The prefix of json files. It includes
+ the file path and the prefix of filename, e.g., "a/b/prefix".
+ If not specified, a temp file will be created. Default: None.
+
+ Returns:
+ tuple: (result_files, tmp_dir), result_files is a dict containing \
+                the json filepaths, tmp_dir is the temporary directory created \
+ for saving json files when jsonfile_prefix is not specified.
+ """
+ assert isinstance(results, list), 'results must be a list'
+ assert len(results) == len(self), (
+ 'The length of results is not equal to the dataset len: {} != {}'.
+ format(len(results), len(self)))
+
+ if jsonfile_prefix is None:
+ tmp_dir = tempfile.TemporaryDirectory()
+ jsonfile_prefix = osp.join(tmp_dir.name, 'results')
+ else:
+ tmp_dir = None
+ result_files = self.results2json(results, jsonfile_prefix)
+ return result_files, tmp_dir
+
+ def evaluate(self,
+ results,
+ metric='bbox',
+ logger=None,
+ jsonfile_prefix=None,
+ classwise=False,
+ proposal_nums=(100, 300, 1000),
+ iou_thrs=None,
+ metric_items=None):
+ """Evaluation in COCO protocol.
+
+ Args:
+ results (list[list | tuple]): Testing results of the dataset.
+ metric (str | list[str]): Metrics to be evaluated. Options are
+ 'bbox', 'segm', 'proposal', 'proposal_fast'.
+ logger (logging.Logger | str | None): Logger used for printing
+ related information during evaluation. Default: None.
+ jsonfile_prefix (str | None): The prefix of json files. It includes
+ the file path and the prefix of filename, e.g., "a/b/prefix".
+ If not specified, a temp file will be created. Default: None.
+            classwise (bool): Whether to evaluate the AP for each class.
+ proposal_nums (Sequence[int]): Proposal number used for evaluating
+ recalls, such as recall@100, recall@1000.
+ Default: (100, 300, 1000).
+ iou_thrs (Sequence[float], optional): IoU threshold used for
+ evaluating recalls/mAPs. If set to a list, the average of all
+ IoUs will also be computed. If not specified, [0.50, 0.55,
+ 0.60, 0.65, 0.70, 0.75, 0.80, 0.85, 0.90, 0.95] will be used.
+ Default: None.
+ metric_items (list[str] | str, optional): Metric items that will
+ be returned. If not specified, ``['AR@100', 'AR@300',
+ 'AR@1000', 'AR_s@1000', 'AR_m@1000', 'AR_l@1000' ]`` will be
+ used when ``metric=='proposal'``, ``['mAP', 'mAP_50', 'mAP_75',
+ 'mAP_s', 'mAP_m', 'mAP_l']`` will be used when
+ ``metric=='bbox' or metric=='segm'``.
+
+ Returns:
+ dict[str, float]: COCO style evaluation metric.
+ """
+
+ metrics = metric if isinstance(metric, list) else [metric]
+ allowed_metrics = ['bbox', 'segm', 'proposal', 'proposal_fast']
+ for metric in metrics:
+ if metric not in allowed_metrics:
+ raise KeyError(f'metric {metric} is not supported')
+ if iou_thrs is None:
+ iou_thrs = np.linspace(
+ .5, 0.95, int(np.round((0.95 - .5) / .05)) + 1, endpoint=True)
+ if metric_items is not None:
+ if not isinstance(metric_items, list):
+ metric_items = [metric_items]
+
+ result_files, tmp_dir = self.format_results(results, jsonfile_prefix)
+
+ eval_results = OrderedDict()
+ cocoGt = self.coco
+ for metric in metrics:
+ msg = f'Evaluating {metric}...'
+ if logger is None:
+ msg = '\n' + msg
+ print_log(msg, logger=logger)
+
+ if metric == 'proposal_fast':
+ ar = self.fast_eval_recall(
+ results, proposal_nums, iou_thrs, logger='silent')
+ log_msg = []
+ for i, num in enumerate(proposal_nums):
+ eval_results[f'AR@{num}'] = ar[i]
+ log_msg.append(f'\nAR@{num}\t{ar[i]:.4f}')
+ log_msg = ''.join(log_msg)
+ print_log(log_msg, logger=logger)
+ continue
+
+ if metric not in result_files:
+ raise KeyError(f'{metric} is not in results')
+ try:
+ cocoDt = cocoGt.loadRes(result_files[metric])
+ except IndexError:
+ print_log(
+ 'The testing results of the whole dataset is empty.',
+ logger=logger,
+ level=logging.ERROR)
+ break
+
+ iou_type = 'bbox' if metric == 'proposal' else metric
+ cocoEval = COCOeval(cocoGt, cocoDt, iou_type)
+ cocoEval.params.catIds = self.cat_ids
+ cocoEval.params.imgIds = self.img_ids
+ cocoEval.params.maxDets = list(proposal_nums)
+ cocoEval.params.iouThrs = iou_thrs
+ # mapping of cocoEval.stats
+ coco_metric_names = {
+ 'mAP': 0,
+ 'mAP_50': 1,
+ 'mAP_75': 2,
+ 'mAP_s': 3,
+ 'mAP_m': 4,
+ 'mAP_l': 5,
+ 'AR@100': 6,
+ 'AR@300': 7,
+ 'AR@1000': 8,
+ 'AR_s@1000': 9,
+ 'AR_m@1000': 10,
+ 'AR_l@1000': 11
+ }
+ if metric_items is not None:
+ for metric_item in metric_items:
+ if metric_item not in coco_metric_names:
+ raise KeyError(
+ f'metric item {metric_item} is not supported')
+
+ if metric == 'proposal':
+ cocoEval.params.useCats = 0
+ cocoEval.evaluate()
+ cocoEval.accumulate()
+ cocoEval.summarize()
+ if metric_items is None:
+ metric_items = [
+ 'AR@100', 'AR@300', 'AR@1000', 'AR_s@1000',
+ 'AR_m@1000', 'AR_l@1000'
+ ]
+
+ for item in metric_items:
+ val = float(
+ f'{cocoEval.stats[coco_metric_names[item]]:.3f}')
+ eval_results[item] = val
+ else:
+ cocoEval.evaluate()
+ cocoEval.accumulate()
+ cocoEval.summarize()
+ if classwise: # Compute per-category AP
+ # Compute per-category AP
+ # from https://github.com/facebookresearch/detectron2/
+ precisions = cocoEval.eval['precision']
+ # precision: (iou, recall, cls, area range, max dets)
+ assert len(self.cat_ids) == precisions.shape[2]
+
+ results_per_category = []
+ for idx, catId in enumerate(self.cat_ids):
+ # area range index 0: all area ranges
+ # max dets index -1: typically 100 per image
+ nm = self.coco.loadCats(catId)[0]
+ precision = precisions[:, :, idx, 0, -1]
+ precision = precision[precision > -1]
+ if precision.size:
+ ap = np.mean(precision)
+ else:
+ ap = float('nan')
+ results_per_category.append(
+ (f'{nm["name"]}', f'{float(ap):0.3f}'))
+
+ num_columns = min(6, len(results_per_category) * 2)
+ results_flatten = list(
+ itertools.chain(*results_per_category))
+ headers = ['category', 'AP'] * (num_columns // 2)
+ results_2d = itertools.zip_longest(*[
+ results_flatten[i::num_columns]
+ for i in range(num_columns)
+ ])
+ table_data = [headers]
+ table_data += [result for result in results_2d]
+ table = AsciiTable(table_data)
+ print_log('\n' + table.table, logger=logger)
+
+ if metric_items is None:
+ metric_items = [
+ 'mAP', 'mAP_50', 'mAP_75', 'mAP_s', 'mAP_m', 'mAP_l'
+ ]
+
+ for metric_item in metric_items:
+ key = f'{metric}_{metric_item}'
+ val = float(
+ f'{cocoEval.stats[coco_metric_names[metric_item]]:.3f}'
+ )
+ eval_results[key] = val
+ ap = cocoEval.stats[:6]
+ eval_results[f'{metric}_mAP_copypaste'] = (
+ f'{ap[0]:.3f} {ap[1]:.3f} {ap[2]:.3f} {ap[3]:.3f} '
+ f'{ap[4]:.3f} {ap[5]:.3f}')
+ if tmp_dir is not None:
+ tmp_dir.cleanup()
+ return eval_results
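+
+# Illustrative usage sketch (comments only); the paths are placeholders and
+# `outputs` stands for a list of per-image detector results.
+#
+#   dataset = CocoDataset(
+#       ann_file='data/coco/annotations/instances_val2017.json',
+#       pipeline=[],
+#       img_prefix='data/coco/val2017/')
+#   metrics = dataset.evaluate(outputs, metric='bbox', classwise=True)
+#   # -> OrderedDict with keys such as 'bbox_mAP', 'bbox_mAP_50', ...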
diff --git a/annotator/uniformer/mmdet/datasets/custom.py b/annotator/uniformer/mmdet/datasets/custom.py
new file mode 100644
index 0000000000000000000000000000000000000000..1a2351c217f43d32178053dfc682a2b241f9a3f1
--- /dev/null
+++ b/annotator/uniformer/mmdet/datasets/custom.py
@@ -0,0 +1,323 @@
+import os.path as osp
+import warnings
+from collections import OrderedDict
+
+import mmcv
+import numpy as np
+from mmcv.utils import print_log
+from torch.utils.data import Dataset
+
+from mmdet.core import eval_map, eval_recalls
+from .builder import DATASETS
+from .pipelines import Compose
+
+
+@DATASETS.register_module()
+class CustomDataset(Dataset):
+ """Custom dataset for detection.
+
+ The annotation format is shown as follows. The `ann` field is optional for
+ testing.
+
+ .. code-block:: none
+
+ [
+ {
+ 'filename': 'a.jpg',
+ 'width': 1280,
+ 'height': 720,
+ 'ann': {
+ 'bboxes': (n, 4) in (x1, y1, x2, y2) order.
+ 'labels': (n, ),
+ 'bboxes_ignore': (k, 4), (optional field)
+                    'labels_ignore': (k, ) (optional field)
+ }
+ },
+ ...
+ ]
+
+ Args:
+ ann_file (str): Annotation file path.
+ pipeline (list[dict]): Processing pipeline.
+ classes (str | Sequence[str], optional): Specify classes to load.
+ If is None, ``cls.CLASSES`` will be used. Default: None.
+ data_root (str, optional): Data root for ``ann_file``,
+ ``img_prefix``, ``seg_prefix``, ``proposal_file`` if specified.
+ test_mode (bool, optional): If set True, annotation will not be loaded.
+ filter_empty_gt (bool, optional): If set true, images without bounding
+ boxes of the dataset's classes will be filtered out. This option
+ only works when `test_mode=False`, i.e., we never filter images
+ during tests.
+ """
+
+ CLASSES = None
+
+ def __init__(self,
+ ann_file,
+ pipeline,
+ classes=None,
+ data_root=None,
+ img_prefix='',
+ seg_prefix=None,
+ proposal_file=None,
+ test_mode=False,
+ filter_empty_gt=True):
+ self.ann_file = ann_file
+ self.data_root = data_root
+ self.img_prefix = img_prefix
+ self.seg_prefix = seg_prefix
+ self.proposal_file = proposal_file
+ self.test_mode = test_mode
+ self.filter_empty_gt = filter_empty_gt
+ self.CLASSES = self.get_classes(classes)
+
+ # join paths if data_root is specified
+ if self.data_root is not None:
+ if not osp.isabs(self.ann_file):
+ self.ann_file = osp.join(self.data_root, self.ann_file)
+ if not (self.img_prefix is None or osp.isabs(self.img_prefix)):
+ self.img_prefix = osp.join(self.data_root, self.img_prefix)
+ if not (self.seg_prefix is None or osp.isabs(self.seg_prefix)):
+ self.seg_prefix = osp.join(self.data_root, self.seg_prefix)
+ if not (self.proposal_file is None
+ or osp.isabs(self.proposal_file)):
+ self.proposal_file = osp.join(self.data_root,
+ self.proposal_file)
+ # load annotations (and proposals)
+ self.data_infos = self.load_annotations(self.ann_file)
+
+ if self.proposal_file is not None:
+ self.proposals = self.load_proposals(self.proposal_file)
+ else:
+ self.proposals = None
+
+ # filter images too small and containing no annotations
+ if not test_mode:
+ valid_inds = self._filter_imgs()
+ self.data_infos = [self.data_infos[i] for i in valid_inds]
+ if self.proposals is not None:
+ self.proposals = [self.proposals[i] for i in valid_inds]
+ # set group flag for the sampler
+ self._set_group_flag()
+
+ # processing pipeline
+ self.pipeline = Compose(pipeline)
+
+ def __len__(self):
+ """Total number of samples of data."""
+ return len(self.data_infos)
+
+ def load_annotations(self, ann_file):
+ """Load annotation from annotation file."""
+ return mmcv.load(ann_file)
+
+ def load_proposals(self, proposal_file):
+ """Load proposal from proposal file."""
+ return mmcv.load(proposal_file)
+
+ def get_ann_info(self, idx):
+ """Get annotation by index.
+
+ Args:
+ idx (int): Index of data.
+
+ Returns:
+ dict: Annotation info of specified index.
+ """
+
+ return self.data_infos[idx]['ann']
+
+ def get_cat_ids(self, idx):
+ """Get category ids by index.
+
+ Args:
+ idx (int): Index of data.
+
+ Returns:
+ list[int]: All categories in the image of specified index.
+ """
+
+ return self.data_infos[idx]['ann']['labels'].astype(np.int).tolist()
+
+ def pre_pipeline(self, results):
+ """Prepare results dict for pipeline."""
+ results['img_prefix'] = self.img_prefix
+ results['seg_prefix'] = self.seg_prefix
+ results['proposal_file'] = self.proposal_file
+ results['bbox_fields'] = []
+ results['mask_fields'] = []
+ results['seg_fields'] = []
+
+ def _filter_imgs(self, min_size=32):
+ """Filter images too small."""
+ if self.filter_empty_gt:
+ warnings.warn(
+ 'CustomDataset does not support filtering empty gt images.')
+ valid_inds = []
+ for i, img_info in enumerate(self.data_infos):
+ if min(img_info['width'], img_info['height']) >= min_size:
+ valid_inds.append(i)
+ return valid_inds
+
+ def _set_group_flag(self):
+ """Set flag according to image aspect ratio.
+
+ Images with aspect ratio greater than 1 will be set as group 1,
+ otherwise group 0.
+ """
+ self.flag = np.zeros(len(self), dtype=np.uint8)
+ for i in range(len(self)):
+ img_info = self.data_infos[i]
+ if img_info['width'] / img_info['height'] > 1:
+ self.flag[i] = 1
+
+ def _rand_another(self, idx):
+ """Get another random index from the same group as the given index."""
+ pool = np.where(self.flag == self.flag[idx])[0]
+ return np.random.choice(pool)
+
+ def __getitem__(self, idx):
+ """Get training/test data after pipeline.
+
+ Args:
+ idx (int): Index of data.
+
+ Returns:
+ dict: Training/test data (with annotation if `test_mode` is set \
+                False).
+ """
+
+ if self.test_mode:
+ return self.prepare_test_img(idx)
+ while True:
+ data = self.prepare_train_img(idx)
+ if data is None:
+ idx = self._rand_another(idx)
+ continue
+ return data
+
+ def prepare_train_img(self, idx):
+ """Get training data and annotations after pipeline.
+
+ Args:
+ idx (int): Index of data.
+
+ Returns:
+ dict: Training data and annotation after pipeline with new keys \
+ introduced by pipeline.
+ """
+
+ img_info = self.data_infos[idx]
+ ann_info = self.get_ann_info(idx)
+ results = dict(img_info=img_info, ann_info=ann_info)
+ if self.proposals is not None:
+ results['proposals'] = self.proposals[idx]
+ self.pre_pipeline(results)
+ return self.pipeline(results)
+
+ def prepare_test_img(self, idx):
+ """Get testing data after pipeline.
+
+ Args:
+ idx (int): Index of data.
+
+ Returns:
+ dict: Testing data after pipeline with new keys introduced by \
+ pipeline.
+ """
+
+ img_info = self.data_infos[idx]
+ results = dict(img_info=img_info)
+ if self.proposals is not None:
+ results['proposals'] = self.proposals[idx]
+ self.pre_pipeline(results)
+ return self.pipeline(results)
+
+ @classmethod
+ def get_classes(cls, classes=None):
+ """Get class names of current dataset.
+
+ Args:
+ classes (Sequence[str] | str | None): If classes is None, use
+ default CLASSES defined by builtin dataset. If classes is a
+ string, take it as a file name. The file contains the name of
+ classes where each line contains one class name. If classes is
+ a tuple or list, override the CLASSES defined by the dataset.
+
+ Returns:
+ tuple[str] or list[str]: Names of categories of the dataset.
+ """
+ if classes is None:
+ return cls.CLASSES
+
+ if isinstance(classes, str):
+ # take it as a file path
+ class_names = mmcv.list_from_file(classes)
+ elif isinstance(classes, (tuple, list)):
+ class_names = classes
+ else:
+ raise ValueError(f'Unsupported type {type(classes)} of classes.')
+
+ return class_names
+
+ def format_results(self, results, **kwargs):
+        """Placeholder to format results to dataset-specific output."""
+
+ def evaluate(self,
+ results,
+ metric='mAP',
+ logger=None,
+ proposal_nums=(100, 300, 1000),
+ iou_thr=0.5,
+ scale_ranges=None):
+ """Evaluate the dataset.
+
+ Args:
+ results (list): Testing results of the dataset.
+ metric (str | list[str]): Metrics to be evaluated.
+ logger (logging.Logger | None | str): Logger used for printing
+ related information during evaluation. Default: None.
+ proposal_nums (Sequence[int]): Proposal number used for evaluating
+ recalls, such as recall@100, recall@1000.
+ Default: (100, 300, 1000).
+ iou_thr (float | list[float]): IoU threshold. Default: 0.5.
+ scale_ranges (list[tuple] | None): Scale ranges for evaluating mAP.
+ Default: None.
+ """
+
+ if not isinstance(metric, str):
+ assert len(metric) == 1
+ metric = metric[0]
+ allowed_metrics = ['mAP', 'recall']
+ if metric not in allowed_metrics:
+ raise KeyError(f'metric {metric} is not supported')
+ annotations = [self.get_ann_info(i) for i in range(len(self))]
+ eval_results = OrderedDict()
+ iou_thrs = [iou_thr] if isinstance(iou_thr, float) else iou_thr
+ if metric == 'mAP':
+ assert isinstance(iou_thrs, list)
+ mean_aps = []
+ for iou_thr in iou_thrs:
+ print_log(f'\n{"-" * 15}iou_thr: {iou_thr}{"-" * 15}')
+ mean_ap, _ = eval_map(
+ results,
+ annotations,
+ scale_ranges=scale_ranges,
+ iou_thr=iou_thr,
+ dataset=self.CLASSES,
+ logger=logger)
+ mean_aps.append(mean_ap)
+ eval_results[f'AP{int(iou_thr * 100):02d}'] = round(mean_ap, 3)
+ eval_results['mAP'] = sum(mean_aps) / len(mean_aps)
+ elif metric == 'recall':
+ gt_bboxes = [ann['bboxes'] for ann in annotations]
+ recalls = eval_recalls(
+ gt_bboxes, results, proposal_nums, iou_thr, logger=logger)
+ for i, num in enumerate(proposal_nums):
+ for j, iou in enumerate(iou_thrs):
+ eval_results[f'recall@{num}@{iou}'] = recalls[i, j]
+ if recalls.shape[1] > 1:
+ ar = recalls.mean(axis=1)
+ for i, num in enumerate(proposal_nums):
+ eval_results[f'AR@{num}'] = ar[i]
+ return eval_results
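+
+# Illustrative sketch of a minimal subclass (comments only); MyDataset and the
+# annotation file are hypothetical, not part of this repo.
+#
+#   @DATASETS.register_module()
+#   class MyDataset(CustomDataset):
+#       CLASSES = ('cat', 'dog')
+#
+#       def load_annotations(self, ann_file):
+#           # must return a list of dicts in the format documented in the
+#           # CustomDataset docstring (filename / width / height / ann)
+#           return mmcv.load(ann_file)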
diff --git a/annotator/uniformer/mmdet/datasets/dataset_wrappers.py b/annotator/uniformer/mmdet/datasets/dataset_wrappers.py
new file mode 100644
index 0000000000000000000000000000000000000000..55ad5cb60e581a96bdbd1fbbeebc2f46f8c4e899
--- /dev/null
+++ b/annotator/uniformer/mmdet/datasets/dataset_wrappers.py
@@ -0,0 +1,282 @@
+import bisect
+import math
+from collections import defaultdict
+
+import numpy as np
+from mmcv.utils import print_log
+from torch.utils.data.dataset import ConcatDataset as _ConcatDataset
+
+from .builder import DATASETS
+from .coco import CocoDataset
+
+
+@DATASETS.register_module()
+class ConcatDataset(_ConcatDataset):
+ """A wrapper of concatenated dataset.
+
+ Same as :obj:`torch.utils.data.dataset.ConcatDataset`, but
+ concat the group flag for image aspect ratio.
+
+ Args:
+ datasets (list[:obj:`Dataset`]): A list of datasets.
+ separate_eval (bool): Whether to evaluate the results
+ separately if it is used as validation dataset.
+ Defaults to True.
+ """
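+    # Illustrative note: with separate_eval=True and two concatenated
+    # datasets, evaluate() returns metrics under prefixed keys such as
+    # '0_mAP' for the first dataset and '1_mAP' for the second.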
+
+ def __init__(self, datasets, separate_eval=True):
+ super(ConcatDataset, self).__init__(datasets)
+ self.CLASSES = datasets[0].CLASSES
+ self.separate_eval = separate_eval
+ if not separate_eval:
+ if any([isinstance(ds, CocoDataset) for ds in datasets]):
+ raise NotImplementedError(
+ 'Evaluating concatenated CocoDataset as a whole is not'
+ ' supported! Please set "separate_eval=True"')
+ elif len(set([type(ds) for ds in datasets])) != 1:
+ raise NotImplementedError(
+ 'All the datasets should have same types')
+
+ if hasattr(datasets[0], 'flag'):
+ flags = []
+ for i in range(0, len(datasets)):
+ flags.append(datasets[i].flag)
+ self.flag = np.concatenate(flags)
+
+ def get_cat_ids(self, idx):
+ """Get category ids of concatenated dataset by index.
+
+ Args:
+ idx (int): Index of data.
+
+ Returns:
+ list[int]: All categories in the image of specified index.
+ """
+
+ if idx < 0:
+ if -idx > len(self):
+ raise ValueError(
+ 'absolute value of index should not exceed dataset length')
+ idx = len(self) + idx
+ dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx)
+ if dataset_idx == 0:
+ sample_idx = idx
+ else:
+ sample_idx = idx - self.cumulative_sizes[dataset_idx - 1]
+ return self.datasets[dataset_idx].get_cat_ids(sample_idx)
+
+ def evaluate(self, results, logger=None, **kwargs):
+ """Evaluate the results.
+
+ Args:
+ results (list[list | tuple]): Testing results of the dataset.
+ logger (logging.Logger | str | None): Logger used for printing
+ related information during evaluation. Default: None.
+
+ Returns:
+ dict[str: float]: AP results of the total dataset or each separate
+ dataset if `self.separate_eval=True`.
+ """
+ assert len(results) == self.cumulative_sizes[-1], \
+ ('Dataset and results have different sizes: '
+ f'{self.cumulative_sizes[-1]} v.s. {len(results)}')
+
+ # Check whether all the datasets support evaluation
+ for dataset in self.datasets:
+ assert hasattr(dataset, 'evaluate'), \
+ f'{type(dataset)} does not implement evaluate function'
+
+ if self.separate_eval:
+ dataset_idx = -1
+ total_eval_results = dict()
+ for size, dataset in zip(self.cumulative_sizes, self.datasets):
+ start_idx = 0 if dataset_idx == -1 else \
+ self.cumulative_sizes[dataset_idx]
+ end_idx = self.cumulative_sizes[dataset_idx + 1]
+
+ results_per_dataset = results[start_idx:end_idx]
+ print_log(
+                    f'\nEvaluating {dataset.ann_file} with '
+ f'{len(results_per_dataset)} images now',
+ logger=logger)
+
+ eval_results_per_dataset = dataset.evaluate(
+ results_per_dataset, logger=logger, **kwargs)
+ dataset_idx += 1
+ for k, v in eval_results_per_dataset.items():
+ total_eval_results.update({f'{dataset_idx}_{k}': v})
+
+ return total_eval_results
+ elif any([isinstance(ds, CocoDataset) for ds in self.datasets]):
+ raise NotImplementedError(
+ 'Evaluating concatenated CocoDataset as a whole is not'
+ ' supported! Please set "separate_eval=True"')
+ elif len(set([type(ds) for ds in self.datasets])) != 1:
+ raise NotImplementedError(
+ 'All the datasets should have same types')
+ else:
+ original_data_infos = self.datasets[0].data_infos
+ self.datasets[0].data_infos = sum(
+ [dataset.data_infos for dataset in self.datasets], [])
+ eval_results = self.datasets[0].evaluate(
+ results, logger=logger, **kwargs)
+ self.datasets[0].data_infos = original_data_infos
+ return eval_results
+
+
+@DATASETS.register_module()
+class RepeatDataset(object):
+ """A wrapper of repeated dataset.
+
+ The length of repeated dataset will be `times` larger than the original
+ dataset. This is useful when the data loading time is long but the dataset
+ is small. Using RepeatDataset can reduce the data loading time between
+ epochs.
+
+ Args:
+ dataset (:obj:`Dataset`): The dataset to be repeated.
+ times (int): Repeat times.
+ """
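+    # Illustrative example: with times=3 and an original dataset of 100
+    # samples, len(self) == 300 and __getitem__(250) returns
+    # self.dataset[250 % 100], i.e. original index 50.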
+
+ def __init__(self, dataset, times):
+ self.dataset = dataset
+ self.times = times
+ self.CLASSES = dataset.CLASSES
+ if hasattr(self.dataset, 'flag'):
+ self.flag = np.tile(self.dataset.flag, times)
+
+ self._ori_len = len(self.dataset)
+
+ def __getitem__(self, idx):
+ return self.dataset[idx % self._ori_len]
+
+ def get_cat_ids(self, idx):
+ """Get category ids of repeat dataset by index.
+
+ Args:
+ idx (int): Index of data.
+
+ Returns:
+ list[int]: All categories in the image of specified index.
+ """
+
+ return self.dataset.get_cat_ids(idx % self._ori_len)
+
+ def __len__(self):
+ """Length after repetition."""
+ return self.times * self._ori_len
+
+
+# Modified from https://github.com/facebookresearch/detectron2/blob/41d475b75a230221e21d9cac5d69655e3415e3a4/detectron2/data/samplers/distributed_sampler.py#L57 # noqa
+@DATASETS.register_module()
+class ClassBalancedDataset(object):
+ """A wrapper of repeated dataset with repeat factor.
+
+ Suitable for training on class imbalanced datasets like LVIS. Following
+ the sampling strategy in the `paper `_,
+ in each epoch, an image may appear multiple times based on its
+ "repeat factor".
+ The repeat factor for an image is a function of the frequency the rarest
+ category labeled in that image. The "frequency of category c" in [0, 1]
+ is defined by the fraction of images in the training set (without repeats)
+ in which category c appears.
+    The dataset needs to implement :func:`self.get_cat_ids` to support
+ ClassBalancedDataset.
+
+    The repeat factor is computed as follows.
+
+    1. For each category c, compute the fraction of images
+       that contain it: :math:`f(c)`
+ 2. For each category c, compute the category-level repeat factor:
+ :math:`r(c) = max(1, sqrt(t/f(c)))`
+ 3. For each image I, compute the image-level repeat factor:
+ :math:`r(I) = max_{c in I} r(c)`
+
+ Args:
+ dataset (:obj:`CustomDataset`): The dataset to be repeated.
+ oversample_thr (float): frequency threshold below which data is
+ repeated. For categories with ``f_c >= oversample_thr``, there is
+ no oversampling. For categories with ``f_c < oversample_thr``, the
+            degree of oversampling follows the square-root inverse frequency
+ heuristic above.
+ filter_empty_gt (bool, optional): If set true, images without bounding
+ boxes will not be oversampled. Otherwise, they will be categorized
+ as the pure background class and involved into the oversampling.
+ Default: True.
+ """
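+    # Worked example (illustrative numbers): with oversample_thr = 0.01 and a
+    # rarest-category frequency f(c) = 0.0025, r(c) = max(1, sqrt(0.01 / 0.0025))
+    # = 2, so each image whose rarest category is c appears twice per epoch.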
+
+ def __init__(self, dataset, oversample_thr, filter_empty_gt=True):
+ self.dataset = dataset
+ self.oversample_thr = oversample_thr
+ self.filter_empty_gt = filter_empty_gt
+ self.CLASSES = dataset.CLASSES
+
+ repeat_factors = self._get_repeat_factors(dataset, oversample_thr)
+ repeat_indices = []
+ for dataset_idx, repeat_factor in enumerate(repeat_factors):
+ repeat_indices.extend([dataset_idx] * math.ceil(repeat_factor))
+ self.repeat_indices = repeat_indices
+
+ flags = []
+ if hasattr(self.dataset, 'flag'):
+ for flag, repeat_factor in zip(self.dataset.flag, repeat_factors):
+ flags.extend([flag] * int(math.ceil(repeat_factor)))
+ assert len(flags) == len(repeat_indices)
+ self.flag = np.asarray(flags, dtype=np.uint8)
+
+ def _get_repeat_factors(self, dataset, repeat_thr):
+        """Get the repeat factor for each image in the dataset.
+
+ Args:
+ dataset (:obj:`CustomDataset`): The dataset
+            repeat_thr (float): The frequency threshold. If an image
+                contains categories whose frequency is below the threshold,
+                it will be repeated.
+
+ Returns:
+            list[float]: The repeat factors for each image in the dataset.
+ """
+
+ # 1. For each category c, compute the fraction # of images
+ # that contain it: f(c)
+ category_freq = defaultdict(int)
+ num_images = len(dataset)
+ for idx in range(num_images):
+ cat_ids = set(self.dataset.get_cat_ids(idx))
+ if len(cat_ids) == 0 and not self.filter_empty_gt:
+ cat_ids = set([len(self.CLASSES)])
+ for cat_id in cat_ids:
+ category_freq[cat_id] += 1
+ for k, v in category_freq.items():
+ category_freq[k] = v / num_images
+
+ # 2. For each category c, compute the category-level repeat factor:
+ # r(c) = max(1, sqrt(t/f(c)))
+ category_repeat = {
+ cat_id: max(1.0, math.sqrt(repeat_thr / cat_freq))
+ for cat_id, cat_freq in category_freq.items()
+ }
+
+ # 3. For each image I, compute the image-level repeat factor:
+ # r(I) = max_{c in I} r(c)
+ repeat_factors = []
+ for idx in range(num_images):
+ cat_ids = set(self.dataset.get_cat_ids(idx))
+ if len(cat_ids) == 0 and not self.filter_empty_gt:
+ cat_ids = set([len(self.CLASSES)])
+ repeat_factor = 1
+ if len(cat_ids) > 0:
+ repeat_factor = max(
+ {category_repeat[cat_id]
+ for cat_id in cat_ids})
+ repeat_factors.append(repeat_factor)
+
+ return repeat_factors
+
+ def __getitem__(self, idx):
+ ori_index = self.repeat_indices[idx]
+ return self.dataset[ori_index]
+
+ def __len__(self):
+ """Length after repetition."""
+ return len(self.repeat_indices)
diff --git a/annotator/uniformer/mmdet/datasets/deepfashion.py b/annotator/uniformer/mmdet/datasets/deepfashion.py
new file mode 100644
index 0000000000000000000000000000000000000000..1125376091f2d4ee6843ae4f2156b3b0453be369
--- /dev/null
+++ b/annotator/uniformer/mmdet/datasets/deepfashion.py
@@ -0,0 +1,10 @@
+from .builder import DATASETS
+from .coco import CocoDataset
+
+
+@DATASETS.register_module()
+class DeepFashionDataset(CocoDataset):
+
+ CLASSES = ('top', 'skirt', 'leggings', 'dress', 'outer', 'pants', 'bag',
+ 'neckwear', 'headwear', 'eyeglass', 'belt', 'footwear', 'hair',
+ 'skin', 'face')
diff --git a/annotator/uniformer/mmdet/datasets/lvis.py b/annotator/uniformer/mmdet/datasets/lvis.py
new file mode 100644
index 0000000000000000000000000000000000000000..122c64e79cf5f060d7ceddf4ad29c4debe40944b
--- /dev/null
+++ b/annotator/uniformer/mmdet/datasets/lvis.py
@@ -0,0 +1,742 @@
+import itertools
+import logging
+import os.path as osp
+import tempfile
+from collections import OrderedDict
+
+import numpy as np
+from mmcv.utils import print_log
+from terminaltables import AsciiTable
+
+from .builder import DATASETS
+from .coco import CocoDataset
+
+
+@DATASETS.register_module()
+class LVISV05Dataset(CocoDataset):
+
+ CLASSES = (
+ 'acorn', 'aerosol_can', 'air_conditioner', 'airplane', 'alarm_clock',
+ 'alcohol', 'alligator', 'almond', 'ambulance', 'amplifier', 'anklet',
+ 'antenna', 'apple', 'apple_juice', 'applesauce', 'apricot', 'apron',
+ 'aquarium', 'armband', 'armchair', 'armoire', 'armor', 'artichoke',
+ 'trash_can', 'ashtray', 'asparagus', 'atomizer', 'avocado', 'award',
+ 'awning', 'ax', 'baby_buggy', 'basketball_backboard', 'backpack',
+ 'handbag', 'suitcase', 'bagel', 'bagpipe', 'baguet', 'bait', 'ball',
+ 'ballet_skirt', 'balloon', 'bamboo', 'banana', 'Band_Aid', 'bandage',
+ 'bandanna', 'banjo', 'banner', 'barbell', 'barge', 'barrel',
+ 'barrette', 'barrow', 'baseball_base', 'baseball', 'baseball_bat',
+ 'baseball_cap', 'baseball_glove', 'basket', 'basketball_hoop',
+ 'basketball', 'bass_horn', 'bat_(animal)', 'bath_mat', 'bath_towel',
+ 'bathrobe', 'bathtub', 'batter_(food)', 'battery', 'beachball', 'bead',
+ 'beaker', 'bean_curd', 'beanbag', 'beanie', 'bear', 'bed',
+ 'bedspread', 'cow', 'beef_(food)', 'beeper', 'beer_bottle', 'beer_can',
+ 'beetle', 'bell', 'bell_pepper', 'belt', 'belt_buckle', 'bench',
+ 'beret', 'bib', 'Bible', 'bicycle', 'visor', 'binder', 'binoculars',
+ 'bird', 'birdfeeder', 'birdbath', 'birdcage', 'birdhouse',
+ 'birthday_cake', 'birthday_card', 'biscuit_(bread)', 'pirate_flag',
+ 'black_sheep', 'blackboard', 'blanket', 'blazer', 'blender', 'blimp',
+ 'blinker', 'blueberry', 'boar', 'gameboard', 'boat', 'bobbin',
+ 'bobby_pin', 'boiled_egg', 'bolo_tie', 'deadbolt', 'bolt', 'bonnet',
+ 'book', 'book_bag', 'bookcase', 'booklet', 'bookmark',
+ 'boom_microphone', 'boot', 'bottle', 'bottle_opener', 'bouquet',
+ 'bow_(weapon)', 'bow_(decorative_ribbons)', 'bow-tie', 'bowl',
+ 'pipe_bowl', 'bowler_hat', 'bowling_ball', 'bowling_pin',
+ 'boxing_glove', 'suspenders', 'bracelet', 'brass_plaque', 'brassiere',
+ 'bread-bin', 'breechcloth', 'bridal_gown', 'briefcase',
+ 'bristle_brush', 'broccoli', 'broach', 'broom', 'brownie',
+ 'brussels_sprouts', 'bubble_gum', 'bucket', 'horse_buggy', 'bull',
+ 'bulldog', 'bulldozer', 'bullet_train', 'bulletin_board',
+ 'bulletproof_vest', 'bullhorn', 'corned_beef', 'bun', 'bunk_bed',
+ 'buoy', 'burrito', 'bus_(vehicle)', 'business_card', 'butcher_knife',
+ 'butter', 'butterfly', 'button', 'cab_(taxi)', 'cabana', 'cabin_car',
+ 'cabinet', 'locker', 'cake', 'calculator', 'calendar', 'calf',
+ 'camcorder', 'camel', 'camera', 'camera_lens', 'camper_(vehicle)',
+ 'can', 'can_opener', 'candelabrum', 'candle', 'candle_holder',
+ 'candy_bar', 'candy_cane', 'walking_cane', 'canister', 'cannon',
+ 'canoe', 'cantaloup', 'canteen', 'cap_(headwear)', 'bottle_cap',
+ 'cape', 'cappuccino', 'car_(automobile)', 'railcar_(part_of_a_train)',
+ 'elevator_car', 'car_battery', 'identity_card', 'card', 'cardigan',
+ 'cargo_ship', 'carnation', 'horse_carriage', 'carrot', 'tote_bag',
+ 'cart', 'carton', 'cash_register', 'casserole', 'cassette', 'cast',
+ 'cat', 'cauliflower', 'caviar', 'cayenne_(spice)', 'CD_player',
+ 'celery', 'cellular_telephone', 'chain_mail', 'chair', 'chaise_longue',
+ 'champagne', 'chandelier', 'chap', 'checkbook', 'checkerboard',
+ 'cherry', 'chessboard', 'chest_of_drawers_(furniture)',
+ 'chicken_(animal)', 'chicken_wire', 'chickpea', 'Chihuahua',
+ 'chili_(vegetable)', 'chime', 'chinaware', 'crisp_(potato_chip)',
+ 'poker_chip', 'chocolate_bar', 'chocolate_cake', 'chocolate_milk',
+ 'chocolate_mousse', 'choker', 'chopping_board', 'chopstick',
+ 'Christmas_tree', 'slide', 'cider', 'cigar_box', 'cigarette',
+ 'cigarette_case', 'cistern', 'clarinet', 'clasp', 'cleansing_agent',
+ 'clementine', 'clip', 'clipboard', 'clock', 'clock_tower',
+ 'clothes_hamper', 'clothespin', 'clutch_bag', 'coaster', 'coat',
+ 'coat_hanger', 'coatrack', 'cock', 'coconut', 'coffee_filter',
+ 'coffee_maker', 'coffee_table', 'coffeepot', 'coil', 'coin',
+ 'colander', 'coleslaw', 'coloring_material', 'combination_lock',
+ 'pacifier', 'comic_book', 'computer_keyboard', 'concrete_mixer',
+ 'cone', 'control', 'convertible_(automobile)', 'sofa_bed', 'cookie',
+ 'cookie_jar', 'cooking_utensil', 'cooler_(for_food)',
+ 'cork_(bottle_plug)', 'corkboard', 'corkscrew', 'edible_corn',
+ 'cornbread', 'cornet', 'cornice', 'cornmeal', 'corset',
+ 'romaine_lettuce', 'costume', 'cougar', 'coverall', 'cowbell',
+ 'cowboy_hat', 'crab_(animal)', 'cracker', 'crape', 'crate', 'crayon',
+ 'cream_pitcher', 'credit_card', 'crescent_roll', 'crib', 'crock_pot',
+ 'crossbar', 'crouton', 'crow', 'crown', 'crucifix', 'cruise_ship',
+ 'police_cruiser', 'crumb', 'crutch', 'cub_(animal)', 'cube',
+ 'cucumber', 'cufflink', 'cup', 'trophy_cup', 'cupcake', 'hair_curler',
+ 'curling_iron', 'curtain', 'cushion', 'custard', 'cutting_tool',
+ 'cylinder', 'cymbal', 'dachshund', 'dagger', 'dartboard',
+ 'date_(fruit)', 'deck_chair', 'deer', 'dental_floss', 'desk',
+ 'detergent', 'diaper', 'diary', 'die', 'dinghy', 'dining_table', 'tux',
+ 'dish', 'dish_antenna', 'dishrag', 'dishtowel', 'dishwasher',
+ 'dishwasher_detergent', 'diskette', 'dispenser', 'Dixie_cup', 'dog',
+ 'dog_collar', 'doll', 'dollar', 'dolphin', 'domestic_ass', 'eye_mask',
+ 'doorbell', 'doorknob', 'doormat', 'doughnut', 'dove', 'dragonfly',
+ 'drawer', 'underdrawers', 'dress', 'dress_hat', 'dress_suit',
+ 'dresser', 'drill', 'drinking_fountain', 'drone', 'dropper',
+ 'drum_(musical_instrument)', 'drumstick', 'duck', 'duckling',
+ 'duct_tape', 'duffel_bag', 'dumbbell', 'dumpster', 'dustpan',
+ 'Dutch_oven', 'eagle', 'earphone', 'earplug', 'earring', 'easel',
+ 'eclair', 'eel', 'egg', 'egg_roll', 'egg_yolk', 'eggbeater',
+ 'eggplant', 'electric_chair', 'refrigerator', 'elephant', 'elk',
+ 'envelope', 'eraser', 'escargot', 'eyepatch', 'falcon', 'fan',
+ 'faucet', 'fedora', 'ferret', 'Ferris_wheel', 'ferry', 'fig_(fruit)',
+ 'fighter_jet', 'figurine', 'file_cabinet', 'file_(tool)', 'fire_alarm',
+ 'fire_engine', 'fire_extinguisher', 'fire_hose', 'fireplace',
+ 'fireplug', 'fish', 'fish_(food)', 'fishbowl', 'fishing_boat',
+ 'fishing_rod', 'flag', 'flagpole', 'flamingo', 'flannel', 'flash',
+ 'flashlight', 'fleece', 'flip-flop_(sandal)', 'flipper_(footwear)',
+ 'flower_arrangement', 'flute_glass', 'foal', 'folding_chair',
+ 'food_processor', 'football_(American)', 'football_helmet',
+ 'footstool', 'fork', 'forklift', 'freight_car', 'French_toast',
+ 'freshener', 'frisbee', 'frog', 'fruit_juice', 'fruit_salad',
+ 'frying_pan', 'fudge', 'funnel', 'futon', 'gag', 'garbage',
+ 'garbage_truck', 'garden_hose', 'gargle', 'gargoyle', 'garlic',
+ 'gasmask', 'gazelle', 'gelatin', 'gemstone', 'giant_panda',
+ 'gift_wrap', 'ginger', 'giraffe', 'cincture',
+ 'glass_(drink_container)', 'globe', 'glove', 'goat', 'goggles',
+ 'goldfish', 'golf_club', 'golfcart', 'gondola_(boat)', 'goose',
+ 'gorilla', 'gourd', 'surgical_gown', 'grape', 'grasshopper', 'grater',
+ 'gravestone', 'gravy_boat', 'green_bean', 'green_onion', 'griddle',
+ 'grillroom', 'grinder_(tool)', 'grits', 'grizzly', 'grocery_bag',
+ 'guacamole', 'guitar', 'gull', 'gun', 'hair_spray', 'hairbrush',
+ 'hairnet', 'hairpin', 'ham', 'hamburger', 'hammer', 'hammock',
+ 'hamper', 'hamster', 'hair_dryer', 'hand_glass', 'hand_towel',
+ 'handcart', 'handcuff', 'handkerchief', 'handle', 'handsaw',
+ 'hardback_book', 'harmonium', 'hat', 'hatbox', 'hatch', 'veil',
+ 'headband', 'headboard', 'headlight', 'headscarf', 'headset',
+ 'headstall_(for_horses)', 'hearing_aid', 'heart', 'heater',
+ 'helicopter', 'helmet', 'heron', 'highchair', 'hinge', 'hippopotamus',
+ 'hockey_stick', 'hog', 'home_plate_(baseball)', 'honey', 'fume_hood',
+ 'hook', 'horse', 'hose', 'hot-air_balloon', 'hotplate', 'hot_sauce',
+ 'hourglass', 'houseboat', 'hummingbird', 'hummus', 'polar_bear',
+ 'icecream', 'popsicle', 'ice_maker', 'ice_pack', 'ice_skate',
+ 'ice_tea', 'igniter', 'incense', 'inhaler', 'iPod',
+ 'iron_(for_clothing)', 'ironing_board', 'jacket', 'jam', 'jean',
+ 'jeep', 'jelly_bean', 'jersey', 'jet_plane', 'jewelry', 'joystick',
+ 'jumpsuit', 'kayak', 'keg', 'kennel', 'kettle', 'key', 'keycard',
+ 'kilt', 'kimono', 'kitchen_sink', 'kitchen_table', 'kite', 'kitten',
+ 'kiwi_fruit', 'knee_pad', 'knife', 'knight_(chess_piece)',
+ 'knitting_needle', 'knob', 'knocker_(on_a_door)', 'koala', 'lab_coat',
+ 'ladder', 'ladle', 'ladybug', 'lamb_(animal)', 'lamb-chop', 'lamp',
+ 'lamppost', 'lampshade', 'lantern', 'lanyard', 'laptop_computer',
+ 'lasagna', 'latch', 'lawn_mower', 'leather', 'legging_(clothing)',
+ 'Lego', 'lemon', 'lemonade', 'lettuce', 'license_plate', 'life_buoy',
+ 'life_jacket', 'lightbulb', 'lightning_rod', 'lime', 'limousine',
+ 'linen_paper', 'lion', 'lip_balm', 'lipstick', 'liquor', 'lizard',
+ 'Loafer_(type_of_shoe)', 'log', 'lollipop', 'lotion',
+ 'speaker_(stero_equipment)', 'loveseat', 'machine_gun', 'magazine',
+ 'magnet', 'mail_slot', 'mailbox_(at_home)', 'mallet', 'mammoth',
+ 'mandarin_orange', 'manger', 'manhole', 'map', 'marker', 'martini',
+ 'mascot', 'mashed_potato', 'masher', 'mask', 'mast',
+ 'mat_(gym_equipment)', 'matchbox', 'mattress', 'measuring_cup',
+ 'measuring_stick', 'meatball', 'medicine', 'melon', 'microphone',
+ 'microscope', 'microwave_oven', 'milestone', 'milk', 'minivan',
+ 'mint_candy', 'mirror', 'mitten', 'mixer_(kitchen_tool)', 'money',
+ 'monitor_(computer_equipment) computer_monitor', 'monkey', 'motor',
+ 'motor_scooter', 'motor_vehicle', 'motorboat', 'motorcycle',
+ 'mound_(baseball)', 'mouse_(animal_rodent)',
+ 'mouse_(computer_equipment)', 'mousepad', 'muffin', 'mug', 'mushroom',
+ 'music_stool', 'musical_instrument', 'nailfile', 'nameplate', 'napkin',
+ 'neckerchief', 'necklace', 'necktie', 'needle', 'nest', 'newsstand',
+ 'nightshirt', 'nosebag_(for_animals)', 'noseband_(for_animals)',
+ 'notebook', 'notepad', 'nut', 'nutcracker', 'oar', 'octopus_(food)',
+ 'octopus_(animal)', 'oil_lamp', 'olive_oil', 'omelet', 'onion',
+ 'orange_(fruit)', 'orange_juice', 'oregano', 'ostrich', 'ottoman',
+ 'overalls_(clothing)', 'owl', 'packet', 'inkpad', 'pad', 'paddle',
+ 'padlock', 'paintbox', 'paintbrush', 'painting', 'pajamas', 'palette',
+ 'pan_(for_cooking)', 'pan_(metal_container)', 'pancake', 'pantyhose',
+ 'papaya', 'paperclip', 'paper_plate', 'paper_towel', 'paperback_book',
+ 'paperweight', 'parachute', 'parakeet', 'parasail_(sports)',
+ 'parchment', 'parka', 'parking_meter', 'parrot',
+ 'passenger_car_(part_of_a_train)', 'passenger_ship', 'passport',
+ 'pastry', 'patty_(food)', 'pea_(food)', 'peach', 'peanut_butter',
+ 'pear', 'peeler_(tool_for_fruit_and_vegetables)', 'pegboard',
+ 'pelican', 'pen', 'pencil', 'pencil_box', 'pencil_sharpener',
+ 'pendulum', 'penguin', 'pennant', 'penny_(coin)', 'pepper',
+ 'pepper_mill', 'perfume', 'persimmon', 'baby', 'pet', 'petfood',
+ 'pew_(church_bench)', 'phonebook', 'phonograph_record', 'piano',
+ 'pickle', 'pickup_truck', 'pie', 'pigeon', 'piggy_bank', 'pillow',
+ 'pin_(non_jewelry)', 'pineapple', 'pinecone', 'ping-pong_ball',
+ 'pinwheel', 'tobacco_pipe', 'pipe', 'pistol', 'pita_(bread)',
+ 'pitcher_(vessel_for_liquid)', 'pitchfork', 'pizza', 'place_mat',
+ 'plate', 'platter', 'playing_card', 'playpen', 'pliers',
+ 'plow_(farm_equipment)', 'pocket_watch', 'pocketknife',
+ 'poker_(fire_stirring_tool)', 'pole', 'police_van', 'polo_shirt',
+ 'poncho', 'pony', 'pool_table', 'pop_(soda)', 'portrait',
+ 'postbox_(public)', 'postcard', 'poster', 'pot', 'flowerpot', 'potato',
+ 'potholder', 'pottery', 'pouch', 'power_shovel', 'prawn', 'printer',
+ 'projectile_(weapon)', 'projector', 'propeller', 'prune', 'pudding',
+ 'puffer_(fish)', 'puffin', 'pug-dog', 'pumpkin', 'puncher', 'puppet',
+ 'puppy', 'quesadilla', 'quiche', 'quilt', 'rabbit', 'race_car',
+ 'racket', 'radar', 'radiator', 'radio_receiver', 'radish', 'raft',
+ 'rag_doll', 'raincoat', 'ram_(animal)', 'raspberry', 'rat',
+ 'razorblade', 'reamer_(juicer)', 'rearview_mirror', 'receipt',
+ 'recliner', 'record_player', 'red_cabbage', 'reflector',
+ 'remote_control', 'rhinoceros', 'rib_(food)', 'rifle', 'ring',
+ 'river_boat', 'road_map', 'robe', 'rocking_chair', 'roller_skate',
+ 'Rollerblade', 'rolling_pin', 'root_beer',
+ 'router_(computer_equipment)', 'rubber_band', 'runner_(carpet)',
+ 'plastic_bag', 'saddle_(on_an_animal)', 'saddle_blanket', 'saddlebag',
+ 'safety_pin', 'sail', 'salad', 'salad_plate', 'salami',
+ 'salmon_(fish)', 'salmon_(food)', 'salsa', 'saltshaker',
+ 'sandal_(type_of_shoe)', 'sandwich', 'satchel', 'saucepan', 'saucer',
+ 'sausage', 'sawhorse', 'saxophone', 'scale_(measuring_instrument)',
+ 'scarecrow', 'scarf', 'school_bus', 'scissors', 'scoreboard',
+ 'scrambled_eggs', 'scraper', 'scratcher', 'screwdriver',
+ 'scrubbing_brush', 'sculpture', 'seabird', 'seahorse', 'seaplane',
+ 'seashell', 'seedling', 'serving_dish', 'sewing_machine', 'shaker',
+ 'shampoo', 'shark', 'sharpener', 'Sharpie', 'shaver_(electric)',
+ 'shaving_cream', 'shawl', 'shears', 'sheep', 'shepherd_dog',
+ 'sherbert', 'shield', 'shirt', 'shoe', 'shopping_bag', 'shopping_cart',
+ 'short_pants', 'shot_glass', 'shoulder_bag', 'shovel', 'shower_head',
+ 'shower_curtain', 'shredder_(for_paper)', 'sieve', 'signboard', 'silo',
+ 'sink', 'skateboard', 'skewer', 'ski', 'ski_boot', 'ski_parka',
+ 'ski_pole', 'skirt', 'sled', 'sleeping_bag', 'sling_(bandage)',
+ 'slipper_(footwear)', 'smoothie', 'snake', 'snowboard', 'snowman',
+ 'snowmobile', 'soap', 'soccer_ball', 'sock', 'soda_fountain',
+ 'carbonated_water', 'sofa', 'softball', 'solar_array', 'sombrero',
+ 'soup', 'soup_bowl', 'soupspoon', 'sour_cream', 'soya_milk',
+ 'space_shuttle', 'sparkler_(fireworks)', 'spatula', 'spear',
+ 'spectacles', 'spice_rack', 'spider', 'sponge', 'spoon', 'sportswear',
+ 'spotlight', 'squirrel', 'stapler_(stapling_machine)', 'starfish',
+ 'statue_(sculpture)', 'steak_(food)', 'steak_knife',
+ 'steamer_(kitchen_appliance)', 'steering_wheel', 'stencil',
+ 'stepladder', 'step_stool', 'stereo_(sound_system)', 'stew', 'stirrer',
+ 'stirrup', 'stockings_(leg_wear)', 'stool', 'stop_sign', 'brake_light',
+ 'stove', 'strainer', 'strap', 'straw_(for_drinking)', 'strawberry',
+ 'street_sign', 'streetlight', 'string_cheese', 'stylus', 'subwoofer',
+ 'sugar_bowl', 'sugarcane_(plant)', 'suit_(clothing)', 'sunflower',
+ 'sunglasses', 'sunhat', 'sunscreen', 'surfboard', 'sushi', 'mop',
+ 'sweat_pants', 'sweatband', 'sweater', 'sweatshirt', 'sweet_potato',
+ 'swimsuit', 'sword', 'syringe', 'Tabasco_sauce', 'table-tennis_table',
+ 'table', 'table_lamp', 'tablecloth', 'tachometer', 'taco', 'tag',
+ 'taillight', 'tambourine', 'army_tank', 'tank_(storage_vessel)',
+ 'tank_top_(clothing)', 'tape_(sticky_cloth_or_paper)', 'tape_measure',
+ 'tapestry', 'tarp', 'tartan', 'tassel', 'tea_bag', 'teacup',
+ 'teakettle', 'teapot', 'teddy_bear', 'telephone', 'telephone_booth',
+ 'telephone_pole', 'telephoto_lens', 'television_camera',
+ 'television_set', 'tennis_ball', 'tennis_racket', 'tequila',
+ 'thermometer', 'thermos_bottle', 'thermostat', 'thimble', 'thread',
+ 'thumbtack', 'tiara', 'tiger', 'tights_(clothing)', 'timer', 'tinfoil',
+ 'tinsel', 'tissue_paper', 'toast_(food)', 'toaster', 'toaster_oven',
+ 'toilet', 'toilet_tissue', 'tomato', 'tongs', 'toolbox', 'toothbrush',
+ 'toothpaste', 'toothpick', 'cover', 'tortilla', 'tow_truck', 'towel',
+ 'towel_rack', 'toy', 'tractor_(farm_equipment)', 'traffic_light',
+ 'dirt_bike', 'trailer_truck', 'train_(railroad_vehicle)', 'trampoline',
+ 'tray', 'tree_house', 'trench_coat', 'triangle_(musical_instrument)',
+ 'tricycle', 'tripod', 'trousers', 'truck', 'truffle_(chocolate)',
+ 'trunk', 'vat', 'turban', 'turkey_(bird)', 'turkey_(food)', 'turnip',
+ 'turtle', 'turtleneck_(clothing)', 'typewriter', 'umbrella',
+ 'underwear', 'unicycle', 'urinal', 'urn', 'vacuum_cleaner', 'valve',
+ 'vase', 'vending_machine', 'vent', 'videotape', 'vinegar', 'violin',
+ 'vodka', 'volleyball', 'vulture', 'waffle', 'waffle_iron', 'wagon',
+ 'wagon_wheel', 'walking_stick', 'wall_clock', 'wall_socket', 'wallet',
+ 'walrus', 'wardrobe', 'wasabi', 'automatic_washer', 'watch',
+ 'water_bottle', 'water_cooler', 'water_faucet', 'water_filter',
+ 'water_heater', 'water_jug', 'water_gun', 'water_scooter', 'water_ski',
+ 'water_tower', 'watering_can', 'watermelon', 'weathervane', 'webcam',
+ 'wedding_cake', 'wedding_ring', 'wet_suit', 'wheel', 'wheelchair',
+ 'whipped_cream', 'whiskey', 'whistle', 'wick', 'wig', 'wind_chime',
+ 'windmill', 'window_box_(for_plants)', 'windshield_wiper', 'windsock',
+ 'wine_bottle', 'wine_bucket', 'wineglass', 'wing_chair',
+ 'blinder_(for_horses)', 'wok', 'wolf', 'wooden_spoon', 'wreath',
+ 'wrench', 'wristband', 'wristlet', 'yacht', 'yak', 'yogurt',
+ 'yoke_(animal_equipment)', 'zebra', 'zucchini')
+
+ def load_annotations(self, ann_file):
+ """Load annotation from lvis style annotation file.
+
+ Args:
+ ann_file (str): Path of annotation file.
+
+ Returns:
+ list[dict]: Annotation info from LVIS api.
+ """
+
+ try:
+ import lvis
+ assert lvis.__version__ >= '10.5.3'
+ from lvis import LVIS
+ except AssertionError:
+ raise AssertionError('Incompatible version of lvis is installed. '
+ 'Run pip uninstall lvis first. Then run pip '
+ 'install mmlvis to install open-mmlab forked '
+ 'lvis. ')
+ except ImportError:
+ raise ImportError('Package lvis is not installed. Please run pip '
+ 'install mmlvis to install open-mmlab forked '
+ 'lvis.')
+ self.coco = LVIS(ann_file)
+ self.cat_ids = self.coco.get_cat_ids()
+ self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}
+ self.img_ids = self.coco.get_img_ids()
+ data_infos = []
+ for i in self.img_ids:
+ info = self.coco.load_imgs([i])[0]
+ if info['file_name'].startswith('COCO'):
+                # Convert from the COCO 2014 file naming convention of
+ # COCO_[train/val/test]2014_000000000000.jpg to the 2017
+ # naming convention of 000000000000.jpg
+ # (LVIS v1 will fix this naming issue)
+ info['filename'] = info['file_name'][-16:]
+ else:
+ info['filename'] = info['file_name']
+ data_infos.append(info)
+ return data_infos
+
+ def evaluate(self,
+ results,
+ metric='bbox',
+ logger=None,
+ jsonfile_prefix=None,
+ classwise=False,
+ proposal_nums=(100, 300, 1000),
+ iou_thrs=np.arange(0.5, 0.96, 0.05)):
+ """Evaluation in LVIS protocol.
+
+ Args:
+ results (list[list | tuple]): Testing results of the dataset.
+ metric (str | list[str]): Metrics to be evaluated. Options are
+ 'bbox', 'segm', 'proposal', 'proposal_fast'.
+ logger (logging.Logger | str | None): Logger used for printing
+ related information during evaluation. Default: None.
+            jsonfile_prefix (str | None): The prefix of output json files,
+                including the file path and the filename prefix. If not
+                specified, a temp file will be created automatically.
+                Default: None.
+            classwise (bool): Whether to evaluate the AP for each class.
+ proposal_nums (Sequence[int]): Proposal number used for evaluating
+ recalls, such as recall@100, recall@1000.
+ Default: (100, 300, 1000).
+            iou_thrs (Sequence[float]): IoU thresholds used for evaluating
+                recalls. If set to a list, the average recall over all IoUs
+                will also be computed.
+                Default: np.arange(0.5, 0.96, 0.05).
+
+ Returns:
+ dict[str, float]: LVIS style metrics.
+ """
+
+ try:
+ import lvis
+ assert lvis.__version__ >= '10.5.3'
+ from lvis import LVISResults, LVISEval
+ except AssertionError:
+ raise AssertionError('Incompatible version of lvis is installed. '
+ 'Run pip uninstall lvis first. Then run pip '
+ 'install mmlvis to install open-mmlab forked '
+ 'lvis. ')
+ except ImportError:
+ raise ImportError('Package lvis is not installed. Please run pip '
+ 'install mmlvis to install open-mmlab forked '
+ 'lvis.')
+ assert isinstance(results, list), 'results must be a list'
+ assert len(results) == len(self), (
+ 'The length of results is not equal to the dataset len: {} != {}'.
+ format(len(results), len(self)))
+
+ metrics = metric if isinstance(metric, list) else [metric]
+ allowed_metrics = ['bbox', 'segm', 'proposal', 'proposal_fast']
+ for metric in metrics:
+ if metric not in allowed_metrics:
+ raise KeyError('metric {} is not supported'.format(metric))
+
+ if jsonfile_prefix is None:
+ tmp_dir = tempfile.TemporaryDirectory()
+ jsonfile_prefix = osp.join(tmp_dir.name, 'results')
+ else:
+ tmp_dir = None
+ result_files = self.results2json(results, jsonfile_prefix)
+
+ eval_results = OrderedDict()
+ # get original api
+ lvis_gt = self.coco
+ for metric in metrics:
+ msg = 'Evaluating {}...'.format(metric)
+ if logger is None:
+ msg = '\n' + msg
+ print_log(msg, logger=logger)
+
+ if metric == 'proposal_fast':
+ ar = self.fast_eval_recall(
+ results, proposal_nums, iou_thrs, logger='silent')
+ log_msg = []
+ for i, num in enumerate(proposal_nums):
+ eval_results['AR@{}'.format(num)] = ar[i]
+ log_msg.append('\nAR@{}\t{:.4f}'.format(num, ar[i]))
+ log_msg = ''.join(log_msg)
+ print_log(log_msg, logger=logger)
+ continue
+
+ if metric not in result_files:
+ raise KeyError('{} is not in results'.format(metric))
+ try:
+ lvis_dt = LVISResults(lvis_gt, result_files[metric])
+ except IndexError:
+ print_log(
+                    'The testing results of the whole dataset are empty.',
+ logger=logger,
+ level=logging.ERROR)
+ break
+
+ iou_type = 'bbox' if metric == 'proposal' else metric
+ lvis_eval = LVISEval(lvis_gt, lvis_dt, iou_type)
+ lvis_eval.params.imgIds = self.img_ids
+ if metric == 'proposal':
+ lvis_eval.params.useCats = 0
+ lvis_eval.params.maxDets = list(proposal_nums)
+ lvis_eval.evaluate()
+ lvis_eval.accumulate()
+ lvis_eval.summarize()
+ for k, v in lvis_eval.get_results().items():
+ if k.startswith('AR'):
+ val = float('{:.3f}'.format(float(v)))
+ eval_results[k] = val
+ else:
+ lvis_eval.evaluate()
+ lvis_eval.accumulate()
+ lvis_eval.summarize()
+ lvis_results = lvis_eval.get_results()
+ if classwise: # Compute per-category AP
+ # Compute per-category AP
+ # from https://github.com/facebookresearch/detectron2/
+ precisions = lvis_eval.eval['precision']
+ # precision: (iou, recall, cls, area range, max dets)
+ assert len(self.cat_ids) == precisions.shape[2]
+
+ results_per_category = []
+ for idx, catId in enumerate(self.cat_ids):
+ # area range index 0: all area ranges
+ # max dets index -1: typically 100 per image
+ nm = self.coco.load_cats(catId)[0]
+ precision = precisions[:, :, idx, 0, -1]
+ precision = precision[precision > -1]
+ if precision.size:
+ ap = np.mean(precision)
+ else:
+ ap = float('nan')
+ results_per_category.append(
+ (f'{nm["name"]}', f'{float(ap):0.3f}'))
+
+ num_columns = min(6, len(results_per_category) * 2)
+ results_flatten = list(
+ itertools.chain(*results_per_category))
+ headers = ['category', 'AP'] * (num_columns // 2)
+ results_2d = itertools.zip_longest(*[
+ results_flatten[i::num_columns]
+ for i in range(num_columns)
+ ])
+ table_data = [headers]
+ table_data += [result for result in results_2d]
+ table = AsciiTable(table_data)
+ print_log('\n' + table.table, logger=logger)
+
+ for k, v in lvis_results.items():
+ if k.startswith('AP'):
+ key = '{}_{}'.format(metric, k)
+ val = float('{:.3f}'.format(float(v)))
+ eval_results[key] = val
+ ap_summary = ' '.join([
+ '{}:{:.3f}'.format(k, float(v))
+ for k, v in lvis_results.items() if k.startswith('AP')
+ ])
+ eval_results['{}_mAP_copypaste'.format(metric)] = ap_summary
+ lvis_eval.print_results()
+ if tmp_dir is not None:
+ tmp_dir.cleanup()
+ return eval_results
+
+
+LVISDataset = LVISV05Dataset
+DATASETS.register_module(name='LVISDataset', module=LVISDataset)
+
+
+@DATASETS.register_module()
+class LVISV1Dataset(LVISDataset):
+
+ CLASSES = (
+ 'aerosol_can', 'air_conditioner', 'airplane', 'alarm_clock', 'alcohol',
+ 'alligator', 'almond', 'ambulance', 'amplifier', 'anklet', 'antenna',
+ 'apple', 'applesauce', 'apricot', 'apron', 'aquarium',
+ 'arctic_(type_of_shoe)', 'armband', 'armchair', 'armoire', 'armor',
+ 'artichoke', 'trash_can', 'ashtray', 'asparagus', 'atomizer',
+ 'avocado', 'award', 'awning', 'ax', 'baboon', 'baby_buggy',
+ 'basketball_backboard', 'backpack', 'handbag', 'suitcase', 'bagel',
+ 'bagpipe', 'baguet', 'bait', 'ball', 'ballet_skirt', 'balloon',
+ 'bamboo', 'banana', 'Band_Aid', 'bandage', 'bandanna', 'banjo',
+ 'banner', 'barbell', 'barge', 'barrel', 'barrette', 'barrow',
+ 'baseball_base', 'baseball', 'baseball_bat', 'baseball_cap',
+ 'baseball_glove', 'basket', 'basketball', 'bass_horn', 'bat_(animal)',
+ 'bath_mat', 'bath_towel', 'bathrobe', 'bathtub', 'batter_(food)',
+ 'battery', 'beachball', 'bead', 'bean_curd', 'beanbag', 'beanie',
+ 'bear', 'bed', 'bedpan', 'bedspread', 'cow', 'beef_(food)', 'beeper',
+ 'beer_bottle', 'beer_can', 'beetle', 'bell', 'bell_pepper', 'belt',
+ 'belt_buckle', 'bench', 'beret', 'bib', 'Bible', 'bicycle', 'visor',
+ 'billboard', 'binder', 'binoculars', 'bird', 'birdfeeder', 'birdbath',
+ 'birdcage', 'birdhouse', 'birthday_cake', 'birthday_card',
+ 'pirate_flag', 'black_sheep', 'blackberry', 'blackboard', 'blanket',
+ 'blazer', 'blender', 'blimp', 'blinker', 'blouse', 'blueberry',
+ 'gameboard', 'boat', 'bob', 'bobbin', 'bobby_pin', 'boiled_egg',
+ 'bolo_tie', 'deadbolt', 'bolt', 'bonnet', 'book', 'bookcase',
+ 'booklet', 'bookmark', 'boom_microphone', 'boot', 'bottle',
+ 'bottle_opener', 'bouquet', 'bow_(weapon)', 'bow_(decorative_ribbons)',
+ 'bow-tie', 'bowl', 'pipe_bowl', 'bowler_hat', 'bowling_ball', 'box',
+ 'boxing_glove', 'suspenders', 'bracelet', 'brass_plaque', 'brassiere',
+ 'bread-bin', 'bread', 'breechcloth', 'bridal_gown', 'briefcase',
+ 'broccoli', 'broach', 'broom', 'brownie', 'brussels_sprouts',
+ 'bubble_gum', 'bucket', 'horse_buggy', 'bull', 'bulldog', 'bulldozer',
+ 'bullet_train', 'bulletin_board', 'bulletproof_vest', 'bullhorn',
+ 'bun', 'bunk_bed', 'buoy', 'burrito', 'bus_(vehicle)', 'business_card',
+ 'butter', 'butterfly', 'button', 'cab_(taxi)', 'cabana', 'cabin_car',
+ 'cabinet', 'locker', 'cake', 'calculator', 'calendar', 'calf',
+ 'camcorder', 'camel', 'camera', 'camera_lens', 'camper_(vehicle)',
+ 'can', 'can_opener', 'candle', 'candle_holder', 'candy_bar',
+ 'candy_cane', 'walking_cane', 'canister', 'canoe', 'cantaloup',
+ 'canteen', 'cap_(headwear)', 'bottle_cap', 'cape', 'cappuccino',
+ 'car_(automobile)', 'railcar_(part_of_a_train)', 'elevator_car',
+ 'car_battery', 'identity_card', 'card', 'cardigan', 'cargo_ship',
+ 'carnation', 'horse_carriage', 'carrot', 'tote_bag', 'cart', 'carton',
+ 'cash_register', 'casserole', 'cassette', 'cast', 'cat', 'cauliflower',
+ 'cayenne_(spice)', 'CD_player', 'celery', 'cellular_telephone',
+ 'chain_mail', 'chair', 'chaise_longue', 'chalice', 'chandelier',
+ 'chap', 'checkbook', 'checkerboard', 'cherry', 'chessboard',
+ 'chicken_(animal)', 'chickpea', 'chili_(vegetable)', 'chime',
+ 'chinaware', 'crisp_(potato_chip)', 'poker_chip', 'chocolate_bar',
+ 'chocolate_cake', 'chocolate_milk', 'chocolate_mousse', 'choker',
+ 'chopping_board', 'chopstick', 'Christmas_tree', 'slide', 'cider',
+ 'cigar_box', 'cigarette', 'cigarette_case', 'cistern', 'clarinet',
+ 'clasp', 'cleansing_agent', 'cleat_(for_securing_rope)', 'clementine',
+ 'clip', 'clipboard', 'clippers_(for_plants)', 'cloak', 'clock',
+ 'clock_tower', 'clothes_hamper', 'clothespin', 'clutch_bag', 'coaster',
+ 'coat', 'coat_hanger', 'coatrack', 'cock', 'cockroach',
+ 'cocoa_(beverage)', 'coconut', 'coffee_maker', 'coffee_table',
+ 'coffeepot', 'coil', 'coin', 'colander', 'coleslaw',
+ 'coloring_material', 'combination_lock', 'pacifier', 'comic_book',
+ 'compass', 'computer_keyboard', 'condiment', 'cone', 'control',
+ 'convertible_(automobile)', 'sofa_bed', 'cooker', 'cookie',
+ 'cooking_utensil', 'cooler_(for_food)', 'cork_(bottle_plug)',
+ 'corkboard', 'corkscrew', 'edible_corn', 'cornbread', 'cornet',
+ 'cornice', 'cornmeal', 'corset', 'costume', 'cougar', 'coverall',
+ 'cowbell', 'cowboy_hat', 'crab_(animal)', 'crabmeat', 'cracker',
+ 'crape', 'crate', 'crayon', 'cream_pitcher', 'crescent_roll', 'crib',
+ 'crock_pot', 'crossbar', 'crouton', 'crow', 'crowbar', 'crown',
+ 'crucifix', 'cruise_ship', 'police_cruiser', 'crumb', 'crutch',
+ 'cub_(animal)', 'cube', 'cucumber', 'cufflink', 'cup', 'trophy_cup',
+ 'cupboard', 'cupcake', 'hair_curler', 'curling_iron', 'curtain',
+ 'cushion', 'cylinder', 'cymbal', 'dagger', 'dalmatian', 'dartboard',
+ 'date_(fruit)', 'deck_chair', 'deer', 'dental_floss', 'desk',
+ 'detergent', 'diaper', 'diary', 'die', 'dinghy', 'dining_table', 'tux',
+ 'dish', 'dish_antenna', 'dishrag', 'dishtowel', 'dishwasher',
+ 'dishwasher_detergent', 'dispenser', 'diving_board', 'Dixie_cup',
+ 'dog', 'dog_collar', 'doll', 'dollar', 'dollhouse', 'dolphin',
+ 'domestic_ass', 'doorknob', 'doormat', 'doughnut', 'dove', 'dragonfly',
+ 'drawer', 'underdrawers', 'dress', 'dress_hat', 'dress_suit',
+ 'dresser', 'drill', 'drone', 'dropper', 'drum_(musical_instrument)',
+ 'drumstick', 'duck', 'duckling', 'duct_tape', 'duffel_bag', 'dumbbell',
+ 'dumpster', 'dustpan', 'eagle', 'earphone', 'earplug', 'earring',
+ 'easel', 'eclair', 'eel', 'egg', 'egg_roll', 'egg_yolk', 'eggbeater',
+ 'eggplant', 'electric_chair', 'refrigerator', 'elephant', 'elk',
+ 'envelope', 'eraser', 'escargot', 'eyepatch', 'falcon', 'fan',
+ 'faucet', 'fedora', 'ferret', 'Ferris_wheel', 'ferry', 'fig_(fruit)',
+ 'fighter_jet', 'figurine', 'file_cabinet', 'file_(tool)', 'fire_alarm',
+ 'fire_engine', 'fire_extinguisher', 'fire_hose', 'fireplace',
+ 'fireplug', 'first-aid_kit', 'fish', 'fish_(food)', 'fishbowl',
+ 'fishing_rod', 'flag', 'flagpole', 'flamingo', 'flannel', 'flap',
+ 'flash', 'flashlight', 'fleece', 'flip-flop_(sandal)',
+ 'flipper_(footwear)', 'flower_arrangement', 'flute_glass', 'foal',
+ 'folding_chair', 'food_processor', 'football_(American)',
+ 'football_helmet', 'footstool', 'fork', 'forklift', 'freight_car',
+ 'French_toast', 'freshener', 'frisbee', 'frog', 'fruit_juice',
+ 'frying_pan', 'fudge', 'funnel', 'futon', 'gag', 'garbage',
+ 'garbage_truck', 'garden_hose', 'gargle', 'gargoyle', 'garlic',
+ 'gasmask', 'gazelle', 'gelatin', 'gemstone', 'generator',
+ 'giant_panda', 'gift_wrap', 'ginger', 'giraffe', 'cincture',
+ 'glass_(drink_container)', 'globe', 'glove', 'goat', 'goggles',
+ 'goldfish', 'golf_club', 'golfcart', 'gondola_(boat)', 'goose',
+ 'gorilla', 'gourd', 'grape', 'grater', 'gravestone', 'gravy_boat',
+ 'green_bean', 'green_onion', 'griddle', 'grill', 'grits', 'grizzly',
+ 'grocery_bag', 'guitar', 'gull', 'gun', 'hairbrush', 'hairnet',
+ 'hairpin', 'halter_top', 'ham', 'hamburger', 'hammer', 'hammock',
+ 'hamper', 'hamster', 'hair_dryer', 'hand_glass', 'hand_towel',
+ 'handcart', 'handcuff', 'handkerchief', 'handle', 'handsaw',
+ 'hardback_book', 'harmonium', 'hat', 'hatbox', 'veil', 'headband',
+ 'headboard', 'headlight', 'headscarf', 'headset',
+ 'headstall_(for_horses)', 'heart', 'heater', 'helicopter', 'helmet',
+ 'heron', 'highchair', 'hinge', 'hippopotamus', 'hockey_stick', 'hog',
+ 'home_plate_(baseball)', 'honey', 'fume_hood', 'hook', 'hookah',
+ 'hornet', 'horse', 'hose', 'hot-air_balloon', 'hotplate', 'hot_sauce',
+ 'hourglass', 'houseboat', 'hummingbird', 'hummus', 'polar_bear',
+ 'icecream', 'popsicle', 'ice_maker', 'ice_pack', 'ice_skate',
+ 'igniter', 'inhaler', 'iPod', 'iron_(for_clothing)', 'ironing_board',
+ 'jacket', 'jam', 'jar', 'jean', 'jeep', 'jelly_bean', 'jersey',
+ 'jet_plane', 'jewel', 'jewelry', 'joystick', 'jumpsuit', 'kayak',
+ 'keg', 'kennel', 'kettle', 'key', 'keycard', 'kilt', 'kimono',
+ 'kitchen_sink', 'kitchen_table', 'kite', 'kitten', 'kiwi_fruit',
+ 'knee_pad', 'knife', 'knitting_needle', 'knob', 'knocker_(on_a_door)',
+ 'koala', 'lab_coat', 'ladder', 'ladle', 'ladybug', 'lamb_(animal)',
+ 'lamb-chop', 'lamp', 'lamppost', 'lampshade', 'lantern', 'lanyard',
+ 'laptop_computer', 'lasagna', 'latch', 'lawn_mower', 'leather',
+ 'legging_(clothing)', 'Lego', 'legume', 'lemon', 'lemonade', 'lettuce',
+ 'license_plate', 'life_buoy', 'life_jacket', 'lightbulb',
+ 'lightning_rod', 'lime', 'limousine', 'lion', 'lip_balm', 'liquor',
+ 'lizard', 'log', 'lollipop', 'speaker_(stero_equipment)', 'loveseat',
+ 'machine_gun', 'magazine', 'magnet', 'mail_slot', 'mailbox_(at_home)',
+ 'mallard', 'mallet', 'mammoth', 'manatee', 'mandarin_orange', 'manger',
+ 'manhole', 'map', 'marker', 'martini', 'mascot', 'mashed_potato',
+ 'masher', 'mask', 'mast', 'mat_(gym_equipment)', 'matchbox',
+ 'mattress', 'measuring_cup', 'measuring_stick', 'meatball', 'medicine',
+ 'melon', 'microphone', 'microscope', 'microwave_oven', 'milestone',
+ 'milk', 'milk_can', 'milkshake', 'minivan', 'mint_candy', 'mirror',
+ 'mitten', 'mixer_(kitchen_tool)', 'money',
+ 'monitor_(computer_equipment) computer_monitor', 'monkey', 'motor',
+ 'motor_scooter', 'motor_vehicle', 'motorcycle', 'mound_(baseball)',
+ 'mouse_(computer_equipment)', 'mousepad', 'muffin', 'mug', 'mushroom',
+ 'music_stool', 'musical_instrument', 'nailfile', 'napkin',
+ 'neckerchief', 'necklace', 'necktie', 'needle', 'nest', 'newspaper',
+ 'newsstand', 'nightshirt', 'nosebag_(for_animals)',
+ 'noseband_(for_animals)', 'notebook', 'notepad', 'nut', 'nutcracker',
+ 'oar', 'octopus_(food)', 'octopus_(animal)', 'oil_lamp', 'olive_oil',
+ 'omelet', 'onion', 'orange_(fruit)', 'orange_juice', 'ostrich',
+ 'ottoman', 'oven', 'overalls_(clothing)', 'owl', 'packet', 'inkpad',
+ 'pad', 'paddle', 'padlock', 'paintbrush', 'painting', 'pajamas',
+ 'palette', 'pan_(for_cooking)', 'pan_(metal_container)', 'pancake',
+ 'pantyhose', 'papaya', 'paper_plate', 'paper_towel', 'paperback_book',
+ 'paperweight', 'parachute', 'parakeet', 'parasail_(sports)', 'parasol',
+ 'parchment', 'parka', 'parking_meter', 'parrot',
+ 'passenger_car_(part_of_a_train)', 'passenger_ship', 'passport',
+ 'pastry', 'patty_(food)', 'pea_(food)', 'peach', 'peanut_butter',
+ 'pear', 'peeler_(tool_for_fruit_and_vegetables)', 'wooden_leg',
+ 'pegboard', 'pelican', 'pen', 'pencil', 'pencil_box',
+ 'pencil_sharpener', 'pendulum', 'penguin', 'pennant', 'penny_(coin)',
+ 'pepper', 'pepper_mill', 'perfume', 'persimmon', 'person', 'pet',
+ 'pew_(church_bench)', 'phonebook', 'phonograph_record', 'piano',
+ 'pickle', 'pickup_truck', 'pie', 'pigeon', 'piggy_bank', 'pillow',
+ 'pin_(non_jewelry)', 'pineapple', 'pinecone', 'ping-pong_ball',
+ 'pinwheel', 'tobacco_pipe', 'pipe', 'pistol', 'pita_(bread)',
+ 'pitcher_(vessel_for_liquid)', 'pitchfork', 'pizza', 'place_mat',
+ 'plate', 'platter', 'playpen', 'pliers', 'plow_(farm_equipment)',
+ 'plume', 'pocket_watch', 'pocketknife', 'poker_(fire_stirring_tool)',
+ 'pole', 'polo_shirt', 'poncho', 'pony', 'pool_table', 'pop_(soda)',
+ 'postbox_(public)', 'postcard', 'poster', 'pot', 'flowerpot', 'potato',
+ 'potholder', 'pottery', 'pouch', 'power_shovel', 'prawn', 'pretzel',
+ 'printer', 'projectile_(weapon)', 'projector', 'propeller', 'prune',
+ 'pudding', 'puffer_(fish)', 'puffin', 'pug-dog', 'pumpkin', 'puncher',
+ 'puppet', 'puppy', 'quesadilla', 'quiche', 'quilt', 'rabbit',
+ 'race_car', 'racket', 'radar', 'radiator', 'radio_receiver', 'radish',
+ 'raft', 'rag_doll', 'raincoat', 'ram_(animal)', 'raspberry', 'rat',
+ 'razorblade', 'reamer_(juicer)', 'rearview_mirror', 'receipt',
+ 'recliner', 'record_player', 'reflector', 'remote_control',
+ 'rhinoceros', 'rib_(food)', 'rifle', 'ring', 'river_boat', 'road_map',
+ 'robe', 'rocking_chair', 'rodent', 'roller_skate', 'Rollerblade',
+ 'rolling_pin', 'root_beer', 'router_(computer_equipment)',
+ 'rubber_band', 'runner_(carpet)', 'plastic_bag',
+ 'saddle_(on_an_animal)', 'saddle_blanket', 'saddlebag', 'safety_pin',
+ 'sail', 'salad', 'salad_plate', 'salami', 'salmon_(fish)',
+ 'salmon_(food)', 'salsa', 'saltshaker', 'sandal_(type_of_shoe)',
+ 'sandwich', 'satchel', 'saucepan', 'saucer', 'sausage', 'sawhorse',
+ 'saxophone', 'scale_(measuring_instrument)', 'scarecrow', 'scarf',
+ 'school_bus', 'scissors', 'scoreboard', 'scraper', 'screwdriver',
+ 'scrubbing_brush', 'sculpture', 'seabird', 'seahorse', 'seaplane',
+ 'seashell', 'sewing_machine', 'shaker', 'shampoo', 'shark',
+ 'sharpener', 'Sharpie', 'shaver_(electric)', 'shaving_cream', 'shawl',
+ 'shears', 'sheep', 'shepherd_dog', 'sherbert', 'shield', 'shirt',
+ 'shoe', 'shopping_bag', 'shopping_cart', 'short_pants', 'shot_glass',
+ 'shoulder_bag', 'shovel', 'shower_head', 'shower_cap',
+ 'shower_curtain', 'shredder_(for_paper)', 'signboard', 'silo', 'sink',
+ 'skateboard', 'skewer', 'ski', 'ski_boot', 'ski_parka', 'ski_pole',
+ 'skirt', 'skullcap', 'sled', 'sleeping_bag', 'sling_(bandage)',
+ 'slipper_(footwear)', 'smoothie', 'snake', 'snowboard', 'snowman',
+ 'snowmobile', 'soap', 'soccer_ball', 'sock', 'sofa', 'softball',
+ 'solar_array', 'sombrero', 'soup', 'soup_bowl', 'soupspoon',
+ 'sour_cream', 'soya_milk', 'space_shuttle', 'sparkler_(fireworks)',
+ 'spatula', 'spear', 'spectacles', 'spice_rack', 'spider', 'crawfish',
+ 'sponge', 'spoon', 'sportswear', 'spotlight', 'squid_(food)',
+ 'squirrel', 'stagecoach', 'stapler_(stapling_machine)', 'starfish',
+ 'statue_(sculpture)', 'steak_(food)', 'steak_knife', 'steering_wheel',
+ 'stepladder', 'step_stool', 'stereo_(sound_system)', 'stew', 'stirrer',
+ 'stirrup', 'stool', 'stop_sign', 'brake_light', 'stove', 'strainer',
+ 'strap', 'straw_(for_drinking)', 'strawberry', 'street_sign',
+ 'streetlight', 'string_cheese', 'stylus', 'subwoofer', 'sugar_bowl',
+ 'sugarcane_(plant)', 'suit_(clothing)', 'sunflower', 'sunglasses',
+ 'sunhat', 'surfboard', 'sushi', 'mop', 'sweat_pants', 'sweatband',
+ 'sweater', 'sweatshirt', 'sweet_potato', 'swimsuit', 'sword',
+ 'syringe', 'Tabasco_sauce', 'table-tennis_table', 'table',
+ 'table_lamp', 'tablecloth', 'tachometer', 'taco', 'tag', 'taillight',
+ 'tambourine', 'army_tank', 'tank_(storage_vessel)',
+ 'tank_top_(clothing)', 'tape_(sticky_cloth_or_paper)', 'tape_measure',
+ 'tapestry', 'tarp', 'tartan', 'tassel', 'tea_bag', 'teacup',
+ 'teakettle', 'teapot', 'teddy_bear', 'telephone', 'telephone_booth',
+ 'telephone_pole', 'telephoto_lens', 'television_camera',
+ 'television_set', 'tennis_ball', 'tennis_racket', 'tequila',
+ 'thermometer', 'thermos_bottle', 'thermostat', 'thimble', 'thread',
+ 'thumbtack', 'tiara', 'tiger', 'tights_(clothing)', 'timer', 'tinfoil',
+ 'tinsel', 'tissue_paper', 'toast_(food)', 'toaster', 'toaster_oven',
+ 'toilet', 'toilet_tissue', 'tomato', 'tongs', 'toolbox', 'toothbrush',
+ 'toothpaste', 'toothpick', 'cover', 'tortilla', 'tow_truck', 'towel',
+ 'towel_rack', 'toy', 'tractor_(farm_equipment)', 'traffic_light',
+ 'dirt_bike', 'trailer_truck', 'train_(railroad_vehicle)', 'trampoline',
+ 'tray', 'trench_coat', 'triangle_(musical_instrument)', 'tricycle',
+ 'tripod', 'trousers', 'truck', 'truffle_(chocolate)', 'trunk', 'vat',
+ 'turban', 'turkey_(food)', 'turnip', 'turtle', 'turtleneck_(clothing)',
+ 'typewriter', 'umbrella', 'underwear', 'unicycle', 'urinal', 'urn',
+ 'vacuum_cleaner', 'vase', 'vending_machine', 'vent', 'vest',
+ 'videotape', 'vinegar', 'violin', 'vodka', 'volleyball', 'vulture',
+ 'waffle', 'waffle_iron', 'wagon', 'wagon_wheel', 'walking_stick',
+ 'wall_clock', 'wall_socket', 'wallet', 'walrus', 'wardrobe',
+ 'washbasin', 'automatic_washer', 'watch', 'water_bottle',
+ 'water_cooler', 'water_faucet', 'water_heater', 'water_jug',
+ 'water_gun', 'water_scooter', 'water_ski', 'water_tower',
+ 'watering_can', 'watermelon', 'weathervane', 'webcam', 'wedding_cake',
+ 'wedding_ring', 'wet_suit', 'wheel', 'wheelchair', 'whipped_cream',
+ 'whistle', 'wig', 'wind_chime', 'windmill', 'window_box_(for_plants)',
+ 'windshield_wiper', 'windsock', 'wine_bottle', 'wine_bucket',
+ 'wineglass', 'blinder_(for_horses)', 'wok', 'wolf', 'wooden_spoon',
+ 'wreath', 'wrench', 'wristband', 'wristlet', 'yacht', 'yogurt',
+ 'yoke_(animal_equipment)', 'zebra', 'zucchini')
+
+ def load_annotations(self, ann_file):
+ try:
+ import lvis
+ assert lvis.__version__ >= '10.5.3'
+ from lvis import LVIS
+ except AssertionError:
+ raise AssertionError('Incompatible version of lvis is installed. '
+ 'Run pip uninstall lvis first. Then run pip '
+ 'install mmlvis to install open-mmlab forked '
+ 'lvis. ')
+ except ImportError:
+ raise ImportError('Package lvis is not installed. Please run pip '
+ 'install mmlvis to install open-mmlab forked '
+ 'lvis.')
+ self.coco = LVIS(ann_file)
+ self.cat_ids = self.coco.get_cat_ids()
+ self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}
+ self.img_ids = self.coco.get_img_ids()
+ data_infos = []
+ for i in self.img_ids:
+ info = self.coco.load_imgs([i])[0]
+ # coco_url is used in LVISv1 instead of file_name
+ # e.g. http://images.cocodataset.org/train2017/000000391895.jpg
+            # the train/val split is specified in the url
+ info['filename'] = info['coco_url'].replace(
+ 'http://images.cocodataset.org/', '')
+ data_infos.append(info)
+ return data_infos
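A quick illustration of the two file-naming conventions handled by the load_annotations methods above; the image id is the one already quoted in the code comment.

    # LVIS v0.5 reuses COCO 2014 file names; the last 16 characters give the
    # 2017-style name used on disk.
    file_name = 'COCO_train2014_000000391895.jpg'
    assert file_name[-16:] == '000000391895.jpg'

    # LVIS v1 stores a coco_url instead; stripping the host leaves 'split/name.jpg'.
    coco_url = 'http://images.cocodataset.org/train2017/000000391895.jpg'
    assert coco_url.replace('http://images.cocodataset.org/',
                            '') == 'train2017/000000391895.jpg'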
diff --git a/annotator/uniformer/mmdet/datasets/pipelines/__init__.py b/annotator/uniformer/mmdet/datasets/pipelines/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..c6f424debd1623e7511dd77da464a6639d816745
--- /dev/null
+++ b/annotator/uniformer/mmdet/datasets/pipelines/__init__.py
@@ -0,0 +1,25 @@
+from .auto_augment import (AutoAugment, BrightnessTransform, ColorTransform,
+ ContrastTransform, EqualizeTransform, Rotate, Shear,
+ Translate)
+from .compose import Compose
+from .formating import (Collect, DefaultFormatBundle, ImageToTensor,
+ ToDataContainer, ToTensor, Transpose, to_tensor)
+from .instaboost import InstaBoost
+from .loading import (LoadAnnotations, LoadImageFromFile, LoadImageFromWebcam,
+ LoadMultiChannelImageFromFiles, LoadProposals)
+from .test_time_aug import MultiScaleFlipAug
+from .transforms import (Albu, CutOut, Expand, MinIoURandomCrop, Normalize,
+ Pad, PhotoMetricDistortion, RandomCenterCropPad,
+ RandomCrop, RandomFlip, Resize, SegRescale)
+
+__all__ = [
+ 'Compose', 'to_tensor', 'ToTensor', 'ImageToTensor', 'ToDataContainer',
+ 'Transpose', 'Collect', 'DefaultFormatBundle', 'LoadAnnotations',
+ 'LoadImageFromFile', 'LoadImageFromWebcam',
+ 'LoadMultiChannelImageFromFiles', 'LoadProposals', 'MultiScaleFlipAug',
+ 'Resize', 'RandomFlip', 'Pad', 'RandomCrop', 'Normalize', 'SegRescale',
+ 'MinIoURandomCrop', 'Expand', 'PhotoMetricDistortion', 'Albu',
+ 'InstaBoost', 'RandomCenterCropPad', 'AutoAugment', 'CutOut', 'Shear',
+ 'Rotate', 'ColorTransform', 'EqualizeTransform', 'BrightnessTransform',
+ 'ContrastTransform', 'Translate'
+]
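A minimal sketch of how these transforms are typically chained through Compose; the particular steps and parameter values below are illustrative choices, not a required configuration.

    from annotator.uniformer.mmdet.datasets.pipelines import Compose

    # Compose resolves config dicts through the PIPELINES registry (plain
    # callables are also accepted) and applies them in order to a results dict.
    train_pipeline = Compose([
        dict(type='LoadImageFromFile'),
        dict(type='LoadAnnotations', with_bbox=True),
        dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
        dict(type='RandomFlip', flip_ratio=0.5),
        dict(type='Normalize',
             mean=[123.675, 116.28, 103.53],
             std=[58.395, 57.12, 57.375],
             to_rgb=True),
        dict(type='Pad', size_divisor=32),
        dict(type='DefaultFormatBundle'),
        dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
    ])
    # A dataset then feeds each sample's results dict through the pipeline:
    # results = train_pipeline(results)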
diff --git a/annotator/uniformer/mmdet/datasets/pipelines/auto_augment.py b/annotator/uniformer/mmdet/datasets/pipelines/auto_augment.py
new file mode 100644
index 0000000000000000000000000000000000000000..e19adaec18a96cac4dbe1d8c2c9193e9901be1fb
--- /dev/null
+++ b/annotator/uniformer/mmdet/datasets/pipelines/auto_augment.py
@@ -0,0 +1,890 @@
+import copy
+
+import cv2
+import mmcv
+import numpy as np
+
+from ..builder import PIPELINES
+from .compose import Compose
+
+_MAX_LEVEL = 10
+
+
+def level_to_value(level, max_value):
+ """Map from level to values based on max_value."""
+ return (level / _MAX_LEVEL) * max_value
+
+
+def enhance_level_to_value(level, a=1.8, b=0.1):
+ """Map from level to values."""
+ return (level / _MAX_LEVEL) * a + b
+
+
+def random_negative(value, random_negative_prob):
+ """Randomly negate value based on random_negative_prob."""
+ return -value if np.random.rand() < random_negative_prob else value
+
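A quick sanity check of the three helpers defined above, using the module defaults _MAX_LEVEL = 10, a = 1.8 and b = 0.1:

    import math

    assert math.isclose(level_to_value(5, max_value=0.3), 0.15)      # (5 / 10) * 0.3
    assert math.isclose(enhance_level_to_value(5), 1.0)              # (5 / 10) * 1.8 + 0.1
    assert random_negative(0.15, random_negative_prob=1.0) == -0.15  # always negated
    assert random_negative(0.15, random_negative_prob=0.0) == 0.15   # never negated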
+
+def bbox2fields():
+ """The key correspondence from bboxes to labels, masks and
+ segmentations."""
+ bbox2label = {
+ 'gt_bboxes': 'gt_labels',
+ 'gt_bboxes_ignore': 'gt_labels_ignore'
+ }
+ bbox2mask = {
+ 'gt_bboxes': 'gt_masks',
+ 'gt_bboxes_ignore': 'gt_masks_ignore'
+ }
+ bbox2seg = {
+ 'gt_bboxes': 'gt_semantic_seg',
+ }
+ return bbox2label, bbox2mask, bbox2seg
+
+
+@PIPELINES.register_module()
+class AutoAugment(object):
+ """Auto augmentation.
+
+    This data augmentation is proposed in `Learning Data Augmentation
+    Strategies for Object Detection <https://arxiv.org/abs/1906.11172>`_.
+
+    TODO: Implement the 'Sharpness' transform ('Shear' and 'Rotate' are
+    implemented below).
+
+ Args:
+ policies (list[list[dict]]): The policies of auto augmentation. Each
+ policy in ``policies`` is a specific augmentation policy, and is
+ composed by several augmentations (dict). When AutoAugment is
+ called, a random policy in ``policies`` will be selected to
+ augment images.
+
+ Examples:
+ >>> replace = (104, 116, 124)
+ >>> policies = [
+ >>> [
+ >>> dict(type='Sharpness', prob=0.0, level=8),
+ >>> dict(
+ >>> type='Shear',
+ >>> prob=0.4,
+ >>> level=0,
+ >>> replace=replace,
+ >>> axis='x')
+ >>> ],
+ >>> [
+ >>> dict(
+ >>> type='Rotate',
+ >>> prob=0.6,
+ >>> level=10,
+ >>> replace=replace),
+ >>> dict(type='Color', prob=1.0, level=6)
+ >>> ]
+ >>> ]
+ >>> augmentation = AutoAugment(policies)
+        >>> img = np.ones((100, 100, 3))
+        >>> gt_bboxes = np.ones((10, 4))
+ >>> results = dict(img=img, gt_bboxes=gt_bboxes)
+ >>> results = augmentation(results)
+ """
+
+ def __init__(self, policies):
+ assert isinstance(policies, list) and len(policies) > 0, \
+ 'Policies must be a non-empty list.'
+ for policy in policies:
+ assert isinstance(policy, list) and len(policy) > 0, \
+ 'Each policy in policies must be a non-empty list.'
+ for augment in policy:
+ assert isinstance(augment, dict) and 'type' in augment, \
+ 'Each specific augmentation must be a dict with key' \
+ ' "type".'
+
+ self.policies = copy.deepcopy(policies)
+ self.transforms = [Compose(policy) for policy in self.policies]
+
+ def __call__(self, results):
+ transform = np.random.choice(self.transforms)
+ return transform(results)
+
+ def __repr__(self):
+ return f'{self.__class__.__name__}(policies={self.policies})'
+
+
+@PIPELINES.register_module()
+class Shear(object):
+ """Apply Shear Transformation to image (and its corresponding bbox, mask,
+ segmentation).
+
+ Args:
+ level (int | float): The level should be in range [0,_MAX_LEVEL].
+        img_fill_val (int | float | tuple): The fill value for the image
+            border. If float, the same value will be used for all three
+            channels of the image. If tuple, it should have 3 elements.
+ seg_ignore_label (int): The fill value used for segmentation map.
+ Note this value must equals ``ignore_label`` in ``semantic_head``
+ of the corresponding config. Default 255.
+        prob (float): The probability of performing Shear, which should be
+            in range [0, 1].
+ direction (str): The direction for shear, either "horizontal"
+ or "vertical".
+ max_shear_magnitude (float): The maximum magnitude for Shear
+ transformation.
+ random_negative_prob (float): The probability that turns the
+ offset negative. Should be in range [0,1]
+ interpolation (str): Same as in :func:`mmcv.imshear`.
+ """
+
+ def __init__(self,
+ level,
+ img_fill_val=128,
+ seg_ignore_label=255,
+ prob=0.5,
+ direction='horizontal',
+ max_shear_magnitude=0.3,
+ random_negative_prob=0.5,
+ interpolation='bilinear'):
+ assert isinstance(level, (int, float)), 'The level must be type ' \
+ f'int or float, got {type(level)}.'
+ assert 0 <= level <= _MAX_LEVEL, 'The level should be in range ' \
+ f'[0,{_MAX_LEVEL}], got {level}.'
+ if isinstance(img_fill_val, (float, int)):
+ img_fill_val = tuple([float(img_fill_val)] * 3)
+ elif isinstance(img_fill_val, tuple):
+ assert len(img_fill_val) == 3, 'img_fill_val as tuple must ' \
+ f'have 3 elements. got {len(img_fill_val)}.'
+ img_fill_val = tuple([float(val) for val in img_fill_val])
+ else:
+ raise ValueError(
+ 'img_fill_val must be float or tuple with 3 elements.')
+        assert np.all([0 <= val <= 255 for val in img_fill_val]), 'all ' \
+            'elements of img_fill_val should be within range [0,255]. ' \
+            f'got {img_fill_val}.'
+        assert 0 <= prob <= 1.0, 'The probability of shear should be in ' \
+            f'range [0,1]. got {prob}.'
+        assert direction in ('horizontal', 'vertical'), 'direction must ' \
+            f'be either "horizontal" or "vertical". got {direction}.'
+ assert isinstance(max_shear_magnitude, float), 'max_shear_magnitude ' \
+ f'should be type float. got {type(max_shear_magnitude)}.'
+        assert 0. <= max_shear_magnitude <= 1., \
+            'max_shear_magnitude should be in range [0,1]. ' \
+            f'got {max_shear_magnitude}.'
+ self.level = level
+ self.magnitude = level_to_value(level, max_shear_magnitude)
+ self.img_fill_val = img_fill_val
+ self.seg_ignore_label = seg_ignore_label
+ self.prob = prob
+ self.direction = direction
+ self.max_shear_magnitude = max_shear_magnitude
+ self.random_negative_prob = random_negative_prob
+ self.interpolation = interpolation
+
+ def _shear_img(self,
+ results,
+ magnitude,
+ direction='horizontal',
+ interpolation='bilinear'):
+ """Shear the image.
+
+ Args:
+ results (dict): Result dict from loading pipeline.
+ magnitude (int | float): The magnitude used for shear.
+ direction (str): The direction for shear, either "horizontal"
+ or "vertical".
+ interpolation (str): Same as in :func:`mmcv.imshear`.
+ """
+ for key in results.get('img_fields', ['img']):
+ img = results[key]
+ img_sheared = mmcv.imshear(
+ img,
+ magnitude,
+ direction,
+ border_value=self.img_fill_val,
+ interpolation=interpolation)
+ results[key] = img_sheared.astype(img.dtype)
+
+ def _shear_bboxes(self, results, magnitude):
+ """Shear the bboxes."""
+ h, w, c = results['img_shape']
+ if self.direction == 'horizontal':
+ shear_matrix = np.stack([[1, magnitude],
+ [0, 1]]).astype(np.float32) # [2, 2]
+ else:
+ shear_matrix = np.stack([[1, 0], [magnitude,
+ 1]]).astype(np.float32)
+ for key in results.get('bbox_fields', []):
+ min_x, min_y, max_x, max_y = np.split(
+ results[key], results[key].shape[-1], axis=-1)
+ coordinates = np.stack([[min_x, min_y], [max_x, min_y],
+ [min_x, max_y],
+ [max_x, max_y]]) # [4, 2, nb_box, 1]
+ coordinates = coordinates[..., 0].transpose(
+ (2, 1, 0)).astype(np.float32) # [nb_box, 2, 4]
+ new_coords = np.matmul(shear_matrix[None, :, :],
+ coordinates) # [nb_box, 2, 4]
+ min_x = np.min(new_coords[:, 0, :], axis=-1)
+ min_y = np.min(new_coords[:, 1, :], axis=-1)
+ max_x = np.max(new_coords[:, 0, :], axis=-1)
+ max_y = np.max(new_coords[:, 1, :], axis=-1)
+ min_x = np.clip(min_x, a_min=0, a_max=w)
+ min_y = np.clip(min_y, a_min=0, a_max=h)
+ max_x = np.clip(max_x, a_min=min_x, a_max=w)
+ max_y = np.clip(max_y, a_min=min_y, a_max=h)
+ results[key] = np.stack([min_x, min_y, max_x, max_y],
+ axis=-1).astype(results[key].dtype)
+
+ def _shear_masks(self,
+ results,
+ magnitude,
+ direction='horizontal',
+ fill_val=0,
+ interpolation='bilinear'):
+ """Shear the masks."""
+ h, w, c = results['img_shape']
+ for key in results.get('mask_fields', []):
+ masks = results[key]
+ results[key] = masks.shear((h, w),
+ magnitude,
+ direction,
+ border_value=fill_val,
+ interpolation=interpolation)
+
+ def _shear_seg(self,
+ results,
+ magnitude,
+ direction='horizontal',
+ fill_val=255,
+ interpolation='bilinear'):
+ """Shear the segmentation maps."""
+ for key in results.get('seg_fields', []):
+ seg = results[key]
+ results[key] = mmcv.imshear(
+ seg,
+ magnitude,
+ direction,
+ border_value=fill_val,
+ interpolation=interpolation).astype(seg.dtype)
+
+ def _filter_invalid(self, results, min_bbox_size=0):
+ """Filter bboxes and corresponding masks too small after shear
+ augmentation."""
+ bbox2label, bbox2mask, _ = bbox2fields()
+ for key in results.get('bbox_fields', []):
+ bbox_w = results[key][:, 2] - results[key][:, 0]
+ bbox_h = results[key][:, 3] - results[key][:, 1]
+ valid_inds = (bbox_w > min_bbox_size) & (bbox_h > min_bbox_size)
+ valid_inds = np.nonzero(valid_inds)[0]
+ results[key] = results[key][valid_inds]
+ # label fields. e.g. gt_labels and gt_labels_ignore
+ label_key = bbox2label.get(key)
+ if label_key in results:
+ results[label_key] = results[label_key][valid_inds]
+ # mask fields, e.g. gt_masks and gt_masks_ignore
+ mask_key = bbox2mask.get(key)
+ if mask_key in results:
+ results[mask_key] = results[mask_key][valid_inds]
+
+ def __call__(self, results):
+ """Call function to shear images, bounding boxes, masks and semantic
+ segmentation maps.
+
+ Args:
+ results (dict): Result dict from loading pipeline.
+
+ Returns:
+ dict: Sheared results.
+ """
+ if np.random.rand() > self.prob:
+ return results
+ magnitude = random_negative(self.magnitude, self.random_negative_prob)
+ self._shear_img(results, magnitude, self.direction, self.interpolation)
+ self._shear_bboxes(results, magnitude)
+ # fill_val set to 0 for background of mask.
+ self._shear_masks(
+ results,
+ magnitude,
+ self.direction,
+ fill_val=0,
+ interpolation=self.interpolation)
+ self._shear_seg(
+ results,
+ magnitude,
+ self.direction,
+ fill_val=self.seg_ignore_label,
+ interpolation=self.interpolation)
+ self._filter_invalid(results)
+ return results
+
+ def __repr__(self):
+ repr_str = self.__class__.__name__
+ repr_str += f'(level={self.level}, '
+ repr_str += f'img_fill_val={self.img_fill_val}, '
+ repr_str += f'seg_ignore_label={self.seg_ignore_label}, '
+ repr_str += f'prob={self.prob}, '
+ repr_str += f'direction={self.direction}, '
+ repr_str += f'max_shear_magnitude={self.max_shear_magnitude}, '
+ repr_str += f'random_negative_prob={self.random_negative_prob}, '
+ repr_str += f'interpolation={self.interpolation})'
+ return repr_str
+
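A small numeric sketch of what _shear_bboxes computes for a horizontal shear: each corner (x, y) maps to (x + magnitude * y, y), and the new box is the axis-aligned hull of the four sheared corners. The box coordinates below are arbitrary illustrative values.

    import numpy as np

    magnitude = 0.15  # e.g. Shear(level=5): level_to_value(5, 0.3)
    shear_matrix = np.array([[1, magnitude], [0, 1]], dtype=np.float32)

    x1, y1, x2, y2 = 10., 20., 30., 40.  # one ground-truth box
    corners = np.array([[x1, x2, x1, x2],
                        [y1, y1, y2, y2]], dtype=np.float32)  # [2, 4]
    sheared = shear_matrix @ corners  # x' = x + 0.15 * y, y' = y
    new_box = [sheared[0].min(), sheared[1].min(),
               sheared[0].max(), sheared[1].max()]
    # new_box is approximately [13.0, 20.0, 36.0, 40.0]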
+
+@PIPELINES.register_module()
+class Rotate(object):
+ """Apply Rotate Transformation to image (and its corresponding bbox, mask,
+ segmentation).
+
+ Args:
+        level (int | float): The level should be in range [0,_MAX_LEVEL].
+ scale (int | float): Isotropic scale factor. Same in
+ ``mmcv.imrotate``.
+ center (int | float | tuple[float]): Center point (w, h) of the
+ rotation in the source image. If None, the center of the
+ image will be used. Same in ``mmcv.imrotate``.
+        img_fill_val (int | float | tuple): The fill value for the image
+            border. If float, the same value will be used for all three
+            channels of the image. If tuple, it should have 3 elements
+            (i.e. equal to the number of image channels).
+ seg_ignore_label (int): The fill value used for segmentation map.
+ Note this value must equals ``ignore_label`` in ``semantic_head``
+ of the corresponding config. Default 255.
+        prob (float): The probability of performing the transformation,
+            which should be in range [0, 1].
+        max_rotate_angle (int | float): The maximum angle for the rotate
+            transformation.
+ random_negative_prob (float): The probability that turns the
+ offset negative.
+ """
+
+ def __init__(self,
+ level,
+ scale=1,
+ center=None,
+ img_fill_val=128,
+ seg_ignore_label=255,
+ prob=0.5,
+ max_rotate_angle=30,
+ random_negative_prob=0.5):
+ assert isinstance(level, (int, float)), \
+ f'The level must be type int or float. got {type(level)}.'
+        assert 0 <= level <= _MAX_LEVEL, \
+            f'The level should be in range [0,{_MAX_LEVEL}]. got {level}.'
+ assert isinstance(scale, (int, float)), \
+ f'The scale must be type int or float. got type {type(scale)}.'
+ if isinstance(center, (int, float)):
+ center = (center, center)
+ elif isinstance(center, tuple):
+ assert len(center) == 2, 'center with type tuple must have '\
+ f'2 elements. got {len(center)} elements.'
+ else:
+ assert center is None, 'center must be None or type int, '\
+ f'float or tuple, got type {type(center)}.'
+ if isinstance(img_fill_val, (float, int)):
+ img_fill_val = tuple([float(img_fill_val)] * 3)
+ elif isinstance(img_fill_val, tuple):
+ assert len(img_fill_val) == 3, 'img_fill_val as tuple must '\
+ f'have 3 elements. got {len(img_fill_val)}.'
+ img_fill_val = tuple([float(val) for val in img_fill_val])
+ else:
+ raise ValueError(
+ 'img_fill_val must be float or tuple with 3 elements.')
+ assert np.all([0 <= val <= 255 for val in img_fill_val]), \
+ 'all elements of img_fill_val should between range [0,255]. '\
+ f'got {img_fill_val}.'
+        assert 0 <= prob <= 1.0, 'The probability should be in range [0,1]. '\
+            f'got {prob}.'
+ assert isinstance(max_rotate_angle, (int, float)), 'max_rotate_angle '\
+ f'should be type int or float. got type {type(max_rotate_angle)}.'
+ self.level = level
+ self.scale = scale
+ # Rotation angle in degrees. Positive values mean
+ # clockwise rotation.
+ self.angle = level_to_value(level, max_rotate_angle)
+ self.center = center
+ self.img_fill_val = img_fill_val
+ self.seg_ignore_label = seg_ignore_label
+ self.prob = prob
+ self.max_rotate_angle = max_rotate_angle
+ self.random_negative_prob = random_negative_prob
+
+ def _rotate_img(self, results, angle, center=None, scale=1.0):
+ """Rotate the image.
+
+ Args:
+ results (dict): Result dict from loading pipeline.
+ angle (float): Rotation angle in degrees, positive values
+ mean clockwise rotation. Same in ``mmcv.imrotate``.
+ center (tuple[float], optional): Center point (w, h) of the
+ rotation. Same in ``mmcv.imrotate``.
+ scale (int | float): Isotropic scale factor. Same in
+ ``mmcv.imrotate``.
+ """
+ for key in results.get('img_fields', ['img']):
+ img = results[key].copy()
+ img_rotated = mmcv.imrotate(
+ img, angle, center, scale, border_value=self.img_fill_val)
+ results[key] = img_rotated.astype(img.dtype)
+
+ def _rotate_bboxes(self, results, rotate_matrix):
+ """Rotate the bboxes."""
+ h, w, c = results['img_shape']
+ for key in results.get('bbox_fields', []):
+ min_x, min_y, max_x, max_y = np.split(
+ results[key], results[key].shape[-1], axis=-1)
+ coordinates = np.stack([[min_x, min_y], [max_x, min_y],
+ [min_x, max_y],
+ [max_x, max_y]]) # [4, 2, nb_bbox, 1]
+ # pad 1 to convert from format [x, y] to homogeneous
+ # coordinates format [x, y, 1]
+ coordinates = np.concatenate(
+ (coordinates,
+ np.ones((4, 1, coordinates.shape[2], 1), coordinates.dtype)),
+ axis=1) # [4, 3, nb_bbox, 1]
+ coordinates = coordinates.transpose(
+ (2, 0, 1, 3)) # [nb_bbox, 4, 3, 1]
+ rotated_coords = np.matmul(rotate_matrix,
+ coordinates) # [nb_bbox, 4, 2, 1]
+ rotated_coords = rotated_coords[..., 0] # [nb_bbox, 4, 2]
+ min_x, min_y = np.min(
+ rotated_coords[:, :, 0], axis=1), np.min(
+ rotated_coords[:, :, 1], axis=1)
+ max_x, max_y = np.max(
+ rotated_coords[:, :, 0], axis=1), np.max(
+ rotated_coords[:, :, 1], axis=1)
+ min_x, min_y = np.clip(
+ min_x, a_min=0, a_max=w), np.clip(
+ min_y, a_min=0, a_max=h)
+ max_x, max_y = np.clip(
+ max_x, a_min=min_x, a_max=w), np.clip(
+ max_y, a_min=min_y, a_max=h)
+ results[key] = np.stack([min_x, min_y, max_x, max_y],
+ axis=-1).astype(results[key].dtype)
+
+ def _rotate_masks(self,
+ results,
+ angle,
+ center=None,
+ scale=1.0,
+ fill_val=0):
+ """Rotate the masks."""
+ h, w, c = results['img_shape']
+ for key in results.get('mask_fields', []):
+ masks = results[key]
+ results[key] = masks.rotate((h, w), angle, center, scale, fill_val)
+
+ def _rotate_seg(self,
+ results,
+ angle,
+ center=None,
+ scale=1.0,
+ fill_val=255):
+ """Rotate the segmentation map."""
+ for key in results.get('seg_fields', []):
+ seg = results[key].copy()
+ results[key] = mmcv.imrotate(
+ seg, angle, center, scale,
+ border_value=fill_val).astype(seg.dtype)
+
+ def _filter_invalid(self, results, min_bbox_size=0):
+ """Filter bboxes and corresponding masks too small after rotate
+ augmentation."""
+ bbox2label, bbox2mask, _ = bbox2fields()
+ for key in results.get('bbox_fields', []):
+ bbox_w = results[key][:, 2] - results[key][:, 0]
+ bbox_h = results[key][:, 3] - results[key][:, 1]
+ valid_inds = (bbox_w > min_bbox_size) & (bbox_h > min_bbox_size)
+ valid_inds = np.nonzero(valid_inds)[0]
+ results[key] = results[key][valid_inds]
+ # label fields. e.g. gt_labels and gt_labels_ignore
+ label_key = bbox2label.get(key)
+ if label_key in results:
+ results[label_key] = results[label_key][valid_inds]
+ # mask fields, e.g. gt_masks and gt_masks_ignore
+ mask_key = bbox2mask.get(key)
+ if mask_key in results:
+ results[mask_key] = results[mask_key][valid_inds]
+
+ def __call__(self, results):
+ """Call function to rotate images, bounding boxes, masks and semantic
+ segmentation maps.
+
+ Args:
+ results (dict): Result dict from loading pipeline.
+
+ Returns:
+ dict: Rotated results.
+ """
+ if np.random.rand() > self.prob:
+ return results
+ h, w = results['img'].shape[:2]
+ center = self.center
+ if center is None:
+ center = ((w - 1) * 0.5, (h - 1) * 0.5)
+ angle = random_negative(self.angle, self.random_negative_prob)
+ self._rotate_img(results, angle, center, self.scale)
+ rotate_matrix = cv2.getRotationMatrix2D(center, -angle, self.scale)
+ self._rotate_bboxes(results, rotate_matrix)
+ self._rotate_masks(results, angle, center, self.scale, fill_val=0)
+ self._rotate_seg(
+ results, angle, center, self.scale, fill_val=self.seg_ignore_label)
+ self._filter_invalid(results)
+ return results
+
+ def __repr__(self):
+ repr_str = self.__class__.__name__
+ repr_str += f'(level={self.level}, '
+ repr_str += f'scale={self.scale}, '
+ repr_str += f'center={self.center}, '
+ repr_str += f'img_fill_val={self.img_fill_val}, '
+ repr_str += f'seg_ignore_label={self.seg_ignore_label}, '
+ repr_str += f'prob={self.prob}, '
+ repr_str += f'max_rotate_angle={self.max_rotate_angle}, '
+ repr_str += f'random_negative_prob={self.random_negative_prob})'
+ return repr_str
+
+
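+# NOTE: illustrative sketch, not part of the original mmdet file. It shows how
+# the Rotate transform defined above is typically configured inside a training
+# pipeline; the rotation angle is derived from ``level`` via
+# level_to_value(level, max_rotate_angle) and negated with probability
+# ``random_negative_prob``. The concrete values are arbitrary examples.
+_EXAMPLE_ROTATE_CFG = dict(
+    type='Rotate',
+    level=5,
+    prob=0.5,
+    max_rotate_angle=30,
+    img_fill_val=(124, 116, 104),
+    seg_ignore_label=255)
+
+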
+@PIPELINES.register_module()
+class Translate(object):
+ """Translate the images, bboxes, masks and segmentation maps horizontally
+ or vertically.
+
+ Args:
+ level (int | float): The level for Translate and should be in
+ range [0,_MAX_LEVEL].
+ prob (float): The probability for performing translation and
+ should be in range [0, 1].
+ img_fill_val (int | float | tuple): The filled value for image
+ border. If float, the same fill value will be used for all
+            the three channels of image. If tuple, it should have 3
+            elements (i.e. equal to the number of image channels).
+ seg_ignore_label (int): The fill value used for segmentation map.
+            Note this value must equal ``ignore_label`` in ``semantic_head``
+ of the corresponding config. Default 255.
+ direction (str): The translate direction, either "horizontal"
+ or "vertical".
+        max_translate_offset (int | float): The maximum pixel offset for
+ Translate.
+ random_negative_prob (float): The probability that turns the
+ offset negative.
+        min_size (int | float): The minimum pixel size for filtering
+            invalid bboxes after the translation.
+ """
+
+ def __init__(self,
+ level,
+ prob=0.5,
+ img_fill_val=128,
+ seg_ignore_label=255,
+ direction='horizontal',
+ max_translate_offset=250.,
+ random_negative_prob=0.5,
+ min_size=0):
+ assert isinstance(level, (int, float)), \
+ 'The level must be type int or float.'
+ assert 0 <= level <= _MAX_LEVEL, \
+ 'The level used for calculating Translate\'s offset should be ' \
+ 'in range [0,_MAX_LEVEL]'
+ assert 0 <= prob <= 1.0, \
+ 'The probability of translation should be in range [0, 1].'
+ if isinstance(img_fill_val, (float, int)):
+ img_fill_val = tuple([float(img_fill_val)] * 3)
+ elif isinstance(img_fill_val, tuple):
+ assert len(img_fill_val) == 3, \
+ 'img_fill_val as tuple must have 3 elements.'
+ img_fill_val = tuple([float(val) for val in img_fill_val])
+ else:
+ raise ValueError('img_fill_val must be type float or tuple.')
+ assert np.all([0 <= val <= 255 for val in img_fill_val]), \
+ 'all elements of img_fill_val should between range [0,255].'
+ assert direction in ('horizontal', 'vertical'), \
+ 'direction should be "horizontal" or "vertical".'
+ assert isinstance(max_translate_offset, (int, float)), \
+ 'The max_translate_offset must be type int or float.'
+ # the offset used for translation
+ self.offset = int(level_to_value(level, max_translate_offset))
+ self.level = level
+ self.prob = prob
+ self.img_fill_val = img_fill_val
+ self.seg_ignore_label = seg_ignore_label
+ self.direction = direction
+ self.max_translate_offset = max_translate_offset
+ self.random_negative_prob = random_negative_prob
+ self.min_size = min_size
+
+ def _translate_img(self, results, offset, direction='horizontal'):
+ """Translate the image.
+
+ Args:
+ results (dict): Result dict from loading pipeline.
+ offset (int | float): The offset for translate.
+ direction (str): The translate direction, either "horizontal"
+ or "vertical".
+ """
+ for key in results.get('img_fields', ['img']):
+ img = results[key].copy()
+ results[key] = mmcv.imtranslate(
+ img, offset, direction, self.img_fill_val).astype(img.dtype)
+
+ def _translate_bboxes(self, results, offset):
+ """Shift bboxes horizontally or vertically, according to offset."""
+ h, w, c = results['img_shape']
+ for key in results.get('bbox_fields', []):
+ min_x, min_y, max_x, max_y = np.split(
+ results[key], results[key].shape[-1], axis=-1)
+ if self.direction == 'horizontal':
+ min_x = np.maximum(0, min_x + offset)
+ max_x = np.minimum(w, max_x + offset)
+ elif self.direction == 'vertical':
+ min_y = np.maximum(0, min_y + offset)
+ max_y = np.minimum(h, max_y + offset)
+
+ # the boxes translated outside of image will be filtered along with
+ # the corresponding masks, by invoking ``_filter_invalid``.
+ results[key] = np.concatenate([min_x, min_y, max_x, max_y],
+ axis=-1)
+
+ def _translate_masks(self,
+ results,
+ offset,
+ direction='horizontal',
+ fill_val=0):
+ """Translate masks horizontally or vertically."""
+ h, w, c = results['img_shape']
+ for key in results.get('mask_fields', []):
+ masks = results[key]
+ results[key] = masks.translate((h, w), offset, direction, fill_val)
+
+ def _translate_seg(self,
+ results,
+ offset,
+ direction='horizontal',
+ fill_val=255):
+ """Translate segmentation maps horizontally or vertically."""
+ for key in results.get('seg_fields', []):
+ seg = results[key].copy()
+ results[key] = mmcv.imtranslate(seg, offset, direction,
+ fill_val).astype(seg.dtype)
+
+ def _filter_invalid(self, results, min_size=0):
+ """Filter bboxes and masks too small or translated out of image."""
+ bbox2label, bbox2mask, _ = bbox2fields()
+ for key in results.get('bbox_fields', []):
+ bbox_w = results[key][:, 2] - results[key][:, 0]
+ bbox_h = results[key][:, 3] - results[key][:, 1]
+ valid_inds = (bbox_w > min_size) & (bbox_h > min_size)
+ valid_inds = np.nonzero(valid_inds)[0]
+ results[key] = results[key][valid_inds]
+ # label fields. e.g. gt_labels and gt_labels_ignore
+ label_key = bbox2label.get(key)
+ if label_key in results:
+ results[label_key] = results[label_key][valid_inds]
+ # mask fields, e.g. gt_masks and gt_masks_ignore
+ mask_key = bbox2mask.get(key)
+ if mask_key in results:
+ results[mask_key] = results[mask_key][valid_inds]
+ return results
+
+ def __call__(self, results):
+ """Call function to translate images, bounding boxes, masks and
+ semantic segmentation maps.
+
+ Args:
+ results (dict): Result dict from loading pipeline.
+
+ Returns:
+ dict: Translated results.
+ """
+ if np.random.rand() > self.prob:
+ return results
+ offset = random_negative(self.offset, self.random_negative_prob)
+ self._translate_img(results, offset, self.direction)
+ self._translate_bboxes(results, offset)
+        # fill_val defaults to 0 for BitmapMasks and None for PolygonMasks.
+ self._translate_masks(results, offset, self.direction)
+ # fill_val set to ``seg_ignore_label`` for the ignored value
+ # of segmentation map.
+ self._translate_seg(
+ results, offset, self.direction, fill_val=self.seg_ignore_label)
+ self._filter_invalid(results, min_size=self.min_size)
+ return results
+
+
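+# NOTE: illustrative sketch, not part of the original mmdet file. A Translate
+# step as it could appear in a pipeline config; the pixel offset is
+# int(level_to_value(level, max_translate_offset)) and is negated with
+# probability ``random_negative_prob``. The values below are arbitrary.
+_EXAMPLE_TRANSLATE_CFG = dict(
+    type='Translate',
+    level=5,
+    prob=0.5,
+    direction='horizontal',
+    max_translate_offset=250.,
+    random_negative_prob=0.5)
+
+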
+@PIPELINES.register_module()
+class ColorTransform(object):
+ """Apply Color transformation to image. The bboxes, masks, and
+ segmentations are not modified.
+
+ Args:
+ level (int | float): Should be in range [0,_MAX_LEVEL].
+ prob (float): The probability for performing Color transformation.
+ """
+
+ def __init__(self, level, prob=0.5):
+ assert isinstance(level, (int, float)), \
+ 'The level must be type int or float.'
+ assert 0 <= level <= _MAX_LEVEL, \
+ 'The level should be in range [0,_MAX_LEVEL].'
+ assert 0 <= prob <= 1.0, \
+ 'The probability should be in range [0,1].'
+ self.level = level
+ self.prob = prob
+ self.factor = enhance_level_to_value(level)
+
+ def _adjust_color_img(self, results, factor=1.0):
+ """Apply Color transformation to image."""
+ for key in results.get('img_fields', ['img']):
+            # NOTE: by default the image is assumed to be in BGR format
+ img = results[key]
+ results[key] = mmcv.adjust_color(img, factor).astype(img.dtype)
+
+ def __call__(self, results):
+ """Call function for Color transformation.
+
+ Args:
+ results (dict): Result dict from loading pipeline.
+
+ Returns:
+ dict: Colored results.
+ """
+ if np.random.rand() > self.prob:
+ return results
+ self._adjust_color_img(results, self.factor)
+ return results
+
+ def __repr__(self):
+ repr_str = self.__class__.__name__
+ repr_str += f'(level={self.level}, '
+ repr_str += f'prob={self.prob})'
+ return repr_str
+
+
+@PIPELINES.register_module()
+class EqualizeTransform(object):
+ """Apply Equalize transformation to image. The bboxes, masks and
+ segmentations are not modified.
+
+ Args:
+ prob (float): The probability for performing Equalize transformation.
+ """
+
+ def __init__(self, prob=0.5):
+ assert 0 <= prob <= 1.0, \
+ 'The probability should be in range [0,1].'
+ self.prob = prob
+
+ def _imequalize(self, results):
+ """Equalizes the histogram of one image."""
+ for key in results.get('img_fields', ['img']):
+ img = results[key]
+ results[key] = mmcv.imequalize(img).astype(img.dtype)
+
+ def __call__(self, results):
+ """Call function for Equalize transformation.
+
+ Args:
+ results (dict): Results dict from loading pipeline.
+
+ Returns:
+ dict: Results after the transformation.
+ """
+ if np.random.rand() > self.prob:
+ return results
+ self._imequalize(results)
+ return results
+
+ def __repr__(self):
+ repr_str = self.__class__.__name__
+        repr_str += f'(prob={self.prob})'
+        return repr_str
+
+
+@PIPELINES.register_module()
+class BrightnessTransform(object):
+ """Apply Brightness transformation to image. The bboxes, masks and
+ segmentations are not modified.
+
+ Args:
+ level (int | float): Should be in range [0,_MAX_LEVEL].
+ prob (float): The probability for performing Brightness transformation.
+ """
+
+ def __init__(self, level, prob=0.5):
+ assert isinstance(level, (int, float)), \
+ 'The level must be type int or float.'
+ assert 0 <= level <= _MAX_LEVEL, \
+ 'The level should be in range [0,_MAX_LEVEL].'
+ assert 0 <= prob <= 1.0, \
+ 'The probability should be in range [0,1].'
+ self.level = level
+ self.prob = prob
+ self.factor = enhance_level_to_value(level)
+
+ def _adjust_brightness_img(self, results, factor=1.0):
+ """Adjust the brightness of image."""
+ for key in results.get('img_fields', ['img']):
+ img = results[key]
+ results[key] = mmcv.adjust_brightness(img,
+ factor).astype(img.dtype)
+
+ def __call__(self, results):
+ """Call function for Brightness transformation.
+
+ Args:
+ results (dict): Results dict from loading pipeline.
+
+ Returns:
+ dict: Results after the transformation.
+ """
+ if np.random.rand() > self.prob:
+ return results
+ self._adjust_brightness_img(results, self.factor)
+ return results
+
+ def __repr__(self):
+ repr_str = self.__class__.__name__
+ repr_str += f'(level={self.level}, '
+ repr_str += f'prob={self.prob})'
+ return repr_str
+
+
+@PIPELINES.register_module()
+class ContrastTransform(object):
+ """Apply Contrast transformation to image. The bboxes, masks and
+ segmentations are not modified.
+
+ Args:
+ level (int | float): Should be in range [0,_MAX_LEVEL].
+ prob (float): The probability for performing Contrast transformation.
+ """
+
+ def __init__(self, level, prob=0.5):
+ assert isinstance(level, (int, float)), \
+ 'The level must be type int or float.'
+ assert 0 <= level <= _MAX_LEVEL, \
+ 'The level should be in range [0,_MAX_LEVEL].'
+ assert 0 <= prob <= 1.0, \
+ 'The probability should be in range [0,1].'
+ self.level = level
+ self.prob = prob
+ self.factor = enhance_level_to_value(level)
+
+ def _adjust_contrast_img(self, results, factor=1.0):
+ """Adjust the image contrast."""
+ for key in results.get('img_fields', ['img']):
+ img = results[key]
+ results[key] = mmcv.adjust_contrast(img, factor).astype(img.dtype)
+
+ def __call__(self, results):
+ """Call function for Contrast transformation.
+
+ Args:
+ results (dict): Results dict from loading pipeline.
+
+ Returns:
+ dict: Results after the transformation.
+ """
+ if np.random.rand() > self.prob:
+ return results
+ self._adjust_contrast_img(results, self.factor)
+ return results
+
+ def __repr__(self):
+ repr_str = self.__class__.__name__
+ repr_str += f'(level={self.level}, '
+ repr_str += f'prob={self.prob})'
+ return repr_str
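+
+
+# NOTE: illustrative sketch, not part of the original mmdet file. The
+# photometric transforms above are usually mixed into a pipeline as config
+# dicts such as the following; the levels chosen here are arbitrary examples.
+_EXAMPLE_PHOTOMETRIC_CFGS = [
+    dict(type='EqualizeTransform', prob=0.5),
+    dict(type='BrightnessTransform', level=5, prob=0.5),
+    dict(type='ContrastTransform', level=5, prob=0.5),
+    dict(type='ColorTransform', level=5, prob=0.5),
+]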
diff --git a/annotator/uniformer/mmdet/datasets/pipelines/compose.py b/annotator/uniformer/mmdet/datasets/pipelines/compose.py
new file mode 100644
index 0000000000000000000000000000000000000000..ca48f1c935755c486edc2744e1713e2b5ba3cdc8
--- /dev/null
+++ b/annotator/uniformer/mmdet/datasets/pipelines/compose.py
@@ -0,0 +1,51 @@
+import collections
+
+from mmcv.utils import build_from_cfg
+
+from ..builder import PIPELINES
+
+
+@PIPELINES.register_module()
+class Compose(object):
+ """Compose multiple transforms sequentially.
+
+ Args:
+ transforms (Sequence[dict | callable]): Sequence of transform object or
+ config dict to be composed.
+ """
+
+ def __init__(self, transforms):
+ assert isinstance(transforms, collections.abc.Sequence)
+ self.transforms = []
+ for transform in transforms:
+ if isinstance(transform, dict):
+ transform = build_from_cfg(transform, PIPELINES)
+ self.transforms.append(transform)
+ elif callable(transform):
+ self.transforms.append(transform)
+ else:
+ raise TypeError('transform must be callable or a dict')
+
+ def __call__(self, data):
+ """Call function to apply transforms sequentially.
+
+ Args:
+ data (dict): A result dict contains the data to transform.
+
+ Returns:
+ dict: Transformed data.
+ """
+
+ for t in self.transforms:
+ data = t(data)
+ if data is None:
+ return None
+ return data
+
+ def __repr__(self):
+ format_string = self.__class__.__name__ + '('
+ for t in self.transforms:
+ format_string += '\n'
+ format_string += f' {t}'
+ format_string += '\n)'
+ return format_string
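+
+
+# NOTE: illustrative sketch, not part of the original mmdet file. Compose
+# accepts both config dicts (built through the PIPELINES registry, assuming
+# the referenced transform is registered elsewhere in this package) and plain
+# callables, which are used as-is.
+def _example_compose():
+    pipeline = Compose([
+        dict(type='LoadImageFromFile'),  # built via build_from_cfg
+        lambda results: results,         # any callable passes through unchanged
+    ])
+    return pipeline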
diff --git a/annotator/uniformer/mmdet/datasets/pipelines/formating.py b/annotator/uniformer/mmdet/datasets/pipelines/formating.py
new file mode 100644
index 0000000000000000000000000000000000000000..5781341bd48766a740f23ebba7a85cf8993642d7
--- /dev/null
+++ b/annotator/uniformer/mmdet/datasets/pipelines/formating.py
@@ -0,0 +1,364 @@
+from collections.abc import Sequence
+
+import mmcv
+import numpy as np
+import torch
+from mmcv.parallel import DataContainer as DC
+
+from ..builder import PIPELINES
+
+
+def to_tensor(data):
+ """Convert objects of various python types to :obj:`torch.Tensor`.
+
+ Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`,
+ :class:`Sequence`, :class:`int` and :class:`float`.
+
+ Args:
+ data (torch.Tensor | numpy.ndarray | Sequence | int | float): Data to
+ be converted.
+ """
+
+ if isinstance(data, torch.Tensor):
+ return data
+ elif isinstance(data, np.ndarray):
+ return torch.from_numpy(data)
+ elif isinstance(data, Sequence) and not mmcv.is_str(data):
+ return torch.tensor(data)
+ elif isinstance(data, int):
+ return torch.LongTensor([data])
+ elif isinstance(data, float):
+ return torch.FloatTensor([data])
+ else:
+ raise TypeError(f'type {type(data)} cannot be converted to tensor.')
+
+
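+# NOTE: illustrative sketch, not part of the original mmdet file, summarizing
+# the behaviour of ``to_tensor`` for each supported input type.
+def _to_tensor_examples():
+    assert to_tensor(np.ones((2, 3))).shape == (2, 3)  # ndarray -> Tensor
+    assert to_tensor([1, 2, 3]).tolist() == [1, 2, 3]  # sequence -> Tensor
+    assert to_tensor(5).dtype == torch.int64           # int -> LongTensor
+    assert to_tensor(0.5).dtype == torch.float32       # float -> FloatTensor
+
+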
+@PIPELINES.register_module()
+class ToTensor(object):
+ """Convert some results to :obj:`torch.Tensor` by given keys.
+
+ Args:
+ keys (Sequence[str]): Keys that need to be converted to Tensor.
+ """
+
+ def __init__(self, keys):
+ self.keys = keys
+
+ def __call__(self, results):
+ """Call function to convert data in results to :obj:`torch.Tensor`.
+
+ Args:
+ results (dict): Result dict contains the data to convert.
+
+ Returns:
+ dict: The result dict contains the data converted
+ to :obj:`torch.Tensor`.
+ """
+ for key in self.keys:
+ results[key] = to_tensor(results[key])
+ return results
+
+ def __repr__(self):
+ return self.__class__.__name__ + f'(keys={self.keys})'
+
+
+@PIPELINES.register_module()
+class ImageToTensor(object):
+ """Convert image to :obj:`torch.Tensor` by given keys.
+
+ The dimension order of input image is (H, W, C). The pipeline will convert
+    it to (C, H, W). If only 2 dimensions (H, W) are given, the output will be
+ (1, H, W).
+
+ Args:
+ keys (Sequence[str]): Key of images to be converted to Tensor.
+ """
+
+ def __init__(self, keys):
+ self.keys = keys
+
+ def __call__(self, results):
+ """Call function to convert image in results to :obj:`torch.Tensor` and
+ transpose the channel order.
+
+ Args:
+ results (dict): Result dict contains the image data to convert.
+
+ Returns:
+ dict: The result dict contains the image converted
+ to :obj:`torch.Tensor` and transposed to (C, H, W) order.
+ """
+ for key in self.keys:
+ img = results[key]
+ if len(img.shape) < 3:
+ img = np.expand_dims(img, -1)
+ results[key] = to_tensor(img.transpose(2, 0, 1))
+ return results
+
+ def __repr__(self):
+ return self.__class__.__name__ + f'(keys={self.keys})'
+
+
+@PIPELINES.register_module()
+class Transpose(object):
+ """Transpose some results by given keys.
+
+ Args:
+ keys (Sequence[str]): Keys of results to be transposed.
+ order (Sequence[int]): Order of transpose.
+ """
+
+ def __init__(self, keys, order):
+ self.keys = keys
+ self.order = order
+
+ def __call__(self, results):
+ """Call function to transpose the channel order of data in results.
+
+ Args:
+ results (dict): Result dict contains the data to transpose.
+
+ Returns:
+ dict: The result dict contains the data transposed to \
+ ``self.order``.
+ """
+ for key in self.keys:
+ results[key] = results[key].transpose(self.order)
+ return results
+
+ def __repr__(self):
+ return self.__class__.__name__ + \
+ f'(keys={self.keys}, order={self.order})'
+
+
+@PIPELINES.register_module()
+class ToDataContainer(object):
+ """Convert results to :obj:`mmcv.DataContainer` by given fields.
+
+ Args:
+ fields (Sequence[dict]): Each field is a dict like
+ ``dict(key='xxx', **kwargs)``. The ``key`` in result will
+ be converted to :obj:`mmcv.DataContainer` with ``**kwargs``.
+ Default: ``(dict(key='img', stack=True), dict(key='gt_bboxes'),
+ dict(key='gt_labels'))``.
+ """
+
+ def __init__(self,
+ fields=(dict(key='img', stack=True), dict(key='gt_bboxes'),
+ dict(key='gt_labels'))):
+ self.fields = fields
+
+ def __call__(self, results):
+ """Call function to convert data in results to
+ :obj:`mmcv.DataContainer`.
+
+ Args:
+ results (dict): Result dict contains the data to convert.
+
+ Returns:
+ dict: The result dict contains the data converted to \
+ :obj:`mmcv.DataContainer`.
+ """
+
+ for field in self.fields:
+ field = field.copy()
+ key = field.pop('key')
+ results[key] = DC(results[key], **field)
+ return results
+
+ def __repr__(self):
+ return self.__class__.__name__ + f'(fields={self.fields})'
+
+
+@PIPELINES.register_module()
+class DefaultFormatBundle(object):
+ """Default formatting bundle.
+
+ It simplifies the pipeline of formatting common fields, including "img",
+ "proposals", "gt_bboxes", "gt_labels", "gt_masks" and "gt_semantic_seg".
+ These fields are formatted as follows.
+
+ - img: (1)transpose, (2)to tensor, (3)to DataContainer (stack=True)
+ - proposals: (1)to tensor, (2)to DataContainer
+ - gt_bboxes: (1)to tensor, (2)to DataContainer
+ - gt_bboxes_ignore: (1)to tensor, (2)to DataContainer
+ - gt_labels: (1)to tensor, (2)to DataContainer
+ - gt_masks: (1)to tensor, (2)to DataContainer (cpu_only=True)
+ - gt_semantic_seg: (1)unsqueeze dim-0 (2)to tensor, \
+ (3)to DataContainer (stack=True)
+ """
+
+ def __call__(self, results):
+ """Call function to transform and format common fields in results.
+
+ Args:
+ results (dict): Result dict contains the data to convert.
+
+ Returns:
+ dict: The result dict contains the data that is formatted with \
+ default bundle.
+ """
+
+ if 'img' in results:
+ img = results['img']
+ # add default meta keys
+ results = self._add_default_meta_keys(results)
+ if len(img.shape) < 3:
+ img = np.expand_dims(img, -1)
+ img = np.ascontiguousarray(img.transpose(2, 0, 1))
+ results['img'] = DC(to_tensor(img), stack=True)
+ for key in ['proposals', 'gt_bboxes', 'gt_bboxes_ignore', 'gt_labels']:
+ if key not in results:
+ continue
+ results[key] = DC(to_tensor(results[key]))
+ if 'gt_masks' in results:
+ results['gt_masks'] = DC(results['gt_masks'], cpu_only=True)
+ if 'gt_semantic_seg' in results:
+ results['gt_semantic_seg'] = DC(
+ to_tensor(results['gt_semantic_seg'][None, ...]), stack=True)
+ return results
+
+ def _add_default_meta_keys(self, results):
+ """Add default meta keys.
+
+ We set default meta keys including `pad_shape`, `scale_factor` and
+ `img_norm_cfg` to avoid the case where no `Resize`, `Normalize` and
+ `Pad` are implemented during the whole pipeline.
+
+ Args:
+ results (dict): Result dict contains the data to convert.
+
+ Returns:
+ results (dict): Updated result dict contains the data to convert.
+ """
+ img = results['img']
+ results.setdefault('pad_shape', img.shape)
+ results.setdefault('scale_factor', 1.0)
+ num_channels = 1 if len(img.shape) < 3 else img.shape[2]
+ results.setdefault(
+ 'img_norm_cfg',
+ dict(
+ mean=np.zeros(num_channels, dtype=np.float32),
+ std=np.ones(num_channels, dtype=np.float32),
+ to_rgb=False))
+ return results
+
+ def __repr__(self):
+ return self.__class__.__name__
+
+
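+# NOTE: illustrative sketch, not part of the original mmdet file. After
+# DefaultFormatBundle, 'img' holds a stacked DataContainer with a (C, H, W)
+# tensor, and bbox/label fields are wrapped in DataContainer as well.
+def _example_default_format_bundle():
+    results = dict(
+        img=np.zeros((4, 4, 3), dtype=np.uint8),
+        gt_bboxes=np.array([[0, 0, 2, 2]], dtype=np.float32),
+        gt_labels=np.array([1], dtype=np.int64))
+    results = DefaultFormatBundle()(results)
+    return results['img'].data.shape  # torch.Size([3, 4, 4])
+
+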
+@PIPELINES.register_module()
+class Collect(object):
+ """Collect data from the loader relevant to the specific task.
+
+    This is usually the last stage of the data loader pipeline. Typically
+    ``keys`` is set to some subset of "img", "proposals", "gt_bboxes",
+    "gt_bboxes_ignore", "gt_labels", and/or "gt_masks".
+
+    The "img_meta" item is always populated. The contents of the "img_meta"
+    dictionary depend on "meta_keys". By default this includes:
+
+ - "img_shape": shape of the image input to the network as a tuple \
+ (h, w, c). Note that images may be zero padded on the \
+ bottom/right if the batch tensor is larger than this shape.
+
+ - "scale_factor": a float indicating the preprocessing scale
+
+ - "flip": a boolean indicating if image flip transform was used
+
+ - "filename": path to the image file
+
+ - "ori_shape": original shape of the image as a tuple (h, w, c)
+
+ - "pad_shape": image shape after padding
+
+ - "img_norm_cfg": a dict of normalization information:
+
+ - mean - per channel mean subtraction
+ - std - per channel std divisor
+ - to_rgb - bool indicating if bgr was converted to rgb
+
+ Args:
+ keys (Sequence[str]): Keys of results to be collected in ``data``.
+ meta_keys (Sequence[str], optional): Meta keys to be converted to
+ ``mmcv.DataContainer`` and collected in ``data[img_metas]``.
+ Default: ``('filename', 'ori_filename', 'ori_shape', 'img_shape',
+ 'pad_shape', 'scale_factor', 'flip', 'flip_direction',
+ 'img_norm_cfg')``
+ """
+
+ def __init__(self,
+ keys,
+ meta_keys=('filename', 'ori_filename', 'ori_shape',
+ 'img_shape', 'pad_shape', 'scale_factor', 'flip',
+ 'flip_direction', 'img_norm_cfg')):
+ self.keys = keys
+ self.meta_keys = meta_keys
+
+ def __call__(self, results):
+ """Call function to collect keys in results. The keys in ``meta_keys``
+ will be converted to :obj:mmcv.DataContainer.
+
+ Args:
+ results (dict): Result dict contains the data to collect.
+
+ Returns:
+ dict: The result dict contains the following keys
+
+ - keys in``self.keys``
+ - ``img_metas``
+ """
+
+ data = {}
+ img_meta = {}
+ for key in self.meta_keys:
+ img_meta[key] = results[key]
+ data['img_metas'] = DC(img_meta, cpu_only=True)
+ for key in self.keys:
+ data[key] = results[key]
+ return data
+
+ def __repr__(self):
+ return self.__class__.__name__ + \
+ f'(keys={self.keys}, meta_keys={self.meta_keys})'
+
+
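+# NOTE: illustrative sketch, not part of the original mmdet file. Collect keeps
+# only the requested keys plus an 'img_metas' DataContainer built from
+# ``meta_keys``; everything else is dropped.
+def _example_collect():
+    collect = Collect(keys=['img'], meta_keys=('ori_shape', 'img_shape'))
+    results = dict(
+        img=np.zeros((4, 4, 3)), ori_shape=(4, 4, 3), img_shape=(4, 4, 3),
+        gt_bboxes=np.zeros((0, 4)))
+    data = collect(results)
+    return sorted(data)  # ['img', 'img_metas']; 'gt_bboxes' is dropped
+
+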
+@PIPELINES.register_module()
+class WrapFieldsToLists(object):
+ """Wrap fields of the data dictionary into lists for evaluation.
+
+ This class can be used as a last step of a test or validation
+ pipeline for single image evaluation or inference.
+
+ Example:
+ >>> test_pipeline = [
+ >>> dict(type='LoadImageFromFile'),
+ >>> dict(type='Normalize',
+ mean=[123.675, 116.28, 103.53],
+ std=[58.395, 57.12, 57.375],
+ to_rgb=True),
+ >>> dict(type='Pad', size_divisor=32),
+ >>> dict(type='ImageToTensor', keys=['img']),
+ >>> dict(type='Collect', keys=['img']),
+ >>> dict(type='WrapFieldsToLists')
+ >>> ]
+ """
+
+ def __call__(self, results):
+ """Call function to wrap fields into lists.
+
+ Args:
+ results (dict): Result dict contains the data to wrap.
+
+ Returns:
+            dict: The result dict where each value is wrapped \
+                into a list.
+ """
+
+ # Wrap dict fields into lists
+ for key, val in results.items():
+ results[key] = [val]
+ return results
+
+ def __repr__(self):
+ return f'{self.__class__.__name__}()'
diff --git a/annotator/uniformer/mmdet/datasets/pipelines/instaboost.py b/annotator/uniformer/mmdet/datasets/pipelines/instaboost.py
new file mode 100644
index 0000000000000000000000000000000000000000..38b6819f60587a6e0c0f6d57bfda32bb3a7a4267
--- /dev/null
+++ b/annotator/uniformer/mmdet/datasets/pipelines/instaboost.py
@@ -0,0 +1,98 @@
+import numpy as np
+
+from ..builder import PIPELINES
+
+
+@PIPELINES.register_module()
+class InstaBoost(object):
+ r"""Data augmentation method in `InstaBoost: Boosting Instance
+ Segmentation Via Probability Map Guided Copy-Pasting
+ `_.
+
+ Refer to https://github.com/GothicAi/Instaboost for implementation details.
+ """
+
+ def __init__(self,
+ action_candidate=('normal', 'horizontal', 'skip'),
+ action_prob=(1, 0, 0),
+ scale=(0.8, 1.2),
+ dx=15,
+ dy=15,
+ theta=(-1, 1),
+ color_prob=0.5,
+ hflag=False,
+ aug_ratio=0.5):
+ try:
+ import instaboostfast as instaboost
+ except ImportError:
+ raise ImportError(
+ 'Please run "pip install instaboostfast" '
+ 'to install instaboostfast first for instaboost augmentation.')
+ self.cfg = instaboost.InstaBoostConfig(action_candidate, action_prob,
+ scale, dx, dy, theta,
+ color_prob, hflag)
+ self.aug_ratio = aug_ratio
+
+ def _load_anns(self, results):
+ labels = results['ann_info']['labels']
+ masks = results['ann_info']['masks']
+ bboxes = results['ann_info']['bboxes']
+ n = len(labels)
+
+ anns = []
+ for i in range(n):
+ label = labels[i]
+ bbox = bboxes[i]
+ mask = masks[i]
+ x1, y1, x2, y2 = bbox
+ # assert (x2 - x1) >= 1 and (y2 - y1) >= 1
+ bbox = [x1, y1, x2 - x1, y2 - y1]
+ anns.append({
+ 'category_id': label,
+ 'segmentation': mask,
+ 'bbox': bbox
+ })
+
+ return anns
+
+ def _parse_anns(self, results, anns, img):
+ gt_bboxes = []
+ gt_labels = []
+ gt_masks_ann = []
+ for ann in anns:
+ x1, y1, w, h = ann['bbox']
+ # TODO: more essential bug need to be fixed in instaboost
+ if w <= 0 or h <= 0:
+ continue
+ bbox = [x1, y1, x1 + w, y1 + h]
+ gt_bboxes.append(bbox)
+ gt_labels.append(ann['category_id'])
+ gt_masks_ann.append(ann['segmentation'])
+ gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
+ gt_labels = np.array(gt_labels, dtype=np.int64)
+ results['ann_info']['labels'] = gt_labels
+ results['ann_info']['bboxes'] = gt_bboxes
+ results['ann_info']['masks'] = gt_masks_ann
+ results['img'] = img
+ return results
+
+ def __call__(self, results):
+ img = results['img']
+ orig_type = img.dtype
+ anns = self._load_anns(results)
+ if np.random.choice([0, 1], p=[1 - self.aug_ratio, self.aug_ratio]):
+ try:
+ import instaboostfast as instaboost
+ except ImportError:
+ raise ImportError('Please run "pip install instaboostfast" '
+ 'to install instaboostfast first.')
+ anns, img = instaboost.get_new_data(
+ anns, img.astype(np.uint8), self.cfg, background=None)
+
+ results = self._parse_anns(results, anns, img.astype(orig_type))
+ return results
+
+ def __repr__(self):
+ repr_str = self.__class__.__name__
+ repr_str += f'(cfg={self.cfg}, aug_ratio={self.aug_ratio})'
+ return repr_str
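+
+
+# NOTE: illustrative sketch, not part of the original mmdet file. InstaBoost is
+# typically placed right after annotation loading in a pipeline config; it
+# requires ``pip install instaboostfast`` at runtime. The values below simply
+# restate the defaults of the class above.
+_EXAMPLE_INSTABOOST_CFG = dict(
+    type='InstaBoost',
+    action_candidate=('normal', 'horizontal', 'skip'),
+    action_prob=(1, 0, 0),
+    scale=(0.8, 1.2),
+    aug_ratio=0.5)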
diff --git a/annotator/uniformer/mmdet/datasets/pipelines/loading.py b/annotator/uniformer/mmdet/datasets/pipelines/loading.py
new file mode 100644
index 0000000000000000000000000000000000000000..69225941903f6b9d67b8b8c5fc3b1801cd964fb2
--- /dev/null
+++ b/annotator/uniformer/mmdet/datasets/pipelines/loading.py
@@ -0,0 +1,458 @@
+import os.path as osp
+
+import mmcv
+import numpy as np
+import pycocotools.mask as maskUtils
+
+from mmdet.core import BitmapMasks, PolygonMasks
+from ..builder import PIPELINES
+
+
+@PIPELINES.register_module()
+class LoadImageFromFile(object):
+ """Load an image from file.
+
+ Required keys are "img_prefix" and "img_info" (a dict that must contain the
+ key "filename"). Added or updated keys are "filename", "img", "img_shape",
+ "ori_shape" (same as `img_shape`), "pad_shape" (same as `img_shape`),
+ "scale_factor" (1.0) and "img_norm_cfg" (means=0 and stds=1).
+
+ Args:
+ to_float32 (bool): Whether to convert the loaded image to a float32
+            numpy array. If set to False, the loaded image is a uint8 array.
+ Defaults to False.
+ color_type (str): The flag argument for :func:`mmcv.imfrombytes`.
+ Defaults to 'color'.
+ file_client_args (dict): Arguments to instantiate a FileClient.
+ See :class:`mmcv.fileio.FileClient` for details.
+ Defaults to ``dict(backend='disk')``.
+ """
+
+ def __init__(self,
+ to_float32=False,
+ color_type='color',
+ file_client_args=dict(backend='disk')):
+ self.to_float32 = to_float32
+ self.color_type = color_type
+ self.file_client_args = file_client_args.copy()
+ self.file_client = None
+
+ def __call__(self, results):
+ """Call functions to load image and get image meta information.
+
+ Args:
+ results (dict): Result dict from :obj:`mmdet.CustomDataset`.
+
+ Returns:
+ dict: The dict contains loaded image and meta information.
+ """
+
+ if self.file_client is None:
+ self.file_client = mmcv.FileClient(**self.file_client_args)
+
+ if results['img_prefix'] is not None:
+ filename = osp.join(results['img_prefix'],
+ results['img_info']['filename'])
+ else:
+ filename = results['img_info']['filename']
+
+ img_bytes = self.file_client.get(filename)
+ img = mmcv.imfrombytes(img_bytes, flag=self.color_type)
+ if self.to_float32:
+ img = img.astype(np.float32)
+
+ results['filename'] = filename
+ results['ori_filename'] = results['img_info']['filename']
+ results['img'] = img
+ results['img_shape'] = img.shape
+ results['ori_shape'] = img.shape
+ results['img_fields'] = ['img']
+ return results
+
+ def __repr__(self):
+ repr_str = (f'{self.__class__.__name__}('
+ f'to_float32={self.to_float32}, '
+ f"color_type='{self.color_type}', "
+ f'file_client_args={self.file_client_args})')
+ return repr_str
+
+
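+# NOTE: illustrative sketch, not part of the original mmdet file. The prefix
+# and filename below are hypothetical and only show the structure of the
+# required input keys ('img_prefix' and 'img_info').
+def _example_load_image():
+    loader = LoadImageFromFile(to_float32=True)
+    results = dict(img_prefix='data/coco/train2017',   # hypothetical path
+                   img_info=dict(filename='example.jpg'))
+    results = loader(results)
+    return results['img'].dtype  # np.float32; 'img_shape'/'ori_shape' are set
+
+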
+@PIPELINES.register_module()
+class LoadImageFromWebcam(LoadImageFromFile):
+ """Load an image from webcam.
+
+    Similar to :obj:`LoadImageFromFile`, but the image read from webcam is in
+ ``results['img']``.
+ """
+
+ def __call__(self, results):
+ """Call functions to add image meta information.
+
+ Args:
+ results (dict): Result dict with Webcam read image in
+ ``results['img']``.
+
+ Returns:
+ dict: The dict contains loaded image and meta information.
+ """
+
+ img = results['img']
+ if self.to_float32:
+ img = img.astype(np.float32)
+
+ results['filename'] = None
+ results['ori_filename'] = None
+ results['img'] = img
+ results['img_shape'] = img.shape
+ results['ori_shape'] = img.shape
+ results['img_fields'] = ['img']
+ return results
+
+
+@PIPELINES.register_module()
+class LoadMultiChannelImageFromFiles(object):
+ """Load multi-channel images from a list of separate channel files.
+
+ Required keys are "img_prefix" and "img_info" (a dict that must contain the
+ key "filename", which is expected to be a list of filenames).
+ Added or updated keys are "filename", "img", "img_shape",
+ "ori_shape" (same as `img_shape`), "pad_shape" (same as `img_shape`),
+ "scale_factor" (1.0) and "img_norm_cfg" (means=0 and stds=1).
+
+ Args:
+ to_float32 (bool): Whether to convert the loaded image to a float32
+            numpy array. If set to False, the loaded image is a uint8 array.
+ Defaults to False.
+ color_type (str): The flag argument for :func:`mmcv.imfrombytes`.
+ Defaults to 'color'.
+ file_client_args (dict): Arguments to instantiate a FileClient.
+ See :class:`mmcv.fileio.FileClient` for details.
+ Defaults to ``dict(backend='disk')``.
+ """
+
+ def __init__(self,
+ to_float32=False,
+ color_type='unchanged',
+ file_client_args=dict(backend='disk')):
+ self.to_float32 = to_float32
+ self.color_type = color_type
+ self.file_client_args = file_client_args.copy()
+ self.file_client = None
+
+ def __call__(self, results):
+ """Call functions to load multiple images and get images meta
+ information.
+
+ Args:
+ results (dict): Result dict from :obj:`mmdet.CustomDataset`.
+
+ Returns:
+ dict: The dict contains loaded images and meta information.
+ """
+
+ if self.file_client is None:
+ self.file_client = mmcv.FileClient(**self.file_client_args)
+
+ if results['img_prefix'] is not None:
+ filename = [
+ osp.join(results['img_prefix'], fname)
+ for fname in results['img_info']['filename']
+ ]
+ else:
+ filename = results['img_info']['filename']
+
+ img = []
+ for name in filename:
+ img_bytes = self.file_client.get(name)
+ img.append(mmcv.imfrombytes(img_bytes, flag=self.color_type))
+ img = np.stack(img, axis=-1)
+ if self.to_float32:
+ img = img.astype(np.float32)
+
+ results['filename'] = filename
+ results['ori_filename'] = results['img_info']['filename']
+ results['img'] = img
+ results['img_shape'] = img.shape
+ results['ori_shape'] = img.shape
+ # Set initial values for default meta_keys
+ results['pad_shape'] = img.shape
+ results['scale_factor'] = 1.0
+ num_channels = 1 if len(img.shape) < 3 else img.shape[2]
+ results['img_norm_cfg'] = dict(
+ mean=np.zeros(num_channels, dtype=np.float32),
+ std=np.ones(num_channels, dtype=np.float32),
+ to_rgb=False)
+ return results
+
+ def __repr__(self):
+ repr_str = (f'{self.__class__.__name__}('
+ f'to_float32={self.to_float32}, '
+ f"color_type='{self.color_type}', "
+ f'file_client_args={self.file_client_args})')
+ return repr_str
+
+
+@PIPELINES.register_module()
+class LoadAnnotations(object):
+    """Load multiple types of annotations.
+
+ Args:
+ with_bbox (bool): Whether to parse and load the bbox annotation.
+ Default: True.
+ with_label (bool): Whether to parse and load the label annotation.
+ Default: True.
+ with_mask (bool): Whether to parse and load the mask annotation.
+ Default: False.
+ with_seg (bool): Whether to parse and load the semantic segmentation
+ annotation. Default: False.
+ poly2mask (bool): Whether to convert the instance masks from polygons
+ to bitmaps. Default: True.
+ file_client_args (dict): Arguments to instantiate a FileClient.
+ See :class:`mmcv.fileio.FileClient` for details.
+ Defaults to ``dict(backend='disk')``.
+ """
+
+ def __init__(self,
+ with_bbox=True,
+ with_label=True,
+ with_mask=False,
+ with_seg=False,
+ poly2mask=True,
+ file_client_args=dict(backend='disk')):
+ self.with_bbox = with_bbox
+ self.with_label = with_label
+ self.with_mask = with_mask
+ self.with_seg = with_seg
+ self.poly2mask = poly2mask
+ self.file_client_args = file_client_args.copy()
+ self.file_client = None
+
+ def _load_bboxes(self, results):
+ """Private function to load bounding box annotations.
+
+ Args:
+ results (dict): Result dict from :obj:`mmdet.CustomDataset`.
+
+ Returns:
+ dict: The dict contains loaded bounding box annotations.
+ """
+
+ ann_info = results['ann_info']
+ results['gt_bboxes'] = ann_info['bboxes'].copy()
+
+ gt_bboxes_ignore = ann_info.get('bboxes_ignore', None)
+ if gt_bboxes_ignore is not None:
+ results['gt_bboxes_ignore'] = gt_bboxes_ignore.copy()
+ results['bbox_fields'].append('gt_bboxes_ignore')
+ results['bbox_fields'].append('gt_bboxes')
+ return results
+
+ def _load_labels(self, results):
+ """Private function to load label annotations.
+
+ Args:
+ results (dict): Result dict from :obj:`mmdet.CustomDataset`.
+
+ Returns:
+ dict: The dict contains loaded label annotations.
+ """
+
+ results['gt_labels'] = results['ann_info']['labels'].copy()
+ return results
+
+ def _poly2mask(self, mask_ann, img_h, img_w):
+ """Private function to convert masks represented with polygon to
+ bitmaps.
+
+ Args:
+ mask_ann (list | dict): Polygon mask annotation input.
+ img_h (int): The height of output mask.
+ img_w (int): The width of output mask.
+
+ Returns:
+            numpy.ndarray: The decoded bitmap mask of shape (img_h, img_w).
+ """
+
+ if isinstance(mask_ann, list):
+ # polygon -- a single object might consist of multiple parts
+ # we merge all parts into one mask rle code
+ rles = maskUtils.frPyObjects(mask_ann, img_h, img_w)
+ rle = maskUtils.merge(rles)
+ elif isinstance(mask_ann['counts'], list):
+ # uncompressed RLE
+ rle = maskUtils.frPyObjects(mask_ann, img_h, img_w)
+ else:
+ # rle
+ rle = mask_ann
+ mask = maskUtils.decode(rle)
+ return mask
+
+ def process_polygons(self, polygons):
+ """Convert polygons to list of ndarray and filter invalid polygons.
+
+ Args:
+ polygons (list[list]): Polygons of one instance.
+
+ Returns:
+ list[numpy.ndarray]: Processed polygons.
+ """
+
+ polygons = [np.array(p) for p in polygons]
+ valid_polygons = []
+ for polygon in polygons:
+ if len(polygon) % 2 == 0 and len(polygon) >= 6:
+ valid_polygons.append(polygon)
+ return valid_polygons
+
+ def _load_masks(self, results):
+ """Private function to load mask annotations.
+
+ Args:
+ results (dict): Result dict from :obj:`mmdet.CustomDataset`.
+
+ Returns:
+ dict: The dict contains loaded mask annotations.
+                If ``self.poly2mask`` is set ``True``, `gt_mask` will contain
+                :obj:`BitmapMasks`. Otherwise, :obj:`PolygonMasks` is used.
+ """
+
+ h, w = results['img_info']['height'], results['img_info']['width']
+ gt_masks = results['ann_info']['masks']
+ if self.poly2mask:
+ gt_masks = BitmapMasks(
+ [self._poly2mask(mask, h, w) for mask in gt_masks], h, w)
+ else:
+ gt_masks = PolygonMasks(
+ [self.process_polygons(polygons) for polygons in gt_masks], h,
+ w)
+ results['gt_masks'] = gt_masks
+ results['mask_fields'].append('gt_masks')
+ return results
+
+ def _load_semantic_seg(self, results):
+ """Private function to load semantic segmentation annotations.
+
+ Args:
+ results (dict): Result dict from :obj:`dataset`.
+
+ Returns:
+ dict: The dict contains loaded semantic segmentation annotations.
+ """
+
+ if self.file_client is None:
+ self.file_client = mmcv.FileClient(**self.file_client_args)
+
+ filename = osp.join(results['seg_prefix'],
+ results['ann_info']['seg_map'])
+ img_bytes = self.file_client.get(filename)
+ results['gt_semantic_seg'] = mmcv.imfrombytes(
+ img_bytes, flag='unchanged').squeeze()
+ results['seg_fields'].append('gt_semantic_seg')
+ return results
+
+ def __call__(self, results):
+ """Call function to load multiple types annotations.
+
+ Args:
+ results (dict): Result dict from :obj:`mmdet.CustomDataset`.
+
+ Returns:
+ dict: The dict contains loaded bounding box, label, mask and
+ semantic segmentation annotations.
+ """
+
+ if self.with_bbox:
+ results = self._load_bboxes(results)
+ if results is None:
+ return None
+ if self.with_label:
+ results = self._load_labels(results)
+ if self.with_mask:
+ results = self._load_masks(results)
+ if self.with_seg:
+ results = self._load_semantic_seg(results)
+ return results
+
+ def __repr__(self):
+ repr_str = self.__class__.__name__
+ repr_str += f'(with_bbox={self.with_bbox}, '
+ repr_str += f'with_label={self.with_label}, '
+ repr_str += f'with_mask={self.with_mask}, '
+ repr_str += f'with_seg={self.with_seg}, '
+ repr_str += f'poly2mask={self.poly2mask}, '
+        repr_str += f'file_client_args={self.file_client_args})'
+ return repr_str
+
+
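+# NOTE: illustrative sketch, not part of the original mmdet file. Typical
+# instance-segmentation settings for LoadAnnotations: load boxes, labels and
+# polygon annotations converted to bitmap masks.
+_EXAMPLE_LOAD_ANN_CFG = dict(
+    type='LoadAnnotations',
+    with_bbox=True,
+    with_label=True,
+    with_mask=True,
+    poly2mask=True)
+
+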
+@PIPELINES.register_module()
+class LoadProposals(object):
+ """Load proposal pipeline.
+
+ Required key is "proposals". Updated keys are "proposals", "bbox_fields".
+
+ Args:
+ num_max_proposals (int, optional): Maximum number of proposals to load.
+ If not specified, all proposals will be loaded.
+ """
+
+ def __init__(self, num_max_proposals=None):
+ self.num_max_proposals = num_max_proposals
+
+ def __call__(self, results):
+ """Call function to load proposals from file.
+
+ Args:
+ results (dict): Result dict from :obj:`mmdet.CustomDataset`.
+
+ Returns:
+ dict: The dict contains loaded proposal annotations.
+ """
+
+ proposals = results['proposals']
+ if proposals.shape[1] not in (4, 5):
+ raise AssertionError(
+ 'proposals should have shapes (n, 4) or (n, 5), '
+ f'but found {proposals.shape}')
+ proposals = proposals[:, :4]
+
+ if self.num_max_proposals is not None:
+ proposals = proposals[:self.num_max_proposals]
+
+ if len(proposals) == 0:
+ proposals = np.array([[0, 0, 0, 0]], dtype=np.float32)
+ results['proposals'] = proposals
+ results['bbox_fields'].append('proposals')
+ return results
+
+ def __repr__(self):
+ return self.__class__.__name__ + \
+ f'(num_max_proposals={self.num_max_proposals})'
+
+
+@PIPELINES.register_module()
+class FilterAnnotations(object):
+ """Filter invalid annotations.
+
+ Args:
+ min_gt_bbox_wh (tuple[int]): Minimum width and height of ground truth
+ boxes.
+ """
+
+ def __init__(self, min_gt_bbox_wh):
+ # TODO: add more filter options
+ self.min_gt_bbox_wh = min_gt_bbox_wh
+
+ def __call__(self, results):
+ assert 'gt_bboxes' in results
+ gt_bboxes = results['gt_bboxes']
+ w = gt_bboxes[:, 2] - gt_bboxes[:, 0]
+ h = gt_bboxes[:, 3] - gt_bboxes[:, 1]
+ keep = (w > self.min_gt_bbox_wh[0]) & (h > self.min_gt_bbox_wh[1])
+ if not keep.any():
+ return None
+ else:
+ keys = ('gt_bboxes', 'gt_labels', 'gt_masks', 'gt_semantic_seg')
+ for key in keys:
+ if key in results:
+ results[key] = results[key][keep]
+ return results
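+
+
+# NOTE: illustrative sketch, not part of the original mmdet file. Boxes smaller
+# than ``min_gt_bbox_wh`` are dropped together with the corresponding labels.
+def _example_filter_annotations():
+    f = FilterAnnotations(min_gt_bbox_wh=(8, 8))
+    results = dict(
+        gt_bboxes=np.array([[0, 0, 4, 4], [0, 0, 32, 32]], dtype=np.float32),
+        gt_labels=np.array([0, 1], dtype=np.int64))
+    results = f(results)
+    return results['gt_labels']  # array([1]); the 4x4 box is filtered out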
diff --git a/annotator/uniformer/mmdet/datasets/pipelines/test_time_aug.py b/annotator/uniformer/mmdet/datasets/pipelines/test_time_aug.py
new file mode 100644
index 0000000000000000000000000000000000000000..b6226e040499882c99f15594c66ebf3d07829168
--- /dev/null
+++ b/annotator/uniformer/mmdet/datasets/pipelines/test_time_aug.py
@@ -0,0 +1,119 @@
+import warnings
+
+import mmcv
+
+from ..builder import PIPELINES
+from .compose import Compose
+
+
+@PIPELINES.register_module()
+class MultiScaleFlipAug(object):
+ """Test-time augmentation with multiple scales and flipping.
+
+    An example configuration is as follows:
+
+ .. code-block::
+
+ img_scale=[(1333, 400), (1333, 800)],
+ flip=True,
+ transforms=[
+ dict(type='Resize', keep_ratio=True),
+ dict(type='RandomFlip'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='Pad', size_divisor=32),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img']),
+ ]
+
+    After MultiScaleFlipAug with the above configuration, the results are
+    wrapped into lists of the same length, as follows:
+
+ .. code-block::
+
+ dict(
+ img=[...],
+ img_shape=[...],
+ scale=[(1333, 400), (1333, 400), (1333, 800), (1333, 800)]
+ flip=[False, True, False, True]
+ ...
+ )
+
+ Args:
+ transforms (list[dict]): Transforms to apply in each augmentation.
+ img_scale (tuple | list[tuple] | None): Images scales for resizing.
+ scale_factor (float | list[float] | None): Scale factors for resizing.
+ flip (bool): Whether apply flip augmentation. Default: False.
+ flip_direction (str | list[str]): Flip augmentation directions,
+            options are "horizontal" and "vertical". If flip_direction is a list,
+ multiple flip augmentations will be applied.
+ It has no effect when flip == False. Default: "horizontal".
+ """
+
+ def __init__(self,
+ transforms,
+ img_scale=None,
+ scale_factor=None,
+ flip=False,
+ flip_direction='horizontal'):
+ self.transforms = Compose(transforms)
+ assert (img_scale is None) ^ (scale_factor is None), (
+            'Exactly one of img_scale and scale_factor must be set')
+ if img_scale is not None:
+ self.img_scale = img_scale if isinstance(img_scale,
+ list) else [img_scale]
+ self.scale_key = 'scale'
+ assert mmcv.is_list_of(self.img_scale, tuple)
+ else:
+ self.img_scale = scale_factor if isinstance(
+ scale_factor, list) else [scale_factor]
+ self.scale_key = 'scale_factor'
+
+ self.flip = flip
+ self.flip_direction = flip_direction if isinstance(
+ flip_direction, list) else [flip_direction]
+ assert mmcv.is_list_of(self.flip_direction, str)
+ if not self.flip and self.flip_direction != ['horizontal']:
+ warnings.warn(
+ 'flip_direction has no effect when flip is set to False')
+ if (self.flip
+ and not any([t['type'] == 'RandomFlip' for t in transforms])):
+ warnings.warn(
+ 'flip has no effect when RandomFlip is not in transforms')
+
+ def __call__(self, results):
+ """Call function to apply test time augment transforms on results.
+
+ Args:
+ results (dict): Result dict contains the data to transform.
+
+ Returns:
+ dict[str: list]: The augmented data, where each value is wrapped
+ into a list.
+ """
+
+ aug_data = []
+ flip_args = [(False, None)]
+ if self.flip:
+ flip_args += [(True, direction)
+ for direction in self.flip_direction]
+ for scale in self.img_scale:
+ for flip, direction in flip_args:
+ _results = results.copy()
+ _results[self.scale_key] = scale
+ _results['flip'] = flip
+ _results['flip_direction'] = direction
+ data = self.transforms(_results)
+ aug_data.append(data)
+ # list of dict to dict of list
+ aug_data_dict = {key: [] for key in aug_data[0]}
+ for data in aug_data:
+ for key, val in data.items():
+ aug_data_dict[key].append(val)
+ return aug_data_dict
+
+ def __repr__(self):
+ repr_str = self.__class__.__name__
+ repr_str += f'(transforms={self.transforms}, '
+ repr_str += f'img_scale={self.img_scale}, flip={self.flip}, '
+ repr_str += f'flip_direction={self.flip_direction})'
+ return repr_str
diff --git a/annotator/uniformer/mmdet/datasets/pipelines/transforms.py b/annotator/uniformer/mmdet/datasets/pipelines/transforms.py
new file mode 100644
index 0000000000000000000000000000000000000000..caed51d89ffc1259d0b086954f03c3d4c0749cf2
--- /dev/null
+++ b/annotator/uniformer/mmdet/datasets/pipelines/transforms.py
@@ -0,0 +1,1811 @@
+import copy
+import inspect
+
+import mmcv
+import numpy as np
+from numpy import random
+
+from mmdet.core import PolygonMasks
+from mmdet.core.evaluation.bbox_overlaps import bbox_overlaps
+from ..builder import PIPELINES
+
+try:
+ from imagecorruptions import corrupt
+except ImportError:
+ corrupt = None
+
+try:
+ import albumentations
+ from albumentations import Compose
+except ImportError:
+ albumentations = None
+ Compose = None
+
+
+@PIPELINES.register_module()
+class Resize(object):
+ """Resize images & bbox & mask.
+
+ This transform resizes the input image to some scale. Bboxes and masks are
+ then resized with the same scale factor. If the input dict contains the key
+ "scale", then the scale in the input dict is used, otherwise the specified
+ scale in the init method is used. If the input dict contains the key
+ "scale_factor" (if MultiScaleFlipAug does not give img_scale but
+ scale_factor), the actual scale will be computed by image shape and
+ scale_factor.
+
+ `img_scale` can either be a tuple (single-scale) or a list of tuple
+ (multi-scale). There are 3 multiscale modes:
+
+ - ``ratio_range is not None``: randomly sample a ratio from the ratio \
+ range and multiply it with the image scale.
+ - ``ratio_range is None`` and ``multiscale_mode == "range"``: randomly \
+ sample a scale from the multiscale range.
+ - ``ratio_range is None`` and ``multiscale_mode == "value"``: randomly \
+ sample a scale from multiple scales.
+
+ Args:
+ img_scale (tuple or list[tuple]): Images scales for resizing.
+ multiscale_mode (str): Either "range" or "value".
+ ratio_range (tuple[float]): (min_ratio, max_ratio)
+ keep_ratio (bool): Whether to keep the aspect ratio when resizing the
+ image.
+ bbox_clip_border (bool, optional): Whether clip the objects outside
+ the border of the image. Defaults to True.
+ backend (str): Image resize backend, choices are 'cv2' and 'pillow'.
+            These two backends generate slightly different results. Defaults
+ to 'cv2'.
+ override (bool, optional): Whether to override `scale` and
+            `scale_factor` so as to call resize twice. If True, after the
+            first resize, the existing `scale` and `scale_factor` will be
+            ignored so that a second resize can take effect. This option is
+            a workaround for calling resize multiple times in DETR.
+            Defaults to False.
+ """
+
+ def __init__(self,
+ img_scale=None,
+ multiscale_mode='range',
+ ratio_range=None,
+ keep_ratio=True,
+ bbox_clip_border=True,
+ backend='cv2',
+ override=False):
+ if img_scale is None:
+ self.img_scale = None
+ else:
+ if isinstance(img_scale, list):
+ self.img_scale = img_scale
+ else:
+ self.img_scale = [img_scale]
+ assert mmcv.is_list_of(self.img_scale, tuple)
+
+ if ratio_range is not None:
+ # mode 1: given a scale and a range of image ratio
+ assert len(self.img_scale) == 1
+ else:
+ # mode 2: given multiple scales or a range of scales
+ assert multiscale_mode in ['value', 'range']
+
+ self.backend = backend
+ self.multiscale_mode = multiscale_mode
+ self.ratio_range = ratio_range
+ self.keep_ratio = keep_ratio
+ # TODO: refactor the override option in Resize
+ self.override = override
+ self.bbox_clip_border = bbox_clip_border
+
+ @staticmethod
+ def random_select(img_scales):
+ """Randomly select an img_scale from given candidates.
+
+ Args:
+ img_scales (list[tuple]): Images scales for selection.
+
+ Returns:
+            (tuple, int): Returns a tuple ``(img_scale, scale_idx)``, \
+ where ``img_scale`` is the selected image scale and \
+ ``scale_idx`` is the selected index in the given candidates.
+ """
+
+ assert mmcv.is_list_of(img_scales, tuple)
+ scale_idx = np.random.randint(len(img_scales))
+ img_scale = img_scales[scale_idx]
+ return img_scale, scale_idx
+
+ @staticmethod
+ def random_sample(img_scales):
+ """Randomly sample an img_scale when ``multiscale_mode=='range'``.
+
+ Args:
+ img_scales (list[tuple]): Images scale range for sampling.
+ There must be two tuples in img_scales, which specify the lower
+ and upper bound of image scales.
+
+ Returns:
+ (tuple, None): Returns a tuple ``(img_scale, None)``, where \
+ ``img_scale`` is sampled scale and None is just a placeholder \
+ to be consistent with :func:`random_select`.
+ """
+
+ assert mmcv.is_list_of(img_scales, tuple) and len(img_scales) == 2
+ img_scale_long = [max(s) for s in img_scales]
+ img_scale_short = [min(s) for s in img_scales]
+ long_edge = np.random.randint(
+ min(img_scale_long),
+ max(img_scale_long) + 1)
+ short_edge = np.random.randint(
+ min(img_scale_short),
+ max(img_scale_short) + 1)
+ img_scale = (long_edge, short_edge)
+ return img_scale, None
+
+ @staticmethod
+ def random_sample_ratio(img_scale, ratio_range):
+ """Randomly sample an img_scale when ``ratio_range`` is specified.
+
+ A ratio will be randomly sampled from the range specified by
+ ``ratio_range``. Then it would be multiplied with ``img_scale`` to
+ generate sampled scale.
+
+ Args:
+ img_scale (tuple): Images scale base to multiply with ratio.
+ ratio_range (tuple[float]): The minimum and maximum ratio to scale
+ the ``img_scale``.
+
+ Returns:
+ (tuple, None): Returns a tuple ``(scale, None)``, where \
+ ``scale`` is sampled ratio multiplied with ``img_scale`` and \
+ None is just a placeholder to be consistent with \
+ :func:`random_select`.
+ """
+
+ assert isinstance(img_scale, tuple) and len(img_scale) == 2
+ min_ratio, max_ratio = ratio_range
+ assert min_ratio <= max_ratio
+ ratio = np.random.random_sample() * (max_ratio - min_ratio) + min_ratio
+ scale = int(img_scale[0] * ratio), int(img_scale[1] * ratio)
+ return scale, None
+
+ def _random_scale(self, results):
+ """Randomly sample an img_scale according to ``ratio_range`` and
+ ``multiscale_mode``.
+
+ If ``ratio_range`` is specified, a ratio will be sampled and be
+ multiplied with ``img_scale``.
+ If multiple scales are specified by ``img_scale``, a scale will be
+ sampled according to ``multiscale_mode``.
+ Otherwise, single scale will be used.
+
+ Args:
+ results (dict): Result dict from :obj:`dataset`.
+
+ Returns:
+            dict: Two new keys ``scale`` and ``scale_idx`` are added into \
+ ``results``, which would be used by subsequent pipelines.
+ """
+
+ if self.ratio_range is not None:
+ scale, scale_idx = self.random_sample_ratio(
+ self.img_scale[0], self.ratio_range)
+ elif len(self.img_scale) == 1:
+ scale, scale_idx = self.img_scale[0], 0
+ elif self.multiscale_mode == 'range':
+ scale, scale_idx = self.random_sample(self.img_scale)
+ elif self.multiscale_mode == 'value':
+ scale, scale_idx = self.random_select(self.img_scale)
+ else:
+ raise NotImplementedError
+
+ results['scale'] = scale
+ results['scale_idx'] = scale_idx
+
+ def _resize_img(self, results):
+ """Resize images with ``results['scale']``."""
+ for key in results.get('img_fields', ['img']):
+ if self.keep_ratio:
+ img, scale_factor = mmcv.imrescale(
+ results[key],
+ results['scale'],
+ return_scale=True,
+ backend=self.backend)
+                # w_scale and h_scale may have a minor difference;
+                # a real fix should be done in mmcv.imrescale in the future
+ new_h, new_w = img.shape[:2]
+ h, w = results[key].shape[:2]
+ w_scale = new_w / w
+ h_scale = new_h / h
+ else:
+ img, w_scale, h_scale = mmcv.imresize(
+ results[key],
+ results['scale'],
+ return_scale=True,
+ backend=self.backend)
+ results[key] = img
+
+ scale_factor = np.array([w_scale, h_scale, w_scale, h_scale],
+ dtype=np.float32)
+ results['img_shape'] = img.shape
+ # in case that there is no padding
+ results['pad_shape'] = img.shape
+ results['scale_factor'] = scale_factor
+ results['keep_ratio'] = self.keep_ratio
+
+ def _resize_bboxes(self, results):
+ """Resize bounding boxes with ``results['scale_factor']``."""
+ for key in results.get('bbox_fields', []):
+ bboxes = results[key] * results['scale_factor']
+ if self.bbox_clip_border:
+ img_shape = results['img_shape']
+ bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, img_shape[1])
+ bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, img_shape[0])
+ results[key] = bboxes
+
+ def _resize_masks(self, results):
+ """Resize masks with ``results['scale']``"""
+ for key in results.get('mask_fields', []):
+ if results[key] is None:
+ continue
+ if self.keep_ratio:
+ results[key] = results[key].rescale(results['scale'])
+ else:
+ results[key] = results[key].resize(results['img_shape'][:2])
+
+ def _resize_seg(self, results):
+ """Resize semantic segmentation map with ``results['scale']``."""
+ for key in results.get('seg_fields', []):
+ if self.keep_ratio:
+ gt_seg = mmcv.imrescale(
+ results[key],
+ results['scale'],
+ interpolation='nearest',
+ backend=self.backend)
+ else:
+ gt_seg = mmcv.imresize(
+ results[key],
+ results['scale'],
+ interpolation='nearest',
+ backend=self.backend)
+            results[key] = gt_seg
+
+ def __call__(self, results):
+ """Call function to resize images, bounding boxes, masks, semantic
+ segmentation map.
+
+ Args:
+ results (dict): Result dict from loading pipeline.
+
+ Returns:
+ dict: Resized results, 'img_shape', 'pad_shape', 'scale_factor', \
+ 'keep_ratio' keys are added into result dict.
+ """
+
+ if 'scale' not in results:
+ if 'scale_factor' in results:
+ img_shape = results['img'].shape[:2]
+ scale_factor = results['scale_factor']
+ assert isinstance(scale_factor, float)
+ results['scale'] = tuple(
+ [int(x * scale_factor) for x in img_shape][::-1])
+ else:
+ self._random_scale(results)
+ else:
+ if not self.override:
+ assert 'scale_factor' not in results, (
+ 'scale and scale_factor cannot be both set.')
+ else:
+ results.pop('scale')
+ if 'scale_factor' in results:
+ results.pop('scale_factor')
+ self._random_scale(results)
+
+ self._resize_img(results)
+ self._resize_bboxes(results)
+ self._resize_masks(results)
+ self._resize_seg(results)
+ return results
+
+ def __repr__(self):
+ repr_str = self.__class__.__name__
+ repr_str += f'(img_scale={self.img_scale}, '
+ repr_str += f'multiscale_mode={self.multiscale_mode}, '
+ repr_str += f'ratio_range={self.ratio_range}, '
+ repr_str += f'keep_ratio={self.keep_ratio}, '
+ repr_str += f'bbox_clip_border={self.bbox_clip_border})'
+ return repr_str
+
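+# Illustrative config sketches (not part of the original mmdet module; the
+# scale values below are assumptions): with multiscale_mode='range' a scale is
+# sampled between the two img_scale bounds, while 'value' picks one of the
+# listed scales at random.
+_example_resize_range = dict(
+    type='Resize',
+    img_scale=[(1333, 640), (1333, 800)],
+    multiscale_mode='range',
+    keep_ratio=True)
+_example_resize_value = dict(
+    type='Resize',
+    img_scale=[(1333, 672), (1333, 800)],
+    multiscale_mode='value',
+    keep_ratio=True)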
+
+@PIPELINES.register_module()
+class RandomFlip(object):
+ """Flip the image & bbox & mask.
+
+ If the input dict contains the key "flip", then the flag will be used,
+ otherwise it will be randomly decided by a ratio specified in the init
+ method.
+
+ When random flip is enabled, ``flip_ratio``/``direction`` can either be a
+ float/string or tuple of float/string. There are 3 flip modes:
+
+ - ``flip_ratio`` is float, ``direction`` is string: the image will be
+ ``direction``ly flipped with probability of ``flip_ratio`` .
+ E.g., ``flip_ratio=0.5``, ``direction='horizontal'``,
+ then image will be horizontally flipped with probability of 0.5.
+    - ``flip_ratio`` is float, ``direction`` is list of string: the image will
+ be ``direction[i]``ly flipped with probability of
+ ``flip_ratio/len(direction)``.
+ E.g., ``flip_ratio=0.5``, ``direction=['horizontal', 'vertical']``,
+ then image will be horizontally flipped with probability of 0.25,
+ vertically with probability of 0.25.
+ - ``flip_ratio`` is list of float, ``direction`` is list of string:
+      given ``len(flip_ratio) == len(direction)``, the image will
+ be ``direction[i]``ly flipped with probability of ``flip_ratio[i]``.
+ E.g., ``flip_ratio=[0.3, 0.5]``, ``direction=['horizontal',
+ 'vertical']``, then image will be horizontally flipped with probability
+      of 0.3, and vertically with probability of 0.5.
+
+ Args:
+ flip_ratio (float | list[float], optional): The flipping probability.
+ Default: None.
+        direction (str | list[str], optional): The flipping direction. Options
+            are 'horizontal', 'vertical', 'diagonal'. Default: 'horizontal'.
+            If the input is a list, its length must equal the length of
+            ``flip_ratio``. Each element in ``flip_ratio`` indicates the flip
+            probability of the corresponding direction.
+ """
+
+ def __init__(self, flip_ratio=None, direction='horizontal'):
+ if isinstance(flip_ratio, list):
+ assert mmcv.is_list_of(flip_ratio, float)
+ assert 0 <= sum(flip_ratio) <= 1
+ elif isinstance(flip_ratio, float):
+ assert 0 <= flip_ratio <= 1
+ elif flip_ratio is None:
+ pass
+ else:
+            raise ValueError('flip_ratio must be None, float, '
+ 'or list of float')
+ self.flip_ratio = flip_ratio
+
+ valid_directions = ['horizontal', 'vertical', 'diagonal']
+ if isinstance(direction, str):
+ assert direction in valid_directions
+ elif isinstance(direction, list):
+ assert mmcv.is_list_of(direction, str)
+ assert set(direction).issubset(set(valid_directions))
+ else:
+ raise ValueError('direction must be either str or list of str')
+ self.direction = direction
+
+ if isinstance(flip_ratio, list):
+ assert len(self.flip_ratio) == len(self.direction)
+
+    def bbox_flip(self, bboxes, img_shape, direction):
+        """Flip bboxes horizontally, vertically, or diagonally.
+
+ Args:
+ bboxes (numpy.ndarray): Bounding boxes, shape (..., 4*k)
+ img_shape (tuple[int]): Image shape (height, width)
+ direction (str): Flip direction. Options are 'horizontal',
+                'vertical', 'diagonal'.
+
+ Returns:
+ numpy.ndarray: Flipped bounding boxes.
+ """
+
+ assert bboxes.shape[-1] % 4 == 0
+ flipped = bboxes.copy()
+ if direction == 'horizontal':
+ w = img_shape[1]
+ flipped[..., 0::4] = w - bboxes[..., 2::4]
+ flipped[..., 2::4] = w - bboxes[..., 0::4]
+ elif direction == 'vertical':
+ h = img_shape[0]
+ flipped[..., 1::4] = h - bboxes[..., 3::4]
+ flipped[..., 3::4] = h - bboxes[..., 1::4]
+ elif direction == 'diagonal':
+ w = img_shape[1]
+ h = img_shape[0]
+ flipped[..., 0::4] = w - bboxes[..., 2::4]
+ flipped[..., 1::4] = h - bboxes[..., 3::4]
+ flipped[..., 2::4] = w - bboxes[..., 0::4]
+ flipped[..., 3::4] = h - bboxes[..., 1::4]
+ else:
+ raise ValueError(f"Invalid flipping direction '{direction}'")
+ return flipped
+
+ def __call__(self, results):
+ """Call function to flip bounding boxes, masks, semantic segmentation
+ maps.
+
+ Args:
+ results (dict): Result dict from loading pipeline.
+
+ Returns:
+ dict: Flipped results, 'flip', 'flip_direction' keys are added \
+ into result dict.
+ """
+
+ if 'flip' not in results:
+ if isinstance(self.direction, list):
+ # None means non-flip
+ direction_list = self.direction + [None]
+ else:
+ # None means non-flip
+ direction_list = [self.direction, None]
+
+ if isinstance(self.flip_ratio, list):
+ non_flip_ratio = 1 - sum(self.flip_ratio)
+ flip_ratio_list = self.flip_ratio + [non_flip_ratio]
+ else:
+ non_flip_ratio = 1 - self.flip_ratio
+ # exclude non-flip
+ single_ratio = self.flip_ratio / (len(direction_list) - 1)
+ flip_ratio_list = [single_ratio] * (len(direction_list) -
+ 1) + [non_flip_ratio]
+
+ cur_dir = np.random.choice(direction_list, p=flip_ratio_list)
+
+ results['flip'] = cur_dir is not None
+ if 'flip_direction' not in results:
+ results['flip_direction'] = cur_dir
+ if results['flip']:
+ # flip image
+ for key in results.get('img_fields', ['img']):
+ results[key] = mmcv.imflip(
+ results[key], direction=results['flip_direction'])
+ # flip bboxes
+ for key in results.get('bbox_fields', []):
+ results[key] = self.bbox_flip(results[key],
+ results['img_shape'],
+ results['flip_direction'])
+ # flip masks
+ for key in results.get('mask_fields', []):
+ results[key] = results[key].flip(results['flip_direction'])
+
+ # flip segs
+ for key in results.get('seg_fields', []):
+ results[key] = mmcv.imflip(
+ results[key], direction=results['flip_direction'])
+ return results
+
+ def __repr__(self):
+ return self.__class__.__name__ + f'(flip_ratio={self.flip_ratio})'
+
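+# Illustrative config sketches (not part of the original mmdet module; the
+# probabilities are assumptions): the first dict flips horizontally with
+# probability 0.5, the second flips horizontally with probability 0.3 and
+# vertically with probability 0.5, matching the modes described above.
+_example_flip_single = dict(type='RandomFlip', flip_ratio=0.5)
+_example_flip_multi = dict(
+    type='RandomFlip',
+    flip_ratio=[0.3, 0.5],
+    direction=['horizontal', 'vertical'])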
+
+@PIPELINES.register_module()
+class Pad(object):
+ """Pad the image & mask.
+
+ There are two padding modes: (1) pad to a fixed size and (2) pad to the
+ minimum size that is divisible by some number.
+    Added keys are "pad_shape", "pad_fixed_size", "pad_size_divisor".
+
+ Args:
+ size (tuple, optional): Fixed padding size.
+ size_divisor (int, optional): The divisor of padded size.
+ pad_val (float, optional): Padding value, 0 by default.
+ """
+
+ def __init__(self, size=None, size_divisor=None, pad_val=0):
+ self.size = size
+ self.size_divisor = size_divisor
+ self.pad_val = pad_val
+ # only one of size and size_divisor should be valid
+ assert size is not None or size_divisor is not None
+ assert size is None or size_divisor is None
+
+ def _pad_img(self, results):
+ """Pad images according to ``self.size``."""
+ for key in results.get('img_fields', ['img']):
+ if self.size is not None:
+ padded_img = mmcv.impad(
+ results[key], shape=self.size, pad_val=self.pad_val)
+ elif self.size_divisor is not None:
+ padded_img = mmcv.impad_to_multiple(
+ results[key], self.size_divisor, pad_val=self.pad_val)
+ results[key] = padded_img
+ results['pad_shape'] = padded_img.shape
+ results['pad_fixed_size'] = self.size
+ results['pad_size_divisor'] = self.size_divisor
+
+ def _pad_masks(self, results):
+ """Pad masks according to ``results['pad_shape']``."""
+ pad_shape = results['pad_shape'][:2]
+ for key in results.get('mask_fields', []):
+ results[key] = results[key].pad(pad_shape, pad_val=self.pad_val)
+
+ def _pad_seg(self, results):
+ """Pad semantic segmentation map according to
+ ``results['pad_shape']``."""
+ for key in results.get('seg_fields', []):
+ results[key] = mmcv.impad(
+ results[key], shape=results['pad_shape'][:2])
+
+ def __call__(self, results):
+ """Call function to pad images, masks, semantic segmentation maps.
+
+ Args:
+ results (dict): Result dict from loading pipeline.
+
+ Returns:
+ dict: Updated result dict.
+ """
+ self._pad_img(results)
+ self._pad_masks(results)
+ self._pad_seg(results)
+ return results
+
+ def __repr__(self):
+ repr_str = self.__class__.__name__
+ repr_str += f'(size={self.size}, '
+ repr_str += f'size_divisor={self.size_divisor}, '
+ repr_str += f'pad_val={self.pad_val})'
+ return repr_str
+
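+# Illustrative config sketch (not part of the original mmdet module): pad to
+# the next multiple of 32; alternatively a fixed `size` could be given, but
+# only one of the two options may be set.
+_example_pad = dict(type='Pad', size_divisor=32)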
+
+@PIPELINES.register_module()
+class Normalize(object):
+ """Normalize the image.
+
+ Added key is "img_norm_cfg".
+
+ Args:
+ mean (sequence): Mean values of 3 channels.
+ std (sequence): Std values of 3 channels.
+ to_rgb (bool): Whether to convert the image from BGR to RGB,
+ default is true.
+ """
+
+ def __init__(self, mean, std, to_rgb=True):
+ self.mean = np.array(mean, dtype=np.float32)
+ self.std = np.array(std, dtype=np.float32)
+ self.to_rgb = to_rgb
+
+ def __call__(self, results):
+ """Call function to normalize images.
+
+ Args:
+ results (dict): Result dict from loading pipeline.
+
+ Returns:
+ dict: Normalized results, 'img_norm_cfg' key is added into
+ result dict.
+ """
+ for key in results.get('img_fields', ['img']):
+ results[key] = mmcv.imnormalize(results[key], self.mean, self.std,
+ self.to_rgb)
+ results['img_norm_cfg'] = dict(
+ mean=self.mean, std=self.std, to_rgb=self.to_rgb)
+ return results
+
+ def __repr__(self):
+ repr_str = self.__class__.__name__
+ repr_str += f'(mean={self.mean}, std={self.std}, to_rgb={self.to_rgb})'
+ return repr_str
+
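+# Illustrative config sketch (not part of the original mmdet module; the
+# mean/std values are the commonly used ImageNet statistics and are an
+# assumption, not defaults of this class).
+_example_normalize = dict(
+    type='Normalize',
+    mean=[123.675, 116.28, 103.53],
+    std=[58.395, 57.12, 57.375],
+    to_rgb=True)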
+
+@PIPELINES.register_module()
+class RandomCrop(object):
+ """Random crop the image & bboxes & masks.
+
+ The absolute `crop_size` is sampled based on `crop_type` and `image_size`,
+ then the cropped results are generated.
+
+ Args:
+ crop_size (tuple): The relative ratio or absolute pixels of
+ height and width.
+ crop_type (str, optional): one of "relative_range", "relative",
+ "absolute", "absolute_range". "relative" randomly crops
+ (h * crop_size[0], w * crop_size[1]) part from an input of size
+ (h, w). "relative_range" uniformly samples relative crop size from
+ range [crop_size[0], 1] and [crop_size[1], 1] for height and width
+ respectively. "absolute" crops from an input with absolute size
+ (crop_size[0], crop_size[1]). "absolute_range" uniformly samples
+ crop_h in range [crop_size[0], min(h, crop_size[1])] and crop_w
+ in range [crop_size[0], min(w, crop_size[1])]. Default "absolute".
+ allow_negative_crop (bool, optional): Whether to allow a crop that does
+ not contain any bbox area. Default False.
+ bbox_clip_border (bool, optional): Whether clip the objects outside
+ the border of the image. Defaults to True.
+
+ Note:
+ - If the image is smaller than the absolute crop size, return the
+ original image.
+ - The keys for bboxes, labels and masks must be aligned. That is,
+ `gt_bboxes` corresponds to `gt_labels` and `gt_masks`, and
+ `gt_bboxes_ignore` corresponds to `gt_labels_ignore` and
+ `gt_masks_ignore`.
+ - If the crop does not contain any gt-bbox region and
+ `allow_negative_crop` is set to False, skip this image.
+ """
+
+ def __init__(self,
+ crop_size,
+ crop_type='absolute',
+ allow_negative_crop=False,
+ bbox_clip_border=True):
+ if crop_type not in [
+ 'relative_range', 'relative', 'absolute', 'absolute_range'
+ ]:
+ raise ValueError(f'Invalid crop_type {crop_type}.')
+ if crop_type in ['absolute', 'absolute_range']:
+ assert crop_size[0] > 0 and crop_size[1] > 0
+ assert isinstance(crop_size[0], int) and isinstance(
+ crop_size[1], int)
+ else:
+ assert 0 < crop_size[0] <= 1 and 0 < crop_size[1] <= 1
+ self.crop_size = crop_size
+ self.crop_type = crop_type
+ self.allow_negative_crop = allow_negative_crop
+ self.bbox_clip_border = bbox_clip_border
+ # The key correspondence from bboxes to labels and masks.
+ self.bbox2label = {
+ 'gt_bboxes': 'gt_labels',
+ 'gt_bboxes_ignore': 'gt_labels_ignore'
+ }
+ self.bbox2mask = {
+ 'gt_bboxes': 'gt_masks',
+ 'gt_bboxes_ignore': 'gt_masks_ignore'
+ }
+
+ def _crop_data(self, results, crop_size, allow_negative_crop):
+ """Function to randomly crop images, bounding boxes, masks, semantic
+ segmentation maps.
+
+ Args:
+ results (dict): Result dict from loading pipeline.
+ crop_size (tuple): Expected absolute size after cropping, (h, w).
+ allow_negative_crop (bool): Whether to allow a crop that does not
+ contain any bbox area. Default to False.
+
+ Returns:
+ dict: Randomly cropped results, 'img_shape' key in result dict is
+ updated according to crop size.
+ """
+ assert crop_size[0] > 0 and crop_size[1] > 0
+ for key in results.get('img_fields', ['img']):
+ img = results[key]
+ margin_h = max(img.shape[0] - crop_size[0], 0)
+ margin_w = max(img.shape[1] - crop_size[1], 0)
+ offset_h = np.random.randint(0, margin_h + 1)
+ offset_w = np.random.randint(0, margin_w + 1)
+ crop_y1, crop_y2 = offset_h, offset_h + crop_size[0]
+ crop_x1, crop_x2 = offset_w, offset_w + crop_size[1]
+
+ # crop the image
+ img = img[crop_y1:crop_y2, crop_x1:crop_x2, ...]
+ img_shape = img.shape
+ results[key] = img
+ results['img_shape'] = img_shape
+
+ # crop bboxes accordingly and clip to the image boundary
+ for key in results.get('bbox_fields', []):
+ # e.g. gt_bboxes and gt_bboxes_ignore
+ bbox_offset = np.array([offset_w, offset_h, offset_w, offset_h],
+ dtype=np.float32)
+ bboxes = results[key] - bbox_offset
+ if self.bbox_clip_border:
+ bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, img_shape[1])
+ bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, img_shape[0])
+ valid_inds = (bboxes[:, 2] > bboxes[:, 0]) & (
+ bboxes[:, 3] > bboxes[:, 1])
+ # If the crop does not contain any gt-bbox area and
+ # allow_negative_crop is False, skip this image.
+ if (key == 'gt_bboxes' and not valid_inds.any()
+ and not allow_negative_crop):
+ return None
+ results[key] = bboxes[valid_inds, :]
+ # label fields. e.g. gt_labels and gt_labels_ignore
+ label_key = self.bbox2label.get(key)
+ if label_key in results:
+ results[label_key] = results[label_key][valid_inds]
+
+ # mask fields, e.g. gt_masks and gt_masks_ignore
+ mask_key = self.bbox2mask.get(key)
+ if mask_key in results:
+ results[mask_key] = results[mask_key][
+ valid_inds.nonzero()[0]].crop(
+ np.asarray([crop_x1, crop_y1, crop_x2, crop_y2]))
+
+ # crop semantic seg
+ for key in results.get('seg_fields', []):
+ results[key] = results[key][crop_y1:crop_y2, crop_x1:crop_x2]
+
+ return results
+
+ def _get_crop_size(self, image_size):
+ """Randomly generates the absolute crop size based on `crop_type` and
+ `image_size`.
+
+ Args:
+ image_size (tuple): (h, w).
+
+ Returns:
+ crop_size (tuple): (crop_h, crop_w) in absolute pixels.
+ """
+ h, w = image_size
+ if self.crop_type == 'absolute':
+ return (min(self.crop_size[0], h), min(self.crop_size[1], w))
+ elif self.crop_type == 'absolute_range':
+ assert self.crop_size[0] <= self.crop_size[1]
+ crop_h = np.random.randint(
+ min(h, self.crop_size[0]),
+ min(h, self.crop_size[1]) + 1)
+ crop_w = np.random.randint(
+ min(w, self.crop_size[0]),
+ min(w, self.crop_size[1]) + 1)
+ return crop_h, crop_w
+ elif self.crop_type == 'relative':
+ crop_h, crop_w = self.crop_size
+ return int(h * crop_h + 0.5), int(w * crop_w + 0.5)
+ elif self.crop_type == 'relative_range':
+ crop_size = np.asarray(self.crop_size, dtype=np.float32)
+ crop_h, crop_w = crop_size + np.random.rand(2) * (1 - crop_size)
+ return int(h * crop_h + 0.5), int(w * crop_w + 0.5)
+
+ def __call__(self, results):
+ """Call function to randomly crop images, bounding boxes, masks,
+ semantic segmentation maps.
+
+ Args:
+ results (dict): Result dict from loading pipeline.
+
+ Returns:
+ dict: Randomly cropped results, 'img_shape' key in result dict is
+ updated according to crop size.
+ """
+ image_size = results['img'].shape[:2]
+ crop_size = self._get_crop_size(image_size)
+ results = self._crop_data(results, crop_size, self.allow_negative_crop)
+ return results
+
+ def __repr__(self):
+ repr_str = self.__class__.__name__
+ repr_str += f'(crop_size={self.crop_size}, '
+ repr_str += f'crop_type={self.crop_type}, '
+ repr_str += f'allow_negative_crop={self.allow_negative_crop}, '
+ repr_str += f'bbox_clip_border={self.bbox_clip_border})'
+ return repr_str
+
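+# Illustrative config sketches (not part of the original mmdet module; the
+# sizes are assumptions): 'absolute' crops a fixed (h, w) window, while
+# 'relative_range' samples the crop fraction per axis from [crop_size, 1].
+_example_crop_absolute = dict(
+    type='RandomCrop', crop_size=(512, 512), crop_type='absolute')
+_example_crop_relative = dict(
+    type='RandomCrop', crop_size=(0.6, 0.6), crop_type='relative_range')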
+
+@PIPELINES.register_module()
+class SegRescale(object):
+ """Rescale semantic segmentation maps.
+
+ Args:
+ scale_factor (float): The scale factor of the final output.
+ backend (str): Image rescale backend, choices are 'cv2' and 'pillow'.
+ These two backends generates slightly different results. Defaults
+ to 'cv2'.
+ """
+
+ def __init__(self, scale_factor=1, backend='cv2'):
+ self.scale_factor = scale_factor
+ self.backend = backend
+
+ def __call__(self, results):
+ """Call function to scale the semantic segmentation map.
+
+ Args:
+ results (dict): Result dict from loading pipeline.
+
+ Returns:
+ dict: Result dict with semantic segmentation map scaled.
+ """
+
+ for key in results.get('seg_fields', []):
+ if self.scale_factor != 1:
+ results[key] = mmcv.imrescale(
+ results[key],
+ self.scale_factor,
+ interpolation='nearest',
+ backend=self.backend)
+ return results
+
+ def __repr__(self):
+ return self.__class__.__name__ + f'(scale_factor={self.scale_factor})'
+
+
+@PIPELINES.register_module()
+class PhotoMetricDistortion(object):
+    """Apply photometric distortion to the image sequentially; every
+    transformation is applied with a probability of 0.5. Random contrast is
+    applied either second or second to last.
+
+ 1. random brightness
+ 2. random contrast (mode 0)
+ 3. convert color from BGR to HSV
+ 4. random saturation
+ 5. random hue
+ 6. convert color from HSV to BGR
+ 7. random contrast (mode 1)
+ 8. randomly swap channels
+
+ Args:
+ brightness_delta (int): delta of brightness.
+ contrast_range (tuple): range of contrast.
+ saturation_range (tuple): range of saturation.
+ hue_delta (int): delta of hue.
+ """
+
+ def __init__(self,
+ brightness_delta=32,
+ contrast_range=(0.5, 1.5),
+ saturation_range=(0.5, 1.5),
+ hue_delta=18):
+ self.brightness_delta = brightness_delta
+ self.contrast_lower, self.contrast_upper = contrast_range
+ self.saturation_lower, self.saturation_upper = saturation_range
+ self.hue_delta = hue_delta
+
+ def __call__(self, results):
+ """Call function to perform photometric distortion on images.
+
+ Args:
+ results (dict): Result dict from loading pipeline.
+
+ Returns:
+ dict: Result dict with images distorted.
+ """
+
+ if 'img_fields' in results:
+ assert results['img_fields'] == ['img'], \
+ 'Only single img_fields is allowed'
+ img = results['img']
+ assert img.dtype == np.float32, \
+            'PhotoMetricDistortion needs the input image to be of dtype '\
+            'np.float32, please set "to_float32=True" in the '\
+            '"LoadImageFromFile" pipeline'
+ # random brightness
+ if random.randint(2):
+ delta = random.uniform(-self.brightness_delta,
+ self.brightness_delta)
+ img += delta
+
+ # mode == 0 --> do random contrast first
+ # mode == 1 --> do random contrast last
+ mode = random.randint(2)
+ if mode == 1:
+ if random.randint(2):
+ alpha = random.uniform(self.contrast_lower,
+ self.contrast_upper)
+ img *= alpha
+
+ # convert color from BGR to HSV
+ img = mmcv.bgr2hsv(img)
+
+ # random saturation
+ if random.randint(2):
+ img[..., 1] *= random.uniform(self.saturation_lower,
+ self.saturation_upper)
+
+ # random hue
+ if random.randint(2):
+ img[..., 0] += random.uniform(-self.hue_delta, self.hue_delta)
+ img[..., 0][img[..., 0] > 360] -= 360
+ img[..., 0][img[..., 0] < 0] += 360
+
+ # convert color from HSV to BGR
+ img = mmcv.hsv2bgr(img)
+
+ # random contrast
+ if mode == 0:
+ if random.randint(2):
+ alpha = random.uniform(self.contrast_lower,
+ self.contrast_upper)
+ img *= alpha
+
+ # randomly swap channels
+ if random.randint(2):
+ img = img[..., random.permutation(3)]
+
+ results['img'] = img
+ return results
+
+ def __repr__(self):
+ repr_str = self.__class__.__name__
+ repr_str += f'(\nbrightness_delta={self.brightness_delta},\n'
+ repr_str += 'contrast_range='
+ repr_str += f'{(self.contrast_lower, self.contrast_upper)},\n'
+ repr_str += 'saturation_range='
+ repr_str += f'{(self.saturation_lower, self.saturation_upper)},\n'
+ repr_str += f'hue_delta={self.hue_delta})'
+ return repr_str
+
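+# Illustrative config sketch (not part of the original mmdet module): the
+# values below simply restate the constructor defaults; note the transform
+# expects a float32 image (see the assertion in __call__).
+_example_photo_distortion = dict(
+    type='PhotoMetricDistortion',
+    brightness_delta=32,
+    contrast_range=(0.5, 1.5),
+    saturation_range=(0.5, 1.5),
+    hue_delta=18)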
+
+@PIPELINES.register_module()
+class Expand(object):
+    """Randomly expand the image & bboxes.
+
+    Randomly place the original image on a canvas of ``ratio`` x original
+    image size filled with mean values, where ``ratio`` is sampled from
+    ``ratio_range``.
+
+ Args:
+ mean (tuple): mean value of dataset.
+        to_rgb (bool): whether to convert the order of mean to align with RGB.
+        ratio_range (tuple): range of expand ratio.
+        seg_ignore_label (int, optional): label used to fill the expanded area
+            of segmentation maps. Default: None.
+        prob (float): probability of applying this transformation.
+ """
+
+ def __init__(self,
+ mean=(0, 0, 0),
+ to_rgb=True,
+ ratio_range=(1, 4),
+ seg_ignore_label=None,
+ prob=0.5):
+ self.to_rgb = to_rgb
+ self.ratio_range = ratio_range
+ if to_rgb:
+ self.mean = mean[::-1]
+ else:
+ self.mean = mean
+ self.min_ratio, self.max_ratio = ratio_range
+ self.seg_ignore_label = seg_ignore_label
+ self.prob = prob
+
+ def __call__(self, results):
+ """Call function to expand images, bounding boxes.
+
+ Args:
+ results (dict): Result dict from loading pipeline.
+
+ Returns:
+ dict: Result dict with images, bounding boxes expanded
+ """
+
+ if random.uniform(0, 1) > self.prob:
+ return results
+
+ if 'img_fields' in results:
+ assert results['img_fields'] == ['img'], \
+ 'Only single img_fields is allowed'
+ img = results['img']
+
+ h, w, c = img.shape
+ ratio = random.uniform(self.min_ratio, self.max_ratio)
+ # speedup expand when meets large image
+ if np.all(self.mean == self.mean[0]):
+ expand_img = np.empty((int(h * ratio), int(w * ratio), c),
+ img.dtype)
+ expand_img.fill(self.mean[0])
+ else:
+ expand_img = np.full((int(h * ratio), int(w * ratio), c),
+ self.mean,
+ dtype=img.dtype)
+ left = int(random.uniform(0, w * ratio - w))
+ top = int(random.uniform(0, h * ratio - h))
+ expand_img[top:top + h, left:left + w] = img
+
+ results['img'] = expand_img
+ # expand bboxes
+ for key in results.get('bbox_fields', []):
+ results[key] = results[key] + np.tile(
+ (left, top), 2).astype(results[key].dtype)
+
+ # expand masks
+ for key in results.get('mask_fields', []):
+ results[key] = results[key].expand(
+ int(h * ratio), int(w * ratio), top, left)
+
+ # expand segs
+ for key in results.get('seg_fields', []):
+ gt_seg = results[key]
+ expand_gt_seg = np.full((int(h * ratio), int(w * ratio)),
+ self.seg_ignore_label,
+ dtype=gt_seg.dtype)
+ expand_gt_seg[top:top + h, left:left + w] = gt_seg
+ results[key] = expand_gt_seg
+ return results
+
+ def __repr__(self):
+ repr_str = self.__class__.__name__
+ repr_str += f'(mean={self.mean}, to_rgb={self.to_rgb}, '
+ repr_str += f'ratio_range={self.ratio_range}, '
+ repr_str += f'seg_ignore_label={self.seg_ignore_label})'
+ return repr_str
+
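+# Illustrative config sketch (not part of the original mmdet module; the mean
+# values are an assumption): the expanded canvas is filled with `mean`, so it
+# should match the Normalize settings used in the same pipeline.
+_example_expand = dict(
+    type='Expand',
+    mean=[123.675, 116.28, 103.53],
+    to_rgb=True,
+    ratio_range=(1, 4))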
+
+@PIPELINES.register_module()
+class MinIoURandomCrop(object):
+    """Randomly crop the image & bboxes; each cropped patch must satisfy a
+    minimum IoU requirement with the original bboxes, where the IoU threshold
+    is randomly selected from ``min_ious``.
+
+ Args:
+ min_ious (tuple): minimum IoU threshold for all intersections with
+ bounding boxes
+ min_crop_size (float): minimum crop's size (i.e. h,w := a*h, a*w,
+ where a >= min_crop_size).
+ bbox_clip_border (bool, optional): Whether clip the objects outside
+ the border of the image. Defaults to True.
+
+ Note:
+ The keys for bboxes, labels and masks should be paired. That is, \
+ `gt_bboxes` corresponds to `gt_labels` and `gt_masks`, and \
+ `gt_bboxes_ignore` to `gt_labels_ignore` and `gt_masks_ignore`.
+ """
+
+ def __init__(self,
+ min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
+ min_crop_size=0.3,
+ bbox_clip_border=True):
+        # mode 1 means returning the original image without cropping
+ self.min_ious = min_ious
+ self.sample_mode = (1, *min_ious, 0)
+ self.min_crop_size = min_crop_size
+ self.bbox_clip_border = bbox_clip_border
+ self.bbox2label = {
+ 'gt_bboxes': 'gt_labels',
+ 'gt_bboxes_ignore': 'gt_labels_ignore'
+ }
+ self.bbox2mask = {
+ 'gt_bboxes': 'gt_masks',
+ 'gt_bboxes_ignore': 'gt_masks_ignore'
+ }
+
+ def __call__(self, results):
+ """Call function to crop images and bounding boxes with minimum IoU
+ constraint.
+
+ Args:
+ results (dict): Result dict from loading pipeline.
+
+ Returns:
+ dict: Result dict with images and bounding boxes cropped, \
+ 'img_shape' key is updated.
+ """
+
+ if 'img_fields' in results:
+ assert results['img_fields'] == ['img'], \
+ 'Only single img_fields is allowed'
+ img = results['img']
+ assert 'bbox_fields' in results
+ boxes = [results[key] for key in results['bbox_fields']]
+ boxes = np.concatenate(boxes, 0)
+ h, w, c = img.shape
+ while True:
+ mode = random.choice(self.sample_mode)
+ self.mode = mode
+ if mode == 1:
+ return results
+
+ min_iou = mode
+ for i in range(50):
+ new_w = random.uniform(self.min_crop_size * w, w)
+ new_h = random.uniform(self.min_crop_size * h, h)
+
+ # h / w in [0.5, 2]
+ if new_h / new_w < 0.5 or new_h / new_w > 2:
+ continue
+
+ left = random.uniform(w - new_w)
+ top = random.uniform(h - new_h)
+
+ patch = np.array(
+ (int(left), int(top), int(left + new_w), int(top + new_h)))
+ # Line or point crop is not allowed
+ if patch[2] == patch[0] or patch[3] == patch[1]:
+ continue
+ overlaps = bbox_overlaps(
+ patch.reshape(-1, 4), boxes.reshape(-1, 4)).reshape(-1)
+ if len(overlaps) > 0 and overlaps.min() < min_iou:
+ continue
+
+ # center of boxes should inside the crop img
+ # only adjust boxes and instance masks when the gt is not empty
+ if len(overlaps) > 0:
+ # adjust boxes
+ def is_center_of_bboxes_in_patch(boxes, patch):
+ center = (boxes[:, :2] + boxes[:, 2:]) / 2
+ mask = ((center[:, 0] > patch[0]) *
+ (center[:, 1] > patch[1]) *
+ (center[:, 0] < patch[2]) *
+ (center[:, 1] < patch[3]))
+ return mask
+
+ mask = is_center_of_bboxes_in_patch(boxes, patch)
+ if not mask.any():
+ continue
+ for key in results.get('bbox_fields', []):
+ boxes = results[key].copy()
+ mask = is_center_of_bboxes_in_patch(boxes, patch)
+ boxes = boxes[mask]
+ if self.bbox_clip_border:
+ boxes[:, 2:] = boxes[:, 2:].clip(max=patch[2:])
+ boxes[:, :2] = boxes[:, :2].clip(min=patch[:2])
+ boxes -= np.tile(patch[:2], 2)
+
+ results[key] = boxes
+ # labels
+ label_key = self.bbox2label.get(key)
+ if label_key in results:
+ results[label_key] = results[label_key][mask]
+
+ # mask fields
+ mask_key = self.bbox2mask.get(key)
+ if mask_key in results:
+ results[mask_key] = results[mask_key][
+ mask.nonzero()[0]].crop(patch)
+ # adjust the img no matter whether the gt is empty before crop
+ img = img[patch[1]:patch[3], patch[0]:patch[2]]
+ results['img'] = img
+ results['img_shape'] = img.shape
+
+ # seg fields
+ for key in results.get('seg_fields', []):
+ results[key] = results[key][patch[1]:patch[3],
+ patch[0]:patch[2]]
+ return results
+
+ def __repr__(self):
+ repr_str = self.__class__.__name__
+ repr_str += f'(min_ious={self.min_ious}, '
+ repr_str += f'min_crop_size={self.min_crop_size}, '
+ repr_str += f'bbox_clip_border={self.bbox_clip_border})'
+ return repr_str
+
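+# Illustrative config sketch (not part of the original mmdet module): the
+# values restate the constructor defaults; a candidate crop is accepted only
+# if every overlap with the ground-truth boxes reaches the sampled IoU mode.
+_example_min_iou_crop = dict(
+    type='MinIoURandomCrop',
+    min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
+    min_crop_size=0.3)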
+
+@PIPELINES.register_module()
+class Corrupt(object):
+ """Corruption augmentation.
+
+    Corruption transforms implemented based on the ``imagecorruptions``
+    package.
+
+ Args:
+ corruption (str): Corruption name.
+ severity (int, optional): The severity of corruption. Default: 1.
+ """
+
+ def __init__(self, corruption, severity=1):
+ self.corruption = corruption
+ self.severity = severity
+
+ def __call__(self, results):
+ """Call function to corrupt image.
+
+ Args:
+ results (dict): Result dict from loading pipeline.
+
+ Returns:
+ dict: Result dict with images corrupted.
+ """
+
+ if corrupt is None:
+ raise RuntimeError('imagecorruptions is not installed')
+ if 'img_fields' in results:
+ assert results['img_fields'] == ['img'], \
+ 'Only single img_fields is allowed'
+ results['img'] = corrupt(
+ results['img'].astype(np.uint8),
+ corruption_name=self.corruption,
+ severity=self.severity)
+ return results
+
+ def __repr__(self):
+ repr_str = self.__class__.__name__
+ repr_str += f'(corruption={self.corruption}, '
+ repr_str += f'severity={self.severity})'
+ return repr_str
+
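+# Illustrative config sketch (not part of the original mmdet module): requires
+# the optional `imagecorruptions` package; the corruption name below is an
+# assumption taken from that package's corruption list.
+_example_corrupt = dict(type='Corrupt', corruption='gaussian_noise', severity=1)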
+
+@PIPELINES.register_module()
+class Albu(object):
+ """Albumentation augmentation.
+
+    Adds custom transformations from the Albumentations library.
+    Please visit https://albumentations.readthedocs.io
+    for more information.
+
+    An example of ``transforms`` is as follows:
+
+ .. code-block::
+
+ [
+ dict(
+ type='ShiftScaleRotate',
+ shift_limit=0.0625,
+ scale_limit=0.0,
+ rotate_limit=0,
+ interpolation=1,
+ p=0.5),
+ dict(
+ type='RandomBrightnessContrast',
+ brightness_limit=[0.1, 0.3],
+ contrast_limit=[0.1, 0.3],
+ p=0.2),
+ dict(type='ChannelShuffle', p=0.1),
+ dict(
+ type='OneOf',
+ transforms=[
+ dict(type='Blur', blur_limit=3, p=1.0),
+ dict(type='MedianBlur', blur_limit=3, p=1.0)
+ ],
+ p=0.1),
+ ]
+
+ Args:
+        transforms (list[dict]): A list of albu transformations.
+        bbox_params (dict): Bbox_params for albumentation `Compose`.
+        keymap (dict): Contains {'input key': 'albumentation-style key'}.
+        update_pad_shape (bool): Whether to update 'pad_shape' with the
+            augmented image shape.
+        skip_img_without_anno (bool): Whether to skip the image if no
+            annotations are left after augmentation.
+ """
+
+ def __init__(self,
+ transforms,
+ bbox_params=None,
+ keymap=None,
+ update_pad_shape=False,
+ skip_img_without_anno=False):
+ if Compose is None:
+ raise RuntimeError('albumentations is not installed')
+
+ # Args will be modified later, copying it will be safer
+ transforms = copy.deepcopy(transforms)
+ if bbox_params is not None:
+ bbox_params = copy.deepcopy(bbox_params)
+ if keymap is not None:
+ keymap = copy.deepcopy(keymap)
+ self.transforms = transforms
+ self.filter_lost_elements = False
+ self.update_pad_shape = update_pad_shape
+ self.skip_img_without_anno = skip_img_without_anno
+
+ # A simple workaround to remove masks without boxes
+ if (isinstance(bbox_params, dict) and 'label_fields' in bbox_params
+ and 'filter_lost_elements' in bbox_params):
+ self.filter_lost_elements = True
+ self.origin_label_fields = bbox_params['label_fields']
+ bbox_params['label_fields'] = ['idx_mapper']
+ del bbox_params['filter_lost_elements']
+
+ self.bbox_params = (
+ self.albu_builder(bbox_params) if bbox_params else None)
+ self.aug = Compose([self.albu_builder(t) for t in self.transforms],
+ bbox_params=self.bbox_params)
+
+ if not keymap:
+ self.keymap_to_albu = {
+ 'img': 'image',
+ 'gt_masks': 'masks',
+ 'gt_bboxes': 'bboxes'
+ }
+ else:
+ self.keymap_to_albu = keymap
+ self.keymap_back = {v: k for k, v in self.keymap_to_albu.items()}
+
+ def albu_builder(self, cfg):
+ """Import a module from albumentations.
+
+ It inherits some of :func:`build_from_cfg` logic.
+
+ Args:
+ cfg (dict): Config dict. It should at least contain the key "type".
+
+ Returns:
+ obj: The constructed object.
+ """
+
+ assert isinstance(cfg, dict) and 'type' in cfg
+ args = cfg.copy()
+
+ obj_type = args.pop('type')
+ if mmcv.is_str(obj_type):
+ if albumentations is None:
+ raise RuntimeError('albumentations is not installed')
+ obj_cls = getattr(albumentations, obj_type)
+ elif inspect.isclass(obj_type):
+ obj_cls = obj_type
+ else:
+ raise TypeError(
+ f'type must be a str or valid type, but got {type(obj_type)}')
+
+ if 'transforms' in args:
+ args['transforms'] = [
+ self.albu_builder(transform)
+ for transform in args['transforms']
+ ]
+
+ return obj_cls(**args)
+
+ @staticmethod
+ def mapper(d, keymap):
+ """Dictionary mapper. Renames keys according to keymap provided.
+
+ Args:
+ d (dict): old dict
+ keymap (dict): {'old_key':'new_key'}
+ Returns:
+ dict: new dict.
+ """
+
+ updated_dict = {}
+        for k, v in d.items():
+            new_k = keymap.get(k, k)
+            updated_dict[new_k] = v
+ return updated_dict
+
+ def __call__(self, results):
+ # dict to albumentations format
+ results = self.mapper(results, self.keymap_to_albu)
+ # TODO: add bbox_fields
+ if 'bboxes' in results:
+ # to list of boxes
+ if isinstance(results['bboxes'], np.ndarray):
+ results['bboxes'] = [x for x in results['bboxes']]
+ # add pseudo-field for filtration
+ if self.filter_lost_elements:
+ results['idx_mapper'] = np.arange(len(results['bboxes']))
+
+ # TODO: Support mask structure in albu
+ if 'masks' in results:
+ if isinstance(results['masks'], PolygonMasks):
+ raise NotImplementedError(
+ 'Albu only supports BitMap masks now')
+ ori_masks = results['masks']
+ if albumentations.__version__ < '0.5':
+ results['masks'] = results['masks'].masks
+ else:
+ results['masks'] = [mask for mask in results['masks'].masks]
+
+ results = self.aug(**results)
+
+ if 'bboxes' in results:
+ if isinstance(results['bboxes'], list):
+ results['bboxes'] = np.array(
+ results['bboxes'], dtype=np.float32)
+ results['bboxes'] = results['bboxes'].reshape(-1, 4)
+
+ # filter label_fields
+ if self.filter_lost_elements:
+
+ for label in self.origin_label_fields:
+ results[label] = np.array(
+ [results[label][i] for i in results['idx_mapper']])
+ if 'masks' in results:
+ results['masks'] = np.array(
+ [results['masks'][i] for i in results['idx_mapper']])
+ results['masks'] = ori_masks.__class__(
+ results['masks'], results['image'].shape[0],
+ results['image'].shape[1])
+
+ if (not len(results['idx_mapper'])
+ and self.skip_img_without_anno):
+ return None
+
+ if 'gt_labels' in results:
+ if isinstance(results['gt_labels'], list):
+ results['gt_labels'] = np.array(results['gt_labels'])
+ results['gt_labels'] = results['gt_labels'].astype(np.int64)
+
+ # back to the original format
+ results = self.mapper(results, self.keymap_back)
+
+ # update final shape
+ if self.update_pad_shape:
+ results['pad_shape'] = results['img'].shape
+
+ return results
+
+ def __repr__(self):
+ repr_str = self.__class__.__name__ + f'(transforms={self.transforms})'
+ return repr_str
+
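+# Illustrative config sketch (not part of the original mmdet module; the
+# transform list and keymap are assumptions): the wrapper takes an
+# albumentations-style transform list plus a keymap from mmdet keys to
+# albumentations keys.
+_example_albu = dict(
+    type='Albu',
+    transforms=[dict(type='ChannelShuffle', p=0.1)],
+    keymap=dict(img='image'),
+    update_pad_shape=False,
+    skip_img_without_anno=False)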
+
+@PIPELINES.register_module()
+class RandomCenterCropPad(object):
+ """Random center crop and random around padding for CornerNet.
+
+    This operation generates a randomly cropped image from the original image
+    and pads it simultaneously. Different from :class:`RandomCrop`, the output
+    shape may not strictly equal ``crop_size``: a random value is chosen from
+    ``ratios``, so the output shape can be larger or smaller than ``crop_size``.
+    The padding operation also differs from :class:`Pad` in that around padding
+    is used instead of right-bottom padding.
+
+ The relation between output image (padding image) and original image:
+
+ .. code:: text
+
+ output image
+
+ +----------------------------+
+ | padded area |
+ +------|----------------------------|----------+
+ | | cropped area | |
+ | | +---------------+ | |
+ | | | . center | | | original image
+ | | | range | | |
+ | | +---------------+ | |
+ +------|----------------------------|----------+
+ | padded area |
+ +----------------------------+
+
+ There are 5 main areas in the figure:
+
+    - output image: output image of this operation, also called padding
+      image in the following instructions.
+ - original image: input image of this operation.
+ - padded area: non-intersect area of output image and original image.
+ - cropped area: the overlap of output image and original image.
+    - center range: a smaller area from which the random center is chosen.
+      The center range is computed from ``border`` and the original image's
+      shape so that the random center is not too close to the original
+      image's border.
+
+    This operation also acts differently in train and test mode; the
+    pipelines are summarized below.
+
+ Train pipeline:
+
+ 1. Choose a ``random_ratio`` from ``ratios``, the shape of padding image
+ will be ``random_ratio * crop_size``.
+ 2. Choose a ``random_center`` in center range.
+ 3. Generate padding image with center matches the ``random_center``.
+ 4. Initialize the padding image with pixel value equals to ``mean``.
+ 5. Copy the cropped area to padding image.
+ 6. Refine annotations.
+
+ Test pipeline:
+
+ 1. Compute output shape according to ``test_pad_mode``.
+ 2. Generate padding image with center matches the original image
+ center.
+ 3. Initialize the padding image with pixel value equals to ``mean``.
+ 4. Copy the ``cropped area`` to padding image.
+
+ Args:
+        crop_size (tuple | None): expected size after crop; the final size
+            will be computed according to the ratio. Requires (h, w) in train
+            mode, and None in test mode.
+ ratios (tuple): random select a ratio from tuple and crop image to
+ (crop_size[0] * ratio) * (crop_size[1] * ratio).
+ Only available in train mode.
+ border (int): max distance from center select area to image border.
+ Only available in train mode.
+ mean (sequence): Mean values of 3 channels.
+ std (sequence): Std values of 3 channels.
+ to_rgb (bool): Whether to convert the image from BGR to RGB.
+        test_mode (bool): whether to involve random variables in the transform.
+            In train mode, crop_size is fixed, and the center coords and ratio
+            are randomly selected from predefined lists. In test mode,
+            crop_size is the image's original shape, and the center coords and
+            ratio are fixed.
+ test_pad_mode (tuple): padding method and padding shape value, only
+ available in test mode. Default is using 'logical_or' with
+ 127 as padding shape value.
+
+ - 'logical_or': final_shape = input_shape | padding_shape_value
+ - 'size_divisor': final_shape = int(
+ ceil(input_shape / padding_shape_value) * padding_shape_value)
+ bbox_clip_border (bool, optional): Whether clip the objects outside
+ the border of the image. Defaults to True.
+ """
+
+ def __init__(self,
+ crop_size=None,
+ ratios=(0.9, 1.0, 1.1),
+ border=128,
+ mean=None,
+ std=None,
+ to_rgb=None,
+ test_mode=False,
+ test_pad_mode=('logical_or', 127),
+ bbox_clip_border=True):
+ if test_mode:
+ assert crop_size is None, 'crop_size must be None in test mode'
+ assert ratios is None, 'ratios must be None in test mode'
+ assert border is None, 'border must be None in test mode'
+ assert isinstance(test_pad_mode, (list, tuple))
+ assert test_pad_mode[0] in ['logical_or', 'size_divisor']
+ else:
+ assert isinstance(crop_size, (list, tuple))
+ assert crop_size[0] > 0 and crop_size[1] > 0, (
+ 'crop_size must > 0 in train mode')
+ assert isinstance(ratios, (list, tuple))
+ assert test_pad_mode is None, (
+ 'test_pad_mode must be None in train mode')
+
+ self.crop_size = crop_size
+ self.ratios = ratios
+ self.border = border
+ # We do not set default value to mean, std and to_rgb because these
+ # hyper-parameters are easy to forget but could affect the performance.
+ # Please use the same setting as Normalize for performance assurance.
+ assert mean is not None and std is not None and to_rgb is not None
+ self.to_rgb = to_rgb
+ self.input_mean = mean
+ self.input_std = std
+ if to_rgb:
+ self.mean = mean[::-1]
+ self.std = std[::-1]
+ else:
+ self.mean = mean
+ self.std = std
+ self.test_mode = test_mode
+ self.test_pad_mode = test_pad_mode
+ self.bbox_clip_border = bbox_clip_border
+
+ def _get_border(self, border, size):
+ """Get final border for the target size.
+
+        This function generates a ``final_border`` according to the image's
+        shape. The area between ``final_border`` and ``size - final_border``
+        is the ``center range``. The center is randomly chosen from the
+        ``center range`` so that it is not too close to the original image's
+        border.
+ Also ``center range`` should be larger than 0.
+
+ Args:
+ border (int): The initial border, default is 128.
+ size (int): The width or height of original image.
+ Returns:
+ int: The final border.
+ """
+ k = 2 * border / size
+ i = pow(2, np.ceil(np.log2(np.ceil(k))) + (k == int(k)))
+ return border // i
+
+ def _filter_boxes(self, patch, boxes):
+ """Check whether the center of each box is in the patch.
+
+ Args:
+ patch (list[int]): The cropped area, [left, top, right, bottom].
+ boxes (numpy array, (N x 4)): Ground truth boxes.
+
+ Returns:
+ mask (numpy array, (N,)): Each box is inside or outside the patch.
+ """
+ center = (boxes[:, :2] + boxes[:, 2:]) / 2
+ mask = (center[:, 0] > patch[0]) * (center[:, 1] > patch[1]) * (
+ center[:, 0] < patch[2]) * (
+ center[:, 1] < patch[3])
+ return mask
+
+    def _crop_image_and_paste(self, image, center, size):
+        """Crop image with a given center and size, then paste the cropped
+        image to a blank image with the two centers aligned.
+
+        This function is equivalent to generating a blank image with ``size``
+        as its shape, then covering it on the original image with the two
+        centers (the center of the blank image and the random center of the
+        original image) aligned. The overlap area is pasted from the original
+        image and the outside area is filled with the ``mean pixel``.
+
+ Args:
+ image (np array, H x W x C): Original image.
+ center (list[int]): Target crop center coord.
+ size (list[int]): Target crop size. [target_h, target_w]
+
+ Returns:
+ cropped_img (np array, target_h x target_w x C): Cropped image.
+ border (np array, 4): The distance of four border of
+ ``cropped_img`` to the original image area, [top, bottom,
+ left, right]
+ patch (list[int]): The cropped area, [left, top, right, bottom].
+ """
+ center_y, center_x = center
+ target_h, target_w = size
+ img_h, img_w, img_c = image.shape
+
+ x0 = max(0, center_x - target_w // 2)
+ x1 = min(center_x + target_w // 2, img_w)
+ y0 = max(0, center_y - target_h // 2)
+ y1 = min(center_y + target_h // 2, img_h)
+ patch = np.array((int(x0), int(y0), int(x1), int(y1)))
+
+ left, right = center_x - x0, x1 - center_x
+ top, bottom = center_y - y0, y1 - center_y
+
+ cropped_center_y, cropped_center_x = target_h // 2, target_w // 2
+ cropped_img = np.zeros((target_h, target_w, img_c), dtype=image.dtype)
+ for i in range(img_c):
+ cropped_img[:, :, i] += self.mean[i]
+ y_slice = slice(cropped_center_y - top, cropped_center_y + bottom)
+ x_slice = slice(cropped_center_x - left, cropped_center_x + right)
+ cropped_img[y_slice, x_slice, :] = image[y0:y1, x0:x1, :]
+
+ border = np.array([
+ cropped_center_y - top, cropped_center_y + bottom,
+ cropped_center_x - left, cropped_center_x + right
+ ],
+ dtype=np.float32)
+
+ return cropped_img, border, patch
+
+ def _train_aug(self, results):
+ """Random crop and around padding the original image.
+
+ Args:
+            results (dict): Image information in the augmentation pipeline.
+
+ Returns:
+ results (dict): The updated dict.
+ """
+ img = results['img']
+ h, w, c = img.shape
+ boxes = results['gt_bboxes']
+ while True:
+ scale = random.choice(self.ratios)
+ new_h = int(self.crop_size[0] * scale)
+ new_w = int(self.crop_size[1] * scale)
+ h_border = self._get_border(self.border, h)
+ w_border = self._get_border(self.border, w)
+
+ for i in range(50):
+ center_x = random.randint(low=w_border, high=w - w_border)
+ center_y = random.randint(low=h_border, high=h - h_border)
+
+ cropped_img, border, patch = self._crop_image_and_paste(
+ img, [center_y, center_x], [new_h, new_w])
+
+ mask = self._filter_boxes(patch, boxes)
+                # if the image has no valid bbox, any crop patch is valid.
+ if not mask.any() and len(boxes) > 0:
+ continue
+
+ results['img'] = cropped_img
+ results['img_shape'] = cropped_img.shape
+ results['pad_shape'] = cropped_img.shape
+
+ x0, y0, x1, y1 = patch
+
+ left_w, top_h = center_x - x0, center_y - y0
+ cropped_center_x, cropped_center_y = new_w // 2, new_h // 2
+
+ # crop bboxes accordingly and clip to the image boundary
+ for key in results.get('bbox_fields', []):
+ mask = self._filter_boxes(patch, results[key])
+ bboxes = results[key][mask]
+ bboxes[:, 0:4:2] += cropped_center_x - left_w - x0
+ bboxes[:, 1:4:2] += cropped_center_y - top_h - y0
+ if self.bbox_clip_border:
+ bboxes[:, 0:4:2] = np.clip(bboxes[:, 0:4:2], 0, new_w)
+ bboxes[:, 1:4:2] = np.clip(bboxes[:, 1:4:2], 0, new_h)
+ keep = (bboxes[:, 2] > bboxes[:, 0]) & (
+ bboxes[:, 3] > bboxes[:, 1])
+ bboxes = bboxes[keep]
+ results[key] = bboxes
+ if key in ['gt_bboxes']:
+ if 'gt_labels' in results:
+ labels = results['gt_labels'][mask]
+ labels = labels[keep]
+ results['gt_labels'] = labels
+ if 'gt_masks' in results:
+ raise NotImplementedError(
+ 'RandomCenterCropPad only supports bbox.')
+
+ # crop semantic seg
+ for key in results.get('seg_fields', []):
+ raise NotImplementedError(
+ 'RandomCenterCropPad only supports bbox.')
+ return results
+
+ def _test_aug(self, results):
+ """Around padding the original image without cropping.
+
+ The padding mode and value are from ``test_pad_mode``.
+
+ Args:
+            results (dict): Image information in the augmentation pipeline.
+
+ Returns:
+ results (dict): The updated dict.
+ """
+ img = results['img']
+ h, w, c = img.shape
+ results['img_shape'] = img.shape
+ if self.test_pad_mode[0] in ['logical_or']:
+ target_h = h | self.test_pad_mode[1]
+ target_w = w | self.test_pad_mode[1]
+ elif self.test_pad_mode[0] in ['size_divisor']:
+ divisor = self.test_pad_mode[1]
+ target_h = int(np.ceil(h / divisor)) * divisor
+ target_w = int(np.ceil(w / divisor)) * divisor
+ else:
+ raise NotImplementedError(
+                'RandomCenterCropPad only supports two testing pad modes: '
+                'logical_or and size_divisor.')
+
+ cropped_img, border, _ = self._crop_image_and_paste(
+ img, [h // 2, w // 2], [target_h, target_w])
+ results['img'] = cropped_img
+ results['pad_shape'] = cropped_img.shape
+ results['border'] = border
+ return results
+
+ def __call__(self, results):
+ img = results['img']
+ assert img.dtype == np.float32, (
+            'RandomCenterCropPad needs the input image to be of dtype '
+            'np.float32, please set "to_float32=True" in the '
+            '"LoadImageFromFile" pipeline')
+ h, w, c = img.shape
+ assert c == len(self.mean)
+ if self.test_mode:
+ return self._test_aug(results)
+ else:
+ return self._train_aug(results)
+
+ def __repr__(self):
+ repr_str = self.__class__.__name__
+ repr_str += f'(crop_size={self.crop_size}, '
+ repr_str += f'ratios={self.ratios}, '
+ repr_str += f'border={self.border}, '
+ repr_str += f'mean={self.input_mean}, '
+ repr_str += f'std={self.input_std}, '
+ repr_str += f'to_rgb={self.to_rgb}, '
+ repr_str += f'test_mode={self.test_mode}, '
+ repr_str += f'test_pad_mode={self.test_pad_mode}, '
+ repr_str += f'bbox_clip_border={self.bbox_clip_border})'
+ return repr_str
+
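+# Illustrative config sketch (not part of the original mmdet module; the
+# values are assumptions): in train mode `crop_size`, `ratios` and `border`
+# are required and `test_pad_mode` must be None, while test mode reverses
+# those requirements.
+_example_center_crop_train = dict(
+    type='RandomCenterCropPad',
+    crop_size=(511, 511),
+    ratios=(0.9, 1.0, 1.1),
+    border=128,
+    mean=[0, 0, 0],
+    std=[1, 1, 1],
+    to_rgb=True,
+    test_mode=False,
+    test_pad_mode=None)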
+
+@PIPELINES.register_module()
+class CutOut(object):
+ """CutOut operation.
+
+    Randomly drop some regions of the image, as used in the Cutout
+    augmentation.
+
+ Args:
+ n_holes (int | tuple[int, int]): Number of regions to be dropped.
+ If it is given as a list, number of holes will be randomly
+ selected from the closed interval [`n_holes[0]`, `n_holes[1]`].
+ cutout_shape (tuple[int, int] | list[tuple[int, int]]): The candidate
+ shape of dropped regions. It can be `tuple[int, int]` to use a
+ fixed cutout shape, or `list[tuple[int, int]]` to randomly choose
+ shape from the list.
+ cutout_ratio (tuple[float, float] | list[tuple[float, float]]): The
+ candidate ratio of dropped regions. It can be `tuple[float, float]`
+ to use a fixed ratio or `list[tuple[float, float]]` to randomly
+ choose ratio from the list. Please note that `cutout_shape`
+ and `cutout_ratio` cannot be both given at the same time.
+ fill_in (tuple[float, float, float] | tuple[int, int, int]): The value
+ of pixel to fill in the dropped regions. Default: (0, 0, 0).
+ """
+
+ def __init__(self,
+ n_holes,
+ cutout_shape=None,
+ cutout_ratio=None,
+ fill_in=(0, 0, 0)):
+
+ assert (cutout_shape is None) ^ (cutout_ratio is None), \
+ 'Either cutout_shape or cutout_ratio should be specified.'
+ assert (isinstance(cutout_shape, (list, tuple))
+ or isinstance(cutout_ratio, (list, tuple)))
+ if isinstance(n_holes, tuple):
+ assert len(n_holes) == 2 and 0 <= n_holes[0] < n_holes[1]
+ else:
+ n_holes = (n_holes, n_holes)
+ self.n_holes = n_holes
+ self.fill_in = fill_in
+ self.with_ratio = cutout_ratio is not None
+ self.candidates = cutout_ratio if self.with_ratio else cutout_shape
+ if not isinstance(self.candidates, list):
+ self.candidates = [self.candidates]
+
+ def __call__(self, results):
+ """Call function to drop some regions of image."""
+ h, w, c = results['img'].shape
+ n_holes = np.random.randint(self.n_holes[0], self.n_holes[1] + 1)
+ for _ in range(n_holes):
+ x1 = np.random.randint(0, w)
+ y1 = np.random.randint(0, h)
+ index = np.random.randint(0, len(self.candidates))
+ if not self.with_ratio:
+ cutout_w, cutout_h = self.candidates[index]
+ else:
+ cutout_w = int(self.candidates[index][0] * w)
+ cutout_h = int(self.candidates[index][1] * h)
+
+ x2 = np.clip(x1 + cutout_w, 0, w)
+ y2 = np.clip(y1 + cutout_h, 0, h)
+ results['img'][y1:y2, x1:x2, :] = self.fill_in
+
+ return results
+
+ def __repr__(self):
+ repr_str = self.__class__.__name__
+ repr_str += f'(n_holes={self.n_holes}, '
+ repr_str += (f'cutout_ratio={self.candidates}, ' if self.with_ratio
+ else f'cutout_shape={self.candidates}, ')
+ repr_str += f'fill_in={self.fill_in})'
+ return repr_str
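+
+
+# Illustrative config sketch (not part of the original mmdet module; the
+# values are assumptions): either cutout_shape or cutout_ratio is given,
+# never both, and n_holes may be a fixed int or an inclusive (low, high) range.
+_example_cutout = dict(
+    type='CutOut',
+    n_holes=(1, 3),
+    cutout_ratio=[(0.05, 0.05), (0.1, 0.1)])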
diff --git a/annotator/uniformer/mmdet/datasets/samplers/__init__.py b/annotator/uniformer/mmdet/datasets/samplers/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..2596aeb2ccfc85b58624713c04453d34e94a4062
--- /dev/null
+++ b/annotator/uniformer/mmdet/datasets/samplers/__init__.py
@@ -0,0 +1,4 @@
+from .distributed_sampler import DistributedSampler
+from .group_sampler import DistributedGroupSampler, GroupSampler
+
+__all__ = ['DistributedSampler', 'DistributedGroupSampler', 'GroupSampler']
diff --git a/annotator/uniformer/mmdet/datasets/samplers/distributed_sampler.py b/annotator/uniformer/mmdet/datasets/samplers/distributed_sampler.py
new file mode 100644
index 0000000000000000000000000000000000000000..cc61019484655ee2829f7908dc442caa20cf1d54
--- /dev/null
+++ b/annotator/uniformer/mmdet/datasets/samplers/distributed_sampler.py
@@ -0,0 +1,39 @@
+import math
+
+import torch
+from torch.utils.data import DistributedSampler as _DistributedSampler
+
+
+class DistributedSampler(_DistributedSampler):
+
+ def __init__(self,
+ dataset,
+ num_replicas=None,
+ rank=None,
+ shuffle=True,
+ seed=0):
+ super().__init__(
+ dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle)
+        # for compatibility with PyTorch 1.3+
+ self.seed = seed if seed is not None else 0
+
+ def __iter__(self):
+ # deterministically shuffle based on epoch
+ if self.shuffle:
+ g = torch.Generator()
+ g.manual_seed(self.epoch + self.seed)
+ indices = torch.randperm(len(self.dataset), generator=g).tolist()
+ else:
+ indices = torch.arange(len(self.dataset)).tolist()
+
+ # add extra samples to make it evenly divisible
+ # in case that indices is shorter than half of total_size
+ indices = (indices *
+ math.ceil(self.total_size / len(indices)))[:self.total_size]
+ assert len(indices) == self.total_size
+
+ # subsample
+ indices = indices[self.rank:self.total_size:self.num_replicas]
+ assert len(indices) == self.num_samples
+
+ return iter(indices)
diff --git a/annotator/uniformer/mmdet/datasets/samplers/group_sampler.py b/annotator/uniformer/mmdet/datasets/samplers/group_sampler.py
new file mode 100644
index 0000000000000000000000000000000000000000..f88cf3439446a2eb7d8656388ddbe93196315f5b
--- /dev/null
+++ b/annotator/uniformer/mmdet/datasets/samplers/group_sampler.py
@@ -0,0 +1,148 @@
+from __future__ import division
+import math
+
+import numpy as np
+import torch
+from mmcv.runner import get_dist_info
+from torch.utils.data import Sampler
+
+
+class GroupSampler(Sampler):
+
+ def __init__(self, dataset, samples_per_gpu=1):
+ assert hasattr(dataset, 'flag')
+ self.dataset = dataset
+ self.samples_per_gpu = samples_per_gpu
+ self.flag = dataset.flag.astype(np.int64)
+ self.group_sizes = np.bincount(self.flag)
+ self.num_samples = 0
+ for i, size in enumerate(self.group_sizes):
+ self.num_samples += int(np.ceil(
+ size / self.samples_per_gpu)) * self.samples_per_gpu
+
+ def __iter__(self):
+ indices = []
+ for i, size in enumerate(self.group_sizes):
+ if size == 0:
+ continue
+ indice = np.where(self.flag == i)[0]
+ assert len(indice) == size
+ np.random.shuffle(indice)
+ num_extra = int(np.ceil(size / self.samples_per_gpu)
+ ) * self.samples_per_gpu - len(indice)
+ indice = np.concatenate(
+ [indice, np.random.choice(indice, num_extra)])
+ indices.append(indice)
+ indices = np.concatenate(indices)
+ indices = [
+ indices[i * self.samples_per_gpu:(i + 1) * self.samples_per_gpu]
+ for i in np.random.permutation(
+ range(len(indices) // self.samples_per_gpu))
+ ]
+ indices = np.concatenate(indices)
+ indices = indices.astype(np.int64).tolist()
+ assert len(indices) == self.num_samples
+ return iter(indices)
+
+ def __len__(self):
+ return self.num_samples
+
+
+class DistributedGroupSampler(Sampler):
+ """Sampler that restricts data loading to a subset of the dataset.
+
+ It is especially useful in conjunction with
+    :class:`torch.nn.parallel.DistributedDataParallel`. In such a case, each
+    process can pass a DistributedGroupSampler instance as a DataLoader
+    sampler, and load a subset of the original dataset that is exclusive to it.
+
+ .. note::
+ Dataset is assumed to be of constant size.
+
+ Arguments:
+ dataset: Dataset used for sampling.
+ num_replicas (optional): Number of processes participating in
+ distributed training.
+ rank (optional): Rank of the current process within num_replicas.
+ seed (int, optional): random seed used to shuffle the sampler if
+ ``shuffle=True``. This number should be identical across all
+ processes in the distributed group. Default: 0.
+ """
+
+ def __init__(self,
+ dataset,
+ samples_per_gpu=1,
+ num_replicas=None,
+ rank=None,
+ seed=0):
+ _rank, _num_replicas = get_dist_info()
+ if num_replicas is None:
+ num_replicas = _num_replicas
+ if rank is None:
+ rank = _rank
+ self.dataset = dataset
+ self.samples_per_gpu = samples_per_gpu
+ self.num_replicas = num_replicas
+ self.rank = rank
+ self.epoch = 0
+ self.seed = seed if seed is not None else 0
+
+ assert hasattr(self.dataset, 'flag')
+ self.flag = self.dataset.flag
+ self.group_sizes = np.bincount(self.flag)
+
+ self.num_samples = 0
+        for i, size in enumerate(self.group_sizes):
+            self.num_samples += int(
+                math.ceil(size * 1.0 / self.samples_per_gpu /
+                          self.num_replicas)) * self.samples_per_gpu
+ self.total_size = self.num_samples * self.num_replicas
+
+ def __iter__(self):
+ # deterministically shuffle based on epoch
+ g = torch.Generator()
+ g.manual_seed(self.epoch + self.seed)
+
+ indices = []
+ for i, size in enumerate(self.group_sizes):
+ if size > 0:
+ indice = np.where(self.flag == i)[0]
+ assert len(indice) == size
+ # add .numpy() to avoid bug when selecting indice in parrots.
+ # TODO: check whether torch.randperm() can be replaced by
+ # numpy.random.permutation().
+ indice = indice[list(
+ torch.randperm(int(size), generator=g).numpy())].tolist()
+ extra = int(
+ math.ceil(
+ size * 1.0 / self.samples_per_gpu / self.num_replicas)
+ ) * self.samples_per_gpu * self.num_replicas - len(indice)
+ # pad indice
+ tmp = indice.copy()
+ for _ in range(extra // size):
+ indice.extend(tmp)
+ indice.extend(tmp[:extra % size])
+ indices.extend(indice)
+
+ assert len(indices) == self.total_size
+
+ indices = [
+ indices[j] for i in list(
+ torch.randperm(
+ len(indices) // self.samples_per_gpu, generator=g))
+ for j in range(i * self.samples_per_gpu, (i + 1) *
+ self.samples_per_gpu)
+ ]
+
+ # subsample
+ offset = self.num_samples * self.rank
+ indices = indices[offset:offset + self.num_samples]
+ assert len(indices) == self.num_samples
+
+ return iter(indices)
+
+ def __len__(self):
+ return self.num_samples
+
+ def set_epoch(self, epoch):
+ self.epoch = epoch
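+
+
+# Usage sketch (not part of the original file; the names and values below are
+# illustrative): the sampler is normally paired with a DataLoader whose
+# batch_size equals samples_per_gpu, e.g.
+#   sampler = DistributedGroupSampler(dataset, samples_per_gpu=2)
+#   loader = DataLoader(dataset, batch_size=2, sampler=sampler)
+# and `set_epoch` is called at the start of every epoch to reshuffle.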
diff --git a/annotator/uniformer/mmdet/datasets/utils.py b/annotator/uniformer/mmdet/datasets/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..157c9a2e1fe009552fdec9b9c9e7a33ed46d51ff
--- /dev/null
+++ b/annotator/uniformer/mmdet/datasets/utils.py
@@ -0,0 +1,158 @@
+import copy
+import warnings
+
+from mmcv.cnn import VGG
+from mmcv.runner.hooks import HOOKS, Hook
+
+from mmdet.datasets.builder import PIPELINES
+from mmdet.datasets.pipelines import LoadAnnotations, LoadImageFromFile
+from mmdet.models.dense_heads import GARPNHead, RPNHead
+from mmdet.models.roi_heads.mask_heads import FusedSemanticHead
+
+
+def replace_ImageToTensor(pipelines):
+    """Replace the ImageToTensor transform in a data pipeline with
+    DefaultFormatBundle, which is normally useful for batch inference.
+
+ Args:
+ pipelines (list[dict]): Data pipeline configs.
+
+ Returns:
+ list: The new pipeline list with all ImageToTensor replaced by
+ DefaultFormatBundle.
+
+ Examples:
+ >>> pipelines = [
+ ... dict(type='LoadImageFromFile'),
+ ... dict(
+ ... type='MultiScaleFlipAug',
+ ... img_scale=(1333, 800),
+ ... flip=False,
+ ... transforms=[
+ ... dict(type='Resize', keep_ratio=True),
+ ... dict(type='RandomFlip'),
+ ... dict(type='Normalize', mean=[0, 0, 0], std=[1, 1, 1]),
+ ... dict(type='Pad', size_divisor=32),
+ ... dict(type='ImageToTensor', keys=['img']),
+ ... dict(type='Collect', keys=['img']),
+ ... ])
+ ... ]
+ >>> expected_pipelines = [
+ ... dict(type='LoadImageFromFile'),
+ ... dict(
+ ... type='MultiScaleFlipAug',
+ ... img_scale=(1333, 800),
+ ... flip=False,
+ ... transforms=[
+ ... dict(type='Resize', keep_ratio=True),
+ ... dict(type='RandomFlip'),
+ ... dict(type='Normalize', mean=[0, 0, 0], std=[1, 1, 1]),
+ ... dict(type='Pad', size_divisor=32),
+ ... dict(type='DefaultFormatBundle'),
+ ... dict(type='Collect', keys=['img']),
+ ... ])
+ ... ]
+ >>> assert expected_pipelines == replace_ImageToTensor(pipelines)
+ """
+ pipelines = copy.deepcopy(pipelines)
+ for i, pipeline in enumerate(pipelines):
+ if pipeline['type'] == 'MultiScaleFlipAug':
+ assert 'transforms' in pipeline
+ pipeline['transforms'] = replace_ImageToTensor(
+ pipeline['transforms'])
+ elif pipeline['type'] == 'ImageToTensor':
+ warnings.warn(
+ '"ImageToTensor" pipeline is replaced by '
+ '"DefaultFormatBundle" for batch inference. It is '
+ 'recommended to manually replace it in the test '
+ 'data pipeline in your config file.', UserWarning)
+ pipelines[i] = {'type': 'DefaultFormatBundle'}
+ return pipelines
+
+
+def get_loading_pipeline(pipeline):
+ """Only keep loading image and annotations related configuration.
+
+ Args:
+ pipeline (list[dict]): Data pipeline configs.
+
+ Returns:
+        list[dict]: The new pipeline list that only keeps the image and
+            annotation loading related configuration.
+
+ Examples:
+ >>> pipelines = [
+ ... dict(type='LoadImageFromFile'),
+ ... dict(type='LoadAnnotations', with_bbox=True),
+ ... dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+ ... dict(type='RandomFlip', flip_ratio=0.5),
+ ... dict(type='Normalize', **img_norm_cfg),
+ ... dict(type='Pad', size_divisor=32),
+ ... dict(type='DefaultFormatBundle'),
+ ... dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
+ ... ]
+ >>> expected_pipelines = [
+ ... dict(type='LoadImageFromFile'),
+ ... dict(type='LoadAnnotations', with_bbox=True)
+ ... ]
+ >>> assert expected_pipelines ==\
+ ... get_loading_pipeline(pipelines)
+ """
+ loading_pipeline_cfg = []
+ for cfg in pipeline:
+ obj_cls = PIPELINES.get(cfg['type'])
+        # TODO: use a more elegant way to distinguish loading modules
+ if obj_cls is not None and obj_cls in (LoadImageFromFile,
+ LoadAnnotations):
+ loading_pipeline_cfg.append(cfg)
+ assert len(loading_pipeline_cfg) == 2, \
+ 'The data pipeline in your config file must include ' \
+ 'loading image and annotations related pipeline.'
+ return loading_pipeline_cfg
+
+
+@HOOKS.register_module()
+class NumClassCheckHook(Hook):
+
+ def _check_head(self, runner):
+ """Check whether the `num_classes` in head matches the length of
+ `CLASSSES` in `dataset`.
+
+ Args:
+ runner (obj:`EpochBasedRunner`): Epoch based Runner.
+ """
+ model = runner.model
+ dataset = runner.data_loader.dataset
+ if dataset.CLASSES is None:
+ runner.logger.warning(
+                f'Please set `CLASSES` '
+                f'in the {dataset.__class__.__name__} and '
+                f'check if it is consistent with the `num_classes` '
+                f'of the head')
+ else:
+ for name, module in model.named_modules():
+ if hasattr(module, 'num_classes') and not isinstance(
+ module, (RPNHead, VGG, FusedSemanticHead, GARPNHead)):
+ assert module.num_classes == len(dataset.CLASSES), \
+ (f'The `num_classes` ({module.num_classes}) in '
+ f'{module.__class__.__name__} of '
+                         f'{model.__class__.__name__} does not match '
+                         f'the length of `CLASSES` '
+                         f'({len(dataset.CLASSES)}) in '
+ f'{dataset.__class__.__name__}')
+
+ def before_train_epoch(self, runner):
+ """Check whether the training dataset is compatible with head.
+
+ Args:
+ runner (obj:`EpochBasedRunner`): Epoch based Runner.
+ """
+ self._check_head(runner)
+
+ def before_val_epoch(self, runner):
+ """Check whether the dataset in val epoch is compatible with head.
+
+ Args:
+ runner (obj:`EpochBasedRunner`): Epoch based Runner.
+ """
+ self._check_head(runner)
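+
+
+if __name__ == '__main__':
+    # Illustrative sketch only, not part of upstream mmdet: run
+    # `replace_ImageToTensor` on a nested test pipeline and print the result,
+    # showing that only the `ImageToTensor` step inside `MultiScaleFlipAug`
+    # is swapped for `DefaultFormatBundle`. The pipeline below is made up.
+    test_pipeline = [
+        dict(type='LoadImageFromFile'),
+        dict(
+            type='MultiScaleFlipAug',
+            img_scale=(1333, 800),
+            flip=False,
+            transforms=[
+                dict(type='Resize', keep_ratio=True),
+                dict(type='ImageToTensor', keys=['img']),
+                dict(type='Collect', keys=['img']),
+            ])
+    ]
+    for step in replace_ImageToTensor(test_pipeline)[1]['transforms']:
+        print(step)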
diff --git a/annotator/uniformer/mmdet/datasets/voc.py b/annotator/uniformer/mmdet/datasets/voc.py
new file mode 100644
index 0000000000000000000000000000000000000000..abd4cb8947238936faff48fc92c093c8ae06daff
--- /dev/null
+++ b/annotator/uniformer/mmdet/datasets/voc.py
@@ -0,0 +1,93 @@
+from collections import OrderedDict
+
+from mmcv.utils import print_log
+
+from mmdet.core import eval_map, eval_recalls
+from .builder import DATASETS
+from .xml_style import XMLDataset
+
+
+@DATASETS.register_module()
+class VOCDataset(XMLDataset):
+
+ CLASSES = ('aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car',
+ 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse',
+ 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train',
+ 'tvmonitor')
+
+ def __init__(self, **kwargs):
+ super(VOCDataset, self).__init__(**kwargs)
+ if 'VOC2007' in self.img_prefix:
+ self.year = 2007
+ elif 'VOC2012' in self.img_prefix:
+ self.year = 2012
+ else:
+ raise ValueError('Cannot infer dataset year from img_prefix')
+
+ def evaluate(self,
+ results,
+ metric='mAP',
+ logger=None,
+ proposal_nums=(100, 300, 1000),
+ iou_thr=0.5,
+ scale_ranges=None):
+ """Evaluate in VOC protocol.
+
+ Args:
+ results (list[list | tuple]): Testing results of the dataset.
+ metric (str | list[str]): Metrics to be evaluated. Options are
+ 'mAP', 'recall'.
+ logger (logging.Logger | str, optional): Logger used for printing
+ related information during evaluation. Default: None.
+ proposal_nums (Sequence[int]): Proposal number used for evaluating
+ recalls, such as recall@100, recall@1000.
+ Default: (100, 300, 1000).
+ iou_thr (float | list[float]): IoU threshold. Default: 0.5.
+ scale_ranges (list[tuple], optional): Scale ranges for evaluating
+ mAP. If not specified, all bounding boxes would be included in
+ evaluation. Default: None.
+
+ Returns:
+ dict[str, float]: AP/recall metrics.
+ """
+
+ if not isinstance(metric, str):
+ assert len(metric) == 1
+ metric = metric[0]
+ allowed_metrics = ['mAP', 'recall']
+ if metric not in allowed_metrics:
+ raise KeyError(f'metric {metric} is not supported')
+ annotations = [self.get_ann_info(i) for i in range(len(self))]
+ eval_results = OrderedDict()
+ iou_thrs = [iou_thr] if isinstance(iou_thr, float) else iou_thr
+ if metric == 'mAP':
+ assert isinstance(iou_thrs, list)
+ if self.year == 2007:
+ ds_name = 'voc07'
+ else:
+ ds_name = self.CLASSES
+ mean_aps = []
+ for iou_thr in iou_thrs:
+ print_log(f'\n{"-" * 15}iou_thr: {iou_thr}{"-" * 15}')
+ mean_ap, _ = eval_map(
+ results,
+ annotations,
+ scale_ranges=None,
+ iou_thr=iou_thr,
+ dataset=ds_name,
+ logger=logger)
+ mean_aps.append(mean_ap)
+ eval_results[f'AP{int(iou_thr * 100):02d}'] = round(mean_ap, 3)
+ eval_results['mAP'] = sum(mean_aps) / len(mean_aps)
+ elif metric == 'recall':
+ gt_bboxes = [ann['bboxes'] for ann in annotations]
+ recalls = eval_recalls(
+ gt_bboxes, results, proposal_nums, iou_thr, logger=logger)
+ for i, num in enumerate(proposal_nums):
+                for j, iou in enumerate(iou_thrs):
+ eval_results[f'recall@{num}@{iou}'] = recalls[i, j]
+ if recalls.shape[1] > 1:
+ ar = recalls.mean(axis=1)
+ for i, num in enumerate(proposal_nums):
+ eval_results[f'AR@{num}'] = ar[i]
+ return eval_results
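+
+
+if __name__ == '__main__':
+    # Illustrative sketch only, not part of upstream mmdet: `evaluate` above
+    # reports one AP entry per IoU threshold plus their mean, using the key
+    # format printed here (e.g. AP50 / AP75 for thresholds 0.5 / 0.75).
+    # A typical call, assuming `dataset` is a constructed VOCDataset and
+    # `results` comes from a detector test loop, would be:
+    #     dataset.evaluate(results, metric='mAP', iou_thr=[0.5, 0.75])
+    for iou_thr in (0.5, 0.75):
+        print(f'AP{int(iou_thr * 100):02d}')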
diff --git a/annotator/uniformer/mmdet/datasets/wider_face.py b/annotator/uniformer/mmdet/datasets/wider_face.py
new file mode 100644
index 0000000000000000000000000000000000000000..3a13907db87a9986a7d701837259a0b712fc9dca
--- /dev/null
+++ b/annotator/uniformer/mmdet/datasets/wider_face.py
@@ -0,0 +1,51 @@
+import os.path as osp
+import xml.etree.ElementTree as ET
+
+import mmcv
+
+from .builder import DATASETS
+from .xml_style import XMLDataset
+
+
+@DATASETS.register_module()
+class WIDERFaceDataset(XMLDataset):
+ """Reader for the WIDER Face dataset in PASCAL VOC format.
+
+ Conversion scripts can be found in
+ https://github.com/sovrasov/wider-face-pascal-voc-annotations
+ """
+ CLASSES = ('face', )
+
+ def __init__(self, **kwargs):
+ super(WIDERFaceDataset, self).__init__(**kwargs)
+
+ def load_annotations(self, ann_file):
+ """Load annotation from WIDERFace XML style annotation file.
+
+ Args:
+ ann_file (str): Path of XML file.
+
+ Returns:
+ list[dict]: Annotation info from XML file.
+ """
+
+ data_infos = []
+ img_ids = mmcv.list_from_file(ann_file)
+ for img_id in img_ids:
+ filename = f'{img_id}.jpg'
+ xml_path = osp.join(self.img_prefix, 'Annotations',
+ f'{img_id}.xml')
+ tree = ET.parse(xml_path)
+ root = tree.getroot()
+ size = root.find('size')
+ width = int(size.find('width').text)
+ height = int(size.find('height').text)
+ folder = root.find('folder').text
+ data_infos.append(
+ dict(
+ id=img_id,
+ filename=osp.join(folder, filename),
+ width=width,
+ height=height))
+
+ return data_infos
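+
+
+if __name__ == '__main__':
+    # Illustrative sketch only, not part of upstream mmdet: the minimal XML
+    # layout `load_annotations` above relies on: a <size> block plus a
+    # <folder> tag that is joined with "<img_id>.jpg" to form the file name.
+    # The sample annotation below is made up.
+    sample_xml = (
+        '<annotation>'
+        '<folder>0--Parade</folder>'
+        '<size><width>1024</width><height>768</height></size>'
+        '</annotation>')
+    root = ET.fromstring(sample_xml)
+    size = root.find('size')
+    print(root.find('folder').text,
+          int(size.find('width').text),
+          int(size.find('height').text))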
diff --git a/annotator/uniformer/mmdet/datasets/xml_style.py b/annotator/uniformer/mmdet/datasets/xml_style.py
new file mode 100644
index 0000000000000000000000000000000000000000..71069488b0f6da3b37e588228f44460ce5f00679
--- /dev/null
+++ b/annotator/uniformer/mmdet/datasets/xml_style.py
@@ -0,0 +1,170 @@
+import os.path as osp
+import xml.etree.ElementTree as ET
+
+import mmcv
+import numpy as np
+from PIL import Image
+
+from .builder import DATASETS
+from .custom import CustomDataset
+
+
+@DATASETS.register_module()
+class XMLDataset(CustomDataset):
+ """XML dataset for detection.
+
+ Args:
+ min_size (int | float, optional): The minimum size of bounding
+ boxes in the images. If the size of a bounding box is less than
+            ``min_size``, it will be added to the ignored field.
+ """
+
+ def __init__(self, min_size=None, **kwargs):
+ assert self.CLASSES or kwargs.get(
+ 'classes', None), 'CLASSES in `XMLDataset` can not be None.'
+ super(XMLDataset, self).__init__(**kwargs)
+ self.cat2label = {cat: i for i, cat in enumerate(self.CLASSES)}
+ self.min_size = min_size
+
+ def load_annotations(self, ann_file):
+ """Load annotation from XML style ann_file.
+
+ Args:
+ ann_file (str): Path of XML file.
+
+ Returns:
+ list[dict]: Annotation info from XML file.
+ """
+
+ data_infos = []
+ img_ids = mmcv.list_from_file(ann_file)
+ for img_id in img_ids:
+ filename = f'JPEGImages/{img_id}.jpg'
+ xml_path = osp.join(self.img_prefix, 'Annotations',
+ f'{img_id}.xml')
+ tree = ET.parse(xml_path)
+ root = tree.getroot()
+ size = root.find('size')
+ if size is not None:
+ width = int(size.find('width').text)
+ height = int(size.find('height').text)
+ else:
+ img_path = osp.join(self.img_prefix, 'JPEGImages',
+ '{}.jpg'.format(img_id))
+ img = Image.open(img_path)
+ width, height = img.size
+ data_infos.append(
+ dict(id=img_id, filename=filename, width=width, height=height))
+
+ return data_infos
+
+ def _filter_imgs(self, min_size=32):
+ """Filter images too small or without annotation."""
+ valid_inds = []
+ for i, img_info in enumerate(self.data_infos):
+ if min(img_info['width'], img_info['height']) < min_size:
+ continue
+ if self.filter_empty_gt:
+ img_id = img_info['id']
+ xml_path = osp.join(self.img_prefix, 'Annotations',
+ f'{img_id}.xml')
+ tree = ET.parse(xml_path)
+ root = tree.getroot()
+ for obj in root.findall('object'):
+ name = obj.find('name').text
+ if name in self.CLASSES:
+ valid_inds.append(i)
+ break
+ else:
+ valid_inds.append(i)
+ return valid_inds
+
+ def get_ann_info(self, idx):
+ """Get annotation from XML file by index.
+
+ Args:
+ idx (int): Index of data.
+
+ Returns:
+ dict: Annotation info of specified index.
+ """
+
+ img_id = self.data_infos[idx]['id']
+ xml_path = osp.join(self.img_prefix, 'Annotations', f'{img_id}.xml')
+ tree = ET.parse(xml_path)
+ root = tree.getroot()
+ bboxes = []
+ labels = []
+ bboxes_ignore = []
+ labels_ignore = []
+ for obj in root.findall('object'):
+ name = obj.find('name').text
+ if name not in self.CLASSES:
+ continue
+ label = self.cat2label[name]
+ difficult = obj.find('difficult')
+ difficult = 0 if difficult is None else int(difficult.text)
+ bnd_box = obj.find('bndbox')
+ # TODO: check whether it is necessary to use int
+ # Coordinates may be float type
+ bbox = [
+ int(float(bnd_box.find('xmin').text)),
+ int(float(bnd_box.find('ymin').text)),
+ int(float(bnd_box.find('xmax').text)),
+ int(float(bnd_box.find('ymax').text))
+ ]
+ ignore = False
+ if self.min_size:
+ assert not self.test_mode
+ w = bbox[2] - bbox[0]
+ h = bbox[3] - bbox[1]
+ if w < self.min_size or h < self.min_size:
+ ignore = True
+ if difficult or ignore:
+ bboxes_ignore.append(bbox)
+ labels_ignore.append(label)
+ else:
+ bboxes.append(bbox)
+ labels.append(label)
+ if not bboxes:
+ bboxes = np.zeros((0, 4))
+ labels = np.zeros((0, ))
+ else:
+ bboxes = np.array(bboxes, ndmin=2) - 1
+ labels = np.array(labels)
+ if not bboxes_ignore:
+ bboxes_ignore = np.zeros((0, 4))
+ labels_ignore = np.zeros((0, ))
+ else:
+ bboxes_ignore = np.array(bboxes_ignore, ndmin=2) - 1
+ labels_ignore = np.array(labels_ignore)
+ ann = dict(
+ bboxes=bboxes.astype(np.float32),
+ labels=labels.astype(np.int64),
+ bboxes_ignore=bboxes_ignore.astype(np.float32),
+ labels_ignore=labels_ignore.astype(np.int64))
+ return ann
+
+ def get_cat_ids(self, idx):
+ """Get category ids in XML file by index.
+
+ Args:
+ idx (int): Index of data.
+
+ Returns:
+ list[int]: All categories in the image of specified index.
+ """
+
+ cat_ids = []
+ img_id = self.data_infos[idx]['id']
+ xml_path = osp.join(self.img_prefix, 'Annotations', f'{img_id}.xml')
+ tree = ET.parse(xml_path)
+ root = tree.getroot()
+ for obj in root.findall('object'):
+ name = obj.find('name').text
+ if name not in self.CLASSES:
+ continue
+ label = self.cat2label[name]
+ cat_ids.append(label)
+
+ return cat_ids
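+
+
+if __name__ == '__main__':
+    # Illustrative sketch only, not part of upstream mmdet: `get_ann_info`
+    # above converts 1-based VOC box coordinates to 0-based by subtracting 1,
+    # and falls back to fixed-shape empty arrays so downstream code can rely
+    # on the dtypes regardless of whether an image has any valid objects.
+    boxes = np.array([[1, 1, 101, 201]], ndmin=2) - 1
+    print(boxes.astype(np.float32))
+    print(np.zeros((0, 4)).astype(np.float32).shape,
+          np.zeros((0, )).astype(np.int64).shape)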
diff --git a/annotator/uniformer/mmdet/models/__init__.py b/annotator/uniformer/mmdet/models/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..44ac99855ae52101c91be167fa78d8219fc47259
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/__init__.py
@@ -0,0 +1,16 @@
+from .backbones import * # noqa: F401,F403
+from .builder import (BACKBONES, DETECTORS, HEADS, LOSSES, NECKS,
+ ROI_EXTRACTORS, SHARED_HEADS, build_backbone,
+ build_detector, build_head, build_loss, build_neck,
+ build_roi_extractor, build_shared_head)
+from .dense_heads import * # noqa: F401,F403
+from .detectors import * # noqa: F401,F403
+from .losses import * # noqa: F401,F403
+from .necks import * # noqa: F401,F403
+from .roi_heads import * # noqa: F401,F403
+
+__all__ = [
+ 'BACKBONES', 'NECKS', 'ROI_EXTRACTORS', 'SHARED_HEADS', 'HEADS', 'LOSSES',
+ 'DETECTORS', 'build_backbone', 'build_neck', 'build_roi_extractor',
+ 'build_shared_head', 'build_head', 'build_loss', 'build_detector'
+]
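+
+
+if __name__ == '__main__':
+    # Illustrative sketch only, not part of upstream mmdet: every component
+    # exported here is built from a config dict via its registry. The ResNet
+    # settings below are just an example configuration.
+    backbone = build_backbone(
+        dict(type='ResNet', depth=18, num_stages=4, out_indices=(0, 1, 2, 3)))
+    print(type(backbone).__name__)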
diff --git a/annotator/uniformer/mmdet/models/backbones/__init__.py b/annotator/uniformer/mmdet/models/backbones/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e54b088acf644d285ecbeb1440c414e722b9db58
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/backbones/__init__.py
@@ -0,0 +1,20 @@
+from .darknet import Darknet
+from .detectors_resnet import DetectoRS_ResNet
+from .detectors_resnext import DetectoRS_ResNeXt
+from .hourglass import HourglassNet
+from .hrnet import HRNet
+from .regnet import RegNet
+from .res2net import Res2Net
+from .resnest import ResNeSt
+from .resnet import ResNet, ResNetV1d
+from .resnext import ResNeXt
+from .ssd_vgg import SSDVGG
+from .trident_resnet import TridentResNet
+from .swin_transformer import SwinTransformer
+from .uniformer import UniFormer
+
+__all__ = [
+ 'RegNet', 'ResNet', 'ResNetV1d', 'ResNeXt', 'SSDVGG', 'HRNet', 'Res2Net',
+ 'HourglassNet', 'DetectoRS_ResNet', 'DetectoRS_ResNeXt', 'Darknet',
+ 'ResNeSt', 'TridentResNet', 'SwinTransformer', 'UniFormer'
+]
diff --git a/annotator/uniformer/mmdet/models/backbones/darknet.py b/annotator/uniformer/mmdet/models/backbones/darknet.py
new file mode 100644
index 0000000000000000000000000000000000000000..517fe26259217792e0dad80ca3824d914cfe3904
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/backbones/darknet.py
@@ -0,0 +1,199 @@
+# Copyright (c) 2019 Western Digital Corporation or its affiliates.
+
+import logging
+
+import torch.nn as nn
+from mmcv.cnn import ConvModule, constant_init, kaiming_init
+from mmcv.runner import load_checkpoint
+from torch.nn.modules.batchnorm import _BatchNorm
+
+from ..builder import BACKBONES
+
+
+class ResBlock(nn.Module):
+ """The basic residual block used in Darknet. Each ResBlock consists of two
+ ConvModules and the input is added to the final output. Each ConvModule is
+    composed of Conv, BN, and LeakyReLU. In the YOLOv3 paper, the first conv
+    layer has half as many filters as the second one. The first conv layer
+    uses a 1x1 kernel and the second one uses a 3x3 kernel.
+
+ Args:
+ in_channels (int): The input channels. Must be even.
+ conv_cfg (dict): Config dict for convolution layer. Default: None.
+ norm_cfg (dict): Dictionary to construct and config norm layer.
+ Default: dict(type='BN', requires_grad=True)
+ act_cfg (dict): Config dict for activation layer.
+ Default: dict(type='LeakyReLU', negative_slope=0.1).
+ """
+
+ def __init__(self,
+ in_channels,
+ conv_cfg=None,
+ norm_cfg=dict(type='BN', requires_grad=True),
+ act_cfg=dict(type='LeakyReLU', negative_slope=0.1)):
+ super(ResBlock, self).__init__()
+ assert in_channels % 2 == 0 # ensure the in_channels is even
+ half_in_channels = in_channels // 2
+
+ # shortcut
+ cfg = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)
+
+ self.conv1 = ConvModule(in_channels, half_in_channels, 1, **cfg)
+ self.conv2 = ConvModule(
+ half_in_channels, in_channels, 3, padding=1, **cfg)
+
+ def forward(self, x):
+ residual = x
+ out = self.conv1(x)
+ out = self.conv2(out)
+ out = out + residual
+
+ return out
+
+
+@BACKBONES.register_module()
+class Darknet(nn.Module):
+ """Darknet backbone.
+
+ Args:
+ depth (int): Depth of Darknet. Currently only support 53.
+ out_indices (Sequence[int]): Output from which stages.
+ frozen_stages (int): Stages to be frozen (stop grad and set eval mode).
+ -1 means not freezing any parameters. Default: -1.
+ conv_cfg (dict): Config dict for convolution layer. Default: None.
+ norm_cfg (dict): Dictionary to construct and config norm layer.
+ Default: dict(type='BN', requires_grad=True)
+ act_cfg (dict): Config dict for activation layer.
+ Default: dict(type='LeakyReLU', negative_slope=0.1).
+ norm_eval (bool): Whether to set norm layers to eval mode, namely,
+ freeze running stats (mean and var). Note: Effect on Batch Norm
+ and its variants only.
+
+ Example:
+ >>> from mmdet.models import Darknet
+ >>> import torch
+ >>> self = Darknet(depth=53)
+ >>> self.eval()
+ >>> inputs = torch.rand(1, 3, 416, 416)
+ >>> level_outputs = self.forward(inputs)
+ >>> for level_out in level_outputs:
+ ... print(tuple(level_out.shape))
+ ...
+ (1, 256, 52, 52)
+ (1, 512, 26, 26)
+ (1, 1024, 13, 13)
+ """
+
+ # Dict(depth: (layers, channels))
+ arch_settings = {
+ 53: ((1, 2, 8, 8, 4), ((32, 64), (64, 128), (128, 256), (256, 512),
+ (512, 1024)))
+ }
+
+ def __init__(self,
+ depth=53,
+ out_indices=(3, 4, 5),
+ frozen_stages=-1,
+ conv_cfg=None,
+ norm_cfg=dict(type='BN', requires_grad=True),
+ act_cfg=dict(type='LeakyReLU', negative_slope=0.1),
+ norm_eval=True):
+ super(Darknet, self).__init__()
+ if depth not in self.arch_settings:
+ raise KeyError(f'invalid depth {depth} for darknet')
+ self.depth = depth
+ self.out_indices = out_indices
+ self.frozen_stages = frozen_stages
+ self.layers, self.channels = self.arch_settings[depth]
+
+ cfg = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)
+
+ self.conv1 = ConvModule(3, 32, 3, padding=1, **cfg)
+
+ self.cr_blocks = ['conv1']
+ for i, n_layers in enumerate(self.layers):
+ layer_name = f'conv_res_block{i + 1}'
+ in_c, out_c = self.channels[i]
+ self.add_module(
+ layer_name,
+ self.make_conv_res_block(in_c, out_c, n_layers, **cfg))
+ self.cr_blocks.append(layer_name)
+
+ self.norm_eval = norm_eval
+
+ def forward(self, x):
+ outs = []
+ for i, layer_name in enumerate(self.cr_blocks):
+ cr_block = getattr(self, layer_name)
+ x = cr_block(x)
+ if i in self.out_indices:
+ outs.append(x)
+
+ return tuple(outs)
+
+ def init_weights(self, pretrained=None):
+ if isinstance(pretrained, str):
+ logger = logging.getLogger()
+ load_checkpoint(self, pretrained, strict=False, logger=logger)
+ elif pretrained is None:
+ for m in self.modules():
+ if isinstance(m, nn.Conv2d):
+ kaiming_init(m)
+ elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
+ constant_init(m, 1)
+
+ else:
+ raise TypeError('pretrained must be a str or None')
+
+ def _freeze_stages(self):
+ if self.frozen_stages >= 0:
+ for i in range(self.frozen_stages):
+ m = getattr(self, self.cr_blocks[i])
+ m.eval()
+ for param in m.parameters():
+ param.requires_grad = False
+
+ def train(self, mode=True):
+ super(Darknet, self).train(mode)
+ self._freeze_stages()
+ if mode and self.norm_eval:
+ for m in self.modules():
+ if isinstance(m, _BatchNorm):
+ m.eval()
+
+ @staticmethod
+ def make_conv_res_block(in_channels,
+ out_channels,
+ res_repeat,
+ conv_cfg=None,
+ norm_cfg=dict(type='BN', requires_grad=True),
+ act_cfg=dict(type='LeakyReLU',
+ negative_slope=0.1)):
+ """In Darknet backbone, ConvLayer is usually followed by ResBlock. This
+ function will make that. The Conv layers always have 3x3 filters with
+ stride=2. The number of the filters in Conv layer is the same as the
+ out channels of the ResBlock.
+
+ Args:
+ in_channels (int): The number of input channels.
+ out_channels (int): The number of output channels.
+ res_repeat (int): The number of ResBlocks.
+ conv_cfg (dict): Config dict for convolution layer. Default: None.
+ norm_cfg (dict): Dictionary to construct and config norm layer.
+ Default: dict(type='BN', requires_grad=True)
+ act_cfg (dict): Config dict for activation layer.
+ Default: dict(type='LeakyReLU', negative_slope=0.1).
+ """
+
+ cfg = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)
+
+ model = nn.Sequential()
+ model.add_module(
+ 'conv',
+ ConvModule(
+ in_channels, out_channels, 3, stride=2, padding=1, **cfg))
+ for idx in range(res_repeat):
+ model.add_module('res{}'.format(idx),
+ ResBlock(out_channels, **cfg))
+ return model
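+
+
+if __name__ == '__main__':
+    # Illustrative sketch only, not part of upstream mmdet: a single
+    # conv-res block built by the helper above halves the spatial resolution
+    # (stride-2 conv) while the stacked ResBlocks keep the channel count.
+    import torch
+    block = Darknet.make_conv_res_block(32, 64, res_repeat=2)
+    block.eval()
+    with torch.no_grad():
+        out = block(torch.rand(1, 32, 64, 64))
+    print(tuple(out.shape))  # (1, 64, 32, 32)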
diff --git a/annotator/uniformer/mmdet/models/backbones/detectors_resnet.py b/annotator/uniformer/mmdet/models/backbones/detectors_resnet.py
new file mode 100644
index 0000000000000000000000000000000000000000..519db464493c7c7b60fc34be1d21add2235ec341
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/backbones/detectors_resnet.py
@@ -0,0 +1,305 @@
+import torch.nn as nn
+import torch.utils.checkpoint as cp
+from mmcv.cnn import build_conv_layer, build_norm_layer, constant_init
+
+from ..builder import BACKBONES
+from .resnet import Bottleneck as _Bottleneck
+from .resnet import ResNet
+
+
+class Bottleneck(_Bottleneck):
+ r"""Bottleneck for the ResNet backbone in `DetectoRS
+ `_.
+
+ This bottleneck allows the users to specify whether to use
+ SAC (Switchable Atrous Convolution) and RFP (Recursive Feature Pyramid).
+
+ Args:
+ inplanes (int): The number of input channels.
+ planes (int): The number of output channels before expansion.
+ rfp_inplanes (int, optional): The number of channels from RFP.
+ Default: None. If specified, an additional conv layer will be
+ added for ``rfp_feat``. Otherwise, the structure is the same as
+ base class.
+ sac (dict, optional): Dictionary to construct SAC. Default: None.
+ """
+ expansion = 4
+
+ def __init__(self,
+ inplanes,
+ planes,
+ rfp_inplanes=None,
+ sac=None,
+ **kwargs):
+ super(Bottleneck, self).__init__(inplanes, planes, **kwargs)
+
+ assert sac is None or isinstance(sac, dict)
+ self.sac = sac
+ self.with_sac = sac is not None
+ if self.with_sac:
+ self.conv2 = build_conv_layer(
+ self.sac,
+ planes,
+ planes,
+ kernel_size=3,
+ stride=self.conv2_stride,
+ padding=self.dilation,
+ dilation=self.dilation,
+ bias=False)
+
+ self.rfp_inplanes = rfp_inplanes
+ if self.rfp_inplanes:
+ self.rfp_conv = build_conv_layer(
+ None,
+ self.rfp_inplanes,
+ planes * self.expansion,
+ 1,
+ stride=1,
+ bias=True)
+ self.init_weights()
+
+ def init_weights(self):
+ """Initialize the weights."""
+ if self.rfp_inplanes:
+ constant_init(self.rfp_conv, 0)
+
+ def rfp_forward(self, x, rfp_feat):
+ """The forward function that also takes the RFP features as input."""
+
+ def _inner_forward(x):
+ identity = x
+
+ out = self.conv1(x)
+ out = self.norm1(out)
+ out = self.relu(out)
+
+ if self.with_plugins:
+ out = self.forward_plugin(out, self.after_conv1_plugin_names)
+
+ out = self.conv2(out)
+ out = self.norm2(out)
+ out = self.relu(out)
+
+ if self.with_plugins:
+ out = self.forward_plugin(out, self.after_conv2_plugin_names)
+
+ out = self.conv3(out)
+ out = self.norm3(out)
+
+ if self.with_plugins:
+ out = self.forward_plugin(out, self.after_conv3_plugin_names)
+
+ if self.downsample is not None:
+ identity = self.downsample(x)
+
+ out += identity
+
+ return out
+
+ if self.with_cp and x.requires_grad:
+ out = cp.checkpoint(_inner_forward, x)
+ else:
+ out = _inner_forward(x)
+
+ if self.rfp_inplanes:
+ rfp_feat = self.rfp_conv(rfp_feat)
+ out = out + rfp_feat
+
+ out = self.relu(out)
+
+ return out
+
+
+class ResLayer(nn.Sequential):
+ """ResLayer to build ResNet style backbone for RPF in detectoRS.
+
+ The difference between this module and base class is that we pass
+ ``rfp_inplanes`` to the first block.
+
+ Args:
+ block (nn.Module): block used to build ResLayer.
+ inplanes (int): inplanes of block.
+ planes (int): planes of block.
+ num_blocks (int): number of blocks.
+ stride (int): stride of the first block. Default: 1
+ avg_down (bool): Use AvgPool instead of stride conv when
+ downsampling in the bottleneck. Default: False
+ conv_cfg (dict): dictionary to construct and config conv layer.
+ Default: None
+ norm_cfg (dict): dictionary to construct and config norm layer.
+ Default: dict(type='BN')
+ downsample_first (bool): Downsample at the first block or last block.
+ False for Hourglass, True for ResNet. Default: True
+ rfp_inplanes (int, optional): The number of channels from RFP.
+ Default: None. If specified, an additional conv layer will be
+ added for ``rfp_feat``. Otherwise, the structure is the same as
+ base class.
+ """
+
+ def __init__(self,
+ block,
+ inplanes,
+ planes,
+ num_blocks,
+ stride=1,
+ avg_down=False,
+ conv_cfg=None,
+ norm_cfg=dict(type='BN'),
+ downsample_first=True,
+ rfp_inplanes=None,
+ **kwargs):
+ self.block = block
+ assert downsample_first, f'downsample_first={downsample_first} is ' \
+ 'not supported in DetectoRS'
+
+ downsample = None
+ if stride != 1 or inplanes != planes * block.expansion:
+ downsample = []
+ conv_stride = stride
+ if avg_down and stride != 1:
+ conv_stride = 1
+ downsample.append(
+ nn.AvgPool2d(
+ kernel_size=stride,
+ stride=stride,
+ ceil_mode=True,
+ count_include_pad=False))
+ downsample.extend([
+ build_conv_layer(
+ conv_cfg,
+ inplanes,
+ planes * block.expansion,
+ kernel_size=1,
+ stride=conv_stride,
+ bias=False),
+ build_norm_layer(norm_cfg, planes * block.expansion)[1]
+ ])
+ downsample = nn.Sequential(*downsample)
+
+ layers = []
+ layers.append(
+ block(
+ inplanes=inplanes,
+ planes=planes,
+ stride=stride,
+ downsample=downsample,
+ conv_cfg=conv_cfg,
+ norm_cfg=norm_cfg,
+ rfp_inplanes=rfp_inplanes,
+ **kwargs))
+ inplanes = planes * block.expansion
+ for _ in range(1, num_blocks):
+ layers.append(
+ block(
+ inplanes=inplanes,
+ planes=planes,
+ stride=1,
+ conv_cfg=conv_cfg,
+ norm_cfg=norm_cfg,
+ **kwargs))
+
+ super(ResLayer, self).__init__(*layers)
+
+
+@BACKBONES.register_module()
+class DetectoRS_ResNet(ResNet):
+ """ResNet backbone for DetectoRS.
+
+ Args:
+ sac (dict, optional): Dictionary to construct SAC (Switchable Atrous
+ Convolution). Default: None.
+ stage_with_sac (list): Which stage to use sac. Default: (False, False,
+ False, False).
+ rfp_inplanes (int, optional): The number of channels from RFP.
+ Default: None. If specified, an additional conv layer will be
+ added for ``rfp_feat``. Otherwise, the structure is the same as
+ base class.
+ output_img (bool): If ``True``, the input image will be inserted into
+ the starting position of output. Default: False.
+ pretrained (str, optional): The pretrained model to load.
+ """
+
+ arch_settings = {
+ 50: (Bottleneck, (3, 4, 6, 3)),
+ 101: (Bottleneck, (3, 4, 23, 3)),
+ 152: (Bottleneck, (3, 8, 36, 3))
+ }
+
+ def __init__(self,
+ sac=None,
+ stage_with_sac=(False, False, False, False),
+ rfp_inplanes=None,
+ output_img=False,
+ pretrained=None,
+ **kwargs):
+ self.sac = sac
+ self.stage_with_sac = stage_with_sac
+ self.rfp_inplanes = rfp_inplanes
+ self.output_img = output_img
+ self.pretrained = pretrained
+ super(DetectoRS_ResNet, self).__init__(**kwargs)
+
+ self.inplanes = self.stem_channels
+ self.res_layers = []
+ for i, num_blocks in enumerate(self.stage_blocks):
+ stride = self.strides[i]
+ dilation = self.dilations[i]
+ dcn = self.dcn if self.stage_with_dcn[i] else None
+ sac = self.sac if self.stage_with_sac[i] else None
+ if self.plugins is not None:
+ stage_plugins = self.make_stage_plugins(self.plugins, i)
+ else:
+ stage_plugins = None
+ planes = self.base_channels * 2**i
+ res_layer = self.make_res_layer(
+ block=self.block,
+ inplanes=self.inplanes,
+ planes=planes,
+ num_blocks=num_blocks,
+ stride=stride,
+ dilation=dilation,
+ style=self.style,
+ avg_down=self.avg_down,
+ with_cp=self.with_cp,
+ conv_cfg=self.conv_cfg,
+ norm_cfg=self.norm_cfg,
+ dcn=dcn,
+ sac=sac,
+ rfp_inplanes=rfp_inplanes if i > 0 else None,
+ plugins=stage_plugins)
+ self.inplanes = planes * self.block.expansion
+ layer_name = f'layer{i + 1}'
+ self.add_module(layer_name, res_layer)
+ self.res_layers.append(layer_name)
+
+ self._freeze_stages()
+
+ def make_res_layer(self, **kwargs):
+ """Pack all blocks in a stage into a ``ResLayer`` for DetectoRS."""
+ return ResLayer(**kwargs)
+
+ def forward(self, x):
+ """Forward function."""
+ outs = list(super(DetectoRS_ResNet, self).forward(x))
+ if self.output_img:
+ outs.insert(0, x)
+ return tuple(outs)
+
+ def rfp_forward(self, x, rfp_feats):
+ """Forward function for RFP."""
+ if self.deep_stem:
+ x = self.stem(x)
+ else:
+ x = self.conv1(x)
+ x = self.norm1(x)
+ x = self.relu(x)
+ x = self.maxpool(x)
+ outs = []
+ for i, layer_name in enumerate(self.res_layers):
+ res_layer = getattr(self, layer_name)
+ rfp_feat = rfp_feats[i] if i > 0 else None
+ for layer in res_layer:
+ x = layer.rfp_forward(x, rfp_feat)
+ if i in self.out_indices:
+ outs.append(x)
+ return tuple(outs)
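+
+
+if __name__ == '__main__':
+    # Illustrative sketch only, not part of upstream mmdet: with
+    # `output_img=True` the raw input is prepended to the feature tuple,
+    # which is what the recursive feature pyramid (RFP) consumes on its
+    # second pass. Assumes the base ResNet defaults (depth-50 stage setting,
+    # out_indices=(0, 1, 2, 3)) apply; the input size is arbitrary.
+    import torch
+    backbone = DetectoRS_ResNet(depth=50, output_img=True)
+    backbone.eval()
+    with torch.no_grad():
+        feats = backbone(torch.rand(1, 3, 64, 64))
+    print(len(feats), tuple(feats[0].shape))  # 5, (1, 3, 64, 64)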
diff --git a/annotator/uniformer/mmdet/models/backbones/detectors_resnext.py b/annotator/uniformer/mmdet/models/backbones/detectors_resnext.py
new file mode 100644
index 0000000000000000000000000000000000000000..57d032fe37ed82d5ba24e761bdc014cc0ee5ac64
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/backbones/detectors_resnext.py
@@ -0,0 +1,122 @@
+import math
+
+from mmcv.cnn import build_conv_layer, build_norm_layer
+
+from ..builder import BACKBONES
+from .detectors_resnet import Bottleneck as _Bottleneck
+from .detectors_resnet import DetectoRS_ResNet
+
+
+class Bottleneck(_Bottleneck):
+ expansion = 4
+
+ def __init__(self,
+ inplanes,
+ planes,
+ groups=1,
+ base_width=4,
+ base_channels=64,
+ **kwargs):
+ """Bottleneck block for ResNeXt.
+
+ If style is "pytorch", the stride-two layer is the 3x3 conv layer, if
+ it is "caffe", the stride-two layer is the first 1x1 conv layer.
+ """
+ super(Bottleneck, self).__init__(inplanes, planes, **kwargs)
+
+ if groups == 1:
+ width = self.planes
+ else:
+ width = math.floor(self.planes *
+ (base_width / base_channels)) * groups
+
+ self.norm1_name, norm1 = build_norm_layer(
+ self.norm_cfg, width, postfix=1)
+ self.norm2_name, norm2 = build_norm_layer(
+ self.norm_cfg, width, postfix=2)
+ self.norm3_name, norm3 = build_norm_layer(
+ self.norm_cfg, self.planes * self.expansion, postfix=3)
+
+ self.conv1 = build_conv_layer(
+ self.conv_cfg,
+ self.inplanes,
+ width,
+ kernel_size=1,
+ stride=self.conv1_stride,
+ bias=False)
+ self.add_module(self.norm1_name, norm1)
+ fallback_on_stride = False
+ self.with_modulated_dcn = False
+ if self.with_dcn:
+ fallback_on_stride = self.dcn.pop('fallback_on_stride', False)
+ if self.with_sac:
+ self.conv2 = build_conv_layer(
+ self.sac,
+ width,
+ width,
+ kernel_size=3,
+ stride=self.conv2_stride,
+ padding=self.dilation,
+ dilation=self.dilation,
+ groups=groups,
+ bias=False)
+ elif not self.with_dcn or fallback_on_stride:
+ self.conv2 = build_conv_layer(
+ self.conv_cfg,
+ width,
+ width,
+ kernel_size=3,
+ stride=self.conv2_stride,
+ padding=self.dilation,
+ dilation=self.dilation,
+ groups=groups,
+ bias=False)
+ else:
+ assert self.conv_cfg is None, 'conv_cfg must be None for DCN'
+ self.conv2 = build_conv_layer(
+ self.dcn,
+ width,
+ width,
+ kernel_size=3,
+ stride=self.conv2_stride,
+ padding=self.dilation,
+ dilation=self.dilation,
+ groups=groups,
+ bias=False)
+
+ self.add_module(self.norm2_name, norm2)
+ self.conv3 = build_conv_layer(
+ self.conv_cfg,
+ width,
+ self.planes * self.expansion,
+ kernel_size=1,
+ bias=False)
+ self.add_module(self.norm3_name, norm3)
+
+
+@BACKBONES.register_module()
+class DetectoRS_ResNeXt(DetectoRS_ResNet):
+ """ResNeXt backbone for DetectoRS.
+
+ Args:
+ groups (int): The number of groups in ResNeXt.
+ base_width (int): The base width of ResNeXt.
+ """
+
+ arch_settings = {
+ 50: (Bottleneck, (3, 4, 6, 3)),
+ 101: (Bottleneck, (3, 4, 23, 3)),
+ 152: (Bottleneck, (3, 8, 36, 3))
+ }
+
+ def __init__(self, groups=1, base_width=4, **kwargs):
+ self.groups = groups
+ self.base_width = base_width
+ super(DetectoRS_ResNeXt, self).__init__(**kwargs)
+
+ def make_res_layer(self, **kwargs):
+ return super().make_res_layer(
+ groups=self.groups,
+ base_width=self.base_width,
+ base_channels=self.base_channels,
+ **kwargs)
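+
+
+if __name__ == '__main__':
+    # Illustrative sketch only, not part of upstream mmdet: how the grouped
+    # 3x3 conv width in the Bottleneck above is derived for a typical
+    # ResNeXt "32x4d" setting (the numbers below are just an example).
+    planes, groups, base_width, base_channels = 256, 32, 4, 64
+    width = math.floor(planes * (base_width / base_channels)) * groups
+    print(width)  # 512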
diff --git a/annotator/uniformer/mmdet/models/backbones/hourglass.py b/annotator/uniformer/mmdet/models/backbones/hourglass.py
new file mode 100644
index 0000000000000000000000000000000000000000..3422acee35e3c6f8731cdb310f188e671b5be12f
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/backbones/hourglass.py
@@ -0,0 +1,198 @@
+import torch.nn as nn
+from mmcv.cnn import ConvModule
+
+from ..builder import BACKBONES
+from ..utils import ResLayer
+from .resnet import BasicBlock
+
+
+class HourglassModule(nn.Module):
+ """Hourglass Module for HourglassNet backbone.
+
+ Generate module recursively and use BasicBlock as the base unit.
+
+ Args:
+ depth (int): Depth of current HourglassModule.
+ stage_channels (list[int]): Feature channels of sub-modules in current
+ and follow-up HourglassModule.
+ stage_blocks (list[int]): Number of sub-modules stacked in current and
+ follow-up HourglassModule.
+ norm_cfg (dict): Dictionary to construct and config norm layer.
+ """
+
+ def __init__(self,
+ depth,
+ stage_channels,
+ stage_blocks,
+ norm_cfg=dict(type='BN', requires_grad=True)):
+ super(HourglassModule, self).__init__()
+
+ self.depth = depth
+
+ cur_block = stage_blocks[0]
+ next_block = stage_blocks[1]
+
+ cur_channel = stage_channels[0]
+ next_channel = stage_channels[1]
+
+ self.up1 = ResLayer(
+ BasicBlock, cur_channel, cur_channel, cur_block, norm_cfg=norm_cfg)
+
+ self.low1 = ResLayer(
+ BasicBlock,
+ cur_channel,
+ next_channel,
+ cur_block,
+ stride=2,
+ norm_cfg=norm_cfg)
+
+ if self.depth > 1:
+ self.low2 = HourglassModule(depth - 1, stage_channels[1:],
+ stage_blocks[1:])
+ else:
+ self.low2 = ResLayer(
+ BasicBlock,
+ next_channel,
+ next_channel,
+ next_block,
+ norm_cfg=norm_cfg)
+
+ self.low3 = ResLayer(
+ BasicBlock,
+ next_channel,
+ cur_channel,
+ cur_block,
+ norm_cfg=norm_cfg,
+ downsample_first=False)
+
+ self.up2 = nn.Upsample(scale_factor=2)
+
+ def forward(self, x):
+ """Forward function."""
+ up1 = self.up1(x)
+ low1 = self.low1(x)
+ low2 = self.low2(low1)
+ low3 = self.low3(low2)
+ up2 = self.up2(low3)
+ return up1 + up2
+
+
+@BACKBONES.register_module()
+class HourglassNet(nn.Module):
+ """HourglassNet backbone.
+
+ Stacked Hourglass Networks for Human Pose Estimation.
+ More details can be found in the `paper
+    <https://arxiv.org/abs/1603.06937>`_ .
+
+ Args:
+ downsample_times (int): Downsample times in a HourglassModule.
+ num_stacks (int): Number of HourglassModule modules stacked,
+ 1 for Hourglass-52, 2 for Hourglass-104.
+ stage_channels (list[int]): Feature channel of each sub-module in a
+ HourglassModule.
+ stage_blocks (list[int]): Number of sub-modules stacked in a
+ HourglassModule.
+ feat_channel (int): Feature channel of conv after a HourglassModule.
+ norm_cfg (dict): Dictionary to construct and config norm layer.
+
+ Example:
+ >>> from mmdet.models import HourglassNet
+ >>> import torch
+ >>> self = HourglassNet()
+ >>> self.eval()
+ >>> inputs = torch.rand(1, 3, 511, 511)
+ >>> level_outputs = self.forward(inputs)
+ >>> for level_output in level_outputs:
+ ... print(tuple(level_output.shape))
+ (1, 256, 128, 128)
+ (1, 256, 128, 128)
+ """
+
+ def __init__(self,
+ downsample_times=5,
+ num_stacks=2,
+ stage_channels=(256, 256, 384, 384, 384, 512),
+ stage_blocks=(2, 2, 2, 2, 2, 4),
+ feat_channel=256,
+ norm_cfg=dict(type='BN', requires_grad=True)):
+ super(HourglassNet, self).__init__()
+
+ self.num_stacks = num_stacks
+ assert self.num_stacks >= 1
+ assert len(stage_channels) == len(stage_blocks)
+ assert len(stage_channels) > downsample_times
+
+ cur_channel = stage_channels[0]
+
+ self.stem = nn.Sequential(
+ ConvModule(3, 128, 7, padding=3, stride=2, norm_cfg=norm_cfg),
+ ResLayer(BasicBlock, 128, 256, 1, stride=2, norm_cfg=norm_cfg))
+
+ self.hourglass_modules = nn.ModuleList([
+ HourglassModule(downsample_times, stage_channels, stage_blocks)
+ for _ in range(num_stacks)
+ ])
+
+ self.inters = ResLayer(
+ BasicBlock,
+ cur_channel,
+ cur_channel,
+ num_stacks - 1,
+ norm_cfg=norm_cfg)
+
+ self.conv1x1s = nn.ModuleList([
+ ConvModule(
+ cur_channel, cur_channel, 1, norm_cfg=norm_cfg, act_cfg=None)
+ for _ in range(num_stacks - 1)
+ ])
+
+ self.out_convs = nn.ModuleList([
+ ConvModule(
+ cur_channel, feat_channel, 3, padding=1, norm_cfg=norm_cfg)
+ for _ in range(num_stacks)
+ ])
+
+ self.remap_convs = nn.ModuleList([
+ ConvModule(
+ feat_channel, cur_channel, 1, norm_cfg=norm_cfg, act_cfg=None)
+ for _ in range(num_stacks - 1)
+ ])
+
+ self.relu = nn.ReLU(inplace=True)
+
+ def init_weights(self, pretrained=None):
+ """Init module weights.
+
+        We do nothing in this function because all the modules we use
+        (ConvModule, BasicBlock, etc.) have default initialization, and we
+        currently do not provide a pretrained model for HourglassNet.
+
+        The detector's __init__() calls the backbone's init_weights() with
+        `pretrained` as input, so we keep this function.
+ """
+        # Training the CentripetalNet model requires resetting Conv2d parameters
+ for m in self.modules():
+ if isinstance(m, nn.Conv2d):
+ m.reset_parameters()
+
+ def forward(self, x):
+ """Forward function."""
+ inter_feat = self.stem(x)
+ out_feats = []
+
+ for ind in range(self.num_stacks):
+ single_hourglass = self.hourglass_modules[ind]
+ out_conv = self.out_convs[ind]
+
+ hourglass_feat = single_hourglass(inter_feat)
+ out_feat = out_conv(hourglass_feat)
+ out_feats.append(out_feat)
+
+ if ind < self.num_stacks - 1:
+ inter_feat = self.conv1x1s[ind](
+ inter_feat) + self.remap_convs[ind](
+ out_feat)
+ inter_feat = self.inters[ind](self.relu(inter_feat))
+
+ return out_feats
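+
+
+if __name__ == '__main__':
+    # Illustrative sketch only, not part of upstream mmdet: the network
+    # returns one feature map per stacked hourglass (used for intermediate
+    # supervision), so `len(out_feats) == num_stacks`. The input must leave a
+    # spatial size divisible by 32 after the stride-4 stem; 255 is picked
+    # here just to keep the demo light.
+    import torch
+    net = HourglassNet(num_stacks=2)
+    net.init_weights()
+    net.eval()
+    with torch.no_grad():
+        outs = net(torch.rand(1, 3, 255, 255))
+    print(len(outs), tuple(outs[-1].shape))  # 2, (1, 256, 64, 64)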
diff --git a/annotator/uniformer/mmdet/models/backbones/hrnet.py b/annotator/uniformer/mmdet/models/backbones/hrnet.py
new file mode 100644
index 0000000000000000000000000000000000000000..c0fd0a974192231506aa68b1e1719f618b78a1b3
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/backbones/hrnet.py
@@ -0,0 +1,537 @@
+import torch.nn as nn
+from mmcv.cnn import (build_conv_layer, build_norm_layer, constant_init,
+ kaiming_init)
+from mmcv.runner import load_checkpoint
+from torch.nn.modules.batchnorm import _BatchNorm
+
+from mmdet.utils import get_root_logger
+from ..builder import BACKBONES
+from .resnet import BasicBlock, Bottleneck
+
+
+class HRModule(nn.Module):
+ """High-Resolution Module for HRNet.
+
+    In this module, every branch has 4 BasicBlocks/Bottlenecks.
+    Fusion/exchange between the branches is performed in this module.
+ """
+
+ def __init__(self,
+ num_branches,
+ blocks,
+ num_blocks,
+ in_channels,
+ num_channels,
+ multiscale_output=True,
+ with_cp=False,
+ conv_cfg=None,
+ norm_cfg=dict(type='BN')):
+ super(HRModule, self).__init__()
+ self._check_branches(num_branches, num_blocks, in_channels,
+ num_channels)
+
+ self.in_channels = in_channels
+ self.num_branches = num_branches
+
+ self.multiscale_output = multiscale_output
+ self.norm_cfg = norm_cfg
+ self.conv_cfg = conv_cfg
+ self.with_cp = with_cp
+ self.branches = self._make_branches(num_branches, blocks, num_blocks,
+ num_channels)
+ self.fuse_layers = self._make_fuse_layers()
+ self.relu = nn.ReLU(inplace=False)
+
+ def _check_branches(self, num_branches, num_blocks, in_channels,
+ num_channels):
+ if num_branches != len(num_blocks):
+ error_msg = f'NUM_BRANCHES({num_branches}) ' \
+ f'!= NUM_BLOCKS({len(num_blocks)})'
+ raise ValueError(error_msg)
+
+ if num_branches != len(num_channels):
+ error_msg = f'NUM_BRANCHES({num_branches}) ' \
+ f'!= NUM_CHANNELS({len(num_channels)})'
+ raise ValueError(error_msg)
+
+ if num_branches != len(in_channels):
+ error_msg = f'NUM_BRANCHES({num_branches}) ' \
+ f'!= NUM_INCHANNELS({len(in_channels)})'
+ raise ValueError(error_msg)
+
+ def _make_one_branch(self,
+ branch_index,
+ block,
+ num_blocks,
+ num_channels,
+ stride=1):
+ downsample = None
+ if stride != 1 or \
+ self.in_channels[branch_index] != \
+ num_channels[branch_index] * block.expansion:
+ downsample = nn.Sequential(
+ build_conv_layer(
+ self.conv_cfg,
+ self.in_channels[branch_index],
+ num_channels[branch_index] * block.expansion,
+ kernel_size=1,
+ stride=stride,
+ bias=False),
+ build_norm_layer(self.norm_cfg, num_channels[branch_index] *
+ block.expansion)[1])
+
+ layers = []
+ layers.append(
+ block(
+ self.in_channels[branch_index],
+ num_channels[branch_index],
+ stride,
+ downsample=downsample,
+ with_cp=self.with_cp,
+ norm_cfg=self.norm_cfg,
+ conv_cfg=self.conv_cfg))
+ self.in_channels[branch_index] = \
+ num_channels[branch_index] * block.expansion
+ for i in range(1, num_blocks[branch_index]):
+ layers.append(
+ block(
+ self.in_channels[branch_index],
+ num_channels[branch_index],
+ with_cp=self.with_cp,
+ norm_cfg=self.norm_cfg,
+ conv_cfg=self.conv_cfg))
+
+ return nn.Sequential(*layers)
+
+ def _make_branches(self, num_branches, block, num_blocks, num_channels):
+ branches = []
+
+ for i in range(num_branches):
+ branches.append(
+ self._make_one_branch(i, block, num_blocks, num_channels))
+
+ return nn.ModuleList(branches)
+
+ def _make_fuse_layers(self):
+ if self.num_branches == 1:
+ return None
+
+ num_branches = self.num_branches
+ in_channels = self.in_channels
+ fuse_layers = []
+ num_out_branches = num_branches if self.multiscale_output else 1
+ for i in range(num_out_branches):
+ fuse_layer = []
+ for j in range(num_branches):
+ if j > i:
+ fuse_layer.append(
+ nn.Sequential(
+ build_conv_layer(
+ self.conv_cfg,
+ in_channels[j],
+ in_channels[i],
+ kernel_size=1,
+ stride=1,
+ padding=0,
+ bias=False),
+ build_norm_layer(self.norm_cfg, in_channels[i])[1],
+ nn.Upsample(
+ scale_factor=2**(j - i), mode='nearest')))
+ elif j == i:
+ fuse_layer.append(None)
+ else:
+ conv_downsamples = []
+ for k in range(i - j):
+ if k == i - j - 1:
+ conv_downsamples.append(
+ nn.Sequential(
+ build_conv_layer(
+ self.conv_cfg,
+ in_channels[j],
+ in_channels[i],
+ kernel_size=3,
+ stride=2,
+ padding=1,
+ bias=False),
+ build_norm_layer(self.norm_cfg,
+ in_channels[i])[1]))
+ else:
+ conv_downsamples.append(
+ nn.Sequential(
+ build_conv_layer(
+ self.conv_cfg,
+ in_channels[j],
+ in_channels[j],
+ kernel_size=3,
+ stride=2,
+ padding=1,
+ bias=False),
+ build_norm_layer(self.norm_cfg,
+ in_channels[j])[1],
+ nn.ReLU(inplace=False)))
+ fuse_layer.append(nn.Sequential(*conv_downsamples))
+ fuse_layers.append(nn.ModuleList(fuse_layer))
+
+ return nn.ModuleList(fuse_layers)
+
+ def forward(self, x):
+ """Forward function."""
+ if self.num_branches == 1:
+ return [self.branches[0](x[0])]
+
+ for i in range(self.num_branches):
+ x[i] = self.branches[i](x[i])
+
+ x_fuse = []
+ for i in range(len(self.fuse_layers)):
+ y = 0
+ for j in range(self.num_branches):
+ if i == j:
+ y += x[j]
+ else:
+ y += self.fuse_layers[i][j](x[j])
+ x_fuse.append(self.relu(y))
+ return x_fuse
+
+
+@BACKBONES.register_module()
+class HRNet(nn.Module):
+ """HRNet backbone.
+
+ High-Resolution Representations for Labeling Pixels and Regions
+ arXiv: https://arxiv.org/abs/1904.04514
+
+ Args:
+ extra (dict): detailed configuration for each stage of HRNet.
+ in_channels (int): Number of input image channels. Default: 3.
+ conv_cfg (dict): dictionary to construct and config conv layer.
+ norm_cfg (dict): dictionary to construct and config norm layer.
+ norm_eval (bool): Whether to set norm layers to eval mode, namely,
+ freeze running stats (mean and var). Note: Effect on Batch Norm
+ and its variants only.
+ with_cp (bool): Use checkpoint or not. Using checkpoint will save some
+ memory while slowing down the training speed.
+ zero_init_residual (bool): whether to use zero init for last norm layer
+ in resblocks to let them behave as identity.
+
+ Example:
+ >>> from mmdet.models import HRNet
+ >>> import torch
+ >>> extra = dict(
+ >>> stage1=dict(
+ >>> num_modules=1,
+ >>> num_branches=1,
+ >>> block='BOTTLENECK',
+ >>> num_blocks=(4, ),
+ >>> num_channels=(64, )),
+ >>> stage2=dict(
+ >>> num_modules=1,
+ >>> num_branches=2,
+ >>> block='BASIC',
+ >>> num_blocks=(4, 4),
+ >>> num_channels=(32, 64)),
+ >>> stage3=dict(
+ >>> num_modules=4,
+ >>> num_branches=3,
+ >>> block='BASIC',
+ >>> num_blocks=(4, 4, 4),
+ >>> num_channels=(32, 64, 128)),
+ >>> stage4=dict(
+ >>> num_modules=3,
+ >>> num_branches=4,
+ >>> block='BASIC',
+ >>> num_blocks=(4, 4, 4, 4),
+ >>> num_channels=(32, 64, 128, 256)))
+ >>> self = HRNet(extra, in_channels=1)
+ >>> self.eval()
+ >>> inputs = torch.rand(1, 1, 32, 32)
+ >>> level_outputs = self.forward(inputs)
+ >>> for level_out in level_outputs:
+ ... print(tuple(level_out.shape))
+ (1, 32, 8, 8)
+ (1, 64, 4, 4)
+ (1, 128, 2, 2)
+ (1, 256, 1, 1)
+ """
+
+ blocks_dict = {'BASIC': BasicBlock, 'BOTTLENECK': Bottleneck}
+
+ def __init__(self,
+ extra,
+ in_channels=3,
+ conv_cfg=None,
+ norm_cfg=dict(type='BN'),
+ norm_eval=True,
+ with_cp=False,
+ zero_init_residual=False):
+ super(HRNet, self).__init__()
+ self.extra = extra
+ self.conv_cfg = conv_cfg
+ self.norm_cfg = norm_cfg
+ self.norm_eval = norm_eval
+ self.with_cp = with_cp
+ self.zero_init_residual = zero_init_residual
+
+ # stem net
+ self.norm1_name, norm1 = build_norm_layer(self.norm_cfg, 64, postfix=1)
+ self.norm2_name, norm2 = build_norm_layer(self.norm_cfg, 64, postfix=2)
+
+ self.conv1 = build_conv_layer(
+ self.conv_cfg,
+ in_channels,
+ 64,
+ kernel_size=3,
+ stride=2,
+ padding=1,
+ bias=False)
+
+ self.add_module(self.norm1_name, norm1)
+ self.conv2 = build_conv_layer(
+ self.conv_cfg,
+ 64,
+ 64,
+ kernel_size=3,
+ stride=2,
+ padding=1,
+ bias=False)
+
+ self.add_module(self.norm2_name, norm2)
+ self.relu = nn.ReLU(inplace=True)
+
+ # stage 1
+ self.stage1_cfg = self.extra['stage1']
+ num_channels = self.stage1_cfg['num_channels'][0]
+ block_type = self.stage1_cfg['block']
+ num_blocks = self.stage1_cfg['num_blocks'][0]
+
+ block = self.blocks_dict[block_type]
+ stage1_out_channels = num_channels * block.expansion
+ self.layer1 = self._make_layer(block, 64, num_channels, num_blocks)
+
+ # stage 2
+ self.stage2_cfg = self.extra['stage2']
+ num_channels = self.stage2_cfg['num_channels']
+ block_type = self.stage2_cfg['block']
+
+ block = self.blocks_dict[block_type]
+ num_channels = [channel * block.expansion for channel in num_channels]
+ self.transition1 = self._make_transition_layer([stage1_out_channels],
+ num_channels)
+ self.stage2, pre_stage_channels = self._make_stage(
+ self.stage2_cfg, num_channels)
+
+ # stage 3
+ self.stage3_cfg = self.extra['stage3']
+ num_channels = self.stage3_cfg['num_channels']
+ block_type = self.stage3_cfg['block']
+
+ block = self.blocks_dict[block_type]
+ num_channels = [channel * block.expansion for channel in num_channels]
+ self.transition2 = self._make_transition_layer(pre_stage_channels,
+ num_channels)
+ self.stage3, pre_stage_channels = self._make_stage(
+ self.stage3_cfg, num_channels)
+
+ # stage 4
+ self.stage4_cfg = self.extra['stage4']
+ num_channels = self.stage4_cfg['num_channels']
+ block_type = self.stage4_cfg['block']
+
+ block = self.blocks_dict[block_type]
+ num_channels = [channel * block.expansion for channel in num_channels]
+ self.transition3 = self._make_transition_layer(pre_stage_channels,
+ num_channels)
+ self.stage4, pre_stage_channels = self._make_stage(
+ self.stage4_cfg, num_channels)
+
+ @property
+ def norm1(self):
+ """nn.Module: the normalization layer named "norm1" """
+ return getattr(self, self.norm1_name)
+
+ @property
+ def norm2(self):
+ """nn.Module: the normalization layer named "norm2" """
+ return getattr(self, self.norm2_name)
+
+ def _make_transition_layer(self, num_channels_pre_layer,
+ num_channels_cur_layer):
+ num_branches_cur = len(num_channels_cur_layer)
+ num_branches_pre = len(num_channels_pre_layer)
+
+ transition_layers = []
+ for i in range(num_branches_cur):
+ if i < num_branches_pre:
+ if num_channels_cur_layer[i] != num_channels_pre_layer[i]:
+ transition_layers.append(
+ nn.Sequential(
+ build_conv_layer(
+ self.conv_cfg,
+ num_channels_pre_layer[i],
+ num_channels_cur_layer[i],
+ kernel_size=3,
+ stride=1,
+ padding=1,
+ bias=False),
+ build_norm_layer(self.norm_cfg,
+ num_channels_cur_layer[i])[1],
+ nn.ReLU(inplace=True)))
+ else:
+ transition_layers.append(None)
+ else:
+ conv_downsamples = []
+ for j in range(i + 1 - num_branches_pre):
+ in_channels = num_channels_pre_layer[-1]
+ out_channels = num_channels_cur_layer[i] \
+ if j == i - num_branches_pre else in_channels
+ conv_downsamples.append(
+ nn.Sequential(
+ build_conv_layer(
+ self.conv_cfg,
+ in_channels,
+ out_channels,
+ kernel_size=3,
+ stride=2,
+ padding=1,
+ bias=False),
+ build_norm_layer(self.norm_cfg, out_channels)[1],
+ nn.ReLU(inplace=True)))
+ transition_layers.append(nn.Sequential(*conv_downsamples))
+
+ return nn.ModuleList(transition_layers)
+
+ def _make_layer(self, block, inplanes, planes, blocks, stride=1):
+ downsample = None
+ if stride != 1 or inplanes != planes * block.expansion:
+ downsample = nn.Sequential(
+ build_conv_layer(
+ self.conv_cfg,
+ inplanes,
+ planes * block.expansion,
+ kernel_size=1,
+ stride=stride,
+ bias=False),
+ build_norm_layer(self.norm_cfg, planes * block.expansion)[1])
+
+ layers = []
+ layers.append(
+ block(
+ inplanes,
+ planes,
+ stride,
+ downsample=downsample,
+ with_cp=self.with_cp,
+ norm_cfg=self.norm_cfg,
+ conv_cfg=self.conv_cfg))
+ inplanes = planes * block.expansion
+ for i in range(1, blocks):
+ layers.append(
+ block(
+ inplanes,
+ planes,
+ with_cp=self.with_cp,
+ norm_cfg=self.norm_cfg,
+ conv_cfg=self.conv_cfg))
+
+ return nn.Sequential(*layers)
+
+ def _make_stage(self, layer_config, in_channels, multiscale_output=True):
+ num_modules = layer_config['num_modules']
+ num_branches = layer_config['num_branches']
+ num_blocks = layer_config['num_blocks']
+ num_channels = layer_config['num_channels']
+ block = self.blocks_dict[layer_config['block']]
+
+ hr_modules = []
+ for i in range(num_modules):
+            # multiscale_output is only used for the last module
+ if not multiscale_output and i == num_modules - 1:
+ reset_multiscale_output = False
+ else:
+ reset_multiscale_output = True
+
+ hr_modules.append(
+ HRModule(
+ num_branches,
+ block,
+ num_blocks,
+ in_channels,
+ num_channels,
+ reset_multiscale_output,
+ with_cp=self.with_cp,
+ norm_cfg=self.norm_cfg,
+ conv_cfg=self.conv_cfg))
+
+ return nn.Sequential(*hr_modules), in_channels
+
+ def init_weights(self, pretrained=None):
+ """Initialize the weights in backbone.
+
+ Args:
+ pretrained (str, optional): Path to pre-trained weights.
+ Defaults to None.
+ """
+ if isinstance(pretrained, str):
+ logger = get_root_logger()
+ load_checkpoint(self, pretrained, strict=False, logger=logger)
+ elif pretrained is None:
+ for m in self.modules():
+ if isinstance(m, nn.Conv2d):
+ kaiming_init(m)
+ elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
+ constant_init(m, 1)
+
+ if self.zero_init_residual:
+ for m in self.modules():
+ if isinstance(m, Bottleneck):
+ constant_init(m.norm3, 0)
+ elif isinstance(m, BasicBlock):
+ constant_init(m.norm2, 0)
+ else:
+ raise TypeError('pretrained must be a str or None')
+
+ def forward(self, x):
+ """Forward function."""
+ x = self.conv1(x)
+ x = self.norm1(x)
+ x = self.relu(x)
+ x = self.conv2(x)
+ x = self.norm2(x)
+ x = self.relu(x)
+ x = self.layer1(x)
+
+ x_list = []
+ for i in range(self.stage2_cfg['num_branches']):
+ if self.transition1[i] is not None:
+ x_list.append(self.transition1[i](x))
+ else:
+ x_list.append(x)
+ y_list = self.stage2(x_list)
+
+ x_list = []
+ for i in range(self.stage3_cfg['num_branches']):
+ if self.transition2[i] is not None:
+ x_list.append(self.transition2[i](y_list[-1]))
+ else:
+ x_list.append(y_list[i])
+ y_list = self.stage3(x_list)
+
+ x_list = []
+ for i in range(self.stage4_cfg['num_branches']):
+ if self.transition3[i] is not None:
+ x_list.append(self.transition3[i](y_list[-1]))
+ else:
+ x_list.append(y_list[i])
+ y_list = self.stage4(x_list)
+
+ return y_list
+
+ def train(self, mode=True):
+ """Convert the model into training mode will keeping the normalization
+ layer freezed."""
+ super(HRNet, self).train(mode)
+ if mode and self.norm_eval:
+ for m in self.modules():
+ # trick: eval have effect on BatchNorm only
+ if isinstance(m, _BatchNorm):
+ m.eval()
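+
+
+if __name__ == '__main__':
+    # Illustrative sketch only, not part of upstream mmdet: with
+    # `norm_eval=True` (the default), switching the backbone to train mode
+    # keeps every BatchNorm layer in eval mode, so its running statistics are
+    # never updated. The `extra` config below mirrors the docstring example.
+    extra = dict(
+        stage1=dict(num_modules=1, num_branches=1, block='BOTTLENECK',
+                    num_blocks=(4, ), num_channels=(64, )),
+        stage2=dict(num_modules=1, num_branches=2, block='BASIC',
+                    num_blocks=(4, 4), num_channels=(32, 64)),
+        stage3=dict(num_modules=4, num_branches=3, block='BASIC',
+                    num_blocks=(4, 4, 4), num_channels=(32, 64, 128)),
+        stage4=dict(num_modules=3, num_branches=4, block='BASIC',
+                    num_blocks=(4, 4, 4, 4), num_channels=(32, 64, 128, 256)))
+    net = HRNet(extra)
+    net.train()
+    print(all(not m.training for m in net.modules()
+              if isinstance(m, _BatchNorm)))  # True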
diff --git a/annotator/uniformer/mmdet/models/backbones/regnet.py b/annotator/uniformer/mmdet/models/backbones/regnet.py
new file mode 100644
index 0000000000000000000000000000000000000000..91a602a952226cebb5fd0e3e282c6f98ae4fa455
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/backbones/regnet.py
@@ -0,0 +1,325 @@
+import numpy as np
+import torch.nn as nn
+from mmcv.cnn import build_conv_layer, build_norm_layer
+
+from ..builder import BACKBONES
+from .resnet import ResNet
+from .resnext import Bottleneck
+
+
+@BACKBONES.register_module()
+class RegNet(ResNet):
+ """RegNet backbone.
+
+    More details can be found in `paper <https://arxiv.org/abs/2003.13678>`_ .
+
+ Args:
+ arch (dict): The parameter of RegNets.
+
+ - w0 (int): initial width
+ - wa (float): slope of width
+ - wm (float): quantization parameter to quantize the width
+ - depth (int): depth of the backbone
+ - group_w (int): width of group
+ - bot_mul (float): bottleneck ratio, i.e. expansion of bottleneck.
+ strides (Sequence[int]): Strides of the first block of each stage.
+ base_channels (int): Base channels after stem layer.
+ in_channels (int): Number of input image channels. Default: 3.
+ dilations (Sequence[int]): Dilation of each stage.
+ out_indices (Sequence[int]): Output from which stages.
+ style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two
+ layer is the 3x3 conv layer, otherwise the stride-two layer is
+ the first 1x1 conv layer.
+ frozen_stages (int): Stages to be frozen (all param fixed). -1 means
+ not freezing any parameters.
+ norm_cfg (dict): dictionary to construct and config norm layer.
+ norm_eval (bool): Whether to set norm layers to eval mode, namely,
+ freeze running stats (mean and var). Note: Effect on Batch Norm
+ and its variants only.
+ with_cp (bool): Use checkpoint or not. Using checkpoint will save some
+ memory while slowing down the training speed.
+ zero_init_residual (bool): whether to use zero init for last norm layer
+ in resblocks to let them behave as identity.
+
+ Example:
+ >>> from mmdet.models import RegNet
+ >>> import torch
+ >>> self = RegNet(
+ arch=dict(
+ w0=88,
+ wa=26.31,
+ wm=2.25,
+ group_w=48,
+ depth=25,
+ bot_mul=1.0))
+ >>> self.eval()
+ >>> inputs = torch.rand(1, 3, 32, 32)
+ >>> level_outputs = self.forward(inputs)
+ >>> for level_out in level_outputs:
+ ... print(tuple(level_out.shape))
+ (1, 96, 8, 8)
+ (1, 192, 4, 4)
+ (1, 432, 2, 2)
+ (1, 1008, 1, 1)
+ """
+ arch_settings = {
+ 'regnetx_400mf':
+ dict(w0=24, wa=24.48, wm=2.54, group_w=16, depth=22, bot_mul=1.0),
+ 'regnetx_800mf':
+ dict(w0=56, wa=35.73, wm=2.28, group_w=16, depth=16, bot_mul=1.0),
+ 'regnetx_1.6gf':
+ dict(w0=80, wa=34.01, wm=2.25, group_w=24, depth=18, bot_mul=1.0),
+ 'regnetx_3.2gf':
+ dict(w0=88, wa=26.31, wm=2.25, group_w=48, depth=25, bot_mul=1.0),
+ 'regnetx_4.0gf':
+ dict(w0=96, wa=38.65, wm=2.43, group_w=40, depth=23, bot_mul=1.0),
+ 'regnetx_6.4gf':
+ dict(w0=184, wa=60.83, wm=2.07, group_w=56, depth=17, bot_mul=1.0),
+ 'regnetx_8.0gf':
+ dict(w0=80, wa=49.56, wm=2.88, group_w=120, depth=23, bot_mul=1.0),
+ 'regnetx_12gf':
+ dict(w0=168, wa=73.36, wm=2.37, group_w=112, depth=19, bot_mul=1.0),
+ }
+
+ def __init__(self,
+ arch,
+ in_channels=3,
+ stem_channels=32,
+ base_channels=32,
+ strides=(2, 2, 2, 2),
+ dilations=(1, 1, 1, 1),
+ out_indices=(0, 1, 2, 3),
+ style='pytorch',
+ deep_stem=False,
+ avg_down=False,
+ frozen_stages=-1,
+ conv_cfg=None,
+ norm_cfg=dict(type='BN', requires_grad=True),
+ norm_eval=True,
+ dcn=None,
+ stage_with_dcn=(False, False, False, False),
+ plugins=None,
+ with_cp=False,
+ zero_init_residual=True):
+ super(ResNet, self).__init__()
+
+ # Generate RegNet parameters first
+ if isinstance(arch, str):
+ assert arch in self.arch_settings, \
+ f'"arch": "{arch}" is not one of the' \
+ ' arch_settings'
+ arch = self.arch_settings[arch]
+ elif not isinstance(arch, dict):
+ raise ValueError('Expect "arch" to be either a string '
+ f'or a dict, got {type(arch)}')
+
+ widths, num_stages = self.generate_regnet(
+ arch['w0'],
+ arch['wa'],
+ arch['wm'],
+ arch['depth'],
+ )
+ # Convert to per stage format
+ stage_widths, stage_blocks = self.get_stages_from_blocks(widths)
+ # Generate group widths and bot muls
+ group_widths = [arch['group_w'] for _ in range(num_stages)]
+ self.bottleneck_ratio = [arch['bot_mul'] for _ in range(num_stages)]
+ # Adjust the compatibility of stage_widths and group_widths
+ stage_widths, group_widths = self.adjust_width_group(
+ stage_widths, self.bottleneck_ratio, group_widths)
+
+ # Group params by stage
+ self.stage_widths = stage_widths
+ self.group_widths = group_widths
+ self.depth = sum(stage_blocks)
+ self.stem_channels = stem_channels
+ self.base_channels = base_channels
+ self.num_stages = num_stages
+ assert num_stages >= 1 and num_stages <= 4
+ self.strides = strides
+ self.dilations = dilations
+ assert len(strides) == len(dilations) == num_stages
+ self.out_indices = out_indices
+ assert max(out_indices) < num_stages
+ self.style = style
+ self.deep_stem = deep_stem
+ self.avg_down = avg_down
+ self.frozen_stages = frozen_stages
+ self.conv_cfg = conv_cfg
+ self.norm_cfg = norm_cfg
+ self.with_cp = with_cp
+ self.norm_eval = norm_eval
+ self.dcn = dcn
+ self.stage_with_dcn = stage_with_dcn
+ if dcn is not None:
+ assert len(stage_with_dcn) == num_stages
+ self.plugins = plugins
+ self.zero_init_residual = zero_init_residual
+ self.block = Bottleneck
+ expansion_bak = self.block.expansion
+ self.block.expansion = 1
+ self.stage_blocks = stage_blocks[:num_stages]
+
+ self._make_stem_layer(in_channels, stem_channels)
+
+ self.inplanes = stem_channels
+ self.res_layers = []
+ for i, num_blocks in enumerate(self.stage_blocks):
+ stride = self.strides[i]
+ dilation = self.dilations[i]
+ group_width = self.group_widths[i]
+ width = int(round(self.stage_widths[i] * self.bottleneck_ratio[i]))
+ stage_groups = width // group_width
+
+ dcn = self.dcn if self.stage_with_dcn[i] else None
+ if self.plugins is not None:
+ stage_plugins = self.make_stage_plugins(self.plugins, i)
+ else:
+ stage_plugins = None
+
+ res_layer = self.make_res_layer(
+ block=self.block,
+ inplanes=self.inplanes,
+ planes=self.stage_widths[i],
+ num_blocks=num_blocks,
+ stride=stride,
+ dilation=dilation,
+ style=self.style,
+ avg_down=self.avg_down,
+ with_cp=self.with_cp,
+ conv_cfg=self.conv_cfg,
+ norm_cfg=self.norm_cfg,
+ dcn=dcn,
+ plugins=stage_plugins,
+ groups=stage_groups,
+ base_width=group_width,
+ base_channels=self.stage_widths[i])
+ self.inplanes = self.stage_widths[i]
+ layer_name = f'layer{i + 1}'
+ self.add_module(layer_name, res_layer)
+ self.res_layers.append(layer_name)
+
+ self._freeze_stages()
+
+ self.feat_dim = stage_widths[-1]
+ self.block.expansion = expansion_bak
+
+ def _make_stem_layer(self, in_channels, base_channels):
+ self.conv1 = build_conv_layer(
+ self.conv_cfg,
+ in_channels,
+ base_channels,
+ kernel_size=3,
+ stride=2,
+ padding=1,
+ bias=False)
+ self.norm1_name, norm1 = build_norm_layer(
+ self.norm_cfg, base_channels, postfix=1)
+ self.add_module(self.norm1_name, norm1)
+ self.relu = nn.ReLU(inplace=True)
+
+ def generate_regnet(self,
+ initial_width,
+ width_slope,
+ width_parameter,
+ depth,
+ divisor=8):
+ """Generates per block width from RegNet parameters.
+
+ Args:
+ initial_width (int): Initial width of the backbone.
+ width_slope (float): Slope of the quantized linear function.
+ width_parameter (float): Parameter used to quantize the width.
+ depth (int): Depth of the backbone.
+ divisor (int, optional): The divisor of channels. Defaults to 8.
+
+ Returns:
+ list, int: return a list of widths of each stage and the number \
+ of stages
+ """
+ assert width_slope >= 0
+ assert initial_width > 0
+ assert width_parameter > 1
+ assert initial_width % divisor == 0
+ widths_cont = np.arange(depth) * width_slope + initial_width
+ ks = np.round(
+ np.log(widths_cont / initial_width) / np.log(width_parameter))
+ widths = initial_width * np.power(width_parameter, ks)
+ widths = np.round(np.divide(widths, divisor)) * divisor
+ num_stages = len(np.unique(widths))
+ widths, widths_cont = widths.astype(int).tolist(), widths_cont.tolist()
+ return widths, num_stages
+
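+ # How generate_regnet works (descriptive sketch, not part of the original
+ # source): the continuous width of block j follows the linear rule
+ # u_j = w0 + wa * j, which is snapped onto a geometric grid via
+ # s_j = round(log(u_j / w0) / log(wm)) so that w_j = w0 * wm ** s_j,
+ # and finally rounded to a multiple of `divisor`. Hypothetical example,
+ # assuming the regnetx_3.2gf setting:
+ #     >>> self = RegNet(arch='regnetx_3.2gf')
+ #     >>> widths, num_stages = self.generate_regnet(88, 26.31, 2.25, 25)
+ #     >>> num_stages  # blocks sharing a width form one stage; 4 here
+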
+ @staticmethod
+ def quantize_float(number, divisor):
+ """Converts a float to closest non-zero int divisible by divisor.
+
+ Args:
+ number (float): Original number to be quantized.
+ divisor (int): Divisor used to quantize the number.
+
+ Returns:
+ int: Quantized number that is divisible by divisor.
+ """
+ return int(round(number / divisor) * divisor)
+
+ def adjust_width_group(self, widths, bottleneck_ratio, groups):
+ """Adjusts the compatibility of widths and groups.
+
+ Args:
+ widths (list[int]): Width of each stage.
+ bottleneck_ratio (list[float]): Bottleneck ratio of each stage.
+ groups (list[int]): Number of groups in each stage.
+
+ Returns:
+ tuple(list): The adjusted widths and groups of each stage.
+ """
+ bottleneck_width = [
+ int(w * b) for w, b in zip(widths, bottleneck_ratio)
+ ]
+ groups = [min(g, w_bot) for g, w_bot in zip(groups, bottleneck_width)]
+ bottleneck_width = [
+ self.quantize_float(w_bot, g)
+ for w_bot, g in zip(bottleneck_width, groups)
+ ]
+ widths = [
+ int(w_bot / b)
+ for w_bot, b in zip(bottleneck_width, bottleneck_ratio)
+ ]
+ return widths, groups
+
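+ # Worked example for adjust_width_group (hypothetical numbers, not taken
+ # from any predefined arch): with widths=[80], bottleneck_ratio=[1.0] and
+ # groups=[24], the bottleneck width is 80, the group count stays at
+ # min(24, 80) = 24, quantize_float(80, 24) snaps the width to 72 (the
+ # closest multiple of 24), and the stage width becomes 72 / 1.0 = 72,
+ # i.e. ([80], [24]) -> ([72], [24]).
+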
+ def get_stages_from_blocks(self, widths):
+ """Gets widths/stage_blocks of network at each stage.
+
+ Args:
+ widths (list[int]): Width in each stage.
+
+ Returns:
+ tuple(list): width and depth of each stage
+ """
+ width_diff = [
+ width != width_prev
+ for width, width_prev in zip(widths + [0], [0] + widths)
+ ]
+ stage_widths = [
+ width for width, diff in zip(widths, width_diff[:-1]) if diff
+ ]
+ stage_blocks = np.diff([
+ depth for depth, diff in zip(range(len(width_diff)), width_diff)
+ if diff
+ ]).tolist()
+ return stage_widths, stage_blocks
+
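+ # Worked example for get_stages_from_blocks (hypothetical per-block widths,
+ # for illustration only): widths=[96, 96, 192, 192, 192] collapses runs of
+ # equal widths into stages, giving stage_widths=[96, 192] and
+ # stage_blocks=[2, 3].
+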
+ def forward(self, x):
+ """Forward function."""
+ x = self.conv1(x)
+ x = self.norm1(x)
+ x = self.relu(x)
+
+ outs = []
+ for i, layer_name in enumerate(self.res_layers):
+ res_layer = getattr(self, layer_name)
+ x = res_layer(x)
+ if i in self.out_indices:
+ outs.append(x)
+ return tuple(outs)
diff --git a/annotator/uniformer/mmdet/models/backbones/res2net.py b/annotator/uniformer/mmdet/models/backbones/res2net.py
new file mode 100644
index 0000000000000000000000000000000000000000..7901b7f2fa29741d72328bdbdbf92fc4d5c5f847
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/backbones/res2net.py
@@ -0,0 +1,351 @@
+import math
+
+import torch
+import torch.nn as nn
+import torch.utils.checkpoint as cp
+from mmcv.cnn import (build_conv_layer, build_norm_layer, constant_init,
+ kaiming_init)
+from mmcv.runner import load_checkpoint
+from torch.nn.modules.batchnorm import _BatchNorm
+
+from mmdet.utils import get_root_logger
+from ..builder import BACKBONES
+from .resnet import Bottleneck as _Bottleneck
+from .resnet import ResNet
+
+
+class Bottle2neck(_Bottleneck):
+ expansion = 4
+
+ def __init__(self,
+ inplanes,
+ planes,
+ scales=4,
+ base_width=26,
+ base_channels=64,
+ stage_type='normal',
+ **kwargs):
+ """Bottle2neck block for Res2Net.
+
+ If style is "pytorch", the stride-two layer is the 3x3 conv layer, if
+ it is "caffe", the stride-two layer is the first 1x1 conv layer.
+ """
+ super(Bottle2neck, self).__init__(inplanes, planes, **kwargs)
+ assert scales > 1, 'Res2Net degenerates to ResNet when scales = 1.'
+ width = int(math.floor(self.planes * (base_width / base_channels)))
+
+ self.norm1_name, norm1 = build_norm_layer(
+ self.norm_cfg, width * scales, postfix=1)
+ self.norm3_name, norm3 = build_norm_layer(
+ self.norm_cfg, self.planes * self.expansion, postfix=3)
+
+ self.conv1 = build_conv_layer(
+ self.conv_cfg,
+ self.inplanes,
+ width * scales,
+ kernel_size=1,
+ stride=self.conv1_stride,
+ bias=False)
+ self.add_module(self.norm1_name, norm1)
+
+ if stage_type == 'stage' and self.conv2_stride != 1:
+ self.pool = nn.AvgPool2d(
+ kernel_size=3, stride=self.conv2_stride, padding=1)
+ convs = []
+ bns = []
+
+ fallback_on_stride = False
+ if self.with_dcn:
+ fallback_on_stride = self.dcn.pop('fallback_on_stride', False)
+ if not self.with_dcn or fallback_on_stride:
+ for i in range(scales - 1):
+ convs.append(
+ build_conv_layer(
+ self.conv_cfg,
+ width,
+ width,
+ kernel_size=3,
+ stride=self.conv2_stride,
+ padding=self.dilation,
+ dilation=self.dilation,
+ bias=False))
+ bns.append(
+ build_norm_layer(self.norm_cfg, width, postfix=i + 1)[1])
+ self.convs = nn.ModuleList(convs)
+ self.bns = nn.ModuleList(bns)
+ else:
+ assert self.conv_cfg is None, 'conv_cfg must be None for DCN'
+ for i in range(scales - 1):
+ convs.append(
+ build_conv_layer(
+ self.dcn,
+ width,
+ width,
+ kernel_size=3,
+ stride=self.conv2_stride,
+ padding=self.dilation,
+ dilation=self.dilation,
+ bias=False))
+ bns.append(
+ build_norm_layer(self.norm_cfg, width, postfix=i + 1)[1])
+ self.convs = nn.ModuleList(convs)
+ self.bns = nn.ModuleList(bns)
+
+ self.conv3 = build_conv_layer(
+ self.conv_cfg,
+ width * scales,
+ self.planes * self.expansion,
+ kernel_size=1,
+ bias=False)
+ self.add_module(self.norm3_name, norm3)
+
+ self.stage_type = stage_type
+ self.scales = scales
+ self.width = width
+ delattr(self, 'conv2')
+ delattr(self, self.norm2_name)
+
+ def forward(self, x):
+ """Forward function."""
+
+ def _inner_forward(x):
+ identity = x
+
+ out = self.conv1(x)
+ out = self.norm1(out)
+ out = self.relu(out)
+
+ if self.with_plugins:
+ out = self.forward_plugin(out, self.after_conv1_plugin_names)
+
+ spx = torch.split(out, self.width, 1)
+ sp = self.convs[0](spx[0].contiguous())
+ sp = self.relu(self.bns[0](sp))
+ out = sp
+ for i in range(1, self.scales - 1):
+ if self.stage_type == 'stage':
+ sp = spx[i]
+ else:
+ sp = sp + spx[i]
+ sp = self.convs[i](sp.contiguous())
+ sp = self.relu(self.bns[i](sp))
+ out = torch.cat((out, sp), 1)
+
+ if self.stage_type == 'normal' or self.conv2_stride == 1:
+ out = torch.cat((out, spx[self.scales - 1]), 1)
+ elif self.stage_type == 'stage':
+ out = torch.cat((out, self.pool(spx[self.scales - 1])), 1)
+
+ if self.with_plugins:
+ out = self.forward_plugin(out, self.after_conv2_plugin_names)
+
+ out = self.conv3(out)
+ out = self.norm3(out)
+
+ if self.with_plugins:
+ out = self.forward_plugin(out, self.after_conv3_plugin_names)
+
+ if self.downsample is not None:
+ identity = self.downsample(x)
+
+ out += identity
+
+ return out
+
+ if self.with_cp and x.requires_grad:
+ out = cp.checkpoint(_inner_forward, x)
+ else:
+ out = _inner_forward(x)
+
+ out = self.relu(out)
+
+ return out
+
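+ # Sketch of the multi-scale aggregation above (descriptive note, not from
+ # the original source): the conv1 output is split channel-wise into
+ # `scales` chunks; chunk 0 goes through convs[0], and each chunk i in
+ # 1..scales-2 is added to the previous branch output before convs[i]
+ # (except in 'stage' blocks, where chunks are processed independently).
+ # The last chunk is passed through untouched, or average-pooled when the
+ # block strides, so each branch sees an increasingly large receptive field.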
+
+class Res2Layer(nn.Sequential):
+ """Res2Layer to build Res2Net style backbone.
+
+ Args:
+ block (nn.Module): block used to build ResLayer.
+ inplanes (int): inplanes of block.
+ planes (int): planes of block.
+ num_blocks (int): number of blocks.
+ stride (int): stride of the first block. Default: 1
+ avg_down (bool): Use AvgPool instead of stride conv when
+ downsampling in the bottle2neck. Default: True
+ conv_cfg (dict): dictionary to construct and config conv layer.
+ Default: None
+ norm_cfg (dict): dictionary to construct and config norm layer.
+ Default: dict(type='BN')
+ scales (int): Scales used in Res2Net. Default: 4
+ base_width (int): Basic width of each scale. Default: 26
+ """
+
+ def __init__(self,
+ block,
+ inplanes,
+ planes,
+ num_blocks,
+ stride=1,
+ avg_down=True,
+ conv_cfg=None,
+ norm_cfg=dict(type='BN'),
+ scales=4,
+ base_width=26,
+ **kwargs):
+ self.block = block
+
+ downsample = None
+ if stride != 1 or inplanes != planes * block.expansion:
+ downsample = nn.Sequential(
+ nn.AvgPool2d(
+ kernel_size=stride,
+ stride=stride,
+ ceil_mode=True,
+ count_include_pad=False),
+ build_conv_layer(
+ conv_cfg,
+ inplanes,
+ planes * block.expansion,
+ kernel_size=1,
+ stride=1,
+ bias=False),
+ build_norm_layer(norm_cfg, planes * block.expansion)[1],
+ )
+
+ layers = []
+ layers.append(
+ block(
+ inplanes=inplanes,
+ planes=planes,
+ stride=stride,
+ downsample=downsample,
+ conv_cfg=conv_cfg,
+ norm_cfg=norm_cfg,
+ scales=scales,
+ base_width=base_width,
+ stage_type='stage',
+ **kwargs))
+ inplanes = planes * block.expansion
+ for i in range(1, num_blocks):
+ layers.append(
+ block(
+ inplanes=inplanes,
+ planes=planes,
+ stride=1,
+ conv_cfg=conv_cfg,
+ norm_cfg=norm_cfg,
+ scales=scales,
+ base_width=base_width,
+ **kwargs))
+ super(Res2Layer, self).__init__(*layers)
+
+
+@BACKBONES.register_module()
+class Res2Net(ResNet):
+ """Res2Net backbone.
+
+ Args:
+ scales (int): Scales used in Res2Net. Default: 4
+ base_width (int): Basic width of each scale. Default: 26
+ depth (int): Depth of res2net, from {50, 101, 152}.
+ in_channels (int): Number of input image channels. Default: 3.
+ num_stages (int): Res2net stages. Default: 4.
+ strides (Sequence[int]): Strides of the first block of each stage.
+ dilations (Sequence[int]): Dilation of each stage.
+ out_indices (Sequence[int]): Output from which stages.
+ style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two
+ layer is the 3x3 conv layer, otherwise the stride-two layer is
+ the first 1x1 conv layer.
+ deep_stem (bool): Replace the 7x7 conv in the input stem with three 3x3 convs.
+ avg_down (bool): Use AvgPool instead of stride conv when
+ downsampling in the bottle2neck.
+ frozen_stages (int): Stages to be frozen (stop grad and set eval mode).
+ -1 means not freezing any parameters.
+ norm_cfg (dict): Dictionary to construct and config norm layer.
+ norm_eval (bool): Whether to set norm layers to eval mode, namely,
+ freeze running stats (mean and var). Note: Effect on Batch Norm
+ and its variants only.
+ plugins (list[dict]): List of plugins for stages, each dict contains:
+
+ - cfg (dict, required): Cfg dict to build plugin.
+ - position (str, required): Position inside block to insert
+ plugin, options are 'after_conv1', 'after_conv2', 'after_conv3'.
+ - stages (tuple[bool], optional): Stages to apply plugin, length
+ should be same as 'num_stages'.
+ with_cp (bool): Use checkpoint or not. Using checkpoint will save some
+ memory while slowing down the training speed.
+ zero_init_residual (bool): Whether to use zero init for last norm layer
+ in resblocks to let them behave as identity.
+
+ Example:
+ >>> from mmdet.models import Res2Net
+ >>> import torch
+ >>> self = Res2Net(depth=50, scales=4, base_width=26)
+ >>> self.eval()
+ >>> inputs = torch.rand(1, 3, 32, 32)
+ >>> level_outputs = self.forward(inputs)
+ >>> for level_out in level_outputs:
+ ... print(tuple(level_out.shape))
+ (1, 256, 8, 8)
+ (1, 512, 4, 4)
+ (1, 1024, 2, 2)
+ (1, 2048, 1, 1)
+ """
+
+ arch_settings = {
+ 50: (Bottle2neck, (3, 4, 6, 3)),
+ 101: (Bottle2neck, (3, 4, 23, 3)),
+ 152: (Bottle2neck, (3, 8, 36, 3))
+ }
+
+ def __init__(self,
+ scales=4,
+ base_width=26,
+ style='pytorch',
+ deep_stem=True,
+ avg_down=True,
+ **kwargs):
+ self.scales = scales
+ self.base_width = base_width
+ super(Res2Net, self).__init__(
+ style='pytorch', deep_stem=True, avg_down=True, **kwargs)
+
+ def make_res_layer(self, **kwargs):
+ return Res2Layer(
+ scales=self.scales,
+ base_width=self.base_width,
+ base_channels=self.base_channels,
+ **kwargs)
+
+ def init_weights(self, pretrained=None):
+ """Initialize the weights in backbone.
+
+ Args:
+ pretrained (str, optional): Path to pre-trained weights.
+ Defaults to None.
+ """
+ if isinstance(pretrained, str):
+ logger = get_root_logger()
+ load_checkpoint(self, pretrained, strict=False, logger=logger)
+ elif pretrained is None:
+ for m in self.modules():
+ if isinstance(m, nn.Conv2d):
+ kaiming_init(m)
+ elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
+ constant_init(m, 1)
+
+ if self.dcn is not None:
+ for m in self.modules():
+ if isinstance(m, Bottle2neck):
+ # dcn in Res2Net bottle2neck is in ModuleList
+ for n in m.convs:
+ if hasattr(n, 'conv_offset'):
+ constant_init(n.conv_offset, 0)
+
+ if self.zero_init_residual:
+ for m in self.modules():
+ if isinstance(m, Bottle2neck):
+ constant_init(m.norm3, 0)
+ else:
+ raise TypeError('pretrained must be a str or None')
diff --git a/annotator/uniformer/mmdet/models/backbones/resnest.py b/annotator/uniformer/mmdet/models/backbones/resnest.py
new file mode 100644
index 0000000000000000000000000000000000000000..48e1d8bfa47348a13f0da0b9ecf32354fa270340
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/backbones/resnest.py
@@ -0,0 +1,317 @@
+import math
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+import torch.utils.checkpoint as cp
+from mmcv.cnn import build_conv_layer, build_norm_layer
+
+from ..builder import BACKBONES
+from ..utils import ResLayer
+from .resnet import Bottleneck as _Bottleneck
+from .resnet import ResNetV1d
+
+
+class RSoftmax(nn.Module):
+ """Radix Softmax module in ``SplitAttentionConv2d``.
+
+ Args:
+ radix (int): Radix of input.
+ groups (int): Groups of input.
+ """
+
+ def __init__(self, radix, groups):
+ super().__init__()
+ self.radix = radix
+ self.groups = groups
+
+ def forward(self, x):
+ batch = x.size(0)
+ if self.radix > 1:
+ x = x.view(batch, self.groups, self.radix, -1).transpose(1, 2)
+ x = F.softmax(x, dim=1)
+ x = x.reshape(batch, -1)
+ else:
+ x = torch.sigmoid(x)
+ return x
+
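+ # Shape sketch for RSoftmax (illustrative note, not from the original
+ # comments): a (B, radix * channels, 1, 1) attention logit tensor is viewed
+ # as (B, groups, radix, -1) and transposed so that the softmax runs over
+ # the radix axis, then flattened back to (B, radix * channels); with
+ # radix=1 a plain sigmoid gate is used instead.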
+
+class SplitAttentionConv2d(nn.Module):
+ """Split-Attention Conv2d in ResNeSt.
+
+ Args:
+ in_channels (int): Number of channels in the input feature map.
+ channels (int): Number of intermediate channels.
+ kernel_size (int | tuple[int]): Size of the convolution kernel.
+ stride (int | tuple[int]): Stride of the convolution.
+ padding (int | tuple[int]): Zero-padding added to both sides of
+ the input.
+ dilation (int | tuple[int]): Spacing between kernel elements.
+ groups (int): Number of blocked connections from input channels to
+ output channels. Same as nn.Conv2d.
+ radix (int): Radix of SplitAttentionConv2d. Default: 2
+ reduction_factor (int): Reduction factor of inter_channels. Default: 4.
+ conv_cfg (dict): Config dict for convolution layer. Default: None,
+ which means using conv2d.
+ norm_cfg (dict): Config dict for normalization layer. Default: dict(type='BN').
+ dcn (dict): Config dict for DCN. Default: None.
+ """
+
+ def __init__(self,
+ in_channels,
+ channels,
+ kernel_size,
+ stride=1,
+ padding=0,
+ dilation=1,
+ groups=1,
+ radix=2,
+ reduction_factor=4,
+ conv_cfg=None,
+ norm_cfg=dict(type='BN'),
+ dcn=None):
+ super(SplitAttentionConv2d, self).__init__()
+ inter_channels = max(in_channels * radix // reduction_factor, 32)
+ self.radix = radix
+ self.groups = groups
+ self.channels = channels
+ self.with_dcn = dcn is not None
+ self.dcn = dcn
+ fallback_on_stride = False
+ if self.with_dcn:
+ fallback_on_stride = self.dcn.pop('fallback_on_stride', False)
+ if self.with_dcn and not fallback_on_stride:
+ assert conv_cfg is None, 'conv_cfg must be None for DCN'
+ conv_cfg = dcn
+ self.conv = build_conv_layer(
+ conv_cfg,
+ in_channels,
+ channels * radix,
+ kernel_size,
+ stride=stride,
+ padding=padding,
+ dilation=dilation,
+ groups=groups * radix,
+ bias=False)
+ # To be consistent with original implementation, starting from 0
+ self.norm0_name, norm0 = build_norm_layer(
+ norm_cfg, channels * radix, postfix=0)
+ self.add_module(self.norm0_name, norm0)
+ self.relu = nn.ReLU(inplace=True)
+ self.fc1 = build_conv_layer(
+ None, channels, inter_channels, 1, groups=self.groups)
+ self.norm1_name, norm1 = build_norm_layer(
+ norm_cfg, inter_channels, postfix=1)
+ self.add_module(self.norm1_name, norm1)
+ self.fc2 = build_conv_layer(
+ None, inter_channels, channels * radix, 1, groups=self.groups)
+ self.rsoftmax = RSoftmax(radix, groups)
+
+ @property
+ def norm0(self):
+ """nn.Module: the normalization layer named "norm0" """
+ return getattr(self, self.norm0_name)
+
+ @property
+ def norm1(self):
+ """nn.Module: the normalization layer named "norm1" """
+ return getattr(self, self.norm1_name)
+
+ def forward(self, x):
+ x = self.conv(x)
+ x = self.norm0(x)
+ x = self.relu(x)
+
+ batch = x.size(0)
+ if self.radix > 1:
+ splits = x.view(batch, self.radix, -1, *x.shape[2:])
+ gap = splits.sum(dim=1)
+ else:
+ gap = x
+ gap = F.adaptive_avg_pool2d(gap, 1)
+ gap = self.fc1(gap)
+
+ gap = self.norm1(gap)
+ gap = self.relu(gap)
+
+ atten = self.fc2(gap)
+ atten = self.rsoftmax(atten).view(batch, -1, 1, 1)
+
+ if self.radix > 1:
+ attens = atten.view(batch, self.radix, -1, *atten.shape[2:])
+ out = torch.sum(attens * splits, dim=1)
+ else:
+ out = atten * x
+ return out.contiguous()
+
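+ # Summary of the split-attention forward pass above (descriptive comment,
+ # not part of the upstream docstring): a grouped conv produces radix
+ # copies of the feature map, their sum is globally average-pooled and
+ # squeezed through fc1/fc2 to predict per-radix channel attention, the
+ # radix softmax normalises those logits, and the output is the
+ # attention-weighted sum over the radix splits.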
+
+class Bottleneck(_Bottleneck):
+ """Bottleneck block for ResNeSt.
+
+ Args:
+ inplanes (int): Input planes of this block.
+ planes (int): Middle planes of this block.
+ groups (int): Groups of conv2.
+ base_width (int): Base of width in terms of base channels. Default: 4.
+ base_channels (int): Base of channels for calculating width.
+ Default: 64.
+ radix (int): Radix of SplitAttentionConv2d. Default: 2
+ reduction_factor (int): Reduction factor of inter_channels in
+ SplitAttentionConv2d. Default: 4.
+ avg_down_stride (bool): Whether to use average pool for stride in
+ Bottleneck. Default: True.
+ kwargs (dict): Key word arguments for base class.
+ """
+ expansion = 4
+
+ def __init__(self,
+ inplanes,
+ planes,
+ groups=1,
+ base_width=4,
+ base_channels=64,
+ radix=2,
+ reduction_factor=4,
+ avg_down_stride=True,
+ **kwargs):
+ """Bottleneck block for ResNeSt."""
+ super(Bottleneck, self).__init__(inplanes, planes, **kwargs)
+
+ if groups == 1:
+ width = self.planes
+ else:
+ width = math.floor(self.planes *
+ (base_width / base_channels)) * groups
+
+ self.avg_down_stride = avg_down_stride and self.conv2_stride > 1
+
+ self.norm1_name, norm1 = build_norm_layer(
+ self.norm_cfg, width, postfix=1)
+ self.norm3_name, norm3 = build_norm_layer(
+ self.norm_cfg, self.planes * self.expansion, postfix=3)
+
+ self.conv1 = build_conv_layer(
+ self.conv_cfg,
+ self.inplanes,
+ width,
+ kernel_size=1,
+ stride=self.conv1_stride,
+ bias=False)
+ self.add_module(self.norm1_name, norm1)
+ self.with_modulated_dcn = False
+ self.conv2 = SplitAttentionConv2d(
+ width,
+ width,
+ kernel_size=3,
+ stride=1 if self.avg_down_stride else self.conv2_stride,
+ padding=self.dilation,
+ dilation=self.dilation,
+ groups=groups,
+ radix=radix,
+ reduction_factor=reduction_factor,
+ conv_cfg=self.conv_cfg,
+ norm_cfg=self.norm_cfg,
+ dcn=self.dcn)
+ delattr(self, self.norm2_name)
+
+ if self.avg_down_stride:
+ self.avd_layer = nn.AvgPool2d(3, self.conv2_stride, padding=1)
+
+ self.conv3 = build_conv_layer(
+ self.conv_cfg,
+ width,
+ self.planes * self.expansion,
+ kernel_size=1,
+ bias=False)
+ self.add_module(self.norm3_name, norm3)
+
+ def forward(self, x):
+
+ def _inner_forward(x):
+ identity = x
+
+ out = self.conv1(x)
+ out = self.norm1(out)
+ out = self.relu(out)
+
+ if self.with_plugins:
+ out = self.forward_plugin(out, self.after_conv1_plugin_names)
+
+ out = self.conv2(out)
+
+ if self.avg_down_stride:
+ out = self.avd_layer(out)
+
+ if self.with_plugins:
+ out = self.forward_plugin(out, self.after_conv2_plugin_names)
+
+ out = self.conv3(out)
+ out = self.norm3(out)
+
+ if self.with_plugins:
+ out = self.forward_plugin(out, self.after_conv3_plugin_names)
+
+ if self.downsample is not None:
+ identity = self.downsample(x)
+
+ out += identity
+
+ return out
+
+ if self.with_cp and x.requires_grad:
+ out = cp.checkpoint(_inner_forward, x)
+ else:
+ out = _inner_forward(x)
+
+ out = self.relu(out)
+
+ return out
+
+
+@BACKBONES.register_module()
+class ResNeSt(ResNetV1d):
+ """ResNeSt backbone.
+
+ Args:
+ groups (int): Number of groups of Bottleneck. Default: 1
+ base_width (int): Base width of Bottleneck. Default: 4
+ radix (int): Radix of SplitAttentionConv2d. Default: 2
+ reduction_factor (int): Reduction factor of inter_channels in
+ SplitAttentionConv2d. Default: 4.
+ avg_down_stride (bool): Whether to use average pool for stride in
+ Bottleneck. Default: True.
+ kwargs (dict): Keyword arguments for ResNet.
+ """
+
+ arch_settings = {
+ 50: (Bottleneck, (3, 4, 6, 3)),
+ 101: (Bottleneck, (3, 4, 23, 3)),
+ 152: (Bottleneck, (3, 8, 36, 3)),
+ 200: (Bottleneck, (3, 24, 36, 3))
+ }
+
+ def __init__(self,
+ groups=1,
+ base_width=4,
+ radix=2,
+ reduction_factor=4,
+ avg_down_stride=True,
+ **kwargs):
+ self.groups = groups
+ self.base_width = base_width
+ self.radix = radix
+ self.reduction_factor = reduction_factor
+ self.avg_down_stride = avg_down_stride
+ super(ResNeSt, self).__init__(**kwargs)
+
+ def make_res_layer(self, **kwargs):
+ """Pack all blocks in a stage into a ``ResLayer``."""
+ return ResLayer(
+ groups=self.groups,
+ base_width=self.base_width,
+ base_channels=self.base_channels,
+ radix=self.radix,
+ reduction_factor=self.reduction_factor,
+ avg_down_stride=self.avg_down_stride,
+ **kwargs)
diff --git a/annotator/uniformer/mmdet/models/backbones/resnet.py b/annotator/uniformer/mmdet/models/backbones/resnet.py
new file mode 100644
index 0000000000000000000000000000000000000000..3826815a6d94fdc4c54001d4c186d10ca3380e80
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/backbones/resnet.py
@@ -0,0 +1,663 @@
+import torch.nn as nn
+import torch.utils.checkpoint as cp
+from mmcv.cnn import (build_conv_layer, build_norm_layer, build_plugin_layer,
+ constant_init, kaiming_init)
+from mmcv.runner import load_checkpoint
+from torch.nn.modules.batchnorm import _BatchNorm
+
+from mmdet.utils import get_root_logger
+from ..builder import BACKBONES
+from ..utils import ResLayer
+
+
+class BasicBlock(nn.Module):
+ expansion = 1
+
+ def __init__(self,
+ inplanes,
+ planes,
+ stride=1,
+ dilation=1,
+ downsample=None,
+ style='pytorch',
+ with_cp=False,
+ conv_cfg=None,
+ norm_cfg=dict(type='BN'),
+ dcn=None,
+ plugins=None):
+ super(BasicBlock, self).__init__()
+ assert dcn is None, 'Not implemented yet.'
+ assert plugins is None, 'Not implemented yet.'
+
+ self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1)
+ self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2)
+
+ self.conv1 = build_conv_layer(
+ conv_cfg,
+ inplanes,
+ planes,
+ 3,
+ stride=stride,
+ padding=dilation,
+ dilation=dilation,
+ bias=False)
+ self.add_module(self.norm1_name, norm1)
+ self.conv2 = build_conv_layer(
+ conv_cfg, planes, planes, 3, padding=1, bias=False)
+ self.add_module(self.norm2_name, norm2)
+
+ self.relu = nn.ReLU(inplace=True)
+ self.downsample = downsample
+ self.stride = stride
+ self.dilation = dilation
+ self.with_cp = with_cp
+
+ @property
+ def norm1(self):
+ """nn.Module: normalization layer after the first convolution layer"""
+ return getattr(self, self.norm1_name)
+
+ @property
+ def norm2(self):
+ """nn.Module: normalization layer after the second convolution layer"""
+ return getattr(self, self.norm2_name)
+
+ def forward(self, x):
+ """Forward function."""
+
+ def _inner_forward(x):
+ identity = x
+
+ out = self.conv1(x)
+ out = self.norm1(out)
+ out = self.relu(out)
+
+ out = self.conv2(out)
+ out = self.norm2(out)
+
+ if self.downsample is not None:
+ identity = self.downsample(x)
+
+ out += identity
+
+ return out
+
+ if self.with_cp and x.requires_grad:
+ out = cp.checkpoint(_inner_forward, x)
+ else:
+ out = _inner_forward(x)
+
+ out = self.relu(out)
+
+ return out
+
+
+class Bottleneck(nn.Module):
+ expansion = 4
+
+ def __init__(self,
+ inplanes,
+ planes,
+ stride=1,
+ dilation=1,
+ downsample=None,
+ style='pytorch',
+ with_cp=False,
+ conv_cfg=None,
+ norm_cfg=dict(type='BN'),
+ dcn=None,
+ plugins=None):
+ """Bottleneck block for ResNet.
+
+ If style is "pytorch", the stride-two layer is the 3x3 conv layer, if
+ it is "caffe", the stride-two layer is the first 1x1 conv layer.
+ """
+ super(Bottleneck, self).__init__()
+ assert style in ['pytorch', 'caffe']
+ assert dcn is None or isinstance(dcn, dict)
+ assert plugins is None or isinstance(plugins, list)
+ if plugins is not None:
+ allowed_position = ['after_conv1', 'after_conv2', 'after_conv3']
+ assert all(p['position'] in allowed_position for p in plugins)
+
+ self.inplanes = inplanes
+ self.planes = planes
+ self.stride = stride
+ self.dilation = dilation
+ self.style = style
+ self.with_cp = with_cp
+ self.conv_cfg = conv_cfg
+ self.norm_cfg = norm_cfg
+ self.dcn = dcn
+ self.with_dcn = dcn is not None
+ self.plugins = plugins
+ self.with_plugins = plugins is not None
+
+ if self.with_plugins:
+ # collect plugins for conv1/conv2/conv3
+ self.after_conv1_plugins = [
+ plugin['cfg'] for plugin in plugins
+ if plugin['position'] == 'after_conv1'
+ ]
+ self.after_conv2_plugins = [
+ plugin['cfg'] for plugin in plugins
+ if plugin['position'] == 'after_conv2'
+ ]
+ self.after_conv3_plugins = [
+ plugin['cfg'] for plugin in plugins
+ if plugin['position'] == 'after_conv3'
+ ]
+
+ if self.style == 'pytorch':
+ self.conv1_stride = 1
+ self.conv2_stride = stride
+ else:
+ self.conv1_stride = stride
+ self.conv2_stride = 1
+
+ self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1)
+ self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2)
+ self.norm3_name, norm3 = build_norm_layer(
+ norm_cfg, planes * self.expansion, postfix=3)
+
+ self.conv1 = build_conv_layer(
+ conv_cfg,
+ inplanes,
+ planes,
+ kernel_size=1,
+ stride=self.conv1_stride,
+ bias=False)
+ self.add_module(self.norm1_name, norm1)
+ fallback_on_stride = False
+ if self.with_dcn:
+ fallback_on_stride = dcn.pop('fallback_on_stride', False)
+ if not self.with_dcn or fallback_on_stride:
+ self.conv2 = build_conv_layer(
+ conv_cfg,
+ planes,
+ planes,
+ kernel_size=3,
+ stride=self.conv2_stride,
+ padding=dilation,
+ dilation=dilation,
+ bias=False)
+ else:
+ assert self.conv_cfg is None, 'conv_cfg must be None for DCN'
+ self.conv2 = build_conv_layer(
+ dcn,
+ planes,
+ planes,
+ kernel_size=3,
+ stride=self.conv2_stride,
+ padding=dilation,
+ dilation=dilation,
+ bias=False)
+
+ self.add_module(self.norm2_name, norm2)
+ self.conv3 = build_conv_layer(
+ conv_cfg,
+ planes,
+ planes * self.expansion,
+ kernel_size=1,
+ bias=False)
+ self.add_module(self.norm3_name, norm3)
+
+ self.relu = nn.ReLU(inplace=True)
+ self.downsample = downsample
+
+ if self.with_plugins:
+ self.after_conv1_plugin_names = self.make_block_plugins(
+ planes, self.after_conv1_plugins)
+ self.after_conv2_plugin_names = self.make_block_plugins(
+ planes, self.after_conv2_plugins)
+ self.after_conv3_plugin_names = self.make_block_plugins(
+ planes * self.expansion, self.after_conv3_plugins)
+
+ def make_block_plugins(self, in_channels, plugins):
+ """make plugins for block.
+
+ Args:
+ in_channels (int): Input channels of plugin.
+ plugins (list[dict]): List of plugins cfg to build.
+
+ Returns:
+ list[str]: List of the names of plugin.
+ """
+ assert isinstance(plugins, list)
+ plugin_names = []
+ for plugin in plugins:
+ plugin = plugin.copy()
+ name, layer = build_plugin_layer(
+ plugin,
+ in_channels=in_channels,
+ postfix=plugin.pop('postfix', ''))
+ assert not hasattr(self, name), f'duplicate plugin {name}'
+ self.add_module(name, layer)
+ plugin_names.append(name)
+ return plugin_names
+
+ def forward_plugin(self, x, plugin_names):
+ out = x
+ for name in plugin_names:
+ out = getattr(self, name)(out)
+ return out
+
+ @property
+ def norm1(self):
+ """nn.Module: normalization layer after the first convolution layer"""
+ return getattr(self, self.norm1_name)
+
+ @property
+ def norm2(self):
+ """nn.Module: normalization layer after the second convolution layer"""
+ return getattr(self, self.norm2_name)
+
+ @property
+ def norm3(self):
+ """nn.Module: normalization layer after the third convolution layer"""
+ return getattr(self, self.norm3_name)
+
+ def forward(self, x):
+ """Forward function."""
+
+ def _inner_forward(x):
+ identity = x
+ out = self.conv1(x)
+ out = self.norm1(out)
+ out = self.relu(out)
+
+ if self.with_plugins:
+ out = self.forward_plugin(out, self.after_conv1_plugin_names)
+
+ out = self.conv2(out)
+ out = self.norm2(out)
+ out = self.relu(out)
+
+ if self.with_plugins:
+ out = self.forward_plugin(out, self.after_conv2_plugin_names)
+
+ out = self.conv3(out)
+ out = self.norm3(out)
+
+ if self.with_plugins:
+ out = self.forward_plugin(out, self.after_conv3_plugin_names)
+
+ if self.downsample is not None:
+ identity = self.downsample(x)
+
+ out += identity
+
+ return out
+
+ if self.with_cp and x.requires_grad:
+ out = cp.checkpoint(_inner_forward, x)
+ else:
+ out = _inner_forward(x)
+
+ out = self.relu(out)
+
+ return out
+
+
+@BACKBONES.register_module()
+class ResNet(nn.Module):
+ """ResNet backbone.
+
+ Args:
+ depth (int): Depth of resnet, from {18, 34, 50, 101, 152}.
+ stem_channels (int | None): Number of stem channels. If not specified,
+ it will be the same as `base_channels`. Default: None.
+ base_channels (int): Number of base channels of res layer. Default: 64.
+ in_channels (int): Number of input image channels. Default: 3.
+ num_stages (int): Resnet stages. Default: 4.
+ strides (Sequence[int]): Strides of the first block of each stage.
+ dilations (Sequence[int]): Dilation of each stage.
+ out_indices (Sequence[int]): Output from which stages.
+ style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two
+ layer is the 3x3 conv layer, otherwise the stride-two layer is
+ the first 1x1 conv layer.
+ deep_stem (bool): Replace the 7x7 conv in the input stem with three 3x3 convs.
+ avg_down (bool): Use AvgPool instead of stride conv when
+ downsampling in the bottleneck.
+ frozen_stages (int): Stages to be frozen (stop grad and set eval mode).
+ -1 means not freezing any parameters.
+ norm_cfg (dict): Dictionary to construct and config norm layer.
+ norm_eval (bool): Whether to set norm layers to eval mode, namely,
+ freeze running stats (mean and var). Note: Effect on Batch Norm
+ and its variants only.
+ plugins (list[dict]): List of plugins for stages, each dict contains:
+
+ - cfg (dict, required): Cfg dict to build plugin.
+ - position (str, required): Position inside block to insert
+ plugin, options are 'after_conv1', 'after_conv2', 'after_conv3'.
+ - stages (tuple[bool], optional): Stages to apply plugin, length
+ should be same as 'num_stages'.
+ with_cp (bool): Use checkpoint or not. Using checkpoint will save some
+ memory while slowing down the training speed.
+ zero_init_residual (bool): Whether to use zero init for last norm layer
+ in resblocks to let them behave as identity.
+
+ Example:
+ >>> from mmdet.models import ResNet
+ >>> import torch
+ >>> self = ResNet(depth=18)
+ >>> self.eval()
+ >>> inputs = torch.rand(1, 3, 32, 32)
+ >>> level_outputs = self.forward(inputs)
+ >>> for level_out in level_outputs:
+ ... print(tuple(level_out.shape))
+ (1, 64, 8, 8)
+ (1, 128, 4, 4)
+ (1, 256, 2, 2)
+ (1, 512, 1, 1)
+ """
+
+ arch_settings = {
+ 18: (BasicBlock, (2, 2, 2, 2)),
+ 34: (BasicBlock, (3, 4, 6, 3)),
+ 50: (Bottleneck, (3, 4, 6, 3)),
+ 101: (Bottleneck, (3, 4, 23, 3)),
+ 152: (Bottleneck, (3, 8, 36, 3))
+ }
+
+ def __init__(self,
+ depth,
+ in_channels=3,
+ stem_channels=None,
+ base_channels=64,
+ num_stages=4,
+ strides=(1, 2, 2, 2),
+ dilations=(1, 1, 1, 1),
+ out_indices=(0, 1, 2, 3),
+ style='pytorch',
+ deep_stem=False,
+ avg_down=False,
+ frozen_stages=-1,
+ conv_cfg=None,
+ norm_cfg=dict(type='BN', requires_grad=True),
+ norm_eval=True,
+ dcn=None,
+ stage_with_dcn=(False, False, False, False),
+ plugins=None,
+ with_cp=False,
+ zero_init_residual=True):
+ super(ResNet, self).__init__()
+ if depth not in self.arch_settings:
+ raise KeyError(f'invalid depth {depth} for resnet')
+ self.depth = depth
+ if stem_channels is None:
+ stem_channels = base_channels
+ self.stem_channels = stem_channels
+ self.base_channels = base_channels
+ self.num_stages = num_stages
+ assert num_stages >= 1 and num_stages <= 4
+ self.strides = strides
+ self.dilations = dilations
+ assert len(strides) == len(dilations) == num_stages
+ self.out_indices = out_indices
+ assert max(out_indices) < num_stages
+ self.style = style
+ self.deep_stem = deep_stem
+ self.avg_down = avg_down
+ self.frozen_stages = frozen_stages
+ self.conv_cfg = conv_cfg
+ self.norm_cfg = norm_cfg
+ self.with_cp = with_cp
+ self.norm_eval = norm_eval
+ self.dcn = dcn
+ self.stage_with_dcn = stage_with_dcn
+ if dcn is not None:
+ assert len(stage_with_dcn) == num_stages
+ self.plugins = plugins
+ self.zero_init_residual = zero_init_residual
+ self.block, stage_blocks = self.arch_settings[depth]
+ self.stage_blocks = stage_blocks[:num_stages]
+ self.inplanes = stem_channels
+
+ self._make_stem_layer(in_channels, stem_channels)
+
+ self.res_layers = []
+ for i, num_blocks in enumerate(self.stage_blocks):
+ stride = strides[i]
+ dilation = dilations[i]
+ dcn = self.dcn if self.stage_with_dcn[i] else None
+ if plugins is not None:
+ stage_plugins = self.make_stage_plugins(plugins, i)
+ else:
+ stage_plugins = None
+ planes = base_channels * 2**i
+ res_layer = self.make_res_layer(
+ block=self.block,
+ inplanes=self.inplanes,
+ planes=planes,
+ num_blocks=num_blocks,
+ stride=stride,
+ dilation=dilation,
+ style=self.style,
+ avg_down=self.avg_down,
+ with_cp=with_cp,
+ conv_cfg=conv_cfg,
+ norm_cfg=norm_cfg,
+ dcn=dcn,
+ plugins=stage_plugins)
+ self.inplanes = planes * self.block.expansion
+ layer_name = f'layer{i + 1}'
+ self.add_module(layer_name, res_layer)
+ self.res_layers.append(layer_name)
+
+ self._freeze_stages()
+
+ self.feat_dim = self.block.expansion * base_channels * 2**(
+ len(self.stage_blocks) - 1)
+
+ def make_stage_plugins(self, plugins, stage_idx):
+ """Make plugins for ResNet ``stage_idx`` th stage.
+
+ Currently we support to insert ``context_block``,
+ ``empirical_attention_block``, ``nonlocal_block`` into the backbone
+ like ResNet/ResNeXt. They could be inserted after conv1/conv2/conv3 of
+ Bottleneck.
+
+ An example of plugins format could be:
+
+ Examples:
+ >>> plugins=[
+ ... dict(cfg=dict(type='xxx', arg1='xxx'),
+ ... stages=(False, True, True, True),
+ ... position='after_conv2'),
+ ... dict(cfg=dict(type='yyy'),
+ ... stages=(True, True, True, True),
+ ... position='after_conv3'),
+ ... dict(cfg=dict(type='zzz', postfix='1'),
+ ... stages=(True, True, True, True),
+ ... position='after_conv3'),
+ ... dict(cfg=dict(type='zzz', postfix='2'),
+ ... stages=(True, True, True, True),
+ ... position='after_conv3')
+ ... ]
+ >>> self = ResNet(depth=18)
+ >>> stage_plugins = self.make_stage_plugins(plugins, 0)
+ >>> assert len(stage_plugins) == 3
+
+ Suppose ``stage_idx=0``, the structure of blocks in the stage would be:
+
+ .. code-block:: none
+
+ conv1-> conv2->conv3->yyy->zzz1->zzz2
+
+ Suppose 'stage_idx=1', the structure of blocks in the stage would be:
+
+ .. code-block:: none
+
+ conv1-> conv2->xxx->conv3->yyy->zzz1->zzz2
+
+ If stages is missing, the plugin would be applied to all stages.
+
+ Args:
+ plugins (list[dict]): List of plugins cfg to build. The postfix is
+ required if multiple same type plugins are inserted.
+ stage_idx (int): Index of stage to build
+
+ Returns:
+ list[dict]: Plugins for current stage
+ """
+ stage_plugins = []
+ for plugin in plugins:
+ plugin = plugin.copy()
+ stages = plugin.pop('stages', None)
+ assert stages is None or len(stages) == self.num_stages
+ # whether to insert plugin into current stage
+ if stages is None or stages[stage_idx]:
+ stage_plugins.append(plugin)
+
+ return stage_plugins
+
+ def make_res_layer(self, **kwargs):
+ """Pack all blocks in a stage into a ``ResLayer``."""
+ return ResLayer(**kwargs)
+
+ @property
+ def norm1(self):
+ """nn.Module: the normalization layer named "norm1" """
+ return getattr(self, self.norm1_name)
+
+ def _make_stem_layer(self, in_channels, stem_channels):
+ if self.deep_stem:
+ self.stem = nn.Sequential(
+ build_conv_layer(
+ self.conv_cfg,
+ in_channels,
+ stem_channels // 2,
+ kernel_size=3,
+ stride=2,
+ padding=1,
+ bias=False),
+ build_norm_layer(self.norm_cfg, stem_channels // 2)[1],
+ nn.ReLU(inplace=True),
+ build_conv_layer(
+ self.conv_cfg,
+ stem_channels // 2,
+ stem_channels // 2,
+ kernel_size=3,
+ stride=1,
+ padding=1,
+ bias=False),
+ build_norm_layer(self.norm_cfg, stem_channels // 2)[1],
+ nn.ReLU(inplace=True),
+ build_conv_layer(
+ self.conv_cfg,
+ stem_channels // 2,
+ stem_channels,
+ kernel_size=3,
+ stride=1,
+ padding=1,
+ bias=False),
+ build_norm_layer(self.norm_cfg, stem_channels)[1],
+ nn.ReLU(inplace=True))
+ else:
+ self.conv1 = build_conv_layer(
+ self.conv_cfg,
+ in_channels,
+ stem_channels,
+ kernel_size=7,
+ stride=2,
+ padding=3,
+ bias=False)
+ self.norm1_name, norm1 = build_norm_layer(
+ self.norm_cfg, stem_channels, postfix=1)
+ self.add_module(self.norm1_name, norm1)
+ self.relu = nn.ReLU(inplace=True)
+ self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
+
+ def _freeze_stages(self):
+ if self.frozen_stages >= 0:
+ if self.deep_stem:
+ self.stem.eval()
+ for param in self.stem.parameters():
+ param.requires_grad = False
+ else:
+ self.norm1.eval()
+ for m in [self.conv1, self.norm1]:
+ for param in m.parameters():
+ param.requires_grad = False
+
+ for i in range(1, self.frozen_stages + 1):
+ m = getattr(self, f'layer{i}')
+ m.eval()
+ for param in m.parameters():
+ param.requires_grad = False
+
+ def init_weights(self, pretrained=None):
+ """Initialize the weights in backbone.
+
+ Args:
+ pretrained (str, optional): Path to pre-trained weights.
+ Defaults to None.
+ """
+ if isinstance(pretrained, str):
+ logger = get_root_logger()
+ load_checkpoint(self, pretrained, strict=False, logger=logger)
+ elif pretrained is None:
+ for m in self.modules():
+ if isinstance(m, nn.Conv2d):
+ kaiming_init(m)
+ elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
+ constant_init(m, 1)
+
+ if self.dcn is not None:
+ for m in self.modules():
+ if isinstance(m, Bottleneck) and hasattr(
+ m.conv2, 'conv_offset'):
+ constant_init(m.conv2.conv_offset, 0)
+
+ if self.zero_init_residual:
+ for m in self.modules():
+ if isinstance(m, Bottleneck):
+ constant_init(m.norm3, 0)
+ elif isinstance(m, BasicBlock):
+ constant_init(m.norm2, 0)
+ else:
+ raise TypeError('pretrained must be a str or None')
+
+ def forward(self, x):
+ """Forward function."""
+ if self.deep_stem:
+ x = self.stem(x)
+ else:
+ x = self.conv1(x)
+ x = self.norm1(x)
+ x = self.relu(x)
+ x = self.maxpool(x)
+ outs = []
+ for i, layer_name in enumerate(self.res_layers):
+ res_layer = getattr(self, layer_name)
+ x = res_layer(x)
+ if i in self.out_indices:
+ outs.append(x)
+ return tuple(outs)
+
+ def train(self, mode=True):
+ """Convert the model into training mode while keeping the normalization
+ layers frozen."""
+ super(ResNet, self).train(mode)
+ self._freeze_stages()
+ if mode and self.norm_eval:
+ for m in self.modules():
+ # trick: eval() has an effect on BatchNorm layers only
+ if isinstance(m, _BatchNorm):
+ m.eval()
+
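+# Usage note (illustrative, with hypothetical config values): with
+# frozen_stages=1 and norm_eval=True, the stem (conv1/norm1) and layer1 are
+# kept with requires_grad=False and every BatchNorm in the backbone keeps
+# its running statistics fixed during fine-tuning, e.g.
+#     >>> backbone = ResNet(depth=50, frozen_stages=1, norm_eval=True)
+#     >>> backbone.train()  # re-applies _freeze_stages() and BN eval()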
+
+@BACKBONES.register_module()
+class ResNetV1d(ResNet):
+ r"""ResNetV1d variant described in `Bag of Tricks
+ <https://arxiv.org/abs/1812.01187>`_.
+
+ Compared with default ResNet(ResNetV1b), ResNetV1d replaces the 7x7 conv in
+ the input stem with three 3x3 convs. And in the downsampling block, a 2x2
+ avg_pool with stride 2 is added before conv, whose stride is changed to 1.
+ """
+
+ def __init__(self, **kwargs):
+ super(ResNetV1d, self).__init__(
+ deep_stem=True, avg_down=True, **kwargs)
diff --git a/annotator/uniformer/mmdet/models/backbones/resnext.py b/annotator/uniformer/mmdet/models/backbones/resnext.py
new file mode 100644
index 0000000000000000000000000000000000000000..6dbcbd516fd308b1d703eecb83ab275f6b159516
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/backbones/resnext.py
@@ -0,0 +1,153 @@
+import math
+
+from mmcv.cnn import build_conv_layer, build_norm_layer
+
+from ..builder import BACKBONES
+from ..utils import ResLayer
+from .resnet import Bottleneck as _Bottleneck
+from .resnet import ResNet
+
+
+class Bottleneck(_Bottleneck):
+ expansion = 4
+
+ def __init__(self,
+ inplanes,
+ planes,
+ groups=1,
+ base_width=4,
+ base_channels=64,
+ **kwargs):
+ """Bottleneck block for ResNeXt.
+
+ If style is "pytorch", the stride-two layer is the 3x3 conv layer, if
+ it is "caffe", the stride-two layer is the first 1x1 conv layer.
+ """
+ super(Bottleneck, self).__init__(inplanes, planes, **kwargs)
+
+ if groups == 1:
+ width = self.planes
+ else:
+ width = math.floor(self.planes *
+ (base_width / base_channels)) * groups
+
+ self.norm1_name, norm1 = build_norm_layer(
+ self.norm_cfg, width, postfix=1)
+ self.norm2_name, norm2 = build_norm_layer(
+ self.norm_cfg, width, postfix=2)
+ self.norm3_name, norm3 = build_norm_layer(
+ self.norm_cfg, self.planes * self.expansion, postfix=3)
+
+ self.conv1 = build_conv_layer(
+ self.conv_cfg,
+ self.inplanes,
+ width,
+ kernel_size=1,
+ stride=self.conv1_stride,
+ bias=False)
+ self.add_module(self.norm1_name, norm1)
+ fallback_on_stride = False
+ self.with_modulated_dcn = False
+ if self.with_dcn:
+ fallback_on_stride = self.dcn.pop('fallback_on_stride', False)
+ if not self.with_dcn or fallback_on_stride:
+ self.conv2 = build_conv_layer(
+ self.conv_cfg,
+ width,
+ width,
+ kernel_size=3,
+ stride=self.conv2_stride,
+ padding=self.dilation,
+ dilation=self.dilation,
+ groups=groups,
+ bias=False)
+ else:
+ assert self.conv_cfg is None, 'conv_cfg must be None for DCN'
+ self.conv2 = build_conv_layer(
+ self.dcn,
+ width,
+ width,
+ kernel_size=3,
+ stride=self.conv2_stride,
+ padding=self.dilation,
+ dilation=self.dilation,
+ groups=groups,
+ bias=False)
+
+ self.add_module(self.norm2_name, norm2)
+ self.conv3 = build_conv_layer(
+ self.conv_cfg,
+ width,
+ self.planes * self.expansion,
+ kernel_size=1,
+ bias=False)
+ self.add_module(self.norm3_name, norm3)
+
+ if self.with_plugins:
+ self._del_block_plugins(self.after_conv1_plugin_names +
+ self.after_conv2_plugin_names +
+ self.after_conv3_plugin_names)
+ self.after_conv1_plugin_names = self.make_block_plugins(
+ width, self.after_conv1_plugins)
+ self.after_conv2_plugin_names = self.make_block_plugins(
+ width, self.after_conv2_plugins)
+ self.after_conv3_plugin_names = self.make_block_plugins(
+ self.planes * self.expansion, self.after_conv3_plugins)
+
+ def _del_block_plugins(self, plugin_names):
+ """Delete the block's plugins if they exist.
+
+ Args:
+ plugin_names (list[str]): List of plugin names to delete.
+ """
+ assert isinstance(plugin_names, list)
+ for plugin_name in plugin_names:
+ del self._modules[plugin_name]
+
+
+@BACKBONES.register_module()
+class ResNeXt(ResNet):
+ """ResNeXt backbone.
+
+ Args:
+ depth (int): Depth of resnet, from {18, 34, 50, 101, 152}.
+ in_channels (int): Number of input image channels. Default: 3.
+ num_stages (int): Resnet stages. Default: 4.
+ groups (int): Group of resnext.
+ base_width (int): Base width of resnext.
+ strides (Sequence[int]): Strides of the first block of each stage.
+ dilations (Sequence[int]): Dilation of each stage.
+ out_indices (Sequence[int]): Output from which stages.
+ style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two
+ layer is the 3x3 conv layer, otherwise the stride-two layer is
+ the first 1x1 conv layer.
+ frozen_stages (int): Stages to be frozen (all param fixed). -1 means
+ not freezing any parameters.
+ norm_cfg (dict): dictionary to construct and config norm layer.
+ norm_eval (bool): Whether to set norm layers to eval mode, namely,
+ freeze running stats (mean and var). Note: Effect on Batch Norm
+ and its variants only.
+ with_cp (bool): Use checkpoint or not. Using checkpoint will save some
+ memory while slowing down the training speed.
+ zero_init_residual (bool): whether to use zero init for last norm layer
+ in resblocks to let them behave as identity.
+ """
+
+ arch_settings = {
+ 50: (Bottleneck, (3, 4, 6, 3)),
+ 101: (Bottleneck, (3, 4, 23, 3)),
+ 152: (Bottleneck, (3, 8, 36, 3))
+ }
+
+ def __init__(self, groups=1, base_width=4, **kwargs):
+ self.groups = groups
+ self.base_width = base_width
+ super(ResNeXt, self).__init__(**kwargs)
+
+ def make_res_layer(self, **kwargs):
+ """Pack all blocks in a stage into a ``ResLayer``"""
+ return ResLayer(
+ groups=self.groups,
+ base_width=self.base_width,
+ base_channels=self.base_channels,
+ **kwargs)
diff --git a/annotator/uniformer/mmdet/models/backbones/ssd_vgg.py b/annotator/uniformer/mmdet/models/backbones/ssd_vgg.py
new file mode 100644
index 0000000000000000000000000000000000000000..cbc4fbb2301afc002f47abb9ed133a500d6cf23f
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/backbones/ssd_vgg.py
@@ -0,0 +1,169 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from mmcv.cnn import VGG, constant_init, kaiming_init, normal_init, xavier_init
+from mmcv.runner import load_checkpoint
+
+from mmdet.utils import get_root_logger
+from ..builder import BACKBONES
+
+
+@BACKBONES.register_module()
+class SSDVGG(VGG):
+ """VGG Backbone network for single-shot-detection.
+
+ Args:
+ input_size (int): width and height of input, from {300, 512}.
+ depth (int): Depth of vgg, from {11, 13, 16, 19}.
+ out_indices (Sequence[int]): Output from which stages.
+
+ Example:
+ >>> self = SSDVGG(input_size=300, depth=11)
+ >>> self.eval()
+ >>> inputs = torch.rand(1, 3, 300, 300)
+ >>> level_outputs = self.forward(inputs)
+ >>> for level_out in level_outputs:
+ ... print(tuple(level_out.shape))
+ (1, 1024, 19, 19)
+ (1, 512, 10, 10)
+ (1, 256, 5, 5)
+ (1, 256, 3, 3)
+ (1, 256, 1, 1)
+ """
+ extra_setting = {
+ 300: (256, 'S', 512, 128, 'S', 256, 128, 256, 128, 256),
+ 512: (256, 'S', 512, 128, 'S', 256, 128, 'S', 256, 128, 'S', 256, 128),
+ }
+
+ def __init__(self,
+ input_size,
+ depth,
+ with_last_pool=False,
+ ceil_mode=True,
+ out_indices=(3, 4),
+ out_feature_indices=(22, 34),
+ l2_norm_scale=20.):
+ # TODO: in_channels for mmcv.VGG
+ super(SSDVGG, self).__init__(
+ depth,
+ with_last_pool=with_last_pool,
+ ceil_mode=ceil_mode,
+ out_indices=out_indices)
+ assert input_size in (300, 512)
+ self.input_size = input_size
+
+ self.features.add_module(
+ str(len(self.features)),
+ nn.MaxPool2d(kernel_size=3, stride=1, padding=1))
+ self.features.add_module(
+ str(len(self.features)),
+ nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6))
+ self.features.add_module(
+ str(len(self.features)), nn.ReLU(inplace=True))
+ self.features.add_module(
+ str(len(self.features)), nn.Conv2d(1024, 1024, kernel_size=1))
+ self.features.add_module(
+ str(len(self.features)), nn.ReLU(inplace=True))
+ self.out_feature_indices = out_feature_indices
+
+ self.inplanes = 1024
+ self.extra = self._make_extra_layers(self.extra_setting[input_size])
+ self.l2_norm = L2Norm(
+ self.features[out_feature_indices[0] - 1].out_channels,
+ l2_norm_scale)
+
+ def init_weights(self, pretrained=None):
+ """Initialize the weights in backbone.
+
+ Args:
+ pretrained (str, optional): Path to pre-trained weights.
+ Defaults to None.
+ """
+ if isinstance(pretrained, str):
+ logger = get_root_logger()
+ load_checkpoint(self, pretrained, strict=False, logger=logger)
+ elif pretrained is None:
+ for m in self.features.modules():
+ if isinstance(m, nn.Conv2d):
+ kaiming_init(m)
+ elif isinstance(m, nn.BatchNorm2d):
+ constant_init(m, 1)
+ elif isinstance(m, nn.Linear):
+ normal_init(m, std=0.01)
+ else:
+ raise TypeError('pretrained must be a str or None')
+
+ for m in self.extra.modules():
+ if isinstance(m, nn.Conv2d):
+ xavier_init(m, distribution='uniform')
+
+ constant_init(self.l2_norm, self.l2_norm.scale)
+
+ def forward(self, x):
+ """Forward function."""
+ outs = []
+ for i, layer in enumerate(self.features):
+ x = layer(x)
+ if i in self.out_feature_indices:
+ outs.append(x)
+ for i, layer in enumerate(self.extra):
+ x = F.relu(layer(x), inplace=True)
+ if i % 2 == 1:
+ outs.append(x)
+ outs[0] = self.l2_norm(outs[0])
+ if len(outs) == 1:
+ return outs[0]
+ else:
+ return tuple(outs)
+
+ def _make_extra_layers(self, outplanes):
+ layers = []
+ kernel_sizes = (1, 3)
+ num_layers = 0
+ outplane = None
+ for i in range(len(outplanes)):
+ if self.inplanes == 'S':
+ self.inplanes = outplane
+ continue
+ k = kernel_sizes[num_layers % 2]
+ if outplanes[i] == 'S':
+ outplane = outplanes[i + 1]
+ conv = nn.Conv2d(
+ self.inplanes, outplane, k, stride=2, padding=1)
+ else:
+ outplane = outplanes[i]
+ conv = nn.Conv2d(
+ self.inplanes, outplane, k, stride=1, padding=0)
+ layers.append(conv)
+ self.inplanes = outplanes[i]
+ num_layers += 1
+ if self.input_size == 512:
+ layers.append(nn.Conv2d(self.inplanes, 256, 4, padding=1))
+
+ return nn.Sequential(*layers)
+
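+ # How the extra_setting tuples above are read (descriptive sketch, not
+ # from the original comments): entries alternate between 1x1 and 3x3
+ # convs; a literal 'S' marks that the next entry is the out-channel count
+ # of a stride-2 conv. For input_size=300 the sequence
+ # (256, 'S', 512, 128, 'S', 256, ...) therefore becomes
+ # conv1x1(1024->256), conv3x3(256->512, stride 2),
+ # conv1x1(512->128), conv3x3(128->256, stride 2), and so on.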
+
+class L2Norm(nn.Module):
+
+ def __init__(self, n_dims, scale=20., eps=1e-10):
+ """L2 normalization layer.
+
+ Args:
+ n_dims (int): Number of dimensions to be normalized
+ scale (float, optional): Scale factor. Defaults to 20.
+ eps (float, optional): Used to avoid division by zero.
+ Defaults to 1e-10.
+ """
+ super(L2Norm, self).__init__()
+ self.n_dims = n_dims
+ self.weight = nn.Parameter(torch.Tensor(self.n_dims))
+ self.eps = eps
+ self.scale = scale
+
+ def forward(self, x):
+ """Forward function."""
+ # cast to FP32 so the normalization stays numerically stable in FP16 training
+ x_float = x.float()
+ norm = x_float.pow(2).sum(1, keepdim=True).sqrt() + self.eps
+ return (self.weight[None, :, None, None].float().expand_as(x_float) *
+ x_float / norm).type_as(x)
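+
+ # In effect (descriptive note): each spatial position is divided by the
+ # L2 norm of its channel vector and rescaled by a learnable per-channel
+ # weight (initialised to `scale` via constant_init in SSDVGG), i.e.
+ # out[b, c, h, w] = weight[c] * x[b, c, h, w] / ||x[b, :, h, w]||_2.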
diff --git a/annotator/uniformer/mmdet/models/backbones/swin_transformer.py b/annotator/uniformer/mmdet/models/backbones/swin_transformer.py
new file mode 100644
index 0000000000000000000000000000000000000000..bb41850d8480a08a6a7698bf6129ffd1ab239681
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/backbones/swin_transformer.py
@@ -0,0 +1,630 @@
+# --------------------------------------------------------
+# Swin Transformer
+# Copyright (c) 2021 Microsoft
+# Licensed under The MIT License [see LICENSE for details]
+# Written by Ze Liu, Yutong Lin, Yixuan Wei
+# --------------------------------------------------------
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+import torch.utils.checkpoint as checkpoint
+import numpy as np
+from timm.models.layers import DropPath, to_2tuple, trunc_normal_
+
+from mmcv_custom import load_checkpoint
+from mmdet.utils import get_root_logger
+from ..builder import BACKBONES
+
+
+class Mlp(nn.Module):
+ """ Multilayer perceptron."""
+
+ def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
+ super().__init__()
+ out_features = out_features or in_features
+ hidden_features = hidden_features or in_features
+ self.fc1 = nn.Linear(in_features, hidden_features)
+ self.act = act_layer()
+ self.fc2 = nn.Linear(hidden_features, out_features)
+ self.drop = nn.Dropout(drop)
+
+ def forward(self, x):
+ x = self.fc1(x)
+ x = self.act(x)
+ x = self.drop(x)
+ x = self.fc2(x)
+ x = self.drop(x)
+ return x
+
+
+def window_partition(x, window_size):
+ """
+ Args:
+ x: (B, H, W, C)
+ window_size (int): window size
+
+ Returns:
+ windows: (num_windows*B, window_size, window_size, C)
+ """
+ B, H, W, C = x.shape
+ x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
+ windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
+ return windows
+
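+# Shape round-trip sketch (illustrative, with hypothetical sizes): for a
+# feature map of shape (B, H, W, C) = (2, 56, 56, 96) and window_size=7,
+# window_partition yields (2 * 8 * 8, 7, 7, 96) = (128, 7, 7, 96) windows,
+# and window_reverse maps that tensor back to (2, 56, 56, 96).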
+
+def window_reverse(windows, window_size, H, W):
+ """
+ Args:
+ windows: (num_windows*B, window_size, window_size, C)
+ window_size (int): Window size
+ H (int): Height of image
+ W (int): Width of image
+
+ Returns:
+ x: (B, H, W, C)
+ """
+ B = int(windows.shape[0] / (H * W / window_size / window_size))
+ x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)
+ x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
+ return x
+
+
+class WindowAttention(nn.Module):
+ """ Window based multi-head self attention (W-MSA) module with relative position bias.
+ It supports both shifted and non-shifted windows.
+
+ Args:
+ dim (int): Number of input channels.
+ window_size (tuple[int]): The height and width of the window.
+ num_heads (int): Number of attention heads.
+ qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
+ qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set
+ attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0
+ proj_drop (float, optional): Dropout ratio of output. Default: 0.0
+ """
+
+ def __init__(self, dim, window_size, num_heads, qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0.):
+
+ super().__init__()
+ self.dim = dim
+ self.window_size = window_size # Wh, Ww
+ self.num_heads = num_heads
+ head_dim = dim // num_heads
+ self.scale = qk_scale or head_dim ** -0.5
+
+ # define a parameter table of relative position bias
+ self.relative_position_bias_table = nn.Parameter(
+ torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads)) # 2*Wh-1 * 2*Ww-1, nH
+
+ # get pair-wise relative position index for each token inside the window
+ coords_h = torch.arange(self.window_size[0])
+ coords_w = torch.arange(self.window_size[1])
+ coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww
+ coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
+ relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
+ relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2
+ relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0
+ relative_coords[:, :, 1] += self.window_size[1] - 1
+ relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
+ relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
+ self.register_buffer("relative_position_index", relative_position_index)
+
+ self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
+ self.attn_drop = nn.Dropout(attn_drop)
+ self.proj = nn.Linear(dim, dim)
+ self.proj_drop = nn.Dropout(proj_drop)
+
+ trunc_normal_(self.relative_position_bias_table, std=.02)
+ self.softmax = nn.Softmax(dim=-1)
+
+ def forward(self, x, mask=None):
+ """ Forward function.
+
+ Args:
+ x: input features with shape of (num_windows*B, N, C)
+ mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None
+ """
+ B_, N, C = x.shape
+ qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
+ q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)
+
+ q = q * self.scale
+ attn = (q @ k.transpose(-2, -1))
+
+ relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
+ self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1) # Wh*Ww,Wh*Ww,nH
+ relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
+ attn = attn + relative_position_bias.unsqueeze(0)
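+ # Each (i, j) token pair inside the window indexes one of the (2*Wh-1)*(2*Ww-1)
+ # learned bias entries per head, e.g. a 7x7 window gives a 169 x num_heads table
+ # shared by all 49*49 token pairs and broadcast over the (num_windows*B) batch dim.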
+
+ if mask is not None:
+ nW = mask.shape[0]
+ attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0)
+ attn = attn.view(-1, self.num_heads, N, N)
+ attn = self.softmax(attn)
+ else:
+ attn = self.softmax(attn)
+
+ attn = self.attn_drop(attn)
+
+ x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
+ x = self.proj(x)
+ x = self.proj_drop(x)
+ return x
+
+
+class SwinTransformerBlock(nn.Module):
+ """ Swin Transformer Block.
+
+ Args:
+ dim (int): Number of input channels.
+ num_heads (int): Number of attention heads.
+ window_size (int): Window size.
+ shift_size (int): Shift size for SW-MSA.
+ mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
+ qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
+ qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
+ drop (float, optional): Dropout rate. Default: 0.0
+ attn_drop (float, optional): Attention dropout rate. Default: 0.0
+ drop_path (float, optional): Stochastic depth rate. Default: 0.0
+ act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
+ norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
+ """
+
+ def __init__(self, dim, num_heads, window_size=7, shift_size=0,
+ mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., drop_path=0.,
+ act_layer=nn.GELU, norm_layer=nn.LayerNorm):
+ super().__init__()
+ self.dim = dim
+ self.num_heads = num_heads
+ self.window_size = window_size
+ self.shift_size = shift_size
+ self.mlp_ratio = mlp_ratio
+ assert 0 <= self.shift_size < self.window_size, "shift_size must in 0-window_size"
+
+ self.norm1 = norm_layer(dim)
+ self.attn = WindowAttention(
+ dim, window_size=to_2tuple(self.window_size), num_heads=num_heads,
+ qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
+
+ self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
+ self.norm2 = norm_layer(dim)
+ mlp_hidden_dim = int(dim * mlp_ratio)
+ self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
+
+ self.H = None
+ self.W = None
+
+ def forward(self, x, mask_matrix):
+ """ Forward function.
+
+ Args:
+ x: Input feature, tensor size (B, H*W, C).
+ H, W: Spatial resolution of the input feature.
+ mask_matrix: Attention mask for cyclic shift.
+ """
+ B, L, C = x.shape
+ H, W = self.H, self.W
+ assert L == H * W, "input feature has wrong size"
+
+ shortcut = x
+ x = self.norm1(x)
+ x = x.view(B, H, W, C)
+
+ # pad feature maps to multiples of window size
+ pad_l = pad_t = 0
+ pad_r = (self.window_size - W % self.window_size) % self.window_size
+ pad_b = (self.window_size - H % self.window_size) % self.window_size
+ x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b))
+ _, Hp, Wp, _ = x.shape
+
+ # cyclic shift
+ if self.shift_size > 0:
+ shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
+ attn_mask = mask_matrix
+ else:
+ shifted_x = x
+ attn_mask = None
+
+ # partition windows
+ x_windows = window_partition(shifted_x, self.window_size) # nW*B, window_size, window_size, C
+ x_windows = x_windows.view(-1, self.window_size * self.window_size, C) # nW*B, window_size*window_size, C
+
+ # W-MSA/SW-MSA
+ attn_windows = self.attn(x_windows, mask=attn_mask) # nW*B, window_size*window_size, C
+
+ # merge windows
+ attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)
+ shifted_x = window_reverse(attn_windows, self.window_size, Hp, Wp) # B H' W' C
+
+ # reverse cyclic shift
+ if self.shift_size > 0:
+ x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
+ else:
+ x = shifted_x
+
+ if pad_r > 0 or pad_b > 0:
+ x = x[:, :H, :W, :].contiguous()
+
+ x = x.view(B, H * W, C)
+
+ # FFN
+ x = shortcut + self.drop_path(x)
+ x = x + self.drop_path(self.mlp(self.norm2(x)))
+
+ return x
+
+
+class PatchMerging(nn.Module):
+ """ Patch Merging Layer
+
+ Args:
+ dim (int): Number of input channels.
+ norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
+ """
+ def __init__(self, dim, norm_layer=nn.LayerNorm):
+ super().__init__()
+ self.dim = dim
+ self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
+ self.norm = norm_layer(4 * dim)
+
+ def forward(self, x, H, W):
+ """ Forward function.
+
+ Args:
+ x: Input feature, tensor size (B, H*W, C).
+ H, W: Spatial resolution of the input feature.
+ """
+ B, L, C = x.shape
+ assert L == H * W, "input feature has wrong size"
+
+ x = x.view(B, H, W, C)
+
+ # padding
+ pad_input = (H % 2 == 1) or (W % 2 == 1)
+ if pad_input:
+ x = F.pad(x, (0, 0, 0, W % 2, 0, H % 2))
+
+ x0 = x[:, 0::2, 0::2, :] # B H/2 W/2 C
+ x1 = x[:, 1::2, 0::2, :] # B H/2 W/2 C
+ x2 = x[:, 0::2, 1::2, :] # B H/2 W/2 C
+ x3 = x[:, 1::2, 1::2, :] # B H/2 W/2 C
+ x = torch.cat([x0, x1, x2, x3], -1) # B H/2 W/2 4*C
+ x = x.view(B, -1, 4 * C) # B H/2*W/2 4*C
+
+ x = self.norm(x)
+ x = self.reduction(x)
+
+ return x
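+
+# Example: PatchMerging maps (B, 56*56, 96) to (B, 28*28, 192) -- the spatial
+# resolution halves in each dimension while the channel count doubles through
+# the 4C -> 2C linear reduction.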
+
+
+class BasicLayer(nn.Module):
+ """ A basic Swin Transformer layer for one stage.
+
+ Args:
+ dim (int): Number of feature channels.
+ depth (int): Depth of this stage.
+ num_heads (int): Number of attention heads.
+ window_size (int): Local window size. Default: 7.
+ mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4.
+ qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
+ qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
+ drop (float, optional): Dropout rate. Default: 0.0
+ attn_drop (float, optional): Attention dropout rate. Default: 0.0
+ drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
+ norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
+ downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
+ use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
+ """
+
+ def __init__(self,
+ dim,
+ depth,
+ num_heads,
+ window_size=7,
+ mlp_ratio=4.,
+ qkv_bias=True,
+ qk_scale=None,
+ drop=0.,
+ attn_drop=0.,
+ drop_path=0.,
+ norm_layer=nn.LayerNorm,
+ downsample=None,
+ use_checkpoint=False):
+ super().__init__()
+ self.window_size = window_size
+ self.shift_size = window_size // 2
+ self.depth = depth
+ self.use_checkpoint = use_checkpoint
+
+ # build blocks
+ self.blocks = nn.ModuleList([
+ SwinTransformerBlock(
+ dim=dim,
+ num_heads=num_heads,
+ window_size=window_size,
+ shift_size=0 if (i % 2 == 0) else window_size // 2,
+ mlp_ratio=mlp_ratio,
+ qkv_bias=qkv_bias,
+ qk_scale=qk_scale,
+ drop=drop,
+ attn_drop=attn_drop,
+ drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
+ norm_layer=norm_layer)
+ for i in range(depth)])
+
+ # patch merging layer
+ if downsample is not None:
+ self.downsample = downsample(dim=dim, norm_layer=norm_layer)
+ else:
+ self.downsample = None
+
+ def forward(self, x, H, W):
+ """ Forward function.
+
+ Args:
+ x: Input feature, tensor size (B, H*W, C).
+ H, W: Spatial resolution of the input feature.
+ """
+
+ # calculate attention mask for SW-MSA
+ Hp = int(np.ceil(H / self.window_size)) * self.window_size
+ Wp = int(np.ceil(W / self.window_size)) * self.window_size
+ img_mask = torch.zeros((1, Hp, Wp, 1), device=x.device) # 1 Hp Wp 1
+ h_slices = (slice(0, -self.window_size),
+ slice(-self.window_size, -self.shift_size),
+ slice(-self.shift_size, None))
+ w_slices = (slice(0, -self.window_size),
+ slice(-self.window_size, -self.shift_size),
+ slice(-self.shift_size, None))
+ cnt = 0
+ for h in h_slices:
+ for w in w_slices:
+ img_mask[:, h, w, :] = cnt
+ cnt += 1
+
+ mask_windows = window_partition(img_mask, self.window_size) # nW, window_size, window_size, 1
+ mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
+ attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
+ attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
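+ # attn_mask has shape (num_windows, window_size**2, window_size**2): 0 for token
+ # pairs from the same shifted region and -100.0 (effectively -inf after softmax)
+ # for pairs from different regions, so SW-MSA never mixes wrapped-around content.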
+
+ for blk in self.blocks:
+ blk.H, blk.W = H, W
+ if self.use_checkpoint:
+ x = checkpoint.checkpoint(blk, x, attn_mask)
+ else:
+ x = blk(x, attn_mask)
+ if self.downsample is not None:
+ x_down = self.downsample(x, H, W)
+ Wh, Ww = (H + 1) // 2, (W + 1) // 2
+ return x, H, W, x_down, Wh, Ww
+ else:
+ return x, H, W, x, H, W
+
+
+class PatchEmbed(nn.Module):
+ """ Image to Patch Embedding
+
+ Args:
+ patch_size (int): Patch token size. Default: 4.
+ in_chans (int): Number of input image channels. Default: 3.
+ embed_dim (int): Number of linear projection output channels. Default: 96.
+ norm_layer (nn.Module, optional): Normalization layer. Default: None
+ """
+
+ def __init__(self, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None):
+ super().__init__()
+ patch_size = to_2tuple(patch_size)
+ self.patch_size = patch_size
+
+ self.in_chans = in_chans
+ self.embed_dim = embed_dim
+
+ self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
+ if norm_layer is not None:
+ self.norm = norm_layer(embed_dim)
+ else:
+ self.norm = None
+
+ def forward(self, x):
+ """Forward function."""
+ # padding
+ _, _, H, W = x.size()
+ if W % self.patch_size[1] != 0:
+ x = F.pad(x, (0, self.patch_size[1] - W % self.patch_size[1]))
+ if H % self.patch_size[0] != 0:
+ x = F.pad(x, (0, 0, 0, self.patch_size[0] - H % self.patch_size[0]))
+
+ x = self.proj(x) # B C Wh Ww
+ if self.norm is not None:
+ Wh, Ww = x.size(2), x.size(3)
+ x = x.flatten(2).transpose(1, 2)
+ x = self.norm(x)
+ x = x.transpose(1, 2).view(-1, self.embed_dim, Wh, Ww)
+
+ return x
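+
+# Example: a 224x224 input with patch_size=4 and embed_dim=96 becomes a (B, 96, 56, 56)
+# feature map; inputs whose H or W are not multiples of the patch size are padded on the
+# right/bottom before the strided projection.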
+
+
+@BACKBONES.register_module()
+class SwinTransformer(nn.Module):
+ """ Swin Transformer backbone.
+ A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` -
+ https://arxiv.org/pdf/2103.14030
+
+ Args:
+ pretrain_img_size (int): Input image size for training the pretrained model,
+ used in absolute position embedding. Default: 224.
+ patch_size (int | tuple(int)): Patch size. Default: 4.
+ in_chans (int): Number of input image channels. Default: 3.
+ embed_dim (int): Number of linear projection output channels. Default: 96.
+ depths (tuple[int]): Depths of each Swin Transformer stage.
+ num_heads (tuple[int]): Number of attention heads of each stage.
+ window_size (int): Window size. Default: 7.
+ mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4.
+ qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True
+ qk_scale (float): Override default qk scale of head_dim ** -0.5 if set.
+ drop_rate (float): Dropout rate.
+ attn_drop_rate (float): Attention dropout rate. Default: 0.
+ drop_path_rate (float): Stochastic depth rate. Default: 0.2.
+ norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
+ ape (bool): If True, add absolute position embedding to the patch embedding. Default: False.
+ patch_norm (bool): If True, add normalization after patch embedding. Default: True.
+ out_indices (Sequence[int]): Output from which stages.
+ frozen_stages (int): Stages to be frozen (stop grad and set eval mode).
+ -1 means not freezing any parameters.
+ use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
+ """
+
+ def __init__(self,
+ pretrain_img_size=224,
+ patch_size=4,
+ in_chans=3,
+ embed_dim=96,
+ depths=[2, 2, 6, 2],
+ num_heads=[3, 6, 12, 24],
+ window_size=7,
+ mlp_ratio=4.,
+ qkv_bias=True,
+ qk_scale=None,
+ drop_rate=0.,
+ attn_drop_rate=0.,
+ drop_path_rate=0.2,
+ norm_layer=nn.LayerNorm,
+ ape=False,
+ patch_norm=True,
+ out_indices=(0, 1, 2, 3),
+ frozen_stages=-1,
+ use_checkpoint=False):
+ super().__init__()
+
+ self.pretrain_img_size = pretrain_img_size
+ self.num_layers = len(depths)
+ self.embed_dim = embed_dim
+ self.ape = ape
+ self.patch_norm = patch_norm
+ self.out_indices = out_indices
+ self.frozen_stages = frozen_stages
+
+ # split image into non-overlapping patches
+ self.patch_embed = PatchEmbed(
+ patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim,
+ norm_layer=norm_layer if self.patch_norm else None)
+
+ # absolute position embedding
+ if self.ape:
+ pretrain_img_size = to_2tuple(pretrain_img_size)
+ patch_size = to_2tuple(patch_size)
+ patches_resolution = [pretrain_img_size[0] // patch_size[0], pretrain_img_size[1] // patch_size[1]]
+
+ self.absolute_pos_embed = nn.Parameter(torch.zeros(1, embed_dim, patches_resolution[0], patches_resolution[1]))
+ trunc_normal_(self.absolute_pos_embed, std=.02)
+
+ self.pos_drop = nn.Dropout(p=drop_rate)
+
+ # stochastic depth
+ dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule
+
+ # build layers
+ self.layers = nn.ModuleList()
+ for i_layer in range(self.num_layers):
+ layer = BasicLayer(
+ dim=int(embed_dim * 2 ** i_layer),
+ depth=depths[i_layer],
+ num_heads=num_heads[i_layer],
+ window_size=window_size,
+ mlp_ratio=mlp_ratio,
+ qkv_bias=qkv_bias,
+ qk_scale=qk_scale,
+ drop=drop_rate,
+ attn_drop=attn_drop_rate,
+ drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],
+ norm_layer=norm_layer,
+ downsample=PatchMerging if (i_layer < self.num_layers - 1) else None,
+ use_checkpoint=use_checkpoint)
+ self.layers.append(layer)
+
+ num_features = [int(embed_dim * 2 ** i) for i in range(self.num_layers)]
+ self.num_features = num_features
+
+ # add a norm layer for each output
+ for i_layer in out_indices:
+ layer = norm_layer(num_features[i_layer])
+ layer_name = f'norm{i_layer}'
+ self.add_module(layer_name, layer)
+
+ self._freeze_stages()
+
+ def _freeze_stages(self):
+ if self.frozen_stages >= 0:
+ self.patch_embed.eval()
+ for param in self.patch_embed.parameters():
+ param.requires_grad = False
+
+ if self.frozen_stages >= 1 and self.ape:
+ self.absolute_pos_embed.requires_grad = False
+
+ if self.frozen_stages >= 2:
+ self.pos_drop.eval()
+ for i in range(0, self.frozen_stages - 1):
+ m = self.layers[i]
+ m.eval()
+ for param in m.parameters():
+ param.requires_grad = False
+
+ def init_weights(self, pretrained=None):
+ """Initialize the weights in backbone.
+
+ Args:
+ pretrained (str, optional): Path to pre-trained weights.
+ Defaults to None.
+ """
+
+ def _init_weights(m):
+ if isinstance(m, nn.Linear):
+ trunc_normal_(m.weight, std=.02)
+ if isinstance(m, nn.Linear) and m.bias is not None:
+ nn.init.constant_(m.bias, 0)
+ elif isinstance(m, nn.LayerNorm):
+ nn.init.constant_(m.bias, 0)
+ nn.init.constant_(m.weight, 1.0)
+
+ if isinstance(pretrained, str):
+ self.apply(_init_weights)
+ logger = get_root_logger()
+ load_checkpoint(self, pretrained, strict=False, logger=logger)
+ elif pretrained is None:
+ self.apply(_init_weights)
+ else:
+ raise TypeError('pretrained must be a str or None')
+
+ def forward(self, x):
+ """Forward function."""
+ x = self.patch_embed(x)
+
+ Wh, Ww = x.size(2), x.size(3)
+ if self.ape:
+ # interpolate the position embedding to the corresponding size
+ absolute_pos_embed = F.interpolate(self.absolute_pos_embed, size=(Wh, Ww), mode='bicubic')
+ x = (x + absolute_pos_embed).flatten(2).transpose(1, 2) # B Wh*Ww C
+ else:
+ x = x.flatten(2).transpose(1, 2)
+ x = self.pos_drop(x)
+
+ outs = []
+ for i in range(self.num_layers):
+ layer = self.layers[i]
+ x_out, H, W, x, Wh, Ww = layer(x, Wh, Ww)
+
+ if i in self.out_indices:
+ norm_layer = getattr(self, f'norm{i}')
+ x_out = norm_layer(x_out)
+
+ out = x_out.view(-1, H, W, self.num_features[i]).permute(0, 3, 1, 2).contiguous()
+ outs.append(out)
+
+ return tuple(outs)
+
+ def train(self, mode=True):
+ """Convert the model into training mode while keep layers freezed."""
+ super(SwinTransformer, self).train(mode)
+ self._freeze_stages()
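+
+# With the default config (embed_dim=96, depths=[2, 2, 6, 2], out_indices=(0, 1, 2, 3)),
+# forward() returns a 4-level pyramid with (96, 192, 384, 768) channels at strides
+# 4, 8, 16 and 32 relative to the input image.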
diff --git a/annotator/uniformer/mmdet/models/backbones/trident_resnet.py b/annotator/uniformer/mmdet/models/backbones/trident_resnet.py
new file mode 100644
index 0000000000000000000000000000000000000000..e6100132b0f4120585da8a309cba4488b4b0ea72
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/backbones/trident_resnet.py
@@ -0,0 +1,292 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+import torch.utils.checkpoint as cp
+from mmcv.cnn import build_conv_layer, build_norm_layer, kaiming_init
+from torch.nn.modules.utils import _pair
+
+from mmdet.models.backbones.resnet import Bottleneck, ResNet
+from mmdet.models.builder import BACKBONES
+
+
+class TridentConv(nn.Module):
+ """Trident Convolution Module.
+
+ Args:
+ in_channels (int): Number of channels in input.
+ out_channels (int): Number of channels in output.
+ kernel_size (int): Size of convolution kernel.
+ stride (int, optional): Convolution stride. Default: 1.
+ trident_dilations (tuple[int, int, int], optional): Dilations of
+ different trident branch. Default: (1, 2, 3).
+ test_branch_idx (int, optional): In inference, all 3 branches will
+ be used if `test_branch_idx==-1`, otherwise only branch with
+ index `test_branch_idx` will be used. Default: 1.
+ bias (bool, optional): Whether to use bias in convolution or not.
+ Default: False.
+ """
+
+ def __init__(self,
+ in_channels,
+ out_channels,
+ kernel_size,
+ stride=1,
+ trident_dilations=(1, 2, 3),
+ test_branch_idx=1,
+ bias=False):
+ super(TridentConv, self).__init__()
+ self.num_branch = len(trident_dilations)
+ self.with_bias = bias
+ self.test_branch_idx = test_branch_idx
+ self.stride = _pair(stride)
+ self.kernel_size = _pair(kernel_size)
+ self.paddings = _pair(trident_dilations)
+ self.dilations = trident_dilations
+ self.in_channels = in_channels
+ self.out_channels = out_channels
+ self.bias = bias
+
+ self.weight = nn.Parameter(
+ torch.Tensor(out_channels, in_channels, *self.kernel_size))
+ if bias:
+ self.bias = nn.Parameter(torch.Tensor(out_channels))
+ else:
+ self.bias = None
+ self.init_weights()
+
+ def init_weights(self):
+ kaiming_init(self, distribution='uniform', mode='fan_in')
+
+ def extra_repr(self):
+ tmpstr = f'in_channels={self.in_channels}'
+ tmpstr += f', out_channels={self.out_channels}'
+ tmpstr += f', kernel_size={self.kernel_size}'
+ tmpstr += f', num_branch={self.num_branch}'
+ tmpstr += f', test_branch_idx={self.test_branch_idx}'
+ tmpstr += f', stride={self.stride}'
+ tmpstr += f', paddings={self.paddings}'
+ tmpstr += f', dilations={self.dilations}'
+ tmpstr += f', bias={self.bias}'
+ return tmpstr
+
+ def forward(self, inputs):
+ if self.training or self.test_branch_idx == -1:
+ outputs = [
+ F.conv2d(input, self.weight, self.bias, self.stride, padding,
+ dilation) for input, dilation, padding in zip(
+ inputs, self.dilations, self.paddings)
+ ]
+ else:
+ assert len(inputs) == 1
+ outputs = [
+ F.conv2d(inputs[0], self.weight, self.bias, self.stride,
+ self.paddings[self.test_branch_idx],
+ self.dilations[self.test_branch_idx])
+ ]
+
+ return outputs
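+
+# In training (or when test_branch_idx == -1) TridentConv expects a list of num_branch
+# inputs and applies the shared weight with a per-branch dilation/padding, returning one
+# output per branch; at test time with test_branch_idx >= 0 it takes a single input and
+# runs only the selected branch.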
+
+
+# Since TridentNet is defined over ResNet50 and ResNet101, here we
+# only support TridentBottleneckBlock.
+class TridentBottleneck(Bottleneck):
+ """BottleBlock for TridentResNet.
+
+ Args:
+ trident_dilations (tuple[int, int, int]): Dilations of different
+ trident branch.
+ test_branch_idx (int): In inference, all 3 branches will be used
+ if `test_branch_idx==-1`, otherwise only branch with index
+ `test_branch_idx` will be used.
+ concat_output (bool): Whether to concat the output list to a Tensor.
+ `True` only in the last Block.
+ """
+
+ def __init__(self, trident_dilations, test_branch_idx, concat_output,
+ **kwargs):
+
+ super(TridentBottleneck, self).__init__(**kwargs)
+ self.trident_dilations = trident_dilations
+ self.num_branch = len(trident_dilations)
+ self.concat_output = concat_output
+ self.test_branch_idx = test_branch_idx
+ self.conv2 = TridentConv(
+ self.planes,
+ self.planes,
+ kernel_size=3,
+ stride=self.conv2_stride,
+ bias=False,
+ trident_dilations=self.trident_dilations,
+ test_branch_idx=test_branch_idx)
+
+ def forward(self, x):
+
+ def _inner_forward(x):
+ num_branch = (
+ self.num_branch
+ if self.training or self.test_branch_idx == -1 else 1)
+ identity = x
+ if not isinstance(x, list):
+ x = (x, ) * num_branch
+ identity = x
+ if self.downsample is not None:
+ identity = [self.downsample(b) for b in x]
+
+ out = [self.conv1(b) for b in x]
+ out = [self.norm1(b) for b in out]
+ out = [self.relu(b) for b in out]
+
+ if self.with_plugins:
+ for k in range(len(out)):
+ out[k] = self.forward_plugin(out[k],
+ self.after_conv1_plugin_names)
+
+ out = self.conv2(out)
+ out = [self.norm2(b) for b in out]
+ out = [self.relu(b) for b in out]
+ if self.with_plugins:
+ for k in range(len(out)):
+ out[k] = self.forward_plugin(out[k],
+ self.after_conv2_plugin_names)
+
+ out = [self.conv3(b) for b in out]
+ out = [self.norm3(b) for b in out]
+
+ if self.with_plugins:
+ for k in range(len(out)):
+ out[k] = self.forward_plugin(out[k],
+ self.after_conv3_plugin_names)
+
+ out = [
+ out_b + identity_b for out_b, identity_b in zip(out, identity)
+ ]
+ return out
+
+ if self.with_cp and x.requires_grad:
+ out = cp.checkpoint(_inner_forward, x)
+ else:
+ out = _inner_forward(x)
+
+ out = [self.relu(b) for b in out]
+ if self.concat_output:
+ out = torch.cat(out, dim=0)
+ return out
+
+
+def make_trident_res_layer(block,
+ inplanes,
+ planes,
+ num_blocks,
+ stride=1,
+ trident_dilations=(1, 2, 3),
+ style='pytorch',
+ with_cp=False,
+ conv_cfg=None,
+ norm_cfg=dict(type='BN'),
+ dcn=None,
+ plugins=None,
+ test_branch_idx=-1):
+ """Build Trident Res Layers."""
+
+ downsample = None
+ if stride != 1 or inplanes != planes * block.expansion:
+ downsample = []
+ conv_stride = stride
+ downsample.extend([
+ build_conv_layer(
+ conv_cfg,
+ inplanes,
+ planes * block.expansion,
+ kernel_size=1,
+ stride=conv_stride,
+ bias=False),
+ build_norm_layer(norm_cfg, planes * block.expansion)[1]
+ ])
+ downsample = nn.Sequential(*downsample)
+
+ layers = []
+ for i in range(num_blocks):
+ layers.append(
+ block(
+ inplanes=inplanes,
+ planes=planes,
+ stride=stride if i == 0 else 1,
+ trident_dilations=trident_dilations,
+ downsample=downsample if i == 0 else None,
+ style=style,
+ with_cp=with_cp,
+ conv_cfg=conv_cfg,
+ norm_cfg=norm_cfg,
+ dcn=dcn,
+ plugins=plugins,
+ test_branch_idx=test_branch_idx,
+ concat_output=True if i == num_blocks - 1 else False))
+ inplanes = planes * block.expansion
+ return nn.Sequential(*layers)
+
+
+@BACKBONES.register_module()
+class TridentResNet(ResNet):
+ """The stem layer, stage 1 and stage 2 in Trident ResNet are identical to
+ ResNet, while in stage 3, Trident BottleBlock is utilized to replace the
+ normal BottleBlock to yield trident output. Different branches share the
+ convolution weights but use different dilations to achieve multi-scale
+ output.
+
+ / stage3(b0) \
+ x - stem - stage1 - stage2 - stage3(b1) - output
+ \ stage3(b2) /
+
+ Args:
+ depth (int): Depth of resnet, from {50, 101, 152}.
+ num_branch (int): Number of branches in TridentNet.
+ test_branch_idx (int): In inference, all 3 branches will be used
+ if `test_branch_idx==-1`, otherwise only branch with index
+ `test_branch_idx` will be used.
+ trident_dilations (tuple[int]): Dilations of different trident branch.
+ len(trident_dilations) should be equal to num_branch.
+ """ # noqa
+
+ def __init__(self, depth, num_branch, test_branch_idx, trident_dilations,
+ **kwargs):
+
+ assert num_branch == len(trident_dilations)
+ assert depth in (50, 101, 152)
+ super(TridentResNet, self).__init__(depth, **kwargs)
+ assert self.num_stages == 3
+ self.test_branch_idx = test_branch_idx
+ self.num_branch = num_branch
+
+ last_stage_idx = self.num_stages - 1
+ stride = self.strides[last_stage_idx]
+ dilation = trident_dilations
+ dcn = self.dcn if self.stage_with_dcn[last_stage_idx] else None
+ if self.plugins is not None:
+ stage_plugins = self.make_stage_plugins(self.plugins,
+ last_stage_idx)
+ else:
+ stage_plugins = None
+ planes = self.base_channels * 2**last_stage_idx
+ res_layer = make_trident_res_layer(
+ TridentBottleneck,
+ inplanes=(self.block.expansion * self.base_channels *
+ 2**(last_stage_idx - 1)),
+ planes=planes,
+ num_blocks=self.stage_blocks[last_stage_idx],
+ stride=stride,
+ trident_dilations=dilation,
+ style=self.style,
+ with_cp=self.with_cp,
+ conv_cfg=self.conv_cfg,
+ norm_cfg=self.norm_cfg,
+ dcn=dcn,
+ plugins=stage_plugins,
+ test_branch_idx=self.test_branch_idx)
+
+ layer_name = f'layer{last_stage_idx + 1}'
+
+ self.__setattr__(layer_name, res_layer)
+ self.res_layers.pop(last_stage_idx)
+ self.res_layers.insert(last_stage_idx, layer_name)
+
+ self._freeze_stages()
diff --git a/annotator/uniformer/mmdet/models/backbones/uniformer.py b/annotator/uniformer/mmdet/models/backbones/uniformer.py
new file mode 100644
index 0000000000000000000000000000000000000000..5705a6dd7019f51bc04e4a2c7ff42021821dbd49
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/backbones/uniformer.py
@@ -0,0 +1,422 @@
+# --------------------------------------------------------
+# UniFormer
+# Copyright (c) 2022 SenseTime X-Lab
+# Licensed under The MIT License [see LICENSE for details]
+# Written by Kunchang Li
+# --------------------------------------------------------
+
+from collections import OrderedDict
+import math
+
+from functools import partial
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+import torch.utils.checkpoint as checkpoint
+import numpy as np
+from timm.models.layers import DropPath, to_2tuple, trunc_normal_
+
+from mmcv_custom import load_checkpoint
+from mmdet.utils import get_root_logger
+from ..builder import BACKBONES
+
+
+class Mlp(nn.Module):
+ def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
+ super().__init__()
+ out_features = out_features or in_features
+ hidden_features = hidden_features or in_features
+ self.fc1 = nn.Linear(in_features, hidden_features)
+ self.act = act_layer()
+ self.fc2 = nn.Linear(hidden_features, out_features)
+ self.drop = nn.Dropout(drop)
+
+ def forward(self, x):
+ x = self.fc1(x)
+ x = self.act(x)
+ x = self.drop(x)
+ x = self.fc2(x)
+ x = self.drop(x)
+ return x
+
+
+class CMlp(nn.Module):
+ def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
+ super().__init__()
+ out_features = out_features or in_features
+ hidden_features = hidden_features or in_features
+ self.fc1 = nn.Conv2d(in_features, hidden_features, 1)
+ self.act = act_layer()
+ self.fc2 = nn.Conv2d(hidden_features, out_features, 1)
+ self.drop = nn.Dropout(drop)
+
+ def forward(self, x):
+ x = self.fc1(x)
+ x = self.act(x)
+ x = self.drop(x)
+ x = self.fc2(x)
+ x = self.drop(x)
+ return x
+
+
+class CBlock(nn.Module):
+ def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
+ drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):
+ super().__init__()
+ self.pos_embed = nn.Conv2d(dim, dim, 3, padding=1, groups=dim)
+ self.norm1 = nn.BatchNorm2d(dim)
+ self.conv1 = nn.Conv2d(dim, dim, 1)
+ self.conv2 = nn.Conv2d(dim, dim, 1)
+ self.attn = nn.Conv2d(dim, dim, 5, padding=2, groups=dim)
+ # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
+ self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
+ self.norm2 = nn.BatchNorm2d(dim)
+ mlp_hidden_dim = int(dim * mlp_ratio)
+ self.mlp = CMlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
+
+ def forward(self, x):
+ x = x + self.pos_embed(x)
+ x = x + self.drop_path(self.conv2(self.attn(self.conv1(self.norm1(x)))))
+ x = x + self.drop_path(self.mlp(self.norm2(x)))
+ return x
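+
+# CBlock is the convolutional (local) token mixer of UniFormer: the 3x3 depthwise conv
+# acts as a conditional position encoding, the 5x5 depthwise conv between two 1x1 convs
+# replaces self-attention, and a convolutional MLP follows -- all on (B, C, H, W) tensors
+# with BatchNorm, so no flatten/transpose is needed in the high-resolution early stages.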
+
+
+class Attention(nn.Module):
+ def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):
+ super().__init__()
+ self.num_heads = num_heads
+ head_dim = dim // num_heads
+ # NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights
+ self.scale = qk_scale or head_dim ** -0.5
+
+ self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
+ self.attn_drop = nn.Dropout(attn_drop)
+ self.proj = nn.Linear(dim, dim)
+ self.proj_drop = nn.Dropout(proj_drop)
+
+ def forward(self, x):
+ B, N, C = x.shape
+ qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
+ q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)
+
+ attn = (q @ k.transpose(-2, -1)) * self.scale
+ attn = attn.softmax(dim=-1)
+ attn = self.attn_drop(attn)
+
+ x = (attn @ v).transpose(1, 2).reshape(B, N, C)
+ x = self.proj(x)
+ x = self.proj_drop(x)
+ return x
+
+
+class SABlock(nn.Module):
+ def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
+ drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):
+ super().__init__()
+ self.pos_embed = nn.Conv2d(dim, dim, 3, padding=1, groups=dim)
+ self.norm1 = norm_layer(dim)
+ self.attn = Attention(
+ dim,
+ num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale,
+ attn_drop=attn_drop, proj_drop=drop)
+ # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
+ self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
+ self.norm2 = norm_layer(dim)
+ mlp_hidden_dim = int(dim * mlp_ratio)
+ self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
+
+ def forward(self, x):
+ x = x + self.pos_embed(x)
+ B, N, H, W = x.shape
+ x = x.flatten(2).transpose(1, 2)
+ x = x + self.drop_path(self.attn(self.norm1(x)))
+ x = x + self.drop_path(self.mlp(self.norm2(x)))
+ x = x.transpose(1, 2).reshape(B, N, H, W)
+ return x
+
+
+def window_partition(x, window_size):
+ """
+ Args:
+ x: (B, H, W, C)
+ window_size (int): window size
+ Returns:
+ windows: (num_windows*B, window_size, window_size, C)
+ """
+ B, H, W, C = x.shape
+ x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
+ windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
+ return windows
+
+
+def window_reverse(windows, window_size, H, W):
+ """
+ Args:
+ windows: (num_windows*B, window_size, window_size, C)
+ window_size (int): Window size
+ H (int): Height of image
+ W (int): Width of image
+ Returns:
+ x: (B, H, W, C)
+ """
+ B = int(windows.shape[0] / (H * W / window_size / window_size))
+ x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)
+ x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
+ return x
+
+
+class SABlock_Windows(nn.Module):
+ def __init__(self, dim, num_heads, window_size=14, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
+ drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):
+ super().__init__()
+ self.window_size = window_size
+ self.pos_embed = nn.Conv2d(dim, dim, 3, padding=1, groups=dim)
+ self.norm1 = norm_layer(dim)
+ self.attn = Attention(
+ dim,
+ num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale,
+ attn_drop=attn_drop, proj_drop=drop)
+ # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
+ self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
+ self.norm2 = norm_layer(dim)
+ mlp_hidden_dim = int(dim * mlp_ratio)
+ self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
+
+ def forward(self, x):
+ x = x + self.pos_embed(x)
+ x = x.permute(0, 2, 3, 1)
+ B, H, W, C = x.shape
+ shortcut = x
+ x = self.norm1(x)
+
+ pad_l = pad_t = 0
+ pad_r = (self.window_size - W % self.window_size) % self.window_size
+ pad_b = (self.window_size - H % self.window_size) % self.window_size
+ x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b))
+ _, Hp, Wp, _ = x.shape
+
+ x_windows = window_partition(x, self.window_size) # nW*B, window_size, window_size, C
+ x_windows = x_windows.view(-1, self.window_size * self.window_size, C) # nW*B, window_size*window_size, C
+
+ # W-MSA/SW-MSA
+ attn_windows = self.attn(x_windows) # nW*B, window_size*window_size, C
+
+ # merge windows
+ attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)
+ x = window_reverse(attn_windows, self.window_size, Hp, Wp) # B H' W' C
+
+ # remove the right/bottom padding added before window partition
+ if pad_r > 0 or pad_b > 0:
+ x = x[:, :H, :W, :].contiguous()
+
+ x = shortcut + self.drop_path(x)
+ x = x + self.drop_path(self.mlp(self.norm2(x)))
+ x = x.permute(0, 3, 1, 2).reshape(B, C, H, W)
+ return x
+
+
+class PatchEmbed(nn.Module):
+ """ Image to Patch Embedding
+ """
+ def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
+ super().__init__()
+ img_size = to_2tuple(img_size)
+ patch_size = to_2tuple(patch_size)
+ num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0])
+ self.img_size = img_size
+ self.patch_size = patch_size
+ self.num_patches = num_patches
+ self.norm = nn.LayerNorm(embed_dim)
+ self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
+
+ def forward(self, x):
+ B, _, H, W = x.shape
+ x = self.proj(x)
+ B, _, H, W = x.shape
+ x = x.flatten(2).transpose(1, 2)
+ x = self.norm(x)
+ x = x.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous()
+ return x
+
+
+@BACKBONES.register_module()
+class UniFormer(nn.Module):
+ """ Vision Transformer
+ A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale` -
+ https://arxiv.org/abs/2010.11929
+ """
+ def __init__(self, layers=[3, 4, 8, 3], img_size=224, in_chans=3, num_classes=80, embed_dim=[64, 128, 320, 512],
+ head_dim=64, mlp_ratio=4., qkv_bias=True, qk_scale=None, representation_size=None,
+ drop_rate=0., attn_drop_rate=0., drop_path_rate=0., norm_layer=partial(nn.LayerNorm, eps=1e-6),
+ pretrained_path=None, use_checkpoint=False, checkpoint_num=[0, 0, 0, 0],
+ windows=False, hybrid=False, window_size=14):
+ """
+ Args:
+ layers (list): number of blocks in each layer
+ img_size (int, tuple): input image size
+ in_chans (int): number of input channels
+ num_classes (int): number of classes for classification head
+ embed_dim (int): embedding dimension
+ head_dim (int): dimension of attention heads
+ mlp_ratio (int): ratio of mlp hidden dim to embedding dim
+ qkv_bias (bool): enable bias for qkv if True
+ qk_scale (float): override default qk scale of head_dim ** -0.5 if set
+ representation_size (Optional[int]): enable and set representation layer (pre-logits) to this value if set
+ drop_rate (float): dropout rate
+ attn_drop_rate (float): attention dropout rate
+ drop_path_rate (float): stochastic depth rate
+ norm_layer (nn.Module): normalization layer
+ pretrained_path (str): path of pretrained model
+ use_checkpoint (bool): whether use checkpoint
+ checkpoint_num (list): index for using checkpoint in every stage
+ windows (bool): whether use window MHRA
+ hybrid (bool): whether use hybrid MHRA
+ window_size (int): size of window (>14)
+ """
+ super().__init__()
+ self.num_classes = num_classes
+ self.use_checkpoint = use_checkpoint
+ self.checkpoint_num = checkpoint_num
+ self.windows = windows
+ print(f'Use Checkpoint: {self.use_checkpoint}')
+ print(f'Checkpoint Number: {self.checkpoint_num}')
+ self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models
+ norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)
+
+ self.patch_embed1 = PatchEmbed(
+ img_size=img_size, patch_size=4, in_chans=in_chans, embed_dim=embed_dim[0])
+ self.patch_embed2 = PatchEmbed(
+ img_size=img_size // 4, patch_size=2, in_chans=embed_dim[0], embed_dim=embed_dim[1])
+ self.patch_embed3 = PatchEmbed(
+ img_size=img_size // 8, patch_size=2, in_chans=embed_dim[1], embed_dim=embed_dim[2])
+ self.patch_embed4 = PatchEmbed(
+ img_size=img_size // 16, patch_size=2, in_chans=embed_dim[2], embed_dim=embed_dim[3])
+
+ self.pos_drop = nn.Dropout(p=drop_rate)
+ dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(layers))] # stochastic depth decay rule
+ num_heads = [dim // head_dim for dim in embed_dim]
+ self.blocks1 = nn.ModuleList([
+ CBlock(
+ dim=embed_dim[0], num_heads=num_heads[0], mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
+ drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer)
+ for i in range(layers[0])])
+ self.norm1 = norm_layer(embed_dim[0])
+ self.blocks2 = nn.ModuleList([
+ CBlock(
+ dim=embed_dim[1], num_heads=num_heads[1], mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
+ drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i+layers[0]], norm_layer=norm_layer)
+ for i in range(layers[1])])
+ self.norm2 = norm_layer(embed_dim[1])
+ if self.windows:
+ print('Use local window for all blocks in stage3')
+ self.blocks3 = nn.ModuleList([
+ SABlock_Windows(
+ dim=embed_dim[2], num_heads=num_heads[2], window_size=window_size, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
+ drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i+layers[0]+layers[1]], norm_layer=norm_layer)
+ for i in range(layers[2])])
+ elif hybrid:
+ print('Use hybrid window for blocks in stage3')
+ block3 = []
+ for i in range(layers[2]):
+ if (i + 1) % 4 == 0:
+ block3.append(SABlock(
+ dim=embed_dim[2], num_heads=num_heads[2], mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
+ drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i+layers[0]+layers[1]], norm_layer=norm_layer))
+ else:
+ block3.append(SABlock_Windows(
+ dim=embed_dim[2], num_heads=num_heads[2], window_size=window_size, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
+ drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i+layers[0]+layers[1]], norm_layer=norm_layer))
+ self.blocks3 = nn.ModuleList(block3)
+ else:
+ print('Use global window for all blocks in stage3')
+ self.blocks3 = nn.ModuleList([
+ SABlock(
+ dim=embed_dim[2], num_heads=num_heads[2], mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
+ drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i+layers[0]+layers[1]], norm_layer=norm_layer)
+ for i in range(layers[2])])
+ self.norm3 = norm_layer(embed_dim[2])
+ self.blocks4 = nn.ModuleList([
+ SABlock(
+ dim=embed_dim[3], num_heads=num_heads[3], mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
+ drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i+layers[0]+layers[1]+layers[2]], norm_layer=norm_layer)
+ for i in range(layers[3])])
+ self.norm4 = norm_layer(embed_dim[3])
+
+ # Representation layer
+ if representation_size:
+ self.num_features = representation_size
+ self.pre_logits = nn.Sequential(OrderedDict([
+ ('fc', nn.Linear(embed_dim, representation_size)),
+ ('act', nn.Tanh())
+ ]))
+ else:
+ self.pre_logits = nn.Identity()
+
+ self.apply(self._init_weights)
+ self.init_weights(pretrained=pretrained_path)
+
+ def init_weights(self, pretrained):
+ if isinstance(pretrained, str):
+ logger = get_root_logger()
+ load_checkpoint(self, pretrained, map_location='cpu', strict=False, logger=logger)
+ print(f'Load pretrained model from {pretrained}')
+
+ def _init_weights(self, m):
+ if isinstance(m, nn.Linear):
+ trunc_normal_(m.weight, std=.02)
+ if isinstance(m, nn.Linear) and m.bias is not None:
+ nn.init.constant_(m.bias, 0)
+ elif isinstance(m, nn.LayerNorm):
+ nn.init.constant_(m.bias, 0)
+ nn.init.constant_(m.weight, 1.0)
+
+ @torch.jit.ignore
+ def no_weight_decay(self):
+ return {'pos_embed', 'cls_token'}
+
+ def get_classifier(self):
+ return self.head
+
+ def reset_classifier(self, num_classes, global_pool=''):
+ self.num_classes = num_classes
+ self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
+
+ def forward_features(self, x):
+ out = []
+ x = self.patch_embed1(x)
+ x = self.pos_drop(x)
+ for i, blk in enumerate(self.blocks1):
+ if self.use_checkpoint and i < self.checkpoint_num[0]:
+ x = checkpoint.checkpoint(blk, x)
+ else:
+ x = blk(x)
+ x_out = self.norm1(x.permute(0, 2, 3, 1))
+ out.append(x_out.permute(0, 3, 1, 2).contiguous())
+ x = self.patch_embed2(x)
+ for i, blk in enumerate(self.blocks2):
+ if self.use_checkpoint and i < self.checkpoint_num[1]:
+ x = checkpoint.checkpoint(blk, x)
+ else:
+ x = blk(x)
+ x_out = self.norm2(x.permute(0, 2, 3, 1))
+ out.append(x_out.permute(0, 3, 1, 2).contiguous())
+ x = self.patch_embed3(x)
+ for i, blk in enumerate(self.blocks3):
+ if self.use_checkpoint and i < self.checkpoint_num[2]:
+ x = checkpoint.checkpoint(blk, x)
+ else:
+ x = blk(x)
+ x_out = self.norm3(x.permute(0, 2, 3, 1))
+ out.append(x_out.permute(0, 3, 1, 2).contiguous())
+ x = self.patch_embed4(x)
+ for i, blk in enumerate(self.blocks4):
+ if self.use_checkpoint and i < self.checkpoint_num[3]:
+ x = checkpoint.checkpoint(blk, x)
+ else:
+ x = blk(x)
+ x_out = self.norm4(x.permute(0, 2, 3, 1))
+ out.append(x_out.permute(0, 3, 1, 2).contiguous())
+ return tuple(out)
+
+ def forward(self, x):
+ x = self.forward_features(x)
+ return x
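+
+# forward_features() returns a tuple of four feature maps with embed_dim = [64, 128, 320, 512]
+# channels (under the defaults above) at strides 4, 8, 16 and 32, matching the multi-scale
+# interface that mmdet necks such as FPN expect from a backbone.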
diff --git a/annotator/uniformer/mmdet/models/builder.py b/annotator/uniformer/mmdet/models/builder.py
new file mode 100644
index 0000000000000000000000000000000000000000..81c927e507a7c1625ffb114de10e93c94927af25
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/builder.py
@@ -0,0 +1,77 @@
+import warnings
+
+from mmcv.utils import Registry, build_from_cfg
+from torch import nn
+
+BACKBONES = Registry('backbone')
+NECKS = Registry('neck')
+ROI_EXTRACTORS = Registry('roi_extractor')
+SHARED_HEADS = Registry('shared_head')
+HEADS = Registry('head')
+LOSSES = Registry('loss')
+DETECTORS = Registry('detector')
+
+
+def build(cfg, registry, default_args=None):
+ """Build a module.
+
+ Args:
+ cfg (dict, list[dict]): The config of modules; it is either a dict
+ or a list of configs.
+ registry (:obj:`Registry`): A registry the module belongs to.
+ default_args (dict, optional): Default arguments to build the module.
+ Defaults to None.
+
+ Returns:
+ nn.Module: A built nn module.
+ """
+ if isinstance(cfg, list):
+ modules = [
+ build_from_cfg(cfg_, registry, default_args) for cfg_ in cfg
+ ]
+ return nn.Sequential(*modules)
+ else:
+ return build_from_cfg(cfg, registry, default_args)
+
+
+def build_backbone(cfg):
+ """Build backbone."""
+ return build(cfg, BACKBONES)
+
+
+def build_neck(cfg):
+ """Build neck."""
+ return build(cfg, NECKS)
+
+
+def build_roi_extractor(cfg):
+ """Build roi extractor."""
+ return build(cfg, ROI_EXTRACTORS)
+
+
+def build_shared_head(cfg):
+ """Build shared head."""
+ return build(cfg, SHARED_HEADS)
+
+
+def build_head(cfg):
+ """Build head."""
+ return build(cfg, HEADS)
+
+
+def build_loss(cfg):
+ """Build loss."""
+ return build(cfg, LOSSES)
+
+
+def build_detector(cfg, train_cfg=None, test_cfg=None):
+ """Build detector."""
+ if train_cfg is not None or test_cfg is not None:
+ warnings.warn(
+ 'train_cfg and test_cfg are deprecated, '
+ 'please specify them in model', UserWarning)
+ assert cfg.get('train_cfg') is None or train_cfg is None, \
+ 'train_cfg specified in both outer field and model field '
+ assert cfg.get('test_cfg') is None or test_cfg is None, \
+ 'test_cfg specified in both outer field and model field '
+ return build(cfg, DETECTORS, dict(train_cfg=train_cfg, test_cfg=test_cfg))
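+
+# Typical usage from a config dict (illustrative values only):
+#   backbone = build_backbone(dict(type='SwinTransformer', embed_dim=96,
+#                                  depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24]))
+#   loss_cls = build_loss(dict(type='FocalLoss', use_sigmoid=True, loss_weight=1.0))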
diff --git a/annotator/uniformer/mmdet/models/dense_heads/__init__.py b/annotator/uniformer/mmdet/models/dense_heads/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..f004dd95d97df16167f932587b3ce73b05b04a37
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/dense_heads/__init__.py
@@ -0,0 +1,41 @@
+from .anchor_free_head import AnchorFreeHead
+from .anchor_head import AnchorHead
+from .atss_head import ATSSHead
+from .cascade_rpn_head import CascadeRPNHead, StageCascadeRPNHead
+from .centripetal_head import CentripetalHead
+from .corner_head import CornerHead
+from .embedding_rpn_head import EmbeddingRPNHead
+from .fcos_head import FCOSHead
+from .fovea_head import FoveaHead
+from .free_anchor_retina_head import FreeAnchorRetinaHead
+from .fsaf_head import FSAFHead
+from .ga_retina_head import GARetinaHead
+from .ga_rpn_head import GARPNHead
+from .gfl_head import GFLHead
+from .guided_anchor_head import FeatureAdaption, GuidedAnchorHead
+from .ld_head import LDHead
+from .nasfcos_head import NASFCOSHead
+from .paa_head import PAAHead
+from .pisa_retinanet_head import PISARetinaHead
+from .pisa_ssd_head import PISASSDHead
+from .reppoints_head import RepPointsHead
+from .retina_head import RetinaHead
+from .retina_sepbn_head import RetinaSepBNHead
+from .rpn_head import RPNHead
+from .sabl_retina_head import SABLRetinaHead
+from .ssd_head import SSDHead
+from .transformer_head import TransformerHead
+from .vfnet_head import VFNetHead
+from .yolact_head import YOLACTHead, YOLACTProtonet, YOLACTSegmHead
+from .yolo_head import YOLOV3Head
+
+__all__ = [
+ 'AnchorFreeHead', 'AnchorHead', 'GuidedAnchorHead', 'FeatureAdaption',
+ 'RPNHead', 'GARPNHead', 'RetinaHead', 'RetinaSepBNHead', 'GARetinaHead',
+ 'SSDHead', 'FCOSHead', 'RepPointsHead', 'FoveaHead',
+ 'FreeAnchorRetinaHead', 'ATSSHead', 'FSAFHead', 'NASFCOSHead',
+ 'PISARetinaHead', 'PISASSDHead', 'GFLHead', 'CornerHead', 'YOLACTHead',
+ 'YOLACTSegmHead', 'YOLACTProtonet', 'YOLOV3Head', 'PAAHead',
+ 'SABLRetinaHead', 'CentripetalHead', 'VFNetHead', 'TransformerHead',
+ 'StageCascadeRPNHead', 'CascadeRPNHead', 'EmbeddingRPNHead', 'LDHead'
+]
diff --git a/annotator/uniformer/mmdet/models/dense_heads/anchor_free_head.py b/annotator/uniformer/mmdet/models/dense_heads/anchor_free_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..1814a0cc4f577f470f74f025440073a0aaa1ebd0
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/dense_heads/anchor_free_head.py
@@ -0,0 +1,340 @@
+from abc import abstractmethod
+
+import torch
+import torch.nn as nn
+from mmcv.cnn import ConvModule, bias_init_with_prob, normal_init
+from mmcv.runner import force_fp32
+
+from mmdet.core import multi_apply
+from ..builder import HEADS, build_loss
+from .base_dense_head import BaseDenseHead
+from .dense_test_mixins import BBoxTestMixin
+
+
+@HEADS.register_module()
+class AnchorFreeHead(BaseDenseHead, BBoxTestMixin):
+ """Anchor-free head (FCOS, Fovea, RepPoints, etc.).
+
+ Args:
+ num_classes (int): Number of categories excluding the background
+ category.
+ in_channels (int): Number of channels in the input feature map.
+ feat_channels (int): Number of hidden channels. Used in child classes.
+ stacked_convs (int): Number of stacking convs of the head.
+ strides (tuple): Downsample factor of each feature map.
+ dcn_on_last_conv (bool): If true, use dcn in the last layer of
+ towers. Default: False.
+ conv_bias (bool | str): If specified as `auto`, it will be decided by
+ the norm_cfg. Bias of conv will be set as True if `norm_cfg` is
+ None, otherwise False. Default: "auto".
+ loss_cls (dict): Config of classification loss.
+ loss_bbox (dict): Config of localization loss.
+ conv_cfg (dict): Config dict for convolution layer. Default: None.
+ norm_cfg (dict): Config dict for normalization layer. Default: None.
+ train_cfg (dict): Training config of anchor head.
+ test_cfg (dict): Testing config of anchor head.
+ """ # noqa: W605
+
+ _version = 1
+
+ def __init__(self,
+ num_classes,
+ in_channels,
+ feat_channels=256,
+ stacked_convs=4,
+ strides=(4, 8, 16, 32, 64),
+ dcn_on_last_conv=False,
+ conv_bias='auto',
+ loss_cls=dict(
+ type='FocalLoss',
+ use_sigmoid=True,
+ gamma=2.0,
+ alpha=0.25,
+ loss_weight=1.0),
+ loss_bbox=dict(type='IoULoss', loss_weight=1.0),
+ conv_cfg=None,
+ norm_cfg=None,
+ train_cfg=None,
+ test_cfg=None):
+ super(AnchorFreeHead, self).__init__()
+ self.num_classes = num_classes
+ self.cls_out_channels = num_classes
+ self.in_channels = in_channels
+ self.feat_channels = feat_channels
+ self.stacked_convs = stacked_convs
+ self.strides = strides
+ self.dcn_on_last_conv = dcn_on_last_conv
+ assert conv_bias == 'auto' or isinstance(conv_bias, bool)
+ self.conv_bias = conv_bias
+ self.loss_cls = build_loss(loss_cls)
+ self.loss_bbox = build_loss(loss_bbox)
+ self.train_cfg = train_cfg
+ self.test_cfg = test_cfg
+ self.conv_cfg = conv_cfg
+ self.norm_cfg = norm_cfg
+ self.fp16_enabled = False
+
+ self._init_layers()
+
+ def _init_layers(self):
+ """Initialize layers of the head."""
+ self._init_cls_convs()
+ self._init_reg_convs()
+ self._init_predictor()
+
+ def _init_cls_convs(self):
+ """Initialize classification conv layers of the head."""
+ self.cls_convs = nn.ModuleList()
+ for i in range(self.stacked_convs):
+ chn = self.in_channels if i == 0 else self.feat_channels
+ if self.dcn_on_last_conv and i == self.stacked_convs - 1:
+ conv_cfg = dict(type='DCNv2')
+ else:
+ conv_cfg = self.conv_cfg
+ self.cls_convs.append(
+ ConvModule(
+ chn,
+ self.feat_channels,
+ 3,
+ stride=1,
+ padding=1,
+ conv_cfg=conv_cfg,
+ norm_cfg=self.norm_cfg,
+ bias=self.conv_bias))
+
+ def _init_reg_convs(self):
+ """Initialize bbox regression conv layers of the head."""
+ self.reg_convs = nn.ModuleList()
+ for i in range(self.stacked_convs):
+ chn = self.in_channels if i == 0 else self.feat_channels
+ if self.dcn_on_last_conv and i == self.stacked_convs - 1:
+ conv_cfg = dict(type='DCNv2')
+ else:
+ conv_cfg = self.conv_cfg
+ self.reg_convs.append(
+ ConvModule(
+ chn,
+ self.feat_channels,
+ 3,
+ stride=1,
+ padding=1,
+ conv_cfg=conv_cfg,
+ norm_cfg=self.norm_cfg,
+ bias=self.conv_bias))
+
+ def _init_predictor(self):
+ """Initialize predictor layers of the head."""
+ self.conv_cls = nn.Conv2d(
+ self.feat_channels, self.cls_out_channels, 3, padding=1)
+ self.conv_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1)
+
+ def init_weights(self):
+ """Initialize weights of the head."""
+ for m in self.cls_convs:
+ if isinstance(m.conv, nn.Conv2d):
+ normal_init(m.conv, std=0.01)
+ for m in self.reg_convs:
+ if isinstance(m.conv, nn.Conv2d):
+ normal_init(m.conv, std=0.01)
+ bias_cls = bias_init_with_prob(0.01)
+ normal_init(self.conv_cls, std=0.01, bias=bias_cls)
+ normal_init(self.conv_reg, std=0.01)
+
+ def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
+ missing_keys, unexpected_keys, error_msgs):
+ """Hack some keys of the model state dict so that can load checkpoints
+ of previous version."""
+ version = local_metadata.get('version', None)
+ if version is None:
+ # the key is different in early versions
+ # for example, 'fcos_cls' become 'conv_cls' now
+ bbox_head_keys = [
+ k for k in state_dict.keys() if k.startswith(prefix)
+ ]
+ ori_predictor_keys = []
+ new_predictor_keys = []
+ # e.g. 'fcos_cls' or 'fcos_reg'
+ for key in bbox_head_keys:
+ ori_predictor_keys.append(key)
+ key = key.split('.')
+ conv_name = None
+ if key[1].endswith('cls'):
+ conv_name = 'conv_cls'
+ elif key[1].endswith('reg'):
+ conv_name = 'conv_reg'
+ elif key[1].endswith('centerness'):
+ conv_name = 'conv_centerness'
+ else:
+ assert NotImplementedError
+ if conv_name is not None:
+ key[1] = conv_name
+ new_predictor_keys.append('.'.join(key))
+ else:
+ ori_predictor_keys.pop(-1)
+ for i in range(len(new_predictor_keys)):
+ state_dict[new_predictor_keys[i]] = state_dict.pop(
+ ori_predictor_keys[i])
+ super()._load_from_state_dict(state_dict, prefix, local_metadata,
+ strict, missing_keys, unexpected_keys,
+ error_msgs)
+
+ def forward(self, feats):
+ """Forward features from the upstream network.
+
+ Args:
+ feats (tuple[Tensor]): Features from the upstream network, each is
+ a 4D-tensor.
+
+ Returns:
+ tuple: Usually contain classification scores and bbox predictions.
+ cls_scores (list[Tensor]): Box scores for each scale level,
+ each is a 4D-tensor, the channel number is
+ num_points * num_classes.
+ bbox_preds (list[Tensor]): Box energies / deltas for each scale
+ level, each is a 4D-tensor, the channel number is
+ num_points * 4.
+ """
+ return multi_apply(self.forward_single, feats)[:2]
+
+ def forward_single(self, x):
+ """Forward features of a single scale level.
+
+ Args:
+ x (Tensor): FPN feature maps of the specified stride.
+
+ Returns:
+ tuple: Scores for each class, bbox predictions, and features
+ after the classification and regression conv layers; some
+ models, such as FCOS, need these features.
+ """
+ cls_feat = x
+ reg_feat = x
+
+ for cls_layer in self.cls_convs:
+ cls_feat = cls_layer(cls_feat)
+ cls_score = self.conv_cls(cls_feat)
+
+ for reg_layer in self.reg_convs:
+ reg_feat = reg_layer(reg_feat)
+ bbox_pred = self.conv_reg(reg_feat)
+ return cls_score, bbox_pred, cls_feat, reg_feat
+
+ @abstractmethod
+ @force_fp32(apply_to=('cls_scores', 'bbox_preds'))
+ def loss(self,
+ cls_scores,
+ bbox_preds,
+ gt_bboxes,
+ gt_labels,
+ img_metas,
+ gt_bboxes_ignore=None):
+ """Compute loss of the head.
+
+ Args:
+ cls_scores (list[Tensor]): Box scores for each scale level,
+ each is a 4D-tensor, the channel number is
+ num_points * num_classes.
+ bbox_preds (list[Tensor]): Box energies / deltas for each scale
+ level, each is a 4D-tensor, the channel number is
+ num_points * 4.
+ gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
+ shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
+ gt_labels (list[Tensor]): class indices corresponding to each box
+ img_metas (list[dict]): Meta information of each image, e.g.,
+ image size, scaling factor, etc.
+ gt_bboxes_ignore (None | list[Tensor]): specify which bounding
+ boxes can be ignored when computing the loss.
+ """
+
+ raise NotImplementedError
+
+ @abstractmethod
+ @force_fp32(apply_to=('cls_scores', 'bbox_preds'))
+ def get_bboxes(self,
+ cls_scores,
+ bbox_preds,
+ img_metas,
+ cfg=None,
+ rescale=None):
+ """Transform network output for a batch into bbox predictions.
+
+ Args:
+ cls_scores (list[Tensor]): Box scores for each scale level
+ Has shape (N, num_points * num_classes, H, W)
+ bbox_preds (list[Tensor]): Box energies / deltas for each scale
+ level with shape (N, num_points * 4, H, W)
+ img_metas (list[dict]): Meta information of each image, e.g.,
+ image size, scaling factor, etc.
+ cfg (mmcv.Config): Test / postprocessing configuration,
+ if None, test_cfg would be used
+ rescale (bool): If True, return boxes in original image space
+ """
+
+ raise NotImplementedError
+
+ @abstractmethod
+ def get_targets(self, points, gt_bboxes_list, gt_labels_list):
+ """Compute regression, classification and centerness targets for points
+ in multiple images.
+
+ Args:
+ points (list[Tensor]): Points of each fpn level, each has shape
+ (num_points, 2).
+ gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image,
+ each has shape (num_gt, 4).
+ gt_labels_list (list[Tensor]): Ground truth labels of each box,
+ each has shape (num_gt,).
+ """
+ raise NotImplementedError
+
+ def _get_points_single(self,
+ featmap_size,
+ stride,
+ dtype,
+ device,
+ flatten=False):
+ """Get points of a single scale level."""
+ h, w = featmap_size
+ x_range = torch.arange(w, dtype=dtype, device=device)
+ y_range = torch.arange(h, dtype=dtype, device=device)
+ y, x = torch.meshgrid(y_range, x_range)
+ if flatten:
+ y = y.flatten()
+ x = x.flatten()
+ return y, x
+
+ def get_points(self, featmap_sizes, dtype, device, flatten=False):
+ """Get points according to feature map sizes.
+
+ Args:
+ featmap_sizes (list[tuple]): Multi-level feature map sizes.
+ dtype (torch.dtype): Type of points.
+ device (torch.device): Device of points.
+
+        Returns:
+            list[Tensor]: Points of each feature map level.
+ """
+ mlvl_points = []
+ for i in range(len(featmap_sizes)):
+ mlvl_points.append(
+ self._get_points_single(featmap_sizes[i], self.strides[i],
+ dtype, device, flatten))
+ return mlvl_points
+
+ def aug_test(self, feats, img_metas, rescale=False):
+ """Test function with test time augmentation.
+
+ Args:
+ feats (list[Tensor]): the outer list indicates test-time
+ augmentations and inner Tensor should have a shape NxCxHxW,
+ which contains features for all images in the batch.
+ img_metas (list[list[dict]]): the outer list indicates test-time
+ augs (multiscale, flip, etc.) and the inner list indicates
+ images in a batch. each dict has image information.
+ rescale (bool, optional): Whether to rescale the results.
+ Defaults to False.
+
+ Returns:
+ list[ndarray]: bbox results of each class
+ """
+ return self.aug_test_bboxes(feats, img_metas, rescale=rescale)
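+
+# Illustrative sketch (not part of the original file): `_get_points_single`
+# above only builds an index grid over the feature map. For a hypothetical
+# 2x3 feature map, the same (y, x) pair can be reproduced standalone with
+#
+#     y, x = torch.meshgrid(torch.arange(2.), torch.arange(3.))
+#
+# where y and x both have shape (2, 3); flatten=True simply flattens both.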
diff --git a/annotator/uniformer/mmdet/models/dense_heads/anchor_head.py b/annotator/uniformer/mmdet/models/dense_heads/anchor_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..eea73520572725f547216ab639c1ebbdfb50834c
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/dense_heads/anchor_head.py
@@ -0,0 +1,751 @@
+import torch
+import torch.nn as nn
+from mmcv.cnn import normal_init
+from mmcv.runner import force_fp32
+
+from mmdet.core import (anchor_inside_flags, build_anchor_generator,
+ build_assigner, build_bbox_coder, build_sampler,
+ images_to_levels, multi_apply, multiclass_nms, unmap)
+from ..builder import HEADS, build_loss
+from .base_dense_head import BaseDenseHead
+from .dense_test_mixins import BBoxTestMixin
+
+
+@HEADS.register_module()
+class AnchorHead(BaseDenseHead, BBoxTestMixin):
+ """Anchor-based head (RPN, RetinaNet, SSD, etc.).
+
+ Args:
+ num_classes (int): Number of categories excluding the background
+ category.
+ in_channels (int): Number of channels in the input feature map.
+ feat_channels (int): Number of hidden channels. Used in child classes.
+ anchor_generator (dict): Config dict for anchor generator
+ bbox_coder (dict): Config of bounding box coder.
+ reg_decoded_bbox (bool): If true, the regression loss would be
+ applied directly on decoded bounding boxes, converting both
+ the predicted boxes and regression targets to absolute
+ coordinates format. Default False. It should be `True` when
+ using `IoULoss`, `GIoULoss`, or `DIoULoss` in the bbox head.
+ loss_cls (dict): Config of classification loss.
+ loss_bbox (dict): Config of localization loss.
+ train_cfg (dict): Training config of anchor head.
+ test_cfg (dict): Testing config of anchor head.
+ """ # noqa: W605
+
+ def __init__(self,
+ num_classes,
+ in_channels,
+ feat_channels=256,
+ anchor_generator=dict(
+ type='AnchorGenerator',
+ scales=[8, 16, 32],
+ ratios=[0.5, 1.0, 2.0],
+ strides=[4, 8, 16, 32, 64]),
+ bbox_coder=dict(
+ type='DeltaXYWHBBoxCoder',
+ clip_border=True,
+ target_means=(.0, .0, .0, .0),
+ target_stds=(1.0, 1.0, 1.0, 1.0)),
+ reg_decoded_bbox=False,
+ loss_cls=dict(
+ type='CrossEntropyLoss',
+ use_sigmoid=True,
+ loss_weight=1.0),
+ loss_bbox=dict(
+ type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0),
+ train_cfg=None,
+ test_cfg=None):
+ super(AnchorHead, self).__init__()
+ self.in_channels = in_channels
+ self.num_classes = num_classes
+ self.feat_channels = feat_channels
+ self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False)
+        # TODO: find a better way to determine whether to sample or not
+ self.sampling = loss_cls['type'] not in [
+ 'FocalLoss', 'GHMC', 'QualityFocalLoss'
+ ]
+ if self.use_sigmoid_cls:
+ self.cls_out_channels = num_classes
+ else:
+ self.cls_out_channels = num_classes + 1
+
+ if self.cls_out_channels <= 0:
+ raise ValueError(f'num_classes={num_classes} is too small')
+ self.reg_decoded_bbox = reg_decoded_bbox
+
+ self.bbox_coder = build_bbox_coder(bbox_coder)
+ self.loss_cls = build_loss(loss_cls)
+ self.loss_bbox = build_loss(loss_bbox)
+ self.train_cfg = train_cfg
+ self.test_cfg = test_cfg
+ if self.train_cfg:
+ self.assigner = build_assigner(self.train_cfg.assigner)
+ # use PseudoSampler when sampling is False
+ if self.sampling and hasattr(self.train_cfg, 'sampler'):
+ sampler_cfg = self.train_cfg.sampler
+ else:
+ sampler_cfg = dict(type='PseudoSampler')
+ self.sampler = build_sampler(sampler_cfg, context=self)
+ self.fp16_enabled = False
+
+ self.anchor_generator = build_anchor_generator(anchor_generator)
+        # usually the numbers of anchors for each level are the same,
+        # except for SSD detectors
+ self.num_anchors = self.anchor_generator.num_base_anchors[0]
+ self._init_layers()
+
+ def _init_layers(self):
+ """Initialize layers of the head."""
+ self.conv_cls = nn.Conv2d(self.in_channels,
+ self.num_anchors * self.cls_out_channels, 1)
+ self.conv_reg = nn.Conv2d(self.in_channels, self.num_anchors * 4, 1)
+
+ def init_weights(self):
+ """Initialize weights of the head."""
+ normal_init(self.conv_cls, std=0.01)
+ normal_init(self.conv_reg, std=0.01)
+
+ def forward_single(self, x):
+ """Forward feature of a single scale level.
+
+ Args:
+ x (Tensor): Features of a single scale level.
+
+ Returns:
+ tuple:
+                cls_score (Tensor): Cls scores for a single scale level, \
+                    the channel number is num_anchors * num_classes.
+                bbox_pred (Tensor): Box energies / deltas for a single scale \
+                    level, the channel number is num_anchors * 4.
+ """
+ cls_score = self.conv_cls(x)
+ bbox_pred = self.conv_reg(x)
+ return cls_score, bbox_pred
+
+ def forward(self, feats):
+ """Forward features from the upstream network.
+
+ Args:
+ feats (tuple[Tensor]): Features from the upstream network, each is
+ a 4D-tensor.
+
+ Returns:
+ tuple: A tuple of classification scores and bbox prediction.
+
+            - cls_scores (list[Tensor]): Classification scores for all \
+                scale levels, each is a 4D-tensor, the channel number \
+                is num_anchors * num_classes.
+            - bbox_preds (list[Tensor]): Box energies / deltas for all \
+                scale levels, each is a 4D-tensor, the channel number \
+                is num_anchors * 4.
+ """
+ return multi_apply(self.forward_single, feats)
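+
+    # Note (illustrative, not part of the original file): `multi_apply(f, xs)`
+    # behaves roughly like `tuple(map(list, zip(*[f(x) for x in xs])))`, i.e.
+    # `forward_single` is called once per FPN level and the per-level
+    # (cls_score, bbox_pred) pairs are transposed into two per-level lists.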
+
+ def get_anchors(self, featmap_sizes, img_metas, device='cuda'):
+ """Get anchors according to feature map sizes.
+
+ Args:
+ featmap_sizes (list[tuple]): Multi-level feature map sizes.
+ img_metas (list[dict]): Image meta info.
+ device (torch.device | str): Device for returned tensors
+
+ Returns:
+ tuple:
+ anchor_list (list[Tensor]): Anchors of each image.
+ valid_flag_list (list[Tensor]): Valid flags of each image.
+ """
+ num_imgs = len(img_metas)
+
+        # since the feature map sizes of all images are the same, we only
+        # compute anchors once
+ multi_level_anchors = self.anchor_generator.grid_anchors(
+ featmap_sizes, device)
+ anchor_list = [multi_level_anchors for _ in range(num_imgs)]
+
+ # for each image, we compute valid flags of multi level anchors
+ valid_flag_list = []
+ for img_id, img_meta in enumerate(img_metas):
+ multi_level_flags = self.anchor_generator.valid_flags(
+ featmap_sizes, img_meta['pad_shape'], device)
+ valid_flag_list.append(multi_level_flags)
+
+ return anchor_list, valid_flag_list
+
+ def _get_targets_single(self,
+ flat_anchors,
+ valid_flags,
+ gt_bboxes,
+ gt_bboxes_ignore,
+ gt_labels,
+ img_meta,
+ label_channels=1,
+ unmap_outputs=True):
+ """Compute regression and classification targets for anchors in a
+ single image.
+
+ Args:
+ flat_anchors (Tensor): Multi-level anchors of the image, which are
+                concatenated into a single tensor of shape (num_anchors, 4).
+ valid_flags (Tensor): Multi level valid flags of the image,
+ which are concatenated into a single tensor of
+ shape (num_anchors,).
+ gt_bboxes (Tensor): Ground truth bboxes of the image,
+ shape (num_gts, 4).
+ gt_bboxes_ignore (Tensor): Ground truth bboxes to be
+ ignored, shape (num_ignored_gts, 4).
+ img_meta (dict): Meta info of the image.
+ gt_labels (Tensor): Ground truth labels of each box,
+ shape (num_gts,).
+ label_channels (int): Channel of label.
+ unmap_outputs (bool): Whether to map outputs back to the original
+ set of anchors.
+
+        Returns:
+            tuple:
+                labels (Tensor): Labels of all valid anchors in the image.
+                label_weights (Tensor): Label weights of all valid anchors.
+                bbox_targets (Tensor): BBox targets of all valid anchors.
+                bbox_weights (Tensor): BBox weights of all valid anchors.
+                pos_inds (Tensor): Indices of positive anchors.
+                neg_inds (Tensor): Indices of negative anchors.
+                sampling_result (:obj:`SamplingResult`): Sampling result.
+ inside_flags = anchor_inside_flags(flat_anchors, valid_flags,
+ img_meta['img_shape'][:2],
+ self.train_cfg.allowed_border)
+ if not inside_flags.any():
+ return (None, ) * 7
+ # assign gt and sample anchors
+ anchors = flat_anchors[inside_flags, :]
+
+ assign_result = self.assigner.assign(
+ anchors, gt_bboxes, gt_bboxes_ignore,
+ None if self.sampling else gt_labels)
+ sampling_result = self.sampler.sample(assign_result, anchors,
+ gt_bboxes)
+
+ num_valid_anchors = anchors.shape[0]
+ bbox_targets = torch.zeros_like(anchors)
+ bbox_weights = torch.zeros_like(anchors)
+ labels = anchors.new_full((num_valid_anchors, ),
+ self.num_classes,
+ dtype=torch.long)
+ label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float)
+
+ pos_inds = sampling_result.pos_inds
+ neg_inds = sampling_result.neg_inds
+ if len(pos_inds) > 0:
+ if not self.reg_decoded_bbox:
+ pos_bbox_targets = self.bbox_coder.encode(
+ sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes)
+ else:
+ pos_bbox_targets = sampling_result.pos_gt_bboxes
+ bbox_targets[pos_inds, :] = pos_bbox_targets
+ bbox_weights[pos_inds, :] = 1.0
+ if gt_labels is None:
+ # Only rpn gives gt_labels as None
+ # Foreground is the first class since v2.5.0
+ labels[pos_inds] = 0
+ else:
+ labels[pos_inds] = gt_labels[
+ sampling_result.pos_assigned_gt_inds]
+ if self.train_cfg.pos_weight <= 0:
+ label_weights[pos_inds] = 1.0
+ else:
+ label_weights[pos_inds] = self.train_cfg.pos_weight
+ if len(neg_inds) > 0:
+ label_weights[neg_inds] = 1.0
+
+ # map up to original set of anchors
+ if unmap_outputs:
+ num_total_anchors = flat_anchors.size(0)
+ labels = unmap(
+ labels, num_total_anchors, inside_flags,
+ fill=self.num_classes) # fill bg label
+ label_weights = unmap(label_weights, num_total_anchors,
+ inside_flags)
+ bbox_targets = unmap(bbox_targets, num_total_anchors, inside_flags)
+ bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags)
+
+ return (labels, label_weights, bbox_targets, bbox_weights, pos_inds,
+ neg_inds, sampling_result)
+
+ def get_targets(self,
+ anchor_list,
+ valid_flag_list,
+ gt_bboxes_list,
+ img_metas,
+ gt_bboxes_ignore_list=None,
+ gt_labels_list=None,
+ label_channels=1,
+ unmap_outputs=True,
+ return_sampling_results=False):
+ """Compute regression and classification targets for anchors in
+ multiple images.
+
+ Args:
+ anchor_list (list[list[Tensor]]): Multi level anchors of each
+ image. The outer list indicates images, and the inner list
+ corresponds to feature levels of the image. Each element of
+ the inner list is a tensor of shape (num_anchors, 4).
+ valid_flag_list (list[list[Tensor]]): Multi level valid flags of
+ each image. The outer list indicates images, and the inner list
+ corresponds to feature levels of the image. Each element of
+ the inner list is a tensor of shape (num_anchors, )
+ gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image.
+ img_metas (list[dict]): Meta info of each image.
+ gt_bboxes_ignore_list (list[Tensor]): Ground truth bboxes to be
+ ignored.
+ gt_labels_list (list[Tensor]): Ground truth labels of each box.
+ label_channels (int): Channel of label.
+ unmap_outputs (bool): Whether to map outputs back to the original
+ set of anchors.
+
+ Returns:
+ tuple: Usually returns a tuple containing learning targets.
+
+ - labels_list (list[Tensor]): Labels of each level.
+ - label_weights_list (list[Tensor]): Label weights of each \
+ level.
+ - bbox_targets_list (list[Tensor]): BBox targets of each level.
+ - bbox_weights_list (list[Tensor]): BBox weights of each level.
+ - num_total_pos (int): Number of positive samples in all \
+ images.
+ - num_total_neg (int): Number of negative samples in all \
+ images.
+            additional_returns: This function enables user-defined returns from
+                `self._get_targets_single`. These returns are currently mapped
+                to per-level properties (i.e. having the HxW dimension) and are
+                appended after the default returns.
+ """
+ num_imgs = len(img_metas)
+ assert len(anchor_list) == len(valid_flag_list) == num_imgs
+
+ # anchor number of multi levels
+ num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]
+ # concat all level anchors to a single tensor
+ concat_anchor_list = []
+ concat_valid_flag_list = []
+ for i in range(num_imgs):
+ assert len(anchor_list[i]) == len(valid_flag_list[i])
+ concat_anchor_list.append(torch.cat(anchor_list[i]))
+ concat_valid_flag_list.append(torch.cat(valid_flag_list[i]))
+
+ # compute targets for each image
+ if gt_bboxes_ignore_list is None:
+ gt_bboxes_ignore_list = [None for _ in range(num_imgs)]
+ if gt_labels_list is None:
+ gt_labels_list = [None for _ in range(num_imgs)]
+ results = multi_apply(
+ self._get_targets_single,
+ concat_anchor_list,
+ concat_valid_flag_list,
+ gt_bboxes_list,
+ gt_bboxes_ignore_list,
+ gt_labels_list,
+ img_metas,
+ label_channels=label_channels,
+ unmap_outputs=unmap_outputs)
+ (all_labels, all_label_weights, all_bbox_targets, all_bbox_weights,
+ pos_inds_list, neg_inds_list, sampling_results_list) = results[:7]
+ rest_results = list(results[7:]) # user-added return values
+ # no valid anchors
+ if any([labels is None for labels in all_labels]):
+ return None
+ # sampled anchors of all images
+ num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list])
+ num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list])
+ # split targets to a list w.r.t. multiple levels
+ labels_list = images_to_levels(all_labels, num_level_anchors)
+ label_weights_list = images_to_levels(all_label_weights,
+ num_level_anchors)
+ bbox_targets_list = images_to_levels(all_bbox_targets,
+ num_level_anchors)
+ bbox_weights_list = images_to_levels(all_bbox_weights,
+ num_level_anchors)
+ res = (labels_list, label_weights_list, bbox_targets_list,
+ bbox_weights_list, num_total_pos, num_total_neg)
+ if return_sampling_results:
+ res = res + (sampling_results_list, )
+ for i, r in enumerate(rest_results): # user-added return values
+ rest_results[i] = images_to_levels(r, num_level_anchors)
+
+ return res + tuple(rest_results)
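+
+    # Note (illustrative, not part of the original file): `images_to_levels`
+    # stacks the per-image target tensors and re-splits them along the anchor
+    # dimension according to `num_level_anchors`, so "per-image" targets
+    # become "per-level" lists matching the FPN outputs.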
+
+ def loss_single(self, cls_score, bbox_pred, anchors, labels, label_weights,
+ bbox_targets, bbox_weights, num_total_samples):
+ """Compute loss of a single scale level.
+
+ Args:
+ cls_score (Tensor): Box scores for each scale level
+ Has shape (N, num_anchors * num_classes, H, W).
+ bbox_pred (Tensor): Box energies / deltas for each scale
+ level with shape (N, num_anchors * 4, H, W).
+ anchors (Tensor): Box reference for each scale level with shape
+ (N, num_total_anchors, 4).
+ labels (Tensor): Labels of each anchors with shape
+ (N, num_total_anchors).
+ label_weights (Tensor): Label weights of each anchor with shape
+ (N, num_total_anchors)
+            bbox_targets (Tensor): BBox regression targets of each anchor with
+                shape (N, num_total_anchors, 4).
+ bbox_weights (Tensor): BBox regression loss weights of each anchor
+ with shape (N, num_total_anchors, 4).
+            num_total_samples (int): If sampling, the total number of sampled
+                anchors (positives plus negatives); otherwise, the number of
+                positive anchors.
+
+ Returns:
+ dict[str, Tensor]: A dictionary of loss components.
+ """
+ # classification loss
+ labels = labels.reshape(-1)
+ label_weights = label_weights.reshape(-1)
+ cls_score = cls_score.permute(0, 2, 3,
+ 1).reshape(-1, self.cls_out_channels)
+ loss_cls = self.loss_cls(
+ cls_score, labels, label_weights, avg_factor=num_total_samples)
+ # regression loss
+ bbox_targets = bbox_targets.reshape(-1, 4)
+ bbox_weights = bbox_weights.reshape(-1, 4)
+ bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4)
+ if self.reg_decoded_bbox:
+            # When the regression loss (e.g. `IoULoss`, `GIoULoss`) is applied
+            # directly on decoded bounding boxes, decode the predicted deltas
+            # back to absolute coordinates first.
+ anchors = anchors.reshape(-1, 4)
+ bbox_pred = self.bbox_coder.decode(anchors, bbox_pred)
+ loss_bbox = self.loss_bbox(
+ bbox_pred,
+ bbox_targets,
+ bbox_weights,
+ avg_factor=num_total_samples)
+ return loss_cls, loss_bbox
+
+ @force_fp32(apply_to=('cls_scores', 'bbox_preds'))
+ def loss(self,
+ cls_scores,
+ bbox_preds,
+ gt_bboxes,
+ gt_labels,
+ img_metas,
+ gt_bboxes_ignore=None):
+ """Compute losses of the head.
+
+ Args:
+ cls_scores (list[Tensor]): Box scores for each scale level
+ Has shape (N, num_anchors * num_classes, H, W)
+ bbox_preds (list[Tensor]): Box energies / deltas for each scale
+ level with shape (N, num_anchors * 4, H, W)
+ gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
+ shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
+ gt_labels (list[Tensor]): class indices corresponding to each box
+ img_metas (list[dict]): Meta information of each image, e.g.,
+ image size, scaling factor, etc.
+ gt_bboxes_ignore (None | list[Tensor]): specify which bounding
+ boxes can be ignored when computing the loss. Default: None
+
+ Returns:
+ dict[str, Tensor]: A dictionary of loss components.
+ """
+ featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
+ assert len(featmap_sizes) == self.anchor_generator.num_levels
+
+ device = cls_scores[0].device
+
+ anchor_list, valid_flag_list = self.get_anchors(
+ featmap_sizes, img_metas, device=device)
+ label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1
+ cls_reg_targets = self.get_targets(
+ anchor_list,
+ valid_flag_list,
+ gt_bboxes,
+ img_metas,
+ gt_bboxes_ignore_list=gt_bboxes_ignore,
+ gt_labels_list=gt_labels,
+ label_channels=label_channels)
+ if cls_reg_targets is None:
+ return None
+ (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,
+ num_total_pos, num_total_neg) = cls_reg_targets
+ num_total_samples = (
+ num_total_pos + num_total_neg if self.sampling else num_total_pos)
+
+ # anchor number of multi levels
+ num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]
+ # concat all level anchors and flags to a single tensor
+ concat_anchor_list = []
+ for i in range(len(anchor_list)):
+ concat_anchor_list.append(torch.cat(anchor_list[i]))
+ all_anchor_list = images_to_levels(concat_anchor_list,
+ num_level_anchors)
+
+ losses_cls, losses_bbox = multi_apply(
+ self.loss_single,
+ cls_scores,
+ bbox_preds,
+ all_anchor_list,
+ labels_list,
+ label_weights_list,
+ bbox_targets_list,
+ bbox_weights_list,
+ num_total_samples=num_total_samples)
+ return dict(loss_cls=losses_cls, loss_bbox=losses_bbox)
+
+ @force_fp32(apply_to=('cls_scores', 'bbox_preds'))
+ def get_bboxes(self,
+ cls_scores,
+ bbox_preds,
+ img_metas,
+ cfg=None,
+ rescale=False,
+ with_nms=True):
+ """Transform network output for a batch into bbox predictions.
+
+ Args:
+ cls_scores (list[Tensor]): Box scores for each level in the
+ feature pyramid, has shape
+ (N, num_anchors * num_classes, H, W).
+ bbox_preds (list[Tensor]): Box energies / deltas for each
+ level in the feature pyramid, has shape
+ (N, num_anchors * 4, H, W).
+ img_metas (list[dict]): Meta information of each image, e.g.,
+ image size, scaling factor, etc.
+ cfg (mmcv.Config | None): Test / postprocessing configuration,
+ if None, test_cfg would be used
+ rescale (bool): If True, return boxes in original image space.
+ Default: False.
+ with_nms (bool): If True, do nms before return boxes.
+ Default: True.
+
+ Returns:
+ list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple.
+ The first item is an (n, 5) tensor, where 5 represent
+ (tl_x, tl_y, br_x, br_y, score) and the score between 0 and 1.
+ The shape of the second tensor in the tuple is (n,), and
+ each element represents the class label of the corresponding
+ box.
+
+ Example:
+ >>> import mmcv
+ >>> self = AnchorHead(
+ >>> num_classes=9,
+ >>> in_channels=1,
+ >>> anchor_generator=dict(
+ >>> type='AnchorGenerator',
+ >>> scales=[8],
+ >>> ratios=[0.5, 1.0, 2.0],
+ >>> strides=[4,]))
+ >>> img_metas = [{'img_shape': (32, 32, 3), 'scale_factor': 1}]
+ >>> cfg = mmcv.Config(dict(
+ >>> score_thr=0.00,
+ >>> nms=dict(type='nms', iou_thr=1.0),
+ >>> max_per_img=10))
+ >>> feat = torch.rand(1, 1, 3, 3)
+ >>> cls_score, bbox_pred = self.forward_single(feat)
+ >>> # note the input lists are over different levels, not images
+ >>> cls_scores, bbox_preds = [cls_score], [bbox_pred]
+ >>> result_list = self.get_bboxes(cls_scores, bbox_preds,
+ >>> img_metas, cfg)
+ >>> det_bboxes, det_labels = result_list[0]
+ >>> assert len(result_list) == 1
+ >>> assert det_bboxes.shape[1] == 5
+ >>> assert len(det_bboxes) == len(det_labels) == cfg.max_per_img
+ """
+ assert len(cls_scores) == len(bbox_preds)
+ num_levels = len(cls_scores)
+
+ device = cls_scores[0].device
+ featmap_sizes = [cls_scores[i].shape[-2:] for i in range(num_levels)]
+ mlvl_anchors = self.anchor_generator.grid_anchors(
+ featmap_sizes, device=device)
+
+ mlvl_cls_scores = [cls_scores[i].detach() for i in range(num_levels)]
+ mlvl_bbox_preds = [bbox_preds[i].detach() for i in range(num_levels)]
+
+ if torch.onnx.is_in_onnx_export():
+ assert len(
+ img_metas
+ ) == 1, 'Only support one input image while in exporting to ONNX'
+ img_shapes = img_metas[0]['img_shape_for_onnx']
+ else:
+ img_shapes = [
+ img_metas[i]['img_shape']
+ for i in range(cls_scores[0].shape[0])
+ ]
+ scale_factors = [
+ img_metas[i]['scale_factor'] for i in range(cls_scores[0].shape[0])
+ ]
+
+ if with_nms:
+ # some heads don't support with_nms argument
+ result_list = self._get_bboxes(mlvl_cls_scores, mlvl_bbox_preds,
+ mlvl_anchors, img_shapes,
+ scale_factors, cfg, rescale)
+ else:
+ result_list = self._get_bboxes(mlvl_cls_scores, mlvl_bbox_preds,
+ mlvl_anchors, img_shapes,
+ scale_factors, cfg, rescale,
+ with_nms)
+ return result_list
+
+ def _get_bboxes(self,
+ mlvl_cls_scores,
+ mlvl_bbox_preds,
+ mlvl_anchors,
+ img_shapes,
+ scale_factors,
+ cfg,
+ rescale=False,
+ with_nms=True):
+ """Transform outputs for a batch item into bbox predictions.
+
+ Args:
+ mlvl_cls_scores (list[Tensor]): Each element in the list is
+ the scores of bboxes of single level in the feature pyramid,
+ has shape (N, num_anchors * num_classes, H, W).
+ mlvl_bbox_preds (list[Tensor]): Each element in the list is the
+ bboxes predictions of single level in the feature pyramid,
+ has shape (N, num_anchors * 4, H, W).
+ mlvl_anchors (list[Tensor]): Each element in the list is
+ the anchors of single level in feature pyramid, has shape
+ (num_anchors, 4).
+            img_shapes (list[tuple[int]]): Each tuple in the list represents
+                the shape (height, width, 3) of a single image in the batch.
+            scale_factors (list[ndarray]): Scale factors of the batch images,
+                arranged as list[(w_scale, h_scale, w_scale, h_scale)].
+ cfg (mmcv.Config): Test / postprocessing configuration,
+ if None, test_cfg would be used.
+ rescale (bool): If True, return boxes in original image space.
+ Default: False.
+ with_nms (bool): If True, do nms before return boxes.
+ Default: True.
+
+ Returns:
+ list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple.
+ The first item is an (n, 5) tensor, where 5 represent
+ (tl_x, tl_y, br_x, br_y, score) and the score between 0 and 1.
+ The shape of the second tensor in the tuple is (n,), and
+ each element represents the class label of the corresponding
+ box.
+ """
+ cfg = self.test_cfg if cfg is None else cfg
+ assert len(mlvl_cls_scores) == len(mlvl_bbox_preds) == len(
+ mlvl_anchors)
+ batch_size = mlvl_cls_scores[0].shape[0]
+ # convert to tensor to keep tracing
+ nms_pre_tensor = torch.tensor(
+ cfg.get('nms_pre', -1),
+ device=mlvl_cls_scores[0].device,
+ dtype=torch.long)
+
+ mlvl_bboxes = []
+ mlvl_scores = []
+ for cls_score, bbox_pred, anchors in zip(mlvl_cls_scores,
+ mlvl_bbox_preds,
+ mlvl_anchors):
+ assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
+ cls_score = cls_score.permute(0, 2, 3,
+ 1).reshape(batch_size, -1,
+ self.cls_out_channels)
+ if self.use_sigmoid_cls:
+ scores = cls_score.sigmoid()
+ else:
+ scores = cls_score.softmax(-1)
+ bbox_pred = bbox_pred.permute(0, 2, 3,
+ 1).reshape(batch_size, -1, 4)
+ anchors = anchors.expand_as(bbox_pred)
+ # Always keep topk op for dynamic input in onnx
+ if nms_pre_tensor > 0 and (torch.onnx.is_in_onnx_export()
+ or scores.shape[-2] > nms_pre_tensor):
+ from torch import _shape_as_tensor
+ # keep shape as tensor and get k
+ num_anchor = _shape_as_tensor(scores)[-2].to(
+ nms_pre_tensor.device)
+ nms_pre = torch.where(nms_pre_tensor < num_anchor,
+ nms_pre_tensor, num_anchor)
+
+ # Get maximum scores for foreground classes.
+ if self.use_sigmoid_cls:
+ max_scores, _ = scores.max(-1)
+ else:
+                    # note that we set FG labels to [0, num_class-1]
+ # since mmdet v2.0
+ # BG cat_id: num_class
+ max_scores, _ = scores[..., :-1].max(-1)
+
+ _, topk_inds = max_scores.topk(nms_pre)
+ batch_inds = torch.arange(batch_size).view(
+ -1, 1).expand_as(topk_inds)
+ anchors = anchors[batch_inds, topk_inds, :]
+ bbox_pred = bbox_pred[batch_inds, topk_inds, :]
+ scores = scores[batch_inds, topk_inds, :]
+
+ bboxes = self.bbox_coder.decode(
+ anchors, bbox_pred, max_shape=img_shapes)
+ mlvl_bboxes.append(bboxes)
+ mlvl_scores.append(scores)
+
+ batch_mlvl_bboxes = torch.cat(mlvl_bboxes, dim=1)
+ if rescale:
+ batch_mlvl_bboxes /= batch_mlvl_bboxes.new_tensor(
+ scale_factors).unsqueeze(1)
+ batch_mlvl_scores = torch.cat(mlvl_scores, dim=1)
+
+        # Set the max number of boxes to be fed into nms in deployment
+ deploy_nms_pre = cfg.get('deploy_nms_pre', -1)
+ if deploy_nms_pre > 0 and torch.onnx.is_in_onnx_export():
+ # Get maximum scores for foreground classes.
+ if self.use_sigmoid_cls:
+ max_scores, _ = batch_mlvl_scores.max(-1)
+ else:
+                # note that we set FG labels to [0, num_class-1]
+ # since mmdet v2.0
+ # BG cat_id: num_class
+ max_scores, _ = batch_mlvl_scores[..., :-1].max(-1)
+ _, topk_inds = max_scores.topk(deploy_nms_pre)
+ batch_inds = torch.arange(batch_size).view(-1,
+ 1).expand_as(topk_inds)
+ batch_mlvl_scores = batch_mlvl_scores[batch_inds, topk_inds]
+ batch_mlvl_bboxes = batch_mlvl_bboxes[batch_inds, topk_inds]
+ if self.use_sigmoid_cls:
+ # Add a dummy background class to the backend when using sigmoid
+            # note that we set FG labels to [0, num_class-1] since mmdet v2.0
+ # BG cat_id: num_class
+ padding = batch_mlvl_scores.new_zeros(batch_size,
+ batch_mlvl_scores.shape[1],
+ 1)
+ batch_mlvl_scores = torch.cat([batch_mlvl_scores, padding], dim=-1)
+
+ if with_nms:
+ det_results = []
+ for (mlvl_bboxes, mlvl_scores) in zip(batch_mlvl_bboxes,
+ batch_mlvl_scores):
+ det_bbox, det_label = multiclass_nms(mlvl_bboxes, mlvl_scores,
+ cfg.score_thr, cfg.nms,
+ cfg.max_per_img)
+ det_results.append(tuple([det_bbox, det_label]))
+ else:
+ det_results = [
+ tuple(mlvl_bs)
+ for mlvl_bs in zip(batch_mlvl_bboxes, batch_mlvl_scores)
+ ]
+ return det_results
+
+ def aug_test(self, feats, img_metas, rescale=False):
+ """Test function with test time augmentation.
+
+ Args:
+ feats (list[Tensor]): the outer list indicates test-time
+ augmentations and inner Tensor should have a shape NxCxHxW,
+ which contains features for all images in the batch.
+ img_metas (list[list[dict]]): the outer list indicates test-time
+ augs (multiscale, flip, etc.) and the inner list indicates
+ images in a batch. each dict has image information.
+ rescale (bool, optional): Whether to rescale the results.
+ Defaults to False.
+
+ Returns:
+ list[ndarray]: bbox results of each class
+ """
+ return self.aug_test_bboxes(feats, img_metas, rescale=rescale)
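+
+# Illustrative usage sketch (not part of upstream mmdet), mirroring the
+# docstring example in `get_bboxes` above: with 3 anchors per location and
+# 9 sigmoid classes, `forward_single` yields 27 cls and 12 reg channels.
+#
+#     head = AnchorHead(
+#         num_classes=9,
+#         in_channels=1,
+#         anchor_generator=dict(
+#             type='AnchorGenerator',
+#             scales=[8],
+#             ratios=[0.5, 1.0, 2.0],
+#             strides=[4]))
+#     feat = torch.rand(1, 1, 3, 3)
+#     cls_score, bbox_pred = head.forward_single(feat)
+#     assert cls_score.shape == (1, 27, 3, 3)
+#     assert bbox_pred.shape == (1, 12, 3, 3)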
diff --git a/annotator/uniformer/mmdet/models/dense_heads/atss_head.py b/annotator/uniformer/mmdet/models/dense_heads/atss_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..ff55dfa1790ba270539fc9f623dbb2984fa1a99e
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/dense_heads/atss_head.py
@@ -0,0 +1,689 @@
+import torch
+import torch.nn as nn
+from mmcv.cnn import ConvModule, Scale, bias_init_with_prob, normal_init
+from mmcv.runner import force_fp32
+
+from mmdet.core import (anchor_inside_flags, build_assigner, build_sampler,
+ images_to_levels, multi_apply, multiclass_nms,
+ reduce_mean, unmap)
+from ..builder import HEADS, build_loss
+from .anchor_head import AnchorHead
+
+EPS = 1e-12
+
+
+@HEADS.register_module()
+class ATSSHead(AnchorHead):
+ """Bridging the Gap Between Anchor-based and Anchor-free Detection via
+ Adaptive Training Sample Selection.
+
+    The ATSS head structure is similar to FCOS, but ATSS uses anchor boxes
+    and assigns labels by Adaptive Training Sample Selection instead of
+    max IoU.
+
+ https://arxiv.org/abs/1912.02424
+ """
+
+ def __init__(self,
+ num_classes,
+ in_channels,
+ stacked_convs=4,
+ conv_cfg=None,
+ norm_cfg=dict(type='GN', num_groups=32, requires_grad=True),
+ loss_centerness=dict(
+ type='CrossEntropyLoss',
+ use_sigmoid=True,
+ loss_weight=1.0),
+ **kwargs):
+ self.stacked_convs = stacked_convs
+ self.conv_cfg = conv_cfg
+ self.norm_cfg = norm_cfg
+ super(ATSSHead, self).__init__(num_classes, in_channels, **kwargs)
+
+ self.sampling = False
+ if self.train_cfg:
+ self.assigner = build_assigner(self.train_cfg.assigner)
+            # sampling is False, so use PseudoSampler
+ sampler_cfg = dict(type='PseudoSampler')
+ self.sampler = build_sampler(sampler_cfg, context=self)
+ self.loss_centerness = build_loss(loss_centerness)
+
+ def _init_layers(self):
+ """Initialize layers of the head."""
+ self.relu = nn.ReLU(inplace=True)
+ self.cls_convs = nn.ModuleList()
+ self.reg_convs = nn.ModuleList()
+ for i in range(self.stacked_convs):
+ chn = self.in_channels if i == 0 else self.feat_channels
+ self.cls_convs.append(
+ ConvModule(
+ chn,
+ self.feat_channels,
+ 3,
+ stride=1,
+ padding=1,
+ conv_cfg=self.conv_cfg,
+ norm_cfg=self.norm_cfg))
+ self.reg_convs.append(
+ ConvModule(
+ chn,
+ self.feat_channels,
+ 3,
+ stride=1,
+ padding=1,
+ conv_cfg=self.conv_cfg,
+ norm_cfg=self.norm_cfg))
+ self.atss_cls = nn.Conv2d(
+ self.feat_channels,
+ self.num_anchors * self.cls_out_channels,
+ 3,
+ padding=1)
+ self.atss_reg = nn.Conv2d(
+ self.feat_channels, self.num_anchors * 4, 3, padding=1)
+ self.atss_centerness = nn.Conv2d(
+ self.feat_channels, self.num_anchors * 1, 3, padding=1)
+ self.scales = nn.ModuleList(
+ [Scale(1.0) for _ in self.anchor_generator.strides])
+
+ def init_weights(self):
+ """Initialize weights of the head."""
+ for m in self.cls_convs:
+ normal_init(m.conv, std=0.01)
+ for m in self.reg_convs:
+ normal_init(m.conv, std=0.01)
+ bias_cls = bias_init_with_prob(0.01)
+ normal_init(self.atss_cls, std=0.01, bias=bias_cls)
+ normal_init(self.atss_reg, std=0.01)
+ normal_init(self.atss_centerness, std=0.01)
+
+ def forward(self, feats):
+ """Forward features from the upstream network.
+
+ Args:
+ feats (tuple[Tensor]): Features from the upstream network, each is
+ a 4D-tensor.
+
+ Returns:
+ tuple: Usually a tuple of classification scores and bbox prediction
+ cls_scores (list[Tensor]): Classification scores for all scale
+ levels, each is a 4D-tensor, the channels number is
+ num_anchors * num_classes.
+ bbox_preds (list[Tensor]): Box energies / deltas for all scale
+ levels, each is a 4D-tensor, the channels number is
+ num_anchors * 4.
+ """
+ return multi_apply(self.forward_single, feats, self.scales)
+
+ def forward_single(self, x, scale):
+ """Forward feature of a single scale level.
+
+ Args:
+ x (Tensor): Features of a single scale level.
+            scale (:obj:`mmcv.cnn.Scale`): Learnable scale module to resize
+ the bbox prediction.
+
+ Returns:
+ tuple:
+                cls_score (Tensor): Cls scores for a single scale level,
+                    the channel number is num_anchors * num_classes.
+                bbox_pred (Tensor): Box energies / deltas for a single scale
+                    level, the channel number is num_anchors * 4.
+                centerness (Tensor): Centerness for a single scale level
+                    with shape (N, num_anchors * 1, H, W).
+ """
+ cls_feat = x
+ reg_feat = x
+ for cls_conv in self.cls_convs:
+ cls_feat = cls_conv(cls_feat)
+ for reg_conv in self.reg_convs:
+ reg_feat = reg_conv(reg_feat)
+ cls_score = self.atss_cls(cls_feat)
+        # following ATSS, we do not apply exp to bbox_pred
+ bbox_pred = scale(self.atss_reg(reg_feat)).float()
+ centerness = self.atss_centerness(reg_feat)
+ return cls_score, bbox_pred, centerness
+
+ def loss_single(self, anchors, cls_score, bbox_pred, centerness, labels,
+ label_weights, bbox_targets, num_total_samples):
+ """Compute loss of a single scale level.
+
+ Args:
+ cls_score (Tensor): Box scores for each scale level
+ Has shape (N, num_anchors * num_classes, H, W).
+ bbox_pred (Tensor): Box energies / deltas for each scale
+ level with shape (N, num_anchors * 4, H, W).
+ anchors (Tensor): Box reference for each scale level with shape
+ (N, num_total_anchors, 4).
+ labels (Tensor): Labels of each anchors with shape
+ (N, num_total_anchors).
+ label_weights (Tensor): Label weights of each anchor with shape
+ (N, num_total_anchors)
+            bbox_targets (Tensor): BBox regression targets of each anchor with
+                shape (N, num_total_anchors, 4).
+            num_total_samples (int): Number of positive samples that is
+                reduced over all GPUs.
+
+ Returns:
+ dict[str, Tensor]: A dictionary of loss components.
+ """
+
+ anchors = anchors.reshape(-1, 4)
+ cls_score = cls_score.permute(0, 2, 3, 1).reshape(
+ -1, self.cls_out_channels).contiguous()
+ bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4)
+ centerness = centerness.permute(0, 2, 3, 1).reshape(-1)
+ bbox_targets = bbox_targets.reshape(-1, 4)
+ labels = labels.reshape(-1)
+ label_weights = label_weights.reshape(-1)
+
+ # classification loss
+ loss_cls = self.loss_cls(
+ cls_score, labels, label_weights, avg_factor=num_total_samples)
+
+ # FG cat_id: [0, num_classes -1], BG cat_id: num_classes
+ bg_class_ind = self.num_classes
+ pos_inds = ((labels >= 0)
+ & (labels < bg_class_ind)).nonzero().squeeze(1)
+
+ if len(pos_inds) > 0:
+ pos_bbox_targets = bbox_targets[pos_inds]
+ pos_bbox_pred = bbox_pred[pos_inds]
+ pos_anchors = anchors[pos_inds]
+ pos_centerness = centerness[pos_inds]
+
+ centerness_targets = self.centerness_target(
+ pos_anchors, pos_bbox_targets)
+ pos_decode_bbox_pred = self.bbox_coder.decode(
+ pos_anchors, pos_bbox_pred)
+ pos_decode_bbox_targets = self.bbox_coder.decode(
+ pos_anchors, pos_bbox_targets)
+
+ # regression loss
+ loss_bbox = self.loss_bbox(
+ pos_decode_bbox_pred,
+ pos_decode_bbox_targets,
+ weight=centerness_targets,
+ avg_factor=1.0)
+
+ # centerness loss
+ loss_centerness = self.loss_centerness(
+ pos_centerness,
+ centerness_targets,
+ avg_factor=num_total_samples)
+
+ else:
+ loss_bbox = bbox_pred.sum() * 0
+ loss_centerness = centerness.sum() * 0
+ centerness_targets = bbox_targets.new_tensor(0.)
+
+ return loss_cls, loss_bbox, loss_centerness, centerness_targets.sum()
+
+ @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'centernesses'))
+ def loss(self,
+ cls_scores,
+ bbox_preds,
+ centernesses,
+ gt_bboxes,
+ gt_labels,
+ img_metas,
+ gt_bboxes_ignore=None):
+ """Compute losses of the head.
+
+ Args:
+ cls_scores (list[Tensor]): Box scores for each scale level
+ Has shape (N, num_anchors * num_classes, H, W)
+ bbox_preds (list[Tensor]): Box energies / deltas for each scale
+ level with shape (N, num_anchors * 4, H, W)
+ centernesses (list[Tensor]): Centerness for each scale
+ level with shape (N, num_anchors * 1, H, W)
+ gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
+ shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
+ gt_labels (list[Tensor]): class indices corresponding to each box
+ img_metas (list[dict]): Meta information of each image, e.g.,
+ image size, scaling factor, etc.
+ gt_bboxes_ignore (list[Tensor] | None): specify which bounding
+ boxes can be ignored when computing the loss.
+
+ Returns:
+ dict[str, Tensor]: A dictionary of loss components.
+ """
+ featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
+ assert len(featmap_sizes) == self.anchor_generator.num_levels
+
+ device = cls_scores[0].device
+ anchor_list, valid_flag_list = self.get_anchors(
+ featmap_sizes, img_metas, device=device)
+ label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1
+
+ cls_reg_targets = self.get_targets(
+ anchor_list,
+ valid_flag_list,
+ gt_bboxes,
+ img_metas,
+ gt_bboxes_ignore_list=gt_bboxes_ignore,
+ gt_labels_list=gt_labels,
+ label_channels=label_channels)
+ if cls_reg_targets is None:
+ return None
+
+ (anchor_list, labels_list, label_weights_list, bbox_targets_list,
+ bbox_weights_list, num_total_pos, num_total_neg) = cls_reg_targets
+
+ num_total_samples = reduce_mean(
+ torch.tensor(num_total_pos, dtype=torch.float,
+ device=device)).item()
+ num_total_samples = max(num_total_samples, 1.0)
+
+ losses_cls, losses_bbox, loss_centerness,\
+ bbox_avg_factor = multi_apply(
+ self.loss_single,
+ anchor_list,
+ cls_scores,
+ bbox_preds,
+ centernesses,
+ labels_list,
+ label_weights_list,
+ bbox_targets_list,
+ num_total_samples=num_total_samples)
+
+ bbox_avg_factor = sum(bbox_avg_factor)
+ bbox_avg_factor = reduce_mean(bbox_avg_factor).item()
+ if bbox_avg_factor < EPS:
+ bbox_avg_factor = 1
+ losses_bbox = list(map(lambda x: x / bbox_avg_factor, losses_bbox))
+ return dict(
+ loss_cls=losses_cls,
+ loss_bbox=losses_bbox,
+ loss_centerness=loss_centerness)
+
+ def centerness_target(self, anchors, bbox_targets):
+ # only calculate pos centerness targets, otherwise there may be nan
+ gts = self.bbox_coder.decode(anchors, bbox_targets)
+ anchors_cx = (anchors[:, 2] + anchors[:, 0]) / 2
+ anchors_cy = (anchors[:, 3] + anchors[:, 1]) / 2
+ l_ = anchors_cx - gts[:, 0]
+ t_ = anchors_cy - gts[:, 1]
+ r_ = gts[:, 2] - anchors_cx
+ b_ = gts[:, 3] - anchors_cy
+
+ left_right = torch.stack([l_, r_], dim=1)
+ top_bottom = torch.stack([t_, b_], dim=1)
+ centerness = torch.sqrt(
+ (left_right.min(dim=-1)[0] / left_right.max(dim=-1)[0]) *
+ (top_bottom.min(dim=-1)[0] / top_bottom.max(dim=-1)[0]))
+ assert not torch.isnan(centerness).any()
+ return centerness
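+
+    # Worked example (illustrative, not part of the original file): for an
+    # anchor centered at (5, 5) and a decoded gt box (0, 0, 10, 20) we get
+    # l=5, r=5, t=5, b=15, so centerness = sqrt((5/5) * (5/15)) ~= 0.577;
+    # an anchor center that coincides with the box center scores 1.0.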
+
+ @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'centernesses'))
+ def get_bboxes(self,
+ cls_scores,
+ bbox_preds,
+ centernesses,
+ img_metas,
+ cfg=None,
+ rescale=False,
+ with_nms=True):
+ """Transform network output for a batch into bbox predictions.
+
+ Args:
+ cls_scores (list[Tensor]): Box scores for each scale level
+ with shape (N, num_anchors * num_classes, H, W).
+ bbox_preds (list[Tensor]): Box energies / deltas for each scale
+ level with shape (N, num_anchors * 4, H, W).
+ centernesses (list[Tensor]): Centerness for each scale level with
+ shape (N, num_anchors * 1, H, W).
+ img_metas (list[dict]): Meta information of each image, e.g.,
+ image size, scaling factor, etc.
+ cfg (mmcv.Config | None): Test / postprocessing configuration,
+ if None, test_cfg would be used. Default: None.
+ rescale (bool): If True, return boxes in original image space.
+ Default: False.
+ with_nms (bool): If True, do nms before return boxes.
+ Default: True.
+
+ Returns:
+ list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple.
+ The first item is an (n, 5) tensor, where 5 represent
+ (tl_x, tl_y, br_x, br_y, score) and the score between 0 and 1.
+ The shape of the second tensor in the tuple is (n,), and
+ each element represents the class label of the corresponding
+ box.
+ """
+ cfg = self.test_cfg if cfg is None else cfg
+ assert len(cls_scores) == len(bbox_preds)
+ num_levels = len(cls_scores)
+ device = cls_scores[0].device
+ featmap_sizes = [cls_scores[i].shape[-2:] for i in range(num_levels)]
+ mlvl_anchors = self.anchor_generator.grid_anchors(
+ featmap_sizes, device=device)
+
+ cls_score_list = [cls_scores[i].detach() for i in range(num_levels)]
+ bbox_pred_list = [bbox_preds[i].detach() for i in range(num_levels)]
+ centerness_pred_list = [
+ centernesses[i].detach() for i in range(num_levels)
+ ]
+ img_shapes = [
+ img_metas[i]['img_shape'] for i in range(cls_scores[0].shape[0])
+ ]
+ scale_factors = [
+ img_metas[i]['scale_factor'] for i in range(cls_scores[0].shape[0])
+ ]
+ result_list = self._get_bboxes(cls_score_list, bbox_pred_list,
+ centerness_pred_list, mlvl_anchors,
+ img_shapes, scale_factors, cfg, rescale,
+ with_nms)
+ return result_list
+
+ def _get_bboxes(self,
+ cls_scores,
+ bbox_preds,
+ centernesses,
+ mlvl_anchors,
+ img_shapes,
+ scale_factors,
+ cfg,
+ rescale=False,
+ with_nms=True):
+ """Transform outputs for a single batch item into labeled boxes.
+
+ Args:
+ cls_scores (list[Tensor]): Box scores for a single scale level
+ with shape (N, num_anchors * num_classes, H, W).
+ bbox_preds (list[Tensor]): Box energies / deltas for a single
+ scale level with shape (N, num_anchors * 4, H, W).
+ centernesses (list[Tensor]): Centerness for a single scale level
+ with shape (N, num_anchors * 1, H, W).
+ mlvl_anchors (list[Tensor]): Box reference for a single scale level
+ with shape (num_total_anchors, 4).
+ img_shapes (list[tuple[int]]): Shape of the input image,
+ list[(height, width, 3)].
+            scale_factors (list[ndarray]): Scale factors of the images,
+                arranged as (w_scale, h_scale, w_scale, h_scale).
+ cfg (mmcv.Config | None): Test / postprocessing configuration,
+ if None, test_cfg would be used.
+ rescale (bool): If True, return boxes in original image space.
+ Default: False.
+ with_nms (bool): If True, do nms before return boxes.
+ Default: True.
+
+ Returns:
+ list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple.
+ The first item is an (n, 5) tensor, where 5 represent
+ (tl_x, tl_y, br_x, br_y, score) and the score between 0 and 1.
+ The shape of the second tensor in the tuple is (n,), and
+ each element represents the class label of the corresponding
+ box.
+ """
+ assert len(cls_scores) == len(bbox_preds) == len(mlvl_anchors)
+ device = cls_scores[0].device
+ batch_size = cls_scores[0].shape[0]
+ # convert to tensor to keep tracing
+ nms_pre_tensor = torch.tensor(
+ cfg.get('nms_pre', -1), device=device, dtype=torch.long)
+ mlvl_bboxes = []
+ mlvl_scores = []
+ mlvl_centerness = []
+ for cls_score, bbox_pred, centerness, anchors in zip(
+ cls_scores, bbox_preds, centernesses, mlvl_anchors):
+ assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
+ scores = cls_score.permute(0, 2, 3, 1).reshape(
+ batch_size, -1, self.cls_out_channels).sigmoid()
+ centerness = centerness.permute(0, 2, 3,
+ 1).reshape(batch_size,
+ -1).sigmoid()
+ bbox_pred = bbox_pred.permute(0, 2, 3,
+ 1).reshape(batch_size, -1, 4)
+
+ # Always keep topk op for dynamic input in onnx
+ if nms_pre_tensor > 0 and (torch.onnx.is_in_onnx_export()
+ or scores.shape[-2] > nms_pre_tensor):
+ from torch import _shape_as_tensor
+ # keep shape as tensor and get k
+ num_anchor = _shape_as_tensor(scores)[-2].to(device)
+ nms_pre = torch.where(nms_pre_tensor < num_anchor,
+ nms_pre_tensor, num_anchor)
+
+ max_scores, _ = (scores * centerness[..., None]).max(-1)
+ _, topk_inds = max_scores.topk(nms_pre)
+ anchors = anchors[topk_inds, :]
+ batch_inds = torch.arange(batch_size).view(
+ -1, 1).expand_as(topk_inds).long()
+ bbox_pred = bbox_pred[batch_inds, topk_inds, :]
+ scores = scores[batch_inds, topk_inds, :]
+ centerness = centerness[batch_inds, topk_inds]
+ else:
+ anchors = anchors.expand_as(bbox_pred)
+
+ bboxes = self.bbox_coder.decode(
+ anchors, bbox_pred, max_shape=img_shapes)
+ mlvl_bboxes.append(bboxes)
+ mlvl_scores.append(scores)
+ mlvl_centerness.append(centerness)
+
+ batch_mlvl_bboxes = torch.cat(mlvl_bboxes, dim=1)
+ if rescale:
+ batch_mlvl_bboxes /= batch_mlvl_bboxes.new_tensor(
+ scale_factors).unsqueeze(1)
+ batch_mlvl_scores = torch.cat(mlvl_scores, dim=1)
+ batch_mlvl_centerness = torch.cat(mlvl_centerness, dim=1)
+
+        # Set the max number of boxes to be fed into nms in deployment
+ deploy_nms_pre = cfg.get('deploy_nms_pre', -1)
+ if deploy_nms_pre > 0 and torch.onnx.is_in_onnx_export():
+ batch_mlvl_scores, _ = (
+ batch_mlvl_scores *
+ batch_mlvl_centerness.unsqueeze(2).expand_as(batch_mlvl_scores)
+ ).max(-1)
+ _, topk_inds = batch_mlvl_scores.topk(deploy_nms_pre)
+ batch_inds = torch.arange(batch_size).view(-1,
+ 1).expand_as(topk_inds)
+ batch_mlvl_scores = batch_mlvl_scores[batch_inds, topk_inds, :]
+ batch_mlvl_bboxes = batch_mlvl_bboxes[batch_inds, topk_inds, :]
+ batch_mlvl_centerness = batch_mlvl_centerness[batch_inds,
+ topk_inds]
+            # note that we set FG labels to [0, num_class-1] since mmdet v2.0
+ # BG cat_id: num_class
+ padding = batch_mlvl_scores.new_zeros(batch_size,
+ batch_mlvl_scores.shape[1], 1)
+ batch_mlvl_scores = torch.cat([batch_mlvl_scores, padding], dim=-1)
+
+ if with_nms:
+ det_results = []
+ for (mlvl_bboxes, mlvl_scores,
+ mlvl_centerness) in zip(batch_mlvl_bboxes, batch_mlvl_scores,
+ batch_mlvl_centerness):
+ det_bbox, det_label = multiclass_nms(
+ mlvl_bboxes,
+ mlvl_scores,
+ cfg.score_thr,
+ cfg.nms,
+ cfg.max_per_img,
+ score_factors=mlvl_centerness)
+ det_results.append(tuple([det_bbox, det_label]))
+ else:
+ det_results = [
+ tuple(mlvl_bs)
+ for mlvl_bs in zip(batch_mlvl_bboxes, batch_mlvl_scores,
+ batch_mlvl_centerness)
+ ]
+ return det_results
+
+ def get_targets(self,
+ anchor_list,
+ valid_flag_list,
+ gt_bboxes_list,
+ img_metas,
+ gt_bboxes_ignore_list=None,
+ gt_labels_list=None,
+ label_channels=1,
+ unmap_outputs=True):
+ """Get targets for ATSS head.
+
+ This method is almost the same as `AnchorHead.get_targets()`. Besides
+ returning the targets as the parent method does, it also returns the
+ anchors as the first element of the returned tuple.
+ """
+ num_imgs = len(img_metas)
+ assert len(anchor_list) == len(valid_flag_list) == num_imgs
+
+ # anchor number of multi levels
+ num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]
+ num_level_anchors_list = [num_level_anchors] * num_imgs
+
+ # concat all level anchors and flags to a single tensor
+ for i in range(num_imgs):
+ assert len(anchor_list[i]) == len(valid_flag_list[i])
+ anchor_list[i] = torch.cat(anchor_list[i])
+ valid_flag_list[i] = torch.cat(valid_flag_list[i])
+
+ # compute targets for each image
+ if gt_bboxes_ignore_list is None:
+ gt_bboxes_ignore_list = [None for _ in range(num_imgs)]
+ if gt_labels_list is None:
+ gt_labels_list = [None for _ in range(num_imgs)]
+ (all_anchors, all_labels, all_label_weights, all_bbox_targets,
+ all_bbox_weights, pos_inds_list, neg_inds_list) = multi_apply(
+ self._get_target_single,
+ anchor_list,
+ valid_flag_list,
+ num_level_anchors_list,
+ gt_bboxes_list,
+ gt_bboxes_ignore_list,
+ gt_labels_list,
+ img_metas,
+ label_channels=label_channels,
+ unmap_outputs=unmap_outputs)
+ # no valid anchors
+ if any([labels is None for labels in all_labels]):
+ return None
+ # sampled anchors of all images
+ num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list])
+ num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list])
+ # split targets to a list w.r.t. multiple levels
+ anchors_list = images_to_levels(all_anchors, num_level_anchors)
+ labels_list = images_to_levels(all_labels, num_level_anchors)
+ label_weights_list = images_to_levels(all_label_weights,
+ num_level_anchors)
+ bbox_targets_list = images_to_levels(all_bbox_targets,
+ num_level_anchors)
+ bbox_weights_list = images_to_levels(all_bbox_weights,
+ num_level_anchors)
+ return (anchors_list, labels_list, label_weights_list,
+ bbox_targets_list, bbox_weights_list, num_total_pos,
+ num_total_neg)
+
+ def _get_target_single(self,
+ flat_anchors,
+ valid_flags,
+ num_level_anchors,
+ gt_bboxes,
+ gt_bboxes_ignore,
+ gt_labels,
+ img_meta,
+ label_channels=1,
+ unmap_outputs=True):
+ """Compute regression, classification targets for anchors in a single
+ image.
+
+ Args:
+ flat_anchors (Tensor): Multi-level anchors of the image, which are
+                concatenated into a single tensor of shape (num_anchors, 4).
+ valid_flags (Tensor): Multi level valid flags of the image,
+ which are concatenated into a single tensor of
+ shape (num_anchors,).
+            num_level_anchors (list[int]): Number of anchors of each scale level.
+ gt_bboxes (Tensor): Ground truth bboxes of the image,
+ shape (num_gts, 4).
+ gt_bboxes_ignore (Tensor): Ground truth bboxes to be
+ ignored, shape (num_ignored_gts, 4).
+ gt_labels (Tensor): Ground truth labels of each box,
+ shape (num_gts,).
+ img_meta (dict): Meta info of the image.
+ label_channels (int): Channel of label.
+ unmap_outputs (bool): Whether to map outputs back to the original
+ set of anchors.
+
+ Returns:
+ tuple: N is the number of total anchors in the image.
+ labels (Tensor): Labels of all anchors in the image with shape
+ (N,).
+ label_weights (Tensor): Label weights of all anchor in the
+ image with shape (N,).
+ bbox_targets (Tensor): BBox targets of all anchors in the
+ image with shape (N, 4).
+ bbox_weights (Tensor): BBox weights of all anchors in the
+ image with shape (N, 4)
+ pos_inds (Tensor): Indices of positive anchor with shape
+ (num_pos,).
+ neg_inds (Tensor): Indices of negative anchor with shape
+ (num_neg,).
+ """
+ inside_flags = anchor_inside_flags(flat_anchors, valid_flags,
+ img_meta['img_shape'][:2],
+ self.train_cfg.allowed_border)
+ if not inside_flags.any():
+ return (None, ) * 7
+ # assign gt and sample anchors
+ anchors = flat_anchors[inside_flags, :]
+
+ num_level_anchors_inside = self.get_num_level_anchors_inside(
+ num_level_anchors, inside_flags)
+ assign_result = self.assigner.assign(anchors, num_level_anchors_inside,
+ gt_bboxes, gt_bboxes_ignore,
+ gt_labels)
+
+ sampling_result = self.sampler.sample(assign_result, anchors,
+ gt_bboxes)
+
+ num_valid_anchors = anchors.shape[0]
+ bbox_targets = torch.zeros_like(anchors)
+ bbox_weights = torch.zeros_like(anchors)
+ labels = anchors.new_full((num_valid_anchors, ),
+ self.num_classes,
+ dtype=torch.long)
+ label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float)
+
+ pos_inds = sampling_result.pos_inds
+ neg_inds = sampling_result.neg_inds
+ if len(pos_inds) > 0:
+ if hasattr(self, 'bbox_coder'):
+ pos_bbox_targets = self.bbox_coder.encode(
+ sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes)
+ else:
+ # used in VFNetHead
+ pos_bbox_targets = sampling_result.pos_gt_bboxes
+ bbox_targets[pos_inds, :] = pos_bbox_targets
+ bbox_weights[pos_inds, :] = 1.0
+ if gt_labels is None:
+ # Only rpn gives gt_labels as None
+ # Foreground is the first class since v2.5.0
+ labels[pos_inds] = 0
+ else:
+ labels[pos_inds] = gt_labels[
+ sampling_result.pos_assigned_gt_inds]
+ if self.train_cfg.pos_weight <= 0:
+ label_weights[pos_inds] = 1.0
+ else:
+ label_weights[pos_inds] = self.train_cfg.pos_weight
+ if len(neg_inds) > 0:
+ label_weights[neg_inds] = 1.0
+
+ # map up to original set of anchors
+ if unmap_outputs:
+ num_total_anchors = flat_anchors.size(0)
+ anchors = unmap(anchors, num_total_anchors, inside_flags)
+ labels = unmap(
+ labels, num_total_anchors, inside_flags, fill=self.num_classes)
+ label_weights = unmap(label_weights, num_total_anchors,
+ inside_flags)
+ bbox_targets = unmap(bbox_targets, num_total_anchors, inside_flags)
+ bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags)
+
+ return (anchors, labels, label_weights, bbox_targets, bbox_weights,
+ pos_inds, neg_inds)
+
+ def get_num_level_anchors_inside(self, num_level_anchors, inside_flags):
+ split_inside_flags = torch.split(inside_flags, num_level_anchors)
+ num_level_anchors_inside = [
+ int(flags.sum()) for flags in split_inside_flags
+ ]
+ return num_level_anchors_inside
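+
+
+# Illustrative sketch (not part of the original file): with
+# num_level_anchors = [4, 2] and inside_flags = [1, 1, 0, 1, 0, 1],
+# `get_num_level_anchors_inside` returns [3, 1], i.e. the per-level counts
+# of anchors lying inside the image, which the ATSS assigner uses to pick
+# its top-k candidates per level.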
diff --git a/annotator/uniformer/mmdet/models/dense_heads/base_dense_head.py b/annotator/uniformer/mmdet/models/dense_heads/base_dense_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..de11e4a2197b1dfe241ce7a66daa1907a8fc5661
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/dense_heads/base_dense_head.py
@@ -0,0 +1,59 @@
+from abc import ABCMeta, abstractmethod
+
+import torch.nn as nn
+
+
+class BaseDenseHead(nn.Module, metaclass=ABCMeta):
+ """Base class for DenseHeads."""
+
+ def __init__(self):
+ super(BaseDenseHead, self).__init__()
+
+ @abstractmethod
+ def loss(self, **kwargs):
+ """Compute losses of the head."""
+ pass
+
+ @abstractmethod
+ def get_bboxes(self, **kwargs):
+ """Transform network output for a batch into bbox predictions."""
+ pass
+
+ def forward_train(self,
+ x,
+ img_metas,
+ gt_bboxes,
+ gt_labels=None,
+ gt_bboxes_ignore=None,
+ proposal_cfg=None,
+ **kwargs):
+ """
+ Args:
+ x (list[Tensor]): Features from FPN.
+ img_metas (list[dict]): Meta information of each image, e.g.,
+ image size, scaling factor, etc.
+ gt_bboxes (Tensor): Ground truth bboxes of the image,
+ shape (num_gts, 4).
+ gt_labels (Tensor): Ground truth labels of each box,
+ shape (num_gts,).
+ gt_bboxes_ignore (Tensor): Ground truth bboxes to be
+ ignored, shape (num_ignored_gts, 4).
+ proposal_cfg (mmcv.Config): Test / postprocessing configuration,
+ if None, test_cfg would be used
+
+ Returns:
+ tuple:
+ losses: (dict[str, Tensor]): A dictionary of loss components.
+ proposal_list (list[Tensor]): Proposals of each image.
+ """
+ outs = self(x)
+ if gt_labels is None:
+ loss_inputs = outs + (gt_bboxes, img_metas)
+ else:
+ loss_inputs = outs + (gt_bboxes, gt_labels, img_metas)
+ losses = self.loss(*loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore)
+ if proposal_cfg is None:
+ return losses
+ else:
+ proposal_list = self.get_bboxes(*outs, img_metas, cfg=proposal_cfg)
+ return losses, proposal_list
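+
+
+# Illustrative sketch (not part of the original file): a concrete head only
+# needs to provide `forward`, `loss` and `get_bboxes`; the `forward_train`
+# plumbing above then works unchanged. A hypothetical minimal subclass:
+#
+#     class TinyHead(BaseDenseHead):
+#
+#         def forward(self, feats):
+#             return ([f.mean(dim=1, keepdim=True) for f in feats], )
+#
+#         def loss(self, cls_scores, gt_bboxes, gt_labels, img_metas,
+#                  gt_bboxes_ignore=None):
+#             return dict(loss_dummy=[s.sum() * 0 for s in cls_scores])
+#
+#         def get_bboxes(self, cls_scores, img_metas, cfg=None):
+#             return [(s.new_zeros((0, 5)), s.new_zeros(0).long())
+#                     for s in cls_scores]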
diff --git a/annotator/uniformer/mmdet/models/dense_heads/cascade_rpn_head.py b/annotator/uniformer/mmdet/models/dense_heads/cascade_rpn_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..e32ee461951e685fb44a461033293159e3439717
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/dense_heads/cascade_rpn_head.py
@@ -0,0 +1,784 @@
+from __future__ import division
+import copy
+import warnings
+
+import torch
+import torch.nn as nn
+from mmcv import ConfigDict
+from mmcv.cnn import normal_init
+from mmcv.ops import DeformConv2d, batched_nms
+
+from mmdet.core import (RegionAssigner, build_assigner, build_sampler,
+ images_to_levels, multi_apply)
+from ..builder import HEADS, build_head
+from .base_dense_head import BaseDenseHead
+from .rpn_head import RPNHead
+
+
+class AdaptiveConv(nn.Module):
+ """AdaptiveConv used to adapt the sampling location with the anchors.
+
+ Args:
+ in_channels (int): Number of channels in the input image
+ out_channels (int): Number of channels produced by the convolution
+ kernel_size (int or tuple): Size of the conv kernel. Default: 3
+ stride (int or tuple, optional): Stride of the convolution. Default: 1
+ padding (int or tuple, optional): Zero-padding added to both sides of
+ the input. Default: 1
+ dilation (int or tuple, optional): Spacing between kernel elements.
+ Default: 3
+ groups (int, optional): Number of blocked connections from input
+ channels to output channels. Default: 1
+ bias (bool, optional): If set True, adds a learnable bias to the
+ output. Default: False.
+ type (str, optional): Type of adaptive conv, can be either 'offset'
+ (arbitrary anchors) or 'dilation' (uniform anchor).
+ Default: 'dilation'.
+ """
+
+ def __init__(self,
+ in_channels,
+ out_channels,
+ kernel_size=3,
+ stride=1,
+ padding=1,
+ dilation=3,
+ groups=1,
+ bias=False,
+ type='dilation'):
+ super(AdaptiveConv, self).__init__()
+ assert type in ['offset', 'dilation']
+ self.adapt_type = type
+
+        assert kernel_size == 3, 'Adaptive conv only supports kernel size 3'
+ if self.adapt_type == 'offset':
+ assert stride == 1 and padding == 1 and groups == 1, \
+                'Adaptive conv offset mode only supports padding: 1, ' \
+                'stride: 1, groups: 1'
+ self.conv = DeformConv2d(
+ in_channels,
+ out_channels,
+ kernel_size,
+ padding=padding,
+ stride=stride,
+ groups=groups,
+ bias=bias)
+ else:
+ self.conv = nn.Conv2d(
+ in_channels,
+ out_channels,
+ kernel_size,
+ padding=dilation,
+ dilation=dilation)
+
+ def init_weights(self):
+ """Init weights."""
+ normal_init(self.conv, std=0.01)
+
+ def forward(self, x, offset):
+ """Forward function."""
+ if self.adapt_type == 'offset':
+ N, _, H, W = x.shape
+ assert offset is not None
+ assert H * W == offset.shape[1]
+            # reshape offset from (N, H*W, 18) to (N, 18, H, W)
+ offset = offset.permute(0, 2, 1).reshape(N, -1, H, W)
+ offset = offset.contiguous()
+ x = self.conv(x, offset)
+ else:
+ assert offset is None
+ x = self.conv(x)
+ return x
+
+
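+# A minimal usage sketch of AdaptiveConv (illustrative only; the helper below
+# is an assumption added for documentation and is never called anywhere).
+# In 'dilation' mode the module is a plain dilated 3x3 conv and takes no
+# offsets; in 'offset' mode it wraps DeformConv2d and expects offsets of shape
+# (N, H * W, 18), as produced by StageCascadeRPNHead.anchor_offset below.
+def _demo_adaptive_conv_dilation():
+    feat = torch.randn(2, 256, 32, 32)
+    conv = AdaptiveConv(256, 256, type='dilation', dilation=3)
+    conv.init_weights()
+    out = conv(feat, offset=None)  # offset must be None in 'dilation' mode
+    assert out.shape == feat.shape  # padding == dilation keeps the size
+    return out
+
+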
+@HEADS.register_module()
+class StageCascadeRPNHead(RPNHead):
+ """Stage of CascadeRPNHead.
+
+ Args:
+ in_channels (int): Number of channels in the input feature map.
+        anchor_generator (dict): Anchor generator config.
+        adapt_cfg (dict): Adaptation config.
+        bridged_feature (bool, optional): Whether to update the rpn feature.
+            Default: False.
+        with_cls (bool, optional): Whether to use the classification branch.
+            Default: True.
+        sampling (bool, optional): Whether to use sampling. Default: True.
+ """
+
+ def __init__(self,
+ in_channels,
+ anchor_generator=dict(
+ type='AnchorGenerator',
+ scales=[8],
+ ratios=[1.0],
+ strides=[4, 8, 16, 32, 64]),
+ adapt_cfg=dict(type='dilation', dilation=3),
+ bridged_feature=False,
+ with_cls=True,
+ sampling=True,
+ **kwargs):
+ self.with_cls = with_cls
+ self.anchor_strides = anchor_generator['strides']
+ self.anchor_scales = anchor_generator['scales']
+ self.bridged_feature = bridged_feature
+ self.adapt_cfg = adapt_cfg
+ super(StageCascadeRPNHead, self).__init__(
+ in_channels, anchor_generator=anchor_generator, **kwargs)
+
+ # override sampling and sampler
+ self.sampling = sampling
+ if self.train_cfg:
+ self.assigner = build_assigner(self.train_cfg.assigner)
+ # use PseudoSampler when sampling is False
+ if self.sampling and hasattr(self.train_cfg, 'sampler'):
+ sampler_cfg = self.train_cfg.sampler
+ else:
+ sampler_cfg = dict(type='PseudoSampler')
+ self.sampler = build_sampler(sampler_cfg, context=self)
+
+ def _init_layers(self):
+ """Init layers of a CascadeRPN stage."""
+ self.rpn_conv = AdaptiveConv(self.in_channels, self.feat_channels,
+ **self.adapt_cfg)
+ if self.with_cls:
+ self.rpn_cls = nn.Conv2d(self.feat_channels,
+ self.num_anchors * self.cls_out_channels,
+ 1)
+ self.rpn_reg = nn.Conv2d(self.feat_channels, self.num_anchors * 4, 1)
+ self.relu = nn.ReLU(inplace=True)
+
+ def init_weights(self):
+ """Init weights of a CascadeRPN stage."""
+ self.rpn_conv.init_weights()
+ normal_init(self.rpn_reg, std=0.01)
+ if self.with_cls:
+ normal_init(self.rpn_cls, std=0.01)
+
+ def forward_single(self, x, offset):
+ """Forward function of single scale."""
+ bridged_x = x
+ x = self.relu(self.rpn_conv(x, offset))
+ if self.bridged_feature:
+ bridged_x = x # update feature
+ cls_score = self.rpn_cls(x) if self.with_cls else None
+ bbox_pred = self.rpn_reg(x)
+ return bridged_x, cls_score, bbox_pred
+
+ def forward(self, feats, offset_list=None):
+ """Forward function."""
+ if offset_list is None:
+ offset_list = [None for _ in range(len(feats))]
+ return multi_apply(self.forward_single, feats, offset_list)
+
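+    # NOTE: forward() above returns a tuple of three per-level lists: the
+    # (possibly bridged) features passed on to the next stage, the
+    # classification scores of shape (N, num_anchors * cls_out_channels, H, W)
+    # (each entry is None when with_cls=False), and the bbox deltas of shape
+    # (N, num_anchors * 4, H, W).
+    # (Comment added for clarity; layout inferred from forward_single above.)
+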
+ def _region_targets_single(self,
+ anchors,
+ valid_flags,
+ gt_bboxes,
+ gt_bboxes_ignore,
+ gt_labels,
+ img_meta,
+ featmap_sizes,
+ label_channels=1):
+ """Get anchor targets based on region for single level."""
+ assign_result = self.assigner.assign(
+ anchors,
+ valid_flags,
+ gt_bboxes,
+ img_meta,
+ featmap_sizes,
+ self.anchor_scales[0],
+ self.anchor_strides,
+ gt_bboxes_ignore=gt_bboxes_ignore,
+ gt_labels=None,
+ allowed_border=self.train_cfg.allowed_border)
+ flat_anchors = torch.cat(anchors)
+ sampling_result = self.sampler.sample(assign_result, flat_anchors,
+ gt_bboxes)
+
+ num_anchors = flat_anchors.shape[0]
+ bbox_targets = torch.zeros_like(flat_anchors)
+ bbox_weights = torch.zeros_like(flat_anchors)
+ labels = flat_anchors.new_zeros(num_anchors, dtype=torch.long)
+ label_weights = flat_anchors.new_zeros(num_anchors, dtype=torch.float)
+
+ pos_inds = sampling_result.pos_inds
+ neg_inds = sampling_result.neg_inds
+ if len(pos_inds) > 0:
+ if not self.reg_decoded_bbox:
+ pos_bbox_targets = self.bbox_coder.encode(
+ sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes)
+ else:
+ pos_bbox_targets = sampling_result.pos_gt_bboxes
+ bbox_targets[pos_inds, :] = pos_bbox_targets
+ bbox_weights[pos_inds, :] = 1.0
+ if gt_labels is None:
+ labels[pos_inds] = 1
+ else:
+ labels[pos_inds] = gt_labels[
+ sampling_result.pos_assigned_gt_inds]
+ if self.train_cfg.pos_weight <= 0:
+ label_weights[pos_inds] = 1.0
+ else:
+ label_weights[pos_inds] = self.train_cfg.pos_weight
+ if len(neg_inds) > 0:
+ label_weights[neg_inds] = 1.0
+
+ return (labels, label_weights, bbox_targets, bbox_weights, pos_inds,
+ neg_inds)
+
+ def region_targets(self,
+ anchor_list,
+ valid_flag_list,
+ gt_bboxes_list,
+ img_metas,
+ featmap_sizes,
+ gt_bboxes_ignore_list=None,
+ gt_labels_list=None,
+ label_channels=1,
+ unmap_outputs=True):
+ """See :func:`StageCascadeRPNHead.get_targets`."""
+ num_imgs = len(img_metas)
+ assert len(anchor_list) == len(valid_flag_list) == num_imgs
+
+ # anchor number of multi levels
+ num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]
+
+ # compute targets for each image
+ if gt_bboxes_ignore_list is None:
+ gt_bboxes_ignore_list = [None for _ in range(num_imgs)]
+ if gt_labels_list is None:
+ gt_labels_list = [None for _ in range(num_imgs)]
+ (all_labels, all_label_weights, all_bbox_targets, all_bbox_weights,
+ pos_inds_list, neg_inds_list) = multi_apply(
+ self._region_targets_single,
+ anchor_list,
+ valid_flag_list,
+ gt_bboxes_list,
+ gt_bboxes_ignore_list,
+ gt_labels_list,
+ img_metas,
+ featmap_sizes=featmap_sizes,
+ label_channels=label_channels)
+ # no valid anchors
+ if any([labels is None for labels in all_labels]):
+ return None
+ # sampled anchors of all images
+ num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list])
+ num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list])
+ # split targets to a list w.r.t. multiple levels
+ labels_list = images_to_levels(all_labels, num_level_anchors)
+ label_weights_list = images_to_levels(all_label_weights,
+ num_level_anchors)
+ bbox_targets_list = images_to_levels(all_bbox_targets,
+ num_level_anchors)
+ bbox_weights_list = images_to_levels(all_bbox_weights,
+ num_level_anchors)
+ return (labels_list, label_weights_list, bbox_targets_list,
+ bbox_weights_list, num_total_pos, num_total_neg)
+
+ def get_targets(self,
+ anchor_list,
+ valid_flag_list,
+ gt_bboxes,
+ img_metas,
+ featmap_sizes,
+ gt_bboxes_ignore=None,
+ label_channels=1):
+ """Compute regression and classification targets for anchors.
+
+ Args:
+ anchor_list (list[list]): Multi level anchors of each image.
+ valid_flag_list (list[list]): Multi level valid flags of each
+ image.
+ gt_bboxes (list[Tensor]): Ground truth bboxes of each image.
+ img_metas (list[dict]): Meta info of each image.
+            featmap_sizes (list[Tensor]): Feature map size of each level.
+            gt_bboxes_ignore (list[Tensor]): Ignored bboxes of each image.
+ label_channels (int): Channel of label.
+
+ Returns:
+ cls_reg_targets (tuple)
+ """
+ if isinstance(self.assigner, RegionAssigner):
+ cls_reg_targets = self.region_targets(
+ anchor_list,
+ valid_flag_list,
+ gt_bboxes,
+ img_metas,
+ featmap_sizes,
+ gt_bboxes_ignore_list=gt_bboxes_ignore,
+ label_channels=label_channels)
+ else:
+ cls_reg_targets = super(StageCascadeRPNHead, self).get_targets(
+ anchor_list,
+ valid_flag_list,
+ gt_bboxes,
+ img_metas,
+ gt_bboxes_ignore_list=gt_bboxes_ignore,
+ label_channels=label_channels)
+ return cls_reg_targets
+
+ def anchor_offset(self, anchor_list, anchor_strides, featmap_sizes):
+ """ Get offest for deformable conv based on anchor shape
+ NOTE: currently support deformable kernel_size=3 and dilation=1
+
+ Args:
+ anchor_list (list[list[tensor])): [NI, NLVL, NA, 4] list of
+ multi-level anchors
+ anchor_strides (list[int]): anchor stride of each level
+
+ Returns:
+ offset_list (list[tensor]): [NLVL, NA, 2, 18]: offset of DeformConv
+ kernel.
+ """
+
+ def _shape_offset(anchors, stride, ks=3, dilation=1):
+ # currently support kernel_size=3 and dilation=1
+ assert ks == 3 and dilation == 1
+ pad = (ks - 1) // 2
+ idx = torch.arange(-pad, pad + 1, dtype=dtype, device=device)
+ yy, xx = torch.meshgrid(idx, idx) # return order matters
+ xx = xx.reshape(-1)
+ yy = yy.reshape(-1)
+ w = (anchors[:, 2] - anchors[:, 0]) / stride
+ h = (anchors[:, 3] - anchors[:, 1]) / stride
+ w = w / (ks - 1) - dilation
+ h = h / (ks - 1) - dilation
+ offset_x = w[:, None] * xx # (NA, ks**2)
+ offset_y = h[:, None] * yy # (NA, ks**2)
+ return offset_x, offset_y
+
+ def _ctr_offset(anchors, stride, featmap_size):
+ feat_h, feat_w = featmap_size
+ assert len(anchors) == feat_h * feat_w
+
+ x = (anchors[:, 0] + anchors[:, 2]) * 0.5
+ y = (anchors[:, 1] + anchors[:, 3]) * 0.5
+ # compute centers on feature map
+ x = x / stride
+ y = y / stride
+ # compute predefine centers
+ xx = torch.arange(0, feat_w, device=anchors.device)
+ yy = torch.arange(0, feat_h, device=anchors.device)
+ yy, xx = torch.meshgrid(yy, xx)
+ xx = xx.reshape(-1).type_as(x)
+ yy = yy.reshape(-1).type_as(y)
+
+ offset_x = x - xx # (NA, )
+ offset_y = y - yy # (NA, )
+ return offset_x, offset_y
+
+ num_imgs = len(anchor_list)
+ num_lvls = len(anchor_list[0])
+ dtype = anchor_list[0][0].dtype
+ device = anchor_list[0][0].device
+ num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]
+
+ offset_list = []
+ for i in range(num_imgs):
+ mlvl_offset = []
+ for lvl in range(num_lvls):
+ c_offset_x, c_offset_y = _ctr_offset(anchor_list[i][lvl],
+ anchor_strides[lvl],
+ featmap_sizes[lvl])
+ s_offset_x, s_offset_y = _shape_offset(anchor_list[i][lvl],
+ anchor_strides[lvl])
+
+ # offset = ctr_offset + shape_offset
+ offset_x = s_offset_x + c_offset_x[:, None]
+ offset_y = s_offset_y + c_offset_y[:, None]
+
+                # offset order (y0, x0, y1, x1, ..., y8, x8)
+ offset = torch.stack([offset_y, offset_x], dim=-1)
+ offset = offset.reshape(offset.size(0), -1) # [NA, 2*ks**2]
+ mlvl_offset.append(offset)
+ offset_list.append(torch.cat(mlvl_offset)) # [totalNA, 2*ks**2]
+ offset_list = images_to_levels(offset_list, num_level_anchors)
+ return offset_list
+
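+    # A worked illustration of anchor_offset (comment added for clarity, with
+    # assumed numbers): for a 3x3 deformable kernel the regular sampling grid
+    # at a feature cell p is p + {-1, 0, 1} x {-1, 0, 1}. _ctr_offset moves the
+    # grid centre from the cell to the anchor centre (in stride units), and
+    # _shape_offset rescales the grid spacing from 1 cell to roughly
+    # (anchor_size / stride) / (ks - 1). E.g. a 64-wide anchor on a stride-16
+    # level gives w = 64 / 16 = 4, then w / (ks - 1) - dilation = 1, so the
+    # outer kernel columns are pushed one extra cell away from the centre.
+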
+ def loss_single(self, cls_score, bbox_pred, anchors, labels, label_weights,
+ bbox_targets, bbox_weights, num_total_samples):
+ """Loss function on single scale."""
+ # classification loss
+ if self.with_cls:
+ labels = labels.reshape(-1)
+ label_weights = label_weights.reshape(-1)
+ cls_score = cls_score.permute(0, 2, 3,
+ 1).reshape(-1, self.cls_out_channels)
+ loss_cls = self.loss_cls(
+ cls_score, labels, label_weights, avg_factor=num_total_samples)
+ # regression loss
+ bbox_targets = bbox_targets.reshape(-1, 4)
+ bbox_weights = bbox_weights.reshape(-1, 4)
+ bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4)
+ if self.reg_decoded_bbox:
+ # When the regression loss (e.g. `IouLoss`, `GIouLoss`)
+ # is applied directly on the decoded bounding boxes, it
+ # decodes the already encoded coordinates to absolute format.
+ anchors = anchors.reshape(-1, 4)
+ bbox_pred = self.bbox_coder.decode(anchors, bbox_pred)
+ loss_reg = self.loss_bbox(
+ bbox_pred,
+ bbox_targets,
+ bbox_weights,
+ avg_factor=num_total_samples)
+ if self.with_cls:
+ return loss_cls, loss_reg
+ return None, loss_reg
+
+ def loss(self,
+ anchor_list,
+ valid_flag_list,
+ cls_scores,
+ bbox_preds,
+ gt_bboxes,
+ img_metas,
+ gt_bboxes_ignore=None):
+ """Compute losses of the head.
+
+ Args:
+ anchor_list (list[list]): Multi level anchors of each image.
+ cls_scores (list[Tensor]): Box scores for each scale level
+ Has shape (N, num_anchors * num_classes, H, W)
+ bbox_preds (list[Tensor]): Box energies / deltas for each scale
+ level with shape (N, num_anchors * 4, H, W)
+ gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
+ shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
+ img_metas (list[dict]): Meta information of each image, e.g.,
+ image size, scaling factor, etc.
+ gt_bboxes_ignore (None | list[Tensor]): specify which bounding
+ boxes can be ignored when computing the loss. Default: None
+
+ Returns:
+ dict[str, Tensor]: A dictionary of loss components.
+ """
+ featmap_sizes = [featmap.size()[-2:] for featmap in bbox_preds]
+ label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1
+ cls_reg_targets = self.get_targets(
+ anchor_list,
+ valid_flag_list,
+ gt_bboxes,
+ img_metas,
+ featmap_sizes,
+ gt_bboxes_ignore=gt_bboxes_ignore,
+ label_channels=label_channels)
+ if cls_reg_targets is None:
+ return None
+ (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,
+ num_total_pos, num_total_neg) = cls_reg_targets
+ if self.sampling:
+ num_total_samples = num_total_pos + num_total_neg
+ else:
+ # 200 is hard-coded average factor,
+ # which follows guided anchoring.
+ num_total_samples = sum([label.numel()
+ for label in labels_list]) / 200.0
+
+ # change per image, per level anchor_list to per_level, per_image
+ mlvl_anchor_list = list(zip(*anchor_list))
+ # concat mlvl_anchor_list
+ mlvl_anchor_list = [
+ torch.cat(anchors, dim=0) for anchors in mlvl_anchor_list
+ ]
+
+ losses = multi_apply(
+ self.loss_single,
+ cls_scores,
+ bbox_preds,
+ mlvl_anchor_list,
+ labels_list,
+ label_weights_list,
+ bbox_targets_list,
+ bbox_weights_list,
+ num_total_samples=num_total_samples)
+ if self.with_cls:
+ return dict(loss_rpn_cls=losses[0], loss_rpn_reg=losses[1])
+ return dict(loss_rpn_reg=losses[1])
+
+ def get_bboxes(self,
+ anchor_list,
+ cls_scores,
+ bbox_preds,
+ img_metas,
+ cfg,
+ rescale=False):
+ """Get proposal predict."""
+ assert len(cls_scores) == len(bbox_preds)
+ num_levels = len(cls_scores)
+
+ result_list = []
+ for img_id in range(len(img_metas)):
+ cls_score_list = [
+ cls_scores[i][img_id].detach() for i in range(num_levels)
+ ]
+ bbox_pred_list = [
+ bbox_preds[i][img_id].detach() for i in range(num_levels)
+ ]
+ img_shape = img_metas[img_id]['img_shape']
+ scale_factor = img_metas[img_id]['scale_factor']
+ proposals = self._get_bboxes_single(cls_score_list, bbox_pred_list,
+ anchor_list[img_id], img_shape,
+ scale_factor, cfg, rescale)
+ result_list.append(proposals)
+ return result_list
+
+ def refine_bboxes(self, anchor_list, bbox_preds, img_metas):
+ """Refine bboxes through stages."""
+ num_levels = len(bbox_preds)
+ new_anchor_list = []
+ for img_id in range(len(img_metas)):
+ mlvl_anchors = []
+ for i in range(num_levels):
+ bbox_pred = bbox_preds[i][img_id].detach()
+ bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4)
+ img_shape = img_metas[img_id]['img_shape']
+ bboxes = self.bbox_coder.decode(anchor_list[img_id][i],
+ bbox_pred, img_shape)
+ mlvl_anchors.append(bboxes)
+ new_anchor_list.append(mlvl_anchors)
+ return new_anchor_list
+
+ # TODO: temporary plan
+ def _get_bboxes_single(self,
+ cls_scores,
+ bbox_preds,
+ mlvl_anchors,
+ img_shape,
+ scale_factor,
+ cfg,
+ rescale=False):
+ """Transform outputs for a single batch item into bbox predictions.
+
+ Args:
+ cls_scores (list[Tensor]): Box scores for each scale level
+ Has shape (num_anchors * num_classes, H, W).
+ bbox_preds (list[Tensor]): Box energies / deltas for each scale
+ level with shape (num_anchors * 4, H, W).
+ mlvl_anchors (list[Tensor]): Box reference for each scale level
+ with shape (num_total_anchors, 4).
+ img_shape (tuple[int]): Shape of the input image,
+ (height, width, 3).
+            scale_factor (ndarray): Scale factor of the image, arranged as
+                (w_scale, h_scale, w_scale, h_scale).
+ cfg (mmcv.Config): Test / postprocessing configuration,
+ if None, test_cfg would be used.
+ rescale (bool): If True, return boxes in original image space.
+
+ Returns:
+ Tensor: Labeled boxes have the shape of (n,5), where the
+ first 4 columns are bounding box positions
+ (tl_x, tl_y, br_x, br_y) and the 5-th column is a score
+ between 0 and 1.
+ """
+ cfg = self.test_cfg if cfg is None else cfg
+ cfg = copy.deepcopy(cfg)
+ # bboxes from different level should be independent during NMS,
+ # level_ids are used as labels for batched NMS to separate them
+ level_ids = []
+ mlvl_scores = []
+ mlvl_bbox_preds = []
+ mlvl_valid_anchors = []
+ for idx in range(len(cls_scores)):
+ rpn_cls_score = cls_scores[idx]
+ rpn_bbox_pred = bbox_preds[idx]
+ assert rpn_cls_score.size()[-2:] == rpn_bbox_pred.size()[-2:]
+ rpn_cls_score = rpn_cls_score.permute(1, 2, 0)
+ if self.use_sigmoid_cls:
+ rpn_cls_score = rpn_cls_score.reshape(-1)
+ scores = rpn_cls_score.sigmoid()
+ else:
+ rpn_cls_score = rpn_cls_score.reshape(-1, 2)
+ # We set FG labels to [0, num_class-1] and BG label to
+ # num_class in RPN head since mmdet v2.5, which is unified to
+ # be consistent with other head since mmdet v2.0. In mmdet v2.0
+ # to v2.4 we keep BG label as 0 and FG label as 1 in rpn head.
+ scores = rpn_cls_score.softmax(dim=1)[:, 0]
+ rpn_bbox_pred = rpn_bbox_pred.permute(1, 2, 0).reshape(-1, 4)
+ anchors = mlvl_anchors[idx]
+ if cfg.nms_pre > 0 and scores.shape[0] > cfg.nms_pre:
+ # sort is faster than topk
+ # _, topk_inds = scores.topk(cfg.nms_pre)
+ if torch.onnx.is_in_onnx_export():
+ # sort op will be converted to TopK in onnx
+ # and k<=3480 in TensorRT
+ _, topk_inds = scores.topk(cfg.nms_pre)
+ scores = scores[topk_inds]
+ else:
+ ranked_scores, rank_inds = scores.sort(descending=True)
+ topk_inds = rank_inds[:cfg.nms_pre]
+ scores = ranked_scores[:cfg.nms_pre]
+ rpn_bbox_pred = rpn_bbox_pred[topk_inds, :]
+ anchors = anchors[topk_inds, :]
+ mlvl_scores.append(scores)
+ mlvl_bbox_preds.append(rpn_bbox_pred)
+ mlvl_valid_anchors.append(anchors)
+ level_ids.append(
+ scores.new_full((scores.size(0), ), idx, dtype=torch.long))
+
+ scores = torch.cat(mlvl_scores)
+ anchors = torch.cat(mlvl_valid_anchors)
+ rpn_bbox_pred = torch.cat(mlvl_bbox_preds)
+ proposals = self.bbox_coder.decode(
+ anchors, rpn_bbox_pred, max_shape=img_shape)
+ ids = torch.cat(level_ids)
+
+ # Skip nonzero op while exporting to ONNX
+ if cfg.min_bbox_size > 0 and (not torch.onnx.is_in_onnx_export()):
+ w = proposals[:, 2] - proposals[:, 0]
+ h = proposals[:, 3] - proposals[:, 1]
+ valid_inds = torch.nonzero(
+ (w >= cfg.min_bbox_size)
+ & (h >= cfg.min_bbox_size),
+ as_tuple=False).squeeze()
+ if valid_inds.sum().item() != len(proposals):
+ proposals = proposals[valid_inds, :]
+ scores = scores[valid_inds]
+ ids = ids[valid_inds]
+
+ # deprecate arguments warning
+ if 'nms' not in cfg or 'max_num' in cfg or 'nms_thr' in cfg:
+ warnings.warn(
+ 'In rpn_proposal or test_cfg, '
+ 'nms_thr has been moved to a dict named nms as '
+ 'iou_threshold, max_num has been renamed as max_per_img, '
+ 'name of original arguments and the way to specify '
+ 'iou_threshold of NMS will be deprecated.')
+ if 'nms' not in cfg:
+ cfg.nms = ConfigDict(dict(type='nms', iou_threshold=cfg.nms_thr))
+ if 'max_num' in cfg:
+ if 'max_per_img' in cfg:
+ assert cfg.max_num == cfg.max_per_img, f'You ' \
+ f'set max_num and ' \
+ f'max_per_img at the same time, but get {cfg.max_num} ' \
+                    f'and {cfg.max_per_img} respectively. ' \
+                    'Please delete max_num which will be deprecated.'
+ else:
+ cfg.max_per_img = cfg.max_num
+ if 'nms_thr' in cfg:
+ assert cfg.nms.iou_threshold == cfg.nms_thr, f'You set' \
+ f' iou_threshold in nms and ' \
+ f'nms_thr at the same time, but get' \
+ f' {cfg.nms.iou_threshold} and {cfg.nms_thr}' \
+ f' respectively. Please delete the nms_thr ' \
+ f'which will be deprecated.'
+
+ dets, keep = batched_nms(proposals, scores, ids, cfg.nms)
+ return dets[:cfg.max_per_img]
+
+
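+# A hedged sketch of the new-style proposal/test cfg consumed by
+# StageCascadeRPNHead._get_bboxes_single above (deprecated `nms_thr` and
+# `max_num` keys are translated into `nms.iou_threshold` / `max_per_img`).
+# The helper and its values are illustrative assumptions, not settings taken
+# from this repo, and it is never called.
+def _example_rpn_proposal_cfg():
+    return ConfigDict(
+        dict(
+            nms_pre=2000,  # keep the top-k scores per level before NMS
+            min_bbox_size=0,  # filter out degenerate proposals
+            nms=dict(type='nms', iou_threshold=0.7),
+            max_per_img=1000))  # proposals kept after batched NMS
+
+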
+@HEADS.register_module()
+class CascadeRPNHead(BaseDenseHead):
+ """The CascadeRPNHead will predict more accurate region proposals, which is
+ required for two-stage detectors (such as Fast/Faster R-CNN). CascadeRPN
+ consists of a sequence of RPNStage to progressively improve the accuracy of
+ the detected proposals.
+
+ More details can be found in ``https://arxiv.org/abs/1909.06720``.
+
+ Args:
+ num_stages (int): number of CascadeRPN stages.
+ stages (list[dict]): list of configs to build the stages.
+        train_cfg (list[dict]): list of training configs, one for each stage.
+ test_cfg (dict): config at testing time.
+ """
+
+ def __init__(self, num_stages, stages, train_cfg, test_cfg):
+ super(CascadeRPNHead, self).__init__()
+ assert num_stages == len(stages)
+ self.num_stages = num_stages
+ self.stages = nn.ModuleList()
+ for i in range(len(stages)):
+ train_cfg_i = train_cfg[i] if train_cfg is not None else None
+ stages[i].update(train_cfg=train_cfg_i)
+ stages[i].update(test_cfg=test_cfg)
+ self.stages.append(build_head(stages[i]))
+ self.train_cfg = train_cfg
+ self.test_cfg = test_cfg
+
+ def init_weights(self):
+ """Init weight of CascadeRPN."""
+ for i in range(self.num_stages):
+ self.stages[i].init_weights()
+
+ def loss(self):
+ """loss() is implemented in StageCascadeRPNHead."""
+ pass
+
+ def get_bboxes(self):
+ """get_bboxes() is implemented in StageCascadeRPNHead."""
+ pass
+
+ def forward_train(self,
+ x,
+ img_metas,
+ gt_bboxes,
+ gt_labels=None,
+ gt_bboxes_ignore=None,
+ proposal_cfg=None):
+ """Forward train function."""
+ assert gt_labels is None, 'RPN does not require gt_labels'
+
+ featmap_sizes = [featmap.size()[-2:] for featmap in x]
+ device = x[0].device
+ anchor_list, valid_flag_list = self.stages[0].get_anchors(
+ featmap_sizes, img_metas, device=device)
+
+ losses = dict()
+
+ for i in range(self.num_stages):
+ stage = self.stages[i]
+
+ if stage.adapt_cfg['type'] == 'offset':
+ offset_list = stage.anchor_offset(anchor_list,
+ stage.anchor_strides,
+ featmap_sizes)
+ else:
+ offset_list = None
+ x, cls_score, bbox_pred = stage(x, offset_list)
+ rpn_loss_inputs = (anchor_list, valid_flag_list, cls_score,
+ bbox_pred, gt_bboxes, img_metas)
+ stage_loss = stage.loss(*rpn_loss_inputs)
+ for name, value in stage_loss.items():
+ losses['s{}.{}'.format(i, name)] = value
+
+ # refine boxes
+ if i < self.num_stages - 1:
+ anchor_list = stage.refine_bboxes(anchor_list, bbox_pred,
+ img_metas)
+ if proposal_cfg is None:
+ return losses
+ else:
+ proposal_list = self.stages[-1].get_bboxes(anchor_list, cls_score,
+ bbox_pred, img_metas,
+ self.test_cfg)
+ return losses, proposal_list
+
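+    # Per stage, the training loop above (a) converts the current anchors into
+    # DCN offsets when the stage adapts by 'offset', (b) runs the stage head,
+    # (c) collects its losses under an 's{i}.' prefix, and (d) refines the
+    # anchors with the predicted deltas so the next stage starts from tighter
+    # boxes; only the last stage's outputs are turned into proposals.
+    # (Comment added for clarity; it introduces no new behaviour.)
+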
+ def simple_test_rpn(self, x, img_metas):
+ """Simple forward test function."""
+ featmap_sizes = [featmap.size()[-2:] for featmap in x]
+ device = x[0].device
+ anchor_list, _ = self.stages[0].get_anchors(
+ featmap_sizes, img_metas, device=device)
+
+ for i in range(self.num_stages):
+ stage = self.stages[i]
+ if stage.adapt_cfg['type'] == 'offset':
+ offset_list = stage.anchor_offset(anchor_list,
+ stage.anchor_strides,
+ featmap_sizes)
+ else:
+ offset_list = None
+ x, cls_score, bbox_pred = stage(x, offset_list)
+ if i < self.num_stages - 1:
+ anchor_list = stage.refine_bboxes(anchor_list, bbox_pred,
+ img_metas)
+
+ proposal_list = self.stages[-1].get_bboxes(anchor_list, cls_score,
+ bbox_pred, img_metas,
+ self.test_cfg)
+ return proposal_list
+
+ def aug_test_rpn(self, x, img_metas):
+ """Augmented forward test function."""
+ raise NotImplementedError
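+
+
+# A hedged configuration sketch showing how the two heads above are commonly
+# composed: a first 'dilation' stage without a classification branch followed
+# by an 'offset' stage with one. The helper is never called and the exact
+# values are illustrative assumptions, not settings shipped with this repo.
+def _example_cascade_rpn_head_cfg():
+    stage_common = dict(
+        type='StageCascadeRPNHead',
+        in_channels=256,
+        feat_channels=256,
+        reg_decoded_bbox=True,
+        bbox_coder=dict(
+            type='DeltaXYWHBBoxCoder',
+            target_means=(0.0, 0.0, 0.0, 0.0),
+            target_stds=(0.1, 0.1, 0.5, 0.5)),
+        loss_bbox=dict(type='IoULoss', linear=True, loss_weight=10.0))
+    stage1 = dict(
+        stage_common,
+        adapt_cfg=dict(type='dilation', dilation=3),
+        bridged_feature=True,
+        with_cls=False,
+        sampling=False)
+    stage2 = dict(
+        stage_common,
+        adapt_cfg=dict(type='offset'),
+        bridged_feature=False,
+        with_cls=True,
+        sampling=True,
+        loss_cls=dict(
+            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0))
+    return dict(type='CascadeRPNHead', num_stages=2, stages=[stage1, stage2])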
diff --git a/annotator/uniformer/mmdet/models/dense_heads/centripetal_head.py b/annotator/uniformer/mmdet/models/dense_heads/centripetal_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..6728218b60539a71f6353645635f741a1ad7263d
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/dense_heads/centripetal_head.py
@@ -0,0 +1,421 @@
+import torch.nn as nn
+from mmcv.cnn import ConvModule, normal_init
+from mmcv.ops import DeformConv2d
+
+from mmdet.core import multi_apply
+from ..builder import HEADS, build_loss
+from .corner_head import CornerHead
+
+
+@HEADS.register_module()
+class CentripetalHead(CornerHead):
+ """Head of CentripetalNet: Pursuing High-quality Keypoint Pairs for Object
+ Detection.
+
+ CentripetalHead inherits from :class:`CornerHead`. It removes the
+ embedding branch and adds guiding shift and centripetal shift branches.
+ More details can be found in the `paper
+ `_ .
+
+ Args:
+ num_classes (int): Number of categories excluding the background
+ category.
+ in_channels (int): Number of channels in the input feature map.
+        num_feat_levels (int): Levels of feature from the previous module. 2
+            for HourglassNet-104 and 1 for HourglassNet-52. HourglassNet-104
+            outputs both the final feature and an intermediate supervision
+            feature, while HourglassNet-52 only outputs the final feature.
+            Default: 2.
+ corner_emb_channels (int): Channel of embedding vector. Default: 1.
+ train_cfg (dict | None): Training config. Useless in CornerHead,
+ but we keep this variable for SingleStageDetector. Default: None.
+ test_cfg (dict | None): Testing config of CornerHead. Default: None.
+ loss_heatmap (dict | None): Config of corner heatmap loss. Default:
+ GaussianFocalLoss.
+ loss_embedding (dict | None): Config of corner embedding loss. Default:
+ AssociativeEmbeddingLoss.
+ loss_offset (dict | None): Config of corner offset loss. Default:
+ SmoothL1Loss.
+ loss_guiding_shift (dict): Config of guiding shift loss. Default:
+ SmoothL1Loss.
+ loss_centripetal_shift (dict): Config of centripetal shift loss.
+ Default: SmoothL1Loss.
+ """
+
+ def __init__(self,
+ *args,
+ centripetal_shift_channels=2,
+ guiding_shift_channels=2,
+ feat_adaption_conv_kernel=3,
+ loss_guiding_shift=dict(
+ type='SmoothL1Loss', beta=1.0, loss_weight=0.05),
+ loss_centripetal_shift=dict(
+ type='SmoothL1Loss', beta=1.0, loss_weight=1),
+ **kwargs):
+ assert centripetal_shift_channels == 2, (
+ 'CentripetalHead only support centripetal_shift_channels == 2')
+ self.centripetal_shift_channels = centripetal_shift_channels
+ assert guiding_shift_channels == 2, (
+ 'CentripetalHead only support guiding_shift_channels == 2')
+ self.guiding_shift_channels = guiding_shift_channels
+ self.feat_adaption_conv_kernel = feat_adaption_conv_kernel
+ super(CentripetalHead, self).__init__(*args, **kwargs)
+ self.loss_guiding_shift = build_loss(loss_guiding_shift)
+ self.loss_centripetal_shift = build_loss(loss_centripetal_shift)
+
+ def _init_centripetal_layers(self):
+ """Initialize centripetal layers.
+
+ Including feature adaption deform convs (feat_adaption), deform offset
+ prediction convs (dcn_off), guiding shift (guiding_shift) and
+        centripetal shift (centripetal_shift). Each branch has two parts:
+ prefix `tl_` for top-left and `br_` for bottom-right.
+ """
+ self.tl_feat_adaption = nn.ModuleList()
+ self.br_feat_adaption = nn.ModuleList()
+ self.tl_dcn_offset = nn.ModuleList()
+ self.br_dcn_offset = nn.ModuleList()
+ self.tl_guiding_shift = nn.ModuleList()
+ self.br_guiding_shift = nn.ModuleList()
+ self.tl_centripetal_shift = nn.ModuleList()
+ self.br_centripetal_shift = nn.ModuleList()
+
+ for _ in range(self.num_feat_levels):
+ self.tl_feat_adaption.append(
+ DeformConv2d(self.in_channels, self.in_channels,
+ self.feat_adaption_conv_kernel, 1, 1))
+ self.br_feat_adaption.append(
+ DeformConv2d(self.in_channels, self.in_channels,
+ self.feat_adaption_conv_kernel, 1, 1))
+
+ self.tl_guiding_shift.append(
+ self._make_layers(
+ out_channels=self.guiding_shift_channels,
+ in_channels=self.in_channels))
+ self.br_guiding_shift.append(
+ self._make_layers(
+ out_channels=self.guiding_shift_channels,
+ in_channels=self.in_channels))
+
+ self.tl_dcn_offset.append(
+ ConvModule(
+ self.guiding_shift_channels,
+ self.feat_adaption_conv_kernel**2 *
+ self.guiding_shift_channels,
+ 1,
+ bias=False,
+ act_cfg=None))
+ self.br_dcn_offset.append(
+ ConvModule(
+ self.guiding_shift_channels,
+ self.feat_adaption_conv_kernel**2 *
+ self.guiding_shift_channels,
+ 1,
+ bias=False,
+ act_cfg=None))
+
+ self.tl_centripetal_shift.append(
+ self._make_layers(
+ out_channels=self.centripetal_shift_channels,
+ in_channels=self.in_channels))
+ self.br_centripetal_shift.append(
+ self._make_layers(
+ out_channels=self.centripetal_shift_channels,
+ in_channels=self.in_channels))
+
+ def _init_layers(self):
+ """Initialize layers for CentripetalHead.
+
+ Including two parts: CornerHead layers and CentripetalHead layers
+ """
+ super()._init_layers() # using _init_layers in CornerHead
+ self._init_centripetal_layers()
+
+ def init_weights(self):
+ """Initialize weights of the head."""
+ super().init_weights()
+ for i in range(self.num_feat_levels):
+ normal_init(self.tl_feat_adaption[i], std=0.01)
+ normal_init(self.br_feat_adaption[i], std=0.01)
+ normal_init(self.tl_dcn_offset[i].conv, std=0.1)
+ normal_init(self.br_dcn_offset[i].conv, std=0.1)
+ _ = [x.conv.reset_parameters() for x in self.tl_guiding_shift[i]]
+ _ = [x.conv.reset_parameters() for x in self.br_guiding_shift[i]]
+ _ = [
+ x.conv.reset_parameters() for x in self.tl_centripetal_shift[i]
+ ]
+ _ = [
+ x.conv.reset_parameters() for x in self.br_centripetal_shift[i]
+ ]
+
+ def forward_single(self, x, lvl_ind):
+ """Forward feature of a single level.
+
+ Args:
+ x (Tensor): Feature of a single level.
+ lvl_ind (int): Level index of current feature.
+
+ Returns:
+ tuple[Tensor]: A tuple of CentripetalHead's output for current
+ feature level. Containing the following Tensors:
+
+ - tl_heat (Tensor): Predicted top-left corner heatmap.
+ - br_heat (Tensor): Predicted bottom-right corner heatmap.
+ - tl_off (Tensor): Predicted top-left offset heatmap.
+ - br_off (Tensor): Predicted bottom-right offset heatmap.
+ - tl_guiding_shift (Tensor): Predicted top-left guiding shift
+ heatmap.
+ - br_guiding_shift (Tensor): Predicted bottom-right guiding
+ shift heatmap.
+ - tl_centripetal_shift (Tensor): Predicted top-left centripetal
+ shift heatmap.
+ - br_centripetal_shift (Tensor): Predicted bottom-right
+ centripetal shift heatmap.
+ """
+ tl_heat, br_heat, _, _, tl_off, br_off, tl_pool, br_pool = super(
+ ).forward_single(
+ x, lvl_ind, return_pool=True)
+
+ tl_guiding_shift = self.tl_guiding_shift[lvl_ind](tl_pool)
+ br_guiding_shift = self.br_guiding_shift[lvl_ind](br_pool)
+
+ tl_dcn_offset = self.tl_dcn_offset[lvl_ind](tl_guiding_shift.detach())
+ br_dcn_offset = self.br_dcn_offset[lvl_ind](br_guiding_shift.detach())
+
+ tl_feat_adaption = self.tl_feat_adaption[lvl_ind](tl_pool,
+ tl_dcn_offset)
+ br_feat_adaption = self.br_feat_adaption[lvl_ind](br_pool,
+ br_dcn_offset)
+
+ tl_centripetal_shift = self.tl_centripetal_shift[lvl_ind](
+ tl_feat_adaption)
+ br_centripetal_shift = self.br_centripetal_shift[lvl_ind](
+ br_feat_adaption)
+
+ result_list = [
+ tl_heat, br_heat, tl_off, br_off, tl_guiding_shift,
+ br_guiding_shift, tl_centripetal_shift, br_centripetal_shift
+ ]
+ return result_list
+
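+    # In forward_single above the guiding shift is both a supervised output
+    # and, detached, the input of a 1x1 conv that predicts the DCN offsets;
+    # the deform-aligned pooled feature then produces the centripetal shift.
+    # At decode time only the centripetal shift is used to pair corners; the
+    # guiding shift is an auxiliary training signal (see get_bboxes below).
+    # (Comment added for clarity; it introduces no new behaviour.)
+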
+ def loss(self,
+ tl_heats,
+ br_heats,
+ tl_offs,
+ br_offs,
+ tl_guiding_shifts,
+ br_guiding_shifts,
+ tl_centripetal_shifts,
+ br_centripetal_shifts,
+ gt_bboxes,
+ gt_labels,
+ img_metas,
+ gt_bboxes_ignore=None):
+ """Compute losses of the head.
+
+ Args:
+ tl_heats (list[Tensor]): Top-left corner heatmaps for each level
+ with shape (N, num_classes, H, W).
+ br_heats (list[Tensor]): Bottom-right corner heatmaps for each
+ level with shape (N, num_classes, H, W).
+ tl_offs (list[Tensor]): Top-left corner offsets for each level
+ with shape (N, corner_offset_channels, H, W).
+ br_offs (list[Tensor]): Bottom-right corner offsets for each level
+ with shape (N, corner_offset_channels, H, W).
+ tl_guiding_shifts (list[Tensor]): Top-left guiding shifts for each
+ level with shape (N, guiding_shift_channels, H, W).
+ br_guiding_shifts (list[Tensor]): Bottom-right guiding shifts for
+ each level with shape (N, guiding_shift_channels, H, W).
+ tl_centripetal_shifts (list[Tensor]): Top-left centripetal shifts
+ for each level with shape (N, centripetal_shift_channels, H,
+ W).
+ br_centripetal_shifts (list[Tensor]): Bottom-right centripetal
+ shifts for each level with shape (N,
+ centripetal_shift_channels, H, W).
+ gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
+ shape (num_gts, 4) in [left, top, right, bottom] format.
+ gt_labels (list[Tensor]): Class indices corresponding to each box.
+ img_metas (list[dict]): Meta information of each image, e.g.,
+ image size, scaling factor, etc.
+ gt_bboxes_ignore (list[Tensor] | None): Specify which bounding
+ boxes can be ignored when computing the loss.
+
+ Returns:
+ dict[str, Tensor]: A dictionary of loss components. Containing the
+ following losses:
+
+ - det_loss (list[Tensor]): Corner keypoint losses of all
+ feature levels.
+ - off_loss (list[Tensor]): Corner offset losses of all feature
+ levels.
+ - guiding_loss (list[Tensor]): Guiding shift losses of all
+ feature levels.
+ - centripetal_loss (list[Tensor]): Centripetal shift losses of
+ all feature levels.
+ """
+ targets = self.get_targets(
+ gt_bboxes,
+ gt_labels,
+ tl_heats[-1].shape,
+ img_metas[0]['pad_shape'],
+ with_corner_emb=self.with_corner_emb,
+ with_guiding_shift=True,
+ with_centripetal_shift=True)
+ mlvl_targets = [targets for _ in range(self.num_feat_levels)]
+ [det_losses, off_losses, guiding_losses, centripetal_losses
+ ] = multi_apply(self.loss_single, tl_heats, br_heats, tl_offs,
+ br_offs, tl_guiding_shifts, br_guiding_shifts,
+ tl_centripetal_shifts, br_centripetal_shifts,
+ mlvl_targets)
+ loss_dict = dict(
+ det_loss=det_losses,
+ off_loss=off_losses,
+ guiding_loss=guiding_losses,
+ centripetal_loss=centripetal_losses)
+ return loss_dict
+
+ def loss_single(self, tl_hmp, br_hmp, tl_off, br_off, tl_guiding_shift,
+ br_guiding_shift, tl_centripetal_shift,
+ br_centripetal_shift, targets):
+ """Compute losses for single level.
+
+ Args:
+ tl_hmp (Tensor): Top-left corner heatmap for current level with
+ shape (N, num_classes, H, W).
+ br_hmp (Tensor): Bottom-right corner heatmap for current level with
+ shape (N, num_classes, H, W).
+ tl_off (Tensor): Top-left corner offset for current level with
+ shape (N, corner_offset_channels, H, W).
+ br_off (Tensor): Bottom-right corner offset for current level with
+ shape (N, corner_offset_channels, H, W).
+ tl_guiding_shift (Tensor): Top-left guiding shift for current level
+ with shape (N, guiding_shift_channels, H, W).
+ br_guiding_shift (Tensor): Bottom-right guiding shift for current
+ level with shape (N, guiding_shift_channels, H, W).
+ tl_centripetal_shift (Tensor): Top-left centripetal shift for
+ current level with shape (N, centripetal_shift_channels, H, W).
+ br_centripetal_shift (Tensor): Bottom-right centripetal shift for
+ current level with shape (N, centripetal_shift_channels, H, W).
+ targets (dict): Corner target generated by `get_targets`.
+
+ Returns:
+            tuple[torch.Tensor]: Losses of the head's different branches
+ containing the following losses:
+
+ - det_loss (Tensor): Corner keypoint loss.
+ - off_loss (Tensor): Corner offset loss.
+ - guiding_loss (Tensor): Guiding shift loss.
+ - centripetal_loss (Tensor): Centripetal shift loss.
+ """
+ targets['corner_embedding'] = None
+
+ det_loss, _, _, off_loss = super().loss_single(tl_hmp, br_hmp, None,
+ None, tl_off, br_off,
+ targets)
+
+ gt_tl_guiding_shift = targets['topleft_guiding_shift']
+ gt_br_guiding_shift = targets['bottomright_guiding_shift']
+ gt_tl_centripetal_shift = targets['topleft_centripetal_shift']
+ gt_br_centripetal_shift = targets['bottomright_centripetal_shift']
+
+ gt_tl_heatmap = targets['topleft_heatmap']
+ gt_br_heatmap = targets['bottomright_heatmap']
+ # We only compute the offset loss at the real corner position.
+ # The value of real corner would be 1 in heatmap ground truth.
+ # The mask is computed in class agnostic mode and its shape is
+ # batch * 1 * width * height.
+ tl_mask = gt_tl_heatmap.eq(1).sum(1).gt(0).unsqueeze(1).type_as(
+ gt_tl_heatmap)
+ br_mask = gt_br_heatmap.eq(1).sum(1).gt(0).unsqueeze(1).type_as(
+ gt_br_heatmap)
+
+ # Guiding shift loss
+ tl_guiding_loss = self.loss_guiding_shift(
+ tl_guiding_shift,
+ gt_tl_guiding_shift,
+ tl_mask,
+ avg_factor=tl_mask.sum())
+ br_guiding_loss = self.loss_guiding_shift(
+ br_guiding_shift,
+ gt_br_guiding_shift,
+ br_mask,
+ avg_factor=br_mask.sum())
+ guiding_loss = (tl_guiding_loss + br_guiding_loss) / 2.0
+ # Centripetal shift loss
+ tl_centripetal_loss = self.loss_centripetal_shift(
+ tl_centripetal_shift,
+ gt_tl_centripetal_shift,
+ tl_mask,
+ avg_factor=tl_mask.sum())
+ br_centripetal_loss = self.loss_centripetal_shift(
+ br_centripetal_shift,
+ gt_br_centripetal_shift,
+ br_mask,
+ avg_factor=br_mask.sum())
+ centripetal_loss = (tl_centripetal_loss + br_centripetal_loss) / 2.0
+
+ return det_loss, off_loss, guiding_loss, centripetal_loss
+
+ def get_bboxes(self,
+ tl_heats,
+ br_heats,
+ tl_offs,
+ br_offs,
+ tl_guiding_shifts,
+ br_guiding_shifts,
+ tl_centripetal_shifts,
+ br_centripetal_shifts,
+ img_metas,
+ rescale=False,
+ with_nms=True):
+ """Transform network output for a batch into bbox predictions.
+
+ Args:
+ tl_heats (list[Tensor]): Top-left corner heatmaps for each level
+ with shape (N, num_classes, H, W).
+ br_heats (list[Tensor]): Bottom-right corner heatmaps for each
+ level with shape (N, num_classes, H, W).
+ tl_offs (list[Tensor]): Top-left corner offsets for each level
+ with shape (N, corner_offset_channels, H, W).
+ br_offs (list[Tensor]): Bottom-right corner offsets for each level
+ with shape (N, corner_offset_channels, H, W).
+ tl_guiding_shifts (list[Tensor]): Top-left guiding shifts for each
+ level with shape (N, guiding_shift_channels, H, W). Useless in
+ this function, we keep this arg because it's the raw output
+ from CentripetalHead.
+ br_guiding_shifts (list[Tensor]): Bottom-right guiding shifts for
+ each level with shape (N, guiding_shift_channels, H, W).
+ Useless in this function, we keep this arg because it's the
+ raw output from CentripetalHead.
+ tl_centripetal_shifts (list[Tensor]): Top-left centripetal shifts
+ for each level with shape (N, centripetal_shift_channels, H,
+ W).
+ br_centripetal_shifts (list[Tensor]): Bottom-right centripetal
+ shifts for each level with shape (N,
+ centripetal_shift_channels, H, W).
+ img_metas (list[dict]): Meta information of each image, e.g.,
+ image size, scaling factor, etc.
+ rescale (bool): If True, return boxes in original image space.
+ Default: False.
+ with_nms (bool): If True, do nms before return boxes.
+ Default: True.
+ """
+ assert tl_heats[-1].shape[0] == br_heats[-1].shape[0] == len(img_metas)
+ result_list = []
+ for img_id in range(len(img_metas)):
+ result_list.append(
+ self._get_bboxes_single(
+ tl_heats[-1][img_id:img_id + 1, :],
+ br_heats[-1][img_id:img_id + 1, :],
+ tl_offs[-1][img_id:img_id + 1, :],
+ br_offs[-1][img_id:img_id + 1, :],
+ img_metas[img_id],
+ tl_emb=None,
+ br_emb=None,
+ tl_centripetal_shift=tl_centripetal_shifts[-1][
+ img_id:img_id + 1, :],
+ br_centripetal_shift=br_centripetal_shifts[-1][
+ img_id:img_id + 1, :],
+ rescale=rescale,
+ with_nms=with_nms))
+
+ return result_list
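+
+
+# A hedged sketch of how this head is typically instantiated for an
+# HourglassNet-104 backbone (two feature levels; corner embedding is disabled
+# because centripetal shifts take over corner pairing). The helper is never
+# called and the values are illustrative assumptions, not settings from this
+# repo.
+def _example_centripetal_head_cfg():
+    return dict(
+        type='CentripetalHead',
+        num_classes=80,
+        in_channels=256,
+        num_feat_levels=2,
+        corner_emb_channels=0,  # disables the embedding branch
+        loss_heatmap=dict(
+            type='GaussianFocalLoss', alpha=2.0, gamma=4.0, loss_weight=1),
+        loss_offset=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1),
+        loss_guiding_shift=dict(
+            type='SmoothL1Loss', beta=1.0, loss_weight=0.05),
+        loss_centripetal_shift=dict(
+            type='SmoothL1Loss', beta=1.0, loss_weight=1))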
diff --git a/annotator/uniformer/mmdet/models/dense_heads/corner_head.py b/annotator/uniformer/mmdet/models/dense_heads/corner_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..50cdb49a29f2ced1a31a50e654a3bdc14f5f5004
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/dense_heads/corner_head.py
@@ -0,0 +1,1074 @@
+from logging import warning
+from math import ceil, log
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from mmcv.cnn import ConvModule, bias_init_with_prob
+from mmcv.ops import CornerPool, batched_nms
+
+from mmdet.core import multi_apply
+from ..builder import HEADS, build_loss
+from ..utils import gaussian_radius, gen_gaussian_target
+from .base_dense_head import BaseDenseHead
+
+
+class BiCornerPool(nn.Module):
+ """Bidirectional Corner Pooling Module (TopLeft, BottomRight, etc.)
+
+ Args:
+ in_channels (int): Input channels of module.
+ out_channels (int): Output channels of module.
+ feat_channels (int): Feature channels of module.
+ directions (list[str]): Directions of two CornerPools.
+ norm_cfg (dict): Dictionary to construct and config norm layer.
+ """
+
+ def __init__(self,
+ in_channels,
+ directions,
+ feat_channels=128,
+ out_channels=128,
+ norm_cfg=dict(type='BN', requires_grad=True)):
+ super(BiCornerPool, self).__init__()
+ self.direction1_conv = ConvModule(
+ in_channels, feat_channels, 3, padding=1, norm_cfg=norm_cfg)
+ self.direction2_conv = ConvModule(
+ in_channels, feat_channels, 3, padding=1, norm_cfg=norm_cfg)
+
+ self.aftpool_conv = ConvModule(
+ feat_channels,
+ out_channels,
+ 3,
+ padding=1,
+ norm_cfg=norm_cfg,
+ act_cfg=None)
+
+ self.conv1 = ConvModule(
+ in_channels, out_channels, 1, norm_cfg=norm_cfg, act_cfg=None)
+ self.conv2 = ConvModule(
+ in_channels, out_channels, 3, padding=1, norm_cfg=norm_cfg)
+
+ self.direction1_pool = CornerPool(directions[0])
+ self.direction2_pool = CornerPool(directions[1])
+ self.relu = nn.ReLU(inplace=True)
+
+ def forward(self, x):
+ """Forward features from the upstream network.
+
+ Args:
+ x (tensor): Input feature of BiCornerPool.
+
+ Returns:
+ conv2 (tensor): Output feature of BiCornerPool.
+ """
+ direction1_conv = self.direction1_conv(x)
+ direction2_conv = self.direction2_conv(x)
+ direction1_feat = self.direction1_pool(direction1_conv)
+ direction2_feat = self.direction2_pool(direction2_conv)
+ aftpool_conv = self.aftpool_conv(direction1_feat + direction2_feat)
+ conv1 = self.conv1(x)
+ relu = self.relu(aftpool_conv + conv1)
+ conv2 = self.conv2(relu)
+ return conv2
+
+
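+# A minimal usage sketch of BiCornerPool (illustrative only; the helper is an
+# assumption added for documentation, is never called, and needs the compiled
+# CornerPool op from mmcv to actually run).
+def _demo_bicorner_pool():
+    feat = torch.randn(1, 256, 64, 64)
+    # Top-left pooling propagates maxima from the right and from below, so a
+    # corner location can "see" the extent of the object it belongs to.
+    tl_pool = BiCornerPool(256, ['top', 'left'], out_channels=256)
+    out = tl_pool(feat)
+    assert out.shape == (1, 256, 64, 64)
+    return out
+
+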
+@HEADS.register_module()
+class CornerHead(BaseDenseHead):
+ """Head of CornerNet: Detecting Objects as Paired Keypoints.
+
+ Code is modified from the `official github repo
+ `_ .
+
+ More details can be found in the `paper
+ `_ .
+
+ Args:
+ num_classes (int): Number of categories excluding the background
+ category.
+ in_channels (int): Number of channels in the input feature map.
+        num_feat_levels (int): Levels of feature from the previous module. 2
+            for HourglassNet-104 and 1 for HourglassNet-52, because
+            HourglassNet-104 outputs both the final feature and an
+            intermediate supervision feature while HourglassNet-52 only
+            outputs the final feature. Default: 2.
+ corner_emb_channels (int): Channel of embedding vector. Default: 1.
+ train_cfg (dict | None): Training config. Useless in CornerHead,
+ but we keep this variable for SingleStageDetector. Default: None.
+ test_cfg (dict | None): Testing config of CornerHead. Default: None.
+ loss_heatmap (dict | None): Config of corner heatmap loss. Default:
+ GaussianFocalLoss.
+ loss_embedding (dict | None): Config of corner embedding loss. Default:
+ AssociativeEmbeddingLoss.
+ loss_offset (dict | None): Config of corner offset loss. Default:
+ SmoothL1Loss.
+ """
+
+ def __init__(self,
+ num_classes,
+ in_channels,
+ num_feat_levels=2,
+ corner_emb_channels=1,
+ train_cfg=None,
+ test_cfg=None,
+ loss_heatmap=dict(
+ type='GaussianFocalLoss',
+ alpha=2.0,
+ gamma=4.0,
+ loss_weight=1),
+ loss_embedding=dict(
+ type='AssociativeEmbeddingLoss',
+ pull_weight=0.25,
+ push_weight=0.25),
+ loss_offset=dict(
+ type='SmoothL1Loss', beta=1.0, loss_weight=1)):
+ super(CornerHead, self).__init__()
+ self.num_classes = num_classes
+ self.in_channels = in_channels
+ self.corner_emb_channels = corner_emb_channels
+ self.with_corner_emb = self.corner_emb_channels > 0
+ self.corner_offset_channels = 2
+ self.num_feat_levels = num_feat_levels
+ self.loss_heatmap = build_loss(
+ loss_heatmap) if loss_heatmap is not None else None
+ self.loss_embedding = build_loss(
+ loss_embedding) if loss_embedding is not None else None
+ self.loss_offset = build_loss(
+ loss_offset) if loss_offset is not None else None
+ self.train_cfg = train_cfg
+ self.test_cfg = test_cfg
+
+ self._init_layers()
+
+ def _make_layers(self, out_channels, in_channels=256, feat_channels=256):
+ """Initialize conv sequential for CornerHead."""
+ return nn.Sequential(
+ ConvModule(in_channels, feat_channels, 3, padding=1),
+ ConvModule(
+ feat_channels, out_channels, 1, norm_cfg=None, act_cfg=None))
+
+ def _init_corner_kpt_layers(self):
+ """Initialize corner keypoint layers.
+
+ Including corner heatmap branch and corner offset branch. Each branch
+ has two parts: prefix `tl_` for top-left and `br_` for bottom-right.
+ """
+ self.tl_pool, self.br_pool = nn.ModuleList(), nn.ModuleList()
+ self.tl_heat, self.br_heat = nn.ModuleList(), nn.ModuleList()
+ self.tl_off, self.br_off = nn.ModuleList(), nn.ModuleList()
+
+ for _ in range(self.num_feat_levels):
+ self.tl_pool.append(
+ BiCornerPool(
+ self.in_channels, ['top', 'left'],
+ out_channels=self.in_channels))
+ self.br_pool.append(
+ BiCornerPool(
+ self.in_channels, ['bottom', 'right'],
+ out_channels=self.in_channels))
+
+ self.tl_heat.append(
+ self._make_layers(
+ out_channels=self.num_classes,
+ in_channels=self.in_channels))
+ self.br_heat.append(
+ self._make_layers(
+ out_channels=self.num_classes,
+ in_channels=self.in_channels))
+
+ self.tl_off.append(
+ self._make_layers(
+ out_channels=self.corner_offset_channels,
+ in_channels=self.in_channels))
+ self.br_off.append(
+ self._make_layers(
+ out_channels=self.corner_offset_channels,
+ in_channels=self.in_channels))
+
+ def _init_corner_emb_layers(self):
+ """Initialize corner embedding layers.
+
+ Only include corner embedding branch with two parts: prefix `tl_` for
+ top-left and `br_` for bottom-right.
+ """
+ self.tl_emb, self.br_emb = nn.ModuleList(), nn.ModuleList()
+
+ for _ in range(self.num_feat_levels):
+ self.tl_emb.append(
+ self._make_layers(
+ out_channels=self.corner_emb_channels,
+ in_channels=self.in_channels))
+ self.br_emb.append(
+ self._make_layers(
+ out_channels=self.corner_emb_channels,
+ in_channels=self.in_channels))
+
+ def _init_layers(self):
+ """Initialize layers for CornerHead.
+
+ Including two parts: corner keypoint layers and corner embedding layers
+ """
+ self._init_corner_kpt_layers()
+ if self.with_corner_emb:
+ self._init_corner_emb_layers()
+
+ def init_weights(self):
+ """Initialize weights of the head."""
+ bias_init = bias_init_with_prob(0.1)
+ for i in range(self.num_feat_levels):
+ # The initialization of parameters are different between nn.Conv2d
+ # and ConvModule. Our experiments show that using the original
+ # initialization of nn.Conv2d increases the final mAP by about 0.2%
+ self.tl_heat[i][-1].conv.reset_parameters()
+ self.tl_heat[i][-1].conv.bias.data.fill_(bias_init)
+ self.br_heat[i][-1].conv.reset_parameters()
+ self.br_heat[i][-1].conv.bias.data.fill_(bias_init)
+ self.tl_off[i][-1].conv.reset_parameters()
+ self.br_off[i][-1].conv.reset_parameters()
+ if self.with_corner_emb:
+ self.tl_emb[i][-1].conv.reset_parameters()
+ self.br_emb[i][-1].conv.reset_parameters()
+
+ def forward(self, feats):
+ """Forward features from the upstream network.
+
+ Args:
+ feats (tuple[Tensor]): Features from the upstream network, each is
+ a 4D-tensor.
+
+ Returns:
+ tuple: Usually a tuple of corner heatmaps, offset heatmaps and
+ embedding heatmaps.
+ - tl_heats (list[Tensor]): Top-left corner heatmaps for all
+ levels, each is a 4D-tensor, the channels number is
+ num_classes.
+ - br_heats (list[Tensor]): Bottom-right corner heatmaps for all
+ levels, each is a 4D-tensor, the channels number is
+ num_classes.
+ - tl_embs (list[Tensor] | list[None]): Top-left embedding
+ heatmaps for all levels, each is a 4D-tensor or None.
+ If not None, the channels number is corner_emb_channels.
+ - br_embs (list[Tensor] | list[None]): Bottom-right embedding
+ heatmaps for all levels, each is a 4D-tensor or None.
+ If not None, the channels number is corner_emb_channels.
+ - tl_offs (list[Tensor]): Top-left offset heatmaps for all
+ levels, each is a 4D-tensor. The channels number is
+ corner_offset_channels.
+ - br_offs (list[Tensor]): Bottom-right offset heatmaps for all
+ levels, each is a 4D-tensor. The channels number is
+ corner_offset_channels.
+ """
+ lvl_ind = list(range(self.num_feat_levels))
+ return multi_apply(self.forward_single, feats, lvl_ind)
+
+ def forward_single(self, x, lvl_ind, return_pool=False):
+ """Forward feature of a single level.
+
+ Args:
+ x (Tensor): Feature of a single level.
+ lvl_ind (int): Level index of current feature.
+ return_pool (bool): Return corner pool feature or not.
+
+ Returns:
+ tuple[Tensor]: A tuple of CornerHead's output for current feature
+ level. Containing the following Tensors:
+
+ - tl_heat (Tensor): Predicted top-left corner heatmap.
+ - br_heat (Tensor): Predicted bottom-right corner heatmap.
+ - tl_emb (Tensor | None): Predicted top-left embedding heatmap.
+ None for `self.with_corner_emb == False`.
+ - br_emb (Tensor | None): Predicted bottom-right embedding
+ heatmap. None for `self.with_corner_emb == False`.
+ - tl_off (Tensor): Predicted top-left offset heatmap.
+ - br_off (Tensor): Predicted bottom-right offset heatmap.
+              - tl_pool (Tensor): Top-left corner pool feature. Only returned
+                when ``return_pool`` is True.
+              - br_pool (Tensor): Bottom-right corner pool feature. Only
+                returned when ``return_pool`` is True.
+ """
+ tl_pool = self.tl_pool[lvl_ind](x)
+ tl_heat = self.tl_heat[lvl_ind](tl_pool)
+ br_pool = self.br_pool[lvl_ind](x)
+ br_heat = self.br_heat[lvl_ind](br_pool)
+
+ tl_emb, br_emb = None, None
+ if self.with_corner_emb:
+ tl_emb = self.tl_emb[lvl_ind](tl_pool)
+ br_emb = self.br_emb[lvl_ind](br_pool)
+
+ tl_off = self.tl_off[lvl_ind](tl_pool)
+ br_off = self.br_off[lvl_ind](br_pool)
+
+ result_list = [tl_heat, br_heat, tl_emb, br_emb, tl_off, br_off]
+ if return_pool:
+ result_list.append(tl_pool)
+ result_list.append(br_pool)
+
+ return result_list
+
+ def get_targets(self,
+ gt_bboxes,
+ gt_labels,
+ feat_shape,
+ img_shape,
+ with_corner_emb=False,
+ with_guiding_shift=False,
+ with_centripetal_shift=False):
+ """Generate corner targets.
+
+ Including corner heatmap, corner offset.
+
+ Optional: corner embedding, corner guiding shift, centripetal shift.
+
+ For CornerNet, we generate corner heatmap, corner offset and corner
+ embedding from this function.
+
+ For CentripetalNet, we generate corner heatmap, corner offset, guiding
+ shift and centripetal shift from this function.
+
+ Args:
+ gt_bboxes (list[Tensor]): Ground truth bboxes of each image, each
+ has shape (num_gt, 4).
+ gt_labels (list[Tensor]): Ground truth labels of each box, each has
+ shape (num_gt,).
+ feat_shape (list[int]): Shape of output feature,
+ [batch, channel, height, width].
+ img_shape (list[int]): Shape of input image,
+ [height, width, channel].
+ with_corner_emb (bool): Generate corner embedding target or not.
+ Default: False.
+ with_guiding_shift (bool): Generate guiding shift target or not.
+ Default: False.
+ with_centripetal_shift (bool): Generate centripetal shift target or
+ not. Default: False.
+
+ Returns:
+ dict: Ground truth of corner heatmap, corner offset, corner
+ embedding, guiding shift and centripetal shift. Containing the
+ following keys:
+
+ - topleft_heatmap (Tensor): Ground truth top-left corner
+ heatmap.
+ - bottomright_heatmap (Tensor): Ground truth bottom-right
+ corner heatmap.
+ - topleft_offset (Tensor): Ground truth top-left corner offset.
+ - bottomright_offset (Tensor): Ground truth bottom-right corner
+ offset.
+                - corner_embedding (list[list[list[int]]]): Ground truth corner
+                  embedding. Only returned when ``with_corner_emb`` is True.
+                - topleft_guiding_shift (Tensor): Ground truth top-left corner
+                  guiding shift. Only returned when ``with_guiding_shift`` is
+                  True.
+                - bottomright_guiding_shift (Tensor): Ground truth bottom-right
+                  corner guiding shift. Only returned when
+                  ``with_guiding_shift`` is True.
+                - topleft_centripetal_shift (Tensor): Ground truth top-left
+                  corner centripetal shift. Only returned when
+                  ``with_centripetal_shift`` is True.
+                - bottomright_centripetal_shift (Tensor): Ground truth
+                  bottom-right corner centripetal shift. Only returned when
+                  ``with_centripetal_shift`` is True.
+ """
+ batch_size, _, height, width = feat_shape
+ img_h, img_w = img_shape[:2]
+
+ width_ratio = float(width / img_w)
+ height_ratio = float(height / img_h)
+
+ gt_tl_heatmap = gt_bboxes[-1].new_zeros(
+ [batch_size, self.num_classes, height, width])
+ gt_br_heatmap = gt_bboxes[-1].new_zeros(
+ [batch_size, self.num_classes, height, width])
+ gt_tl_offset = gt_bboxes[-1].new_zeros([batch_size, 2, height, width])
+ gt_br_offset = gt_bboxes[-1].new_zeros([batch_size, 2, height, width])
+
+ if with_corner_emb:
+ match = []
+
+ # Guiding shift is a kind of offset, from center to corner
+ if with_guiding_shift:
+ gt_tl_guiding_shift = gt_bboxes[-1].new_zeros(
+ [batch_size, 2, height, width])
+ gt_br_guiding_shift = gt_bboxes[-1].new_zeros(
+ [batch_size, 2, height, width])
+ # Centripetal shift is also a kind of offset, from center to corner
+ # and normalized by log.
+ if with_centripetal_shift:
+ gt_tl_centripetal_shift = gt_bboxes[-1].new_zeros(
+ [batch_size, 2, height, width])
+ gt_br_centripetal_shift = gt_bboxes[-1].new_zeros(
+ [batch_size, 2, height, width])
+
+ for batch_id in range(batch_size):
+ # Ground truth of corner embedding per image is a list of coord set
+ corner_match = []
+ for box_id in range(len(gt_labels[batch_id])):
+ left, top, right, bottom = gt_bboxes[batch_id][box_id]
+ center_x = (left + right) / 2.0
+ center_y = (top + bottom) / 2.0
+ label = gt_labels[batch_id][box_id]
+
+ # Use coords in the feature level to generate ground truth
+ scale_left = left * width_ratio
+ scale_right = right * width_ratio
+ scale_top = top * height_ratio
+ scale_bottom = bottom * height_ratio
+ scale_center_x = center_x * width_ratio
+ scale_center_y = center_y * height_ratio
+
+ # Int coords on feature map/ground truth tensor
+ left_idx = int(min(scale_left, width - 1))
+ right_idx = int(min(scale_right, width - 1))
+ top_idx = int(min(scale_top, height - 1))
+ bottom_idx = int(min(scale_bottom, height - 1))
+
+ # Generate gaussian heatmap
+ scale_box_width = ceil(scale_right - scale_left)
+ scale_box_height = ceil(scale_bottom - scale_top)
+ radius = gaussian_radius((scale_box_height, scale_box_width),
+ min_overlap=0.3)
+ radius = max(0, int(radius))
+ gt_tl_heatmap[batch_id, label] = gen_gaussian_target(
+ gt_tl_heatmap[batch_id, label], [left_idx, top_idx],
+ radius)
+ gt_br_heatmap[batch_id, label] = gen_gaussian_target(
+ gt_br_heatmap[batch_id, label], [right_idx, bottom_idx],
+ radius)
+
+ # Generate corner offset
+ left_offset = scale_left - left_idx
+ top_offset = scale_top - top_idx
+ right_offset = scale_right - right_idx
+ bottom_offset = scale_bottom - bottom_idx
+ gt_tl_offset[batch_id, 0, top_idx, left_idx] = left_offset
+ gt_tl_offset[batch_id, 1, top_idx, left_idx] = top_offset
+ gt_br_offset[batch_id, 0, bottom_idx, right_idx] = right_offset
+ gt_br_offset[batch_id, 1, bottom_idx,
+ right_idx] = bottom_offset
+
+ # Generate corner embedding
+ if with_corner_emb:
+ corner_match.append([[top_idx, left_idx],
+ [bottom_idx, right_idx]])
+ # Generate guiding shift
+ if with_guiding_shift:
+ gt_tl_guiding_shift[batch_id, 0, top_idx,
+ left_idx] = scale_center_x - left_idx
+ gt_tl_guiding_shift[batch_id, 1, top_idx,
+ left_idx] = scale_center_y - top_idx
+ gt_br_guiding_shift[batch_id, 0, bottom_idx,
+ right_idx] = right_idx - scale_center_x
+ gt_br_guiding_shift[
+ batch_id, 1, bottom_idx,
+ right_idx] = bottom_idx - scale_center_y
+ # Generate centripetal shift
+ if with_centripetal_shift:
+ gt_tl_centripetal_shift[batch_id, 0, top_idx,
+ left_idx] = log(scale_center_x -
+ scale_left)
+ gt_tl_centripetal_shift[batch_id, 1, top_idx,
+ left_idx] = log(scale_center_y -
+ scale_top)
+ gt_br_centripetal_shift[batch_id, 0, bottom_idx,
+ right_idx] = log(scale_right -
+ scale_center_x)
+ gt_br_centripetal_shift[batch_id, 1, bottom_idx,
+ right_idx] = log(scale_bottom -
+ scale_center_y)
+
+ if with_corner_emb:
+ match.append(corner_match)
+
+ target_result = dict(
+ topleft_heatmap=gt_tl_heatmap,
+ topleft_offset=gt_tl_offset,
+ bottomright_heatmap=gt_br_heatmap,
+ bottomright_offset=gt_br_offset)
+
+ if with_corner_emb:
+ target_result.update(corner_embedding=match)
+ if with_guiding_shift:
+ target_result.update(
+ topleft_guiding_shift=gt_tl_guiding_shift,
+ bottomright_guiding_shift=gt_br_guiding_shift)
+ if with_centripetal_shift:
+ target_result.update(
+ topleft_centripetal_shift=gt_tl_centripetal_shift,
+ bottomright_centripetal_shift=gt_br_centripetal_shift)
+
+ return target_result
+
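+    # A worked illustration of get_targets above (comment added for clarity,
+    # with assumed numbers): for a 512x512 padded input and a 128x128 output
+    # feature, width_ratio = height_ratio = 0.25. A gt box (64, 96, 192, 160)
+    # in (left, top, right, bottom) format maps to scaled corners (16.0, 24.0)
+    # and (48.0, 40.0); gaussian peaks are drawn at the integer cells (16, 24)
+    # and (48, 40), and the corner offsets store the fractional parts lost by
+    # the int() rounding (0.0 in this example).
+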
+ def loss(self,
+ tl_heats,
+ br_heats,
+ tl_embs,
+ br_embs,
+ tl_offs,
+ br_offs,
+ gt_bboxes,
+ gt_labels,
+ img_metas,
+ gt_bboxes_ignore=None):
+ """Compute losses of the head.
+
+ Args:
+ tl_heats (list[Tensor]): Top-left corner heatmaps for each level
+ with shape (N, num_classes, H, W).
+ br_heats (list[Tensor]): Bottom-right corner heatmaps for each
+ level with shape (N, num_classes, H, W).
+ tl_embs (list[Tensor]): Top-left corner embeddings for each level
+ with shape (N, corner_emb_channels, H, W).
+ br_embs (list[Tensor]): Bottom-right corner embeddings for each
+ level with shape (N, corner_emb_channels, H, W).
+ tl_offs (list[Tensor]): Top-left corner offsets for each level
+ with shape (N, corner_offset_channels, H, W).
+ br_offs (list[Tensor]): Bottom-right corner offsets for each level
+ with shape (N, corner_offset_channels, H, W).
+ gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
+ shape (num_gts, 4) in [left, top, right, bottom] format.
+ gt_labels (list[Tensor]): Class indices corresponding to each box.
+ img_metas (list[dict]): Meta information of each image, e.g.,
+ image size, scaling factor, etc.
+ gt_bboxes_ignore (list[Tensor] | None): Specify which bounding
+ boxes can be ignored when computing the loss.
+
+ Returns:
+ dict[str, Tensor]: A dictionary of loss components. Containing the
+ following losses:
+
+ - det_loss (list[Tensor]): Corner keypoint losses of all
+ feature levels.
+ - pull_loss (list[Tensor]): Part one of AssociativeEmbedding
+ losses of all feature levels.
+ - push_loss (list[Tensor]): Part two of AssociativeEmbedding
+ losses of all feature levels.
+ - off_loss (list[Tensor]): Corner offset losses of all feature
+ levels.
+ """
+ targets = self.get_targets(
+ gt_bboxes,
+ gt_labels,
+ tl_heats[-1].shape,
+ img_metas[0]['pad_shape'],
+ with_corner_emb=self.with_corner_emb)
+ mlvl_targets = [targets for _ in range(self.num_feat_levels)]
+ det_losses, pull_losses, push_losses, off_losses = multi_apply(
+ self.loss_single, tl_heats, br_heats, tl_embs, br_embs, tl_offs,
+ br_offs, mlvl_targets)
+ loss_dict = dict(det_loss=det_losses, off_loss=off_losses)
+ if self.with_corner_emb:
+ loss_dict.update(pull_loss=pull_losses, push_loss=push_losses)
+ return loss_dict
+
+ def loss_single(self, tl_hmp, br_hmp, tl_emb, br_emb, tl_off, br_off,
+ targets):
+ """Compute losses for single level.
+
+ Args:
+ tl_hmp (Tensor): Top-left corner heatmap for current level with
+ shape (N, num_classes, H, W).
+ br_hmp (Tensor): Bottom-right corner heatmap for current level with
+ shape (N, num_classes, H, W).
+ tl_emb (Tensor): Top-left corner embedding for current level with
+ shape (N, corner_emb_channels, H, W).
+ br_emb (Tensor): Bottom-right corner embedding for current level
+ with shape (N, corner_emb_channels, H, W).
+ tl_off (Tensor): Top-left corner offset for current level with
+ shape (N, corner_offset_channels, H, W).
+ br_off (Tensor): Bottom-right corner offset for current level with
+ shape (N, corner_offset_channels, H, W).
+ targets (dict): Corner target generated by `get_targets`.
+
+ Returns:
+            tuple[torch.Tensor]: Losses of the head's different branches
+ containing the following losses:
+
+ - det_loss (Tensor): Corner keypoint loss.
+ - pull_loss (Tensor): Part one of AssociativeEmbedding loss.
+ - push_loss (Tensor): Part two of AssociativeEmbedding loss.
+ - off_loss (Tensor): Corner offset loss.
+ """
+ gt_tl_hmp = targets['topleft_heatmap']
+ gt_br_hmp = targets['bottomright_heatmap']
+ gt_tl_off = targets['topleft_offset']
+ gt_br_off = targets['bottomright_offset']
+ gt_embedding = targets['corner_embedding']
+
+ # Detection loss
+ tl_det_loss = self.loss_heatmap(
+ tl_hmp.sigmoid(),
+ gt_tl_hmp,
+ avg_factor=max(1,
+ gt_tl_hmp.eq(1).sum()))
+ br_det_loss = self.loss_heatmap(
+ br_hmp.sigmoid(),
+ gt_br_hmp,
+ avg_factor=max(1,
+ gt_br_hmp.eq(1).sum()))
+ det_loss = (tl_det_loss + br_det_loss) / 2.0
+
+ # AssociativeEmbedding loss
+ if self.with_corner_emb and self.loss_embedding is not None:
+ pull_loss, push_loss = self.loss_embedding(tl_emb, br_emb,
+ gt_embedding)
+ else:
+ pull_loss, push_loss = None, None
+
+ # Offset loss
+        # We only compute the offset loss at real corner positions.
+        # A real corner has value 1 in the ground-truth heatmap.
+        # The mask is computed in class-agnostic mode and its shape is
+        # batch * 1 * height * width.
+ tl_off_mask = gt_tl_hmp.eq(1).sum(1).gt(0).unsqueeze(1).type_as(
+ gt_tl_hmp)
+ br_off_mask = gt_br_hmp.eq(1).sum(1).gt(0).unsqueeze(1).type_as(
+ gt_br_hmp)
+ tl_off_loss = self.loss_offset(
+ tl_off,
+ gt_tl_off,
+ tl_off_mask,
+ avg_factor=max(1, tl_off_mask.sum()))
+ br_off_loss = self.loss_offset(
+ br_off,
+ gt_br_off,
+ br_off_mask,
+ avg_factor=max(1, br_off_mask.sum()))
+
+ off_loss = (tl_off_loss + br_off_loss) / 2.0
+
+ return det_loss, pull_loss, push_loss, off_loss
+
+ def get_bboxes(self,
+ tl_heats,
+ br_heats,
+ tl_embs,
+ br_embs,
+ tl_offs,
+ br_offs,
+ img_metas,
+ rescale=False,
+ with_nms=True):
+ """Transform network output for a batch into bbox predictions.
+
+ Args:
+ tl_heats (list[Tensor]): Top-left corner heatmaps for each level
+ with shape (N, num_classes, H, W).
+ br_heats (list[Tensor]): Bottom-right corner heatmaps for each
+ level with shape (N, num_classes, H, W).
+ tl_embs (list[Tensor]): Top-left corner embeddings for each level
+ with shape (N, corner_emb_channels, H, W).
+ br_embs (list[Tensor]): Bottom-right corner embeddings for each
+ level with shape (N, corner_emb_channels, H, W).
+ tl_offs (list[Tensor]): Top-left corner offsets for each level
+ with shape (N, corner_offset_channels, H, W).
+ br_offs (list[Tensor]): Bottom-right corner offsets for each level
+ with shape (N, corner_offset_channels, H, W).
+ img_metas (list[dict]): Meta information of each image, e.g.,
+ image size, scaling factor, etc.
+ rescale (bool): If True, return boxes in original image space.
+ Default: False.
+ with_nms (bool): If True, do nms before return boxes.
+ Default: True.
+ """
+ assert tl_heats[-1].shape[0] == br_heats[-1].shape[0] == len(img_metas)
+ result_list = []
+ for img_id in range(len(img_metas)):
+ result_list.append(
+ self._get_bboxes_single(
+ tl_heats[-1][img_id:img_id + 1, :],
+ br_heats[-1][img_id:img_id + 1, :],
+ tl_offs[-1][img_id:img_id + 1, :],
+ br_offs[-1][img_id:img_id + 1, :],
+ img_metas[img_id],
+ tl_emb=tl_embs[-1][img_id:img_id + 1, :],
+ br_emb=br_embs[-1][img_id:img_id + 1, :],
+ rescale=rescale,
+ with_nms=with_nms))
+
+ return result_list
+
+ def _get_bboxes_single(self,
+ tl_heat,
+ br_heat,
+ tl_off,
+ br_off,
+ img_meta,
+ tl_emb=None,
+ br_emb=None,
+ tl_centripetal_shift=None,
+ br_centripetal_shift=None,
+ rescale=False,
+ with_nms=True):
+ """Transform outputs for a single batch item into bbox predictions.
+
+ Args:
+ tl_heat (Tensor): Top-left corner heatmap for current level with
+ shape (N, num_classes, H, W).
+ br_heat (Tensor): Bottom-right corner heatmap for current level
+ with shape (N, num_classes, H, W).
+ tl_off (Tensor): Top-left corner offset for current level with
+ shape (N, corner_offset_channels, H, W).
+ br_off (Tensor): Bottom-right corner offset for current level with
+ shape (N, corner_offset_channels, H, W).
+ img_meta (dict): Meta information of current image, e.g.,
+ image size, scaling factor, etc.
+ tl_emb (Tensor): Top-left corner embedding for current level with
+ shape (N, corner_emb_channels, H, W).
+ br_emb (Tensor): Bottom-right corner embedding for current level
+ with shape (N, corner_emb_channels, H, W).
+ tl_centripetal_shift: Top-left corner's centripetal shift for
+ current level with shape (N, 2, H, W).
+ br_centripetal_shift: Bottom-right corner's centripetal shift for
+ current level with shape (N, 2, H, W).
+ rescale (bool): If True, return boxes in original image space.
+ Default: False.
+ with_nms (bool): If True, do nms before return boxes.
+ Default: True.
+ """
+ if isinstance(img_meta, (list, tuple)):
+ img_meta = img_meta[0]
+
+ batch_bboxes, batch_scores, batch_clses = self.decode_heatmap(
+ tl_heat=tl_heat.sigmoid(),
+ br_heat=br_heat.sigmoid(),
+ tl_off=tl_off,
+ br_off=br_off,
+ tl_emb=tl_emb,
+ br_emb=br_emb,
+ tl_centripetal_shift=tl_centripetal_shift,
+ br_centripetal_shift=br_centripetal_shift,
+ img_meta=img_meta,
+ k=self.test_cfg.corner_topk,
+ kernel=self.test_cfg.local_maximum_kernel,
+ distance_threshold=self.test_cfg.distance_threshold)
+
+ if rescale:
+ batch_bboxes /= batch_bboxes.new_tensor(img_meta['scale_factor'])
+
+ bboxes = batch_bboxes.view([-1, 4])
+ scores = batch_scores.view([-1, 1])
+ clses = batch_clses.view([-1, 1])
+
+ idx = scores.argsort(dim=0, descending=True)
+ bboxes = bboxes[idx].view([-1, 4])
+ scores = scores[idx].view(-1)
+ clses = clses[idx].view(-1)
+
+ detections = torch.cat([bboxes, scores.unsqueeze(-1)], -1)
+ keepinds = (detections[:, -1] > -0.1)
+ detections = detections[keepinds]
+ labels = clses[keepinds]
+
+ if with_nms:
+ detections, labels = self._bboxes_nms(detections, labels,
+ self.test_cfg)
+
+ return detections, labels
+
+ def _bboxes_nms(self, bboxes, labels, cfg):
+ if labels.numel() == 0:
+ return bboxes, labels
+
+ if 'nms_cfg' in cfg:
+            import warnings
+            warnings.warn('nms_cfg in test_cfg will be deprecated. '
+                          'Please rename it as nms')
+ if 'nms' not in cfg:
+ cfg.nms = cfg.nms_cfg
+
+ out_bboxes, keep = batched_nms(bboxes[:, :4], bboxes[:, -1], labels,
+ cfg.nms)
+ out_labels = labels[keep]
+
+ if len(out_bboxes) > 0:
+ idx = torch.argsort(out_bboxes[:, -1], descending=True)
+ idx = idx[:cfg.max_per_img]
+ out_bboxes = out_bboxes[idx]
+ out_labels = out_labels[idx]
+
+ return out_bboxes, out_labels
+
+ def _gather_feat(self, feat, ind, mask=None):
+ """Gather feature according to index.
+
+ Args:
+ feat (Tensor): Target feature map.
+ ind (Tensor): Target coord index.
+ mask (Tensor | None): Mask of featuremap. Default: None.
+
+ Returns:
+ feat (Tensor): Gathered feature.
+ """
+ dim = feat.size(2)
+ ind = ind.unsqueeze(2).repeat(1, 1, dim)
+ feat = feat.gather(1, ind)
+ if mask is not None:
+ mask = mask.unsqueeze(2).expand_as(feat)
+ feat = feat[mask]
+ feat = feat.view(-1, dim)
+ return feat
+
+ def _local_maximum(self, heat, kernel=3):
+ """Extract local maximum pixel with given kernel.
+
+ Args:
+ heat (Tensor): Target heatmap.
+ kernel (int): Kernel size of max pooling. Default: 3.
+
+ Returns:
+            heat (Tensor): A heatmap where local maximum pixels keep their
+                own values and all other positions are set to 0.
+ """
+ pad = (kernel - 1) // 2
+ hmax = F.max_pool2d(heat, kernel, stride=1, padding=pad)
+ keep = (hmax == heat).float()
+ return heat * keep
+
+ def _transpose_and_gather_feat(self, feat, ind):
+ """Transpose and gather feature according to index.
+
+ Args:
+ feat (Tensor): Target feature map.
+ ind (Tensor): Target coord index.
+
+ Returns:
+ feat (Tensor): Transposed and gathered feature.
+ """
+ feat = feat.permute(0, 2, 3, 1).contiguous()
+ feat = feat.view(feat.size(0), -1, feat.size(3))
+ feat = self._gather_feat(feat, ind)
+ return feat
+
+ def _topk(self, scores, k=20):
+ """Get top k positions from heatmap.
+
+ Args:
+ scores (Tensor): Target heatmap with shape
+ [batch, num_classes, height, width].
+ k (int): Target number. Default: 20.
+
+ Returns:
+            tuple[torch.Tensor]: Scores, indexes, categories and coords of
+                the top k keypoints, containing the following Tensors:
+
+ - topk_scores (Tensor): Max scores of each topk keypoint.
+ - topk_inds (Tensor): Indexes of each topk keypoint.
+ - topk_clses (Tensor): Categories of each topk keypoint.
+ - topk_ys (Tensor): Y-coord of each topk keypoint.
+ - topk_xs (Tensor): X-coord of each topk keypoint.
+ """
+ batch, _, height, width = scores.size()
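+        # The heatmap is flattened to (batch, num_classes * H * W) so one
+        # topk call ranks keypoints over all classes and positions at once;
+        # class, row and column indices are recovered below by integer
+        # division and modulo.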
+ topk_scores, topk_inds = torch.topk(scores.view(batch, -1), k)
+ topk_clses = topk_inds // (height * width)
+ topk_inds = topk_inds % (height * width)
+ topk_ys = topk_inds // width
+ topk_xs = (topk_inds % width).int().float()
+ return topk_scores, topk_inds, topk_clses, topk_ys, topk_xs
+
+ def decode_heatmap(self,
+ tl_heat,
+ br_heat,
+ tl_off,
+ br_off,
+ tl_emb=None,
+ br_emb=None,
+ tl_centripetal_shift=None,
+ br_centripetal_shift=None,
+ img_meta=None,
+ k=100,
+ kernel=3,
+ distance_threshold=0.5,
+ num_dets=1000):
+ """Transform outputs for a single batch item into raw bbox predictions.
+
+ Args:
+ tl_heat (Tensor): Top-left corner heatmap for current level with
+ shape (N, num_classes, H, W).
+ br_heat (Tensor): Bottom-right corner heatmap for current level
+ with shape (N, num_classes, H, W).
+ tl_off (Tensor): Top-left corner offset for current level with
+ shape (N, corner_offset_channels, H, W).
+ br_off (Tensor): Bottom-right corner offset for current level with
+ shape (N, corner_offset_channels, H, W).
+ tl_emb (Tensor | None): Top-left corner embedding for current
+ level with shape (N, corner_emb_channels, H, W).
+ br_emb (Tensor | None): Bottom-right corner embedding for current
+ level with shape (N, corner_emb_channels, H, W).
+ tl_centripetal_shift (Tensor | None): Top-left centripetal shift
+ for current level with shape (N, 2, H, W).
+ br_centripetal_shift (Tensor | None): Bottom-right centripetal
+ shift for current level with shape (N, 2, H, W).
+ img_meta (dict): Meta information of current image, e.g.,
+ image size, scaling factor, etc.
+ k (int): Get top k corner keypoints from heatmap.
+            kernel (int): Max pooling kernel used to extract local maximum
+                pixels.
+            distance_threshold (float): Distance threshold. Top-left and
+                bottom-right corner keypoints whose feature distance is less
+                than the threshold are regarded as belonging to the same
+                object.
+ num_dets (int): Num of raw boxes before doing nms.
+
+ Returns:
+ tuple[torch.Tensor]: Decoded output of CornerHead, containing the
+ following Tensors:
+
+ - bboxes (Tensor): Coords of each box.
+ - scores (Tensor): Scores of each box.
+ - clses (Tensor): Categories of each box.
+ """
+ with_embedding = tl_emb is not None and br_emb is not None
+ with_centripetal_shift = (
+ tl_centripetal_shift is not None
+ and br_centripetal_shift is not None)
+ assert with_embedding + with_centripetal_shift == 1
+ batch, _, height, width = tl_heat.size()
+ inp_h, inp_w, _ = img_meta['pad_shape']
+
+ # perform nms on heatmaps
+ tl_heat = self._local_maximum(tl_heat, kernel=kernel)
+ br_heat = self._local_maximum(br_heat, kernel=kernel)
+
+ tl_scores, tl_inds, tl_clses, tl_ys, tl_xs = self._topk(tl_heat, k=k)
+ br_scores, br_inds, br_clses, br_ys, br_xs = self._topk(br_heat, k=k)
+
+        # We use repeat instead of expand here because expand is a
+        # shallow-copy function and can therefore produce unexpected test
+        # results; using expand decreases mAP by about 10% at test time
+        # compared to repeat.
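+        # Pairing every top-left corner with every bottom-right corner below
+        # yields a (batch, k, k) grid of candidate boxes; implausible pairs
+        # are rejected later via the class, geometry and distance masks.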
+ tl_ys = tl_ys.view(batch, k, 1).repeat(1, 1, k)
+ tl_xs = tl_xs.view(batch, k, 1).repeat(1, 1, k)
+ br_ys = br_ys.view(batch, 1, k).repeat(1, k, 1)
+ br_xs = br_xs.view(batch, 1, k).repeat(1, k, 1)
+
+ tl_off = self._transpose_and_gather_feat(tl_off, tl_inds)
+ tl_off = tl_off.view(batch, k, 1, 2)
+ br_off = self._transpose_and_gather_feat(br_off, br_inds)
+ br_off = br_off.view(batch, 1, k, 2)
+
+ tl_xs = tl_xs + tl_off[..., 0]
+ tl_ys = tl_ys + tl_off[..., 1]
+ br_xs = br_xs + br_off[..., 0]
+ br_ys = br_ys + br_off[..., 1]
+
+ if with_centripetal_shift:
+ tl_centripetal_shift = self._transpose_and_gather_feat(
+ tl_centripetal_shift, tl_inds).view(batch, k, 1, 2).exp()
+ br_centripetal_shift = self._transpose_and_gather_feat(
+ br_centripetal_shift, br_inds).view(batch, 1, k, 2).exp()
+
+ tl_ctxs = tl_xs + tl_centripetal_shift[..., 0]
+ tl_ctys = tl_ys + tl_centripetal_shift[..., 1]
+ br_ctxs = br_xs - br_centripetal_shift[..., 0]
+ br_ctys = br_ys - br_centripetal_shift[..., 1]
+
+ # all possible boxes based on top k corners (ignoring class)
+ tl_xs *= (inp_w / width)
+ tl_ys *= (inp_h / height)
+ br_xs *= (inp_w / width)
+ br_ys *= (inp_h / height)
+
+ if with_centripetal_shift:
+ tl_ctxs *= (inp_w / width)
+ tl_ctys *= (inp_h / height)
+ br_ctxs *= (inp_w / width)
+ br_ctys *= (inp_h / height)
+
+ x_off = img_meta['border'][2]
+ y_off = img_meta['border'][0]
+
+ tl_xs -= x_off
+ tl_ys -= y_off
+ br_xs -= x_off
+ br_ys -= y_off
+
+ tl_xs *= tl_xs.gt(0.0).type_as(tl_xs)
+ tl_ys *= tl_ys.gt(0.0).type_as(tl_ys)
+ br_xs *= br_xs.gt(0.0).type_as(br_xs)
+ br_ys *= br_ys.gt(0.0).type_as(br_ys)
+
+ bboxes = torch.stack((tl_xs, tl_ys, br_xs, br_ys), dim=3)
+ area_bboxes = ((br_xs - tl_xs) * (br_ys - tl_ys)).abs()
+
+ if with_centripetal_shift:
+ tl_ctxs -= x_off
+ tl_ctys -= y_off
+ br_ctxs -= x_off
+ br_ctys -= y_off
+
+ tl_ctxs *= tl_ctxs.gt(0.0).type_as(tl_ctxs)
+ tl_ctys *= tl_ctys.gt(0.0).type_as(tl_ctys)
+ br_ctxs *= br_ctxs.gt(0.0).type_as(br_ctxs)
+ br_ctys *= br_ctys.gt(0.0).type_as(br_ctys)
+
+ ct_bboxes = torch.stack((tl_ctxs, tl_ctys, br_ctxs, br_ctys),
+ dim=3)
+ area_ct_bboxes = ((br_ctxs - tl_ctxs) * (br_ctys - tl_ctys)).abs()
+
+ rcentral = torch.zeros_like(ct_bboxes)
+            # magic numbers from Section 4.1 of the paper
+            mu = torch.ones_like(area_bboxes) / 2.4
+            mu[area_bboxes > 3500] = 1 / 2.1  # larger bboxes get a smaller mu
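+            # rcentral below is each candidate box shrunk around its centre
+            # by mu; a corner pair survives only if both predicted
+            # centripetal centres fall inside this central region (checked
+            # via the *_ct*_inds masks) and the area ratio `dists` stays
+            # under distance_threshold.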
+
+ bboxes_center_x = (bboxes[..., 0] + bboxes[..., 2]) / 2
+ bboxes_center_y = (bboxes[..., 1] + bboxes[..., 3]) / 2
+ rcentral[..., 0] = bboxes_center_x - mu * (bboxes[..., 2] -
+ bboxes[..., 0]) / 2
+ rcentral[..., 1] = bboxes_center_y - mu * (bboxes[..., 3] -
+ bboxes[..., 1]) / 2
+ rcentral[..., 2] = bboxes_center_x + mu * (bboxes[..., 2] -
+ bboxes[..., 0]) / 2
+ rcentral[..., 3] = bboxes_center_y + mu * (bboxes[..., 3] -
+ bboxes[..., 1]) / 2
+ area_rcentral = ((rcentral[..., 2] - rcentral[..., 0]) *
+ (rcentral[..., 3] - rcentral[..., 1])).abs()
+ dists = area_ct_bboxes / area_rcentral
+
+ tl_ctx_inds = (ct_bboxes[..., 0] <= rcentral[..., 0]) | (
+ ct_bboxes[..., 0] >= rcentral[..., 2])
+ tl_cty_inds = (ct_bboxes[..., 1] <= rcentral[..., 1]) | (
+ ct_bboxes[..., 1] >= rcentral[..., 3])
+ br_ctx_inds = (ct_bboxes[..., 2] <= rcentral[..., 0]) | (
+ ct_bboxes[..., 2] >= rcentral[..., 2])
+ br_cty_inds = (ct_bboxes[..., 3] <= rcentral[..., 1]) | (
+ ct_bboxes[..., 3] >= rcentral[..., 3])
+
+ if with_embedding:
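+            # With associative embeddings, the grouping score is the L1
+            # distance between the two corners' 1-D embeddings; pairs whose
+            # distance exceeds distance_threshold are rejected further down.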
+ tl_emb = self._transpose_and_gather_feat(tl_emb, tl_inds)
+ tl_emb = tl_emb.view(batch, k, 1)
+ br_emb = self._transpose_and_gather_feat(br_emb, br_inds)
+ br_emb = br_emb.view(batch, 1, k)
+ dists = torch.abs(tl_emb - br_emb)
+
+ tl_scores = tl_scores.view(batch, k, 1).repeat(1, 1, k)
+ br_scores = br_scores.view(batch, 1, k).repeat(1, k, 1)
+
+ scores = (tl_scores + br_scores) / 2 # scores for all possible boxes
+
+ # tl and br should have same class
+ tl_clses = tl_clses.view(batch, k, 1).repeat(1, 1, k)
+ br_clses = br_clses.view(batch, 1, k).repeat(1, k, 1)
+ cls_inds = (tl_clses != br_clses)
+
+ # reject boxes based on distances
+ dist_inds = dists > distance_threshold
+
+ # reject boxes based on widths and heights
+ width_inds = (br_xs <= tl_xs)
+ height_inds = (br_ys <= tl_ys)
+
+ scores[cls_inds] = -1
+ scores[width_inds] = -1
+ scores[height_inds] = -1
+ scores[dist_inds] = -1
+ if with_centripetal_shift:
+ scores[tl_ctx_inds] = -1
+ scores[tl_cty_inds] = -1
+ scores[br_ctx_inds] = -1
+ scores[br_cty_inds] = -1
+
+ scores = scores.view(batch, -1)
+ scores, inds = torch.topk(scores, num_dets)
+ scores = scores.unsqueeze(2)
+
+ bboxes = bboxes.view(batch, -1, 4)
+ bboxes = self._gather_feat(bboxes, inds)
+
+ clses = tl_clses.contiguous().view(batch, -1, 1)
+ clses = self._gather_feat(clses, inds).float()
+
+ return bboxes, scores, clses
diff --git a/annotator/uniformer/mmdet/models/dense_heads/dense_test_mixins.py b/annotator/uniformer/mmdet/models/dense_heads/dense_test_mixins.py
new file mode 100644
index 0000000000000000000000000000000000000000..dd81364dec90e97c30a6e2220a5e0fe96373c5bd
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/dense_heads/dense_test_mixins.py
@@ -0,0 +1,100 @@
+from inspect import signature
+
+import torch
+
+from mmdet.core import bbox2result, bbox_mapping_back, multiclass_nms
+
+
+class BBoxTestMixin(object):
+ """Mixin class for test time augmentation of bboxes."""
+
+ def merge_aug_bboxes(self, aug_bboxes, aug_scores, img_metas):
+ """Merge augmented detection bboxes and scores.
+
+ Args:
+ aug_bboxes (list[Tensor]): shape (n, 4*#class)
+ aug_scores (list[Tensor] or None): shape (n, #class)
+            img_metas (list[list[dict]]): Meta info of each augmented image,
+                providing img_shape, scale_factor, flip and flip_direction.
+
+ Returns:
+ tuple: (bboxes, scores)
+ """
+ recovered_bboxes = []
+ for bboxes, img_info in zip(aug_bboxes, img_metas):
+ img_shape = img_info[0]['img_shape']
+ scale_factor = img_info[0]['scale_factor']
+ flip = img_info[0]['flip']
+ flip_direction = img_info[0]['flip_direction']
+ bboxes = bbox_mapping_back(bboxes, img_shape, scale_factor, flip,
+ flip_direction)
+ recovered_bboxes.append(bboxes)
+ bboxes = torch.cat(recovered_bboxes, dim=0)
+ if aug_scores is None:
+ return bboxes
+ else:
+ scores = torch.cat(aug_scores, dim=0)
+ return bboxes, scores
+
+ def aug_test_bboxes(self, feats, img_metas, rescale=False):
+ """Test det bboxes with test time augmentation.
+
+ Args:
+ feats (list[Tensor]): the outer list indicates test-time
+ augmentations and inner Tensor should have a shape NxCxHxW,
+ which contains features for all images in the batch.
+ img_metas (list[list[dict]]): the outer list indicates test-time
+ augs (multiscale, flip, etc.) and the inner list indicates
+ images in a batch. each dict has image information.
+ rescale (bool, optional): Whether to rescale the results.
+ Defaults to False.
+
+ Returns:
+ list[ndarray]: bbox results of each class
+ """
+ # check with_nms argument
+ gb_sig = signature(self.get_bboxes)
+ gb_args = [p.name for p in gb_sig.parameters.values()]
+ if hasattr(self, '_get_bboxes'):
+ gbs_sig = signature(self._get_bboxes)
+ else:
+ gbs_sig = signature(self._get_bboxes_single)
+ gbs_args = [p.name for p in gbs_sig.parameters.values()]
+ assert ('with_nms' in gb_args) and ('with_nms' in gbs_args), \
+ f'{self.__class__.__name__}' \
+ ' does not support test-time augmentation'
+
+ aug_bboxes = []
+ aug_scores = []
+ aug_factors = [] # score_factors for NMS
+ for x, img_meta in zip(feats, img_metas):
+ # only one image in the batch
+ outs = self.forward(x)
+ bbox_inputs = outs + (img_meta, self.test_cfg, False, False)
+ bbox_outputs = self.get_bboxes(*bbox_inputs)[0]
+ aug_bboxes.append(bbox_outputs[0])
+ aug_scores.append(bbox_outputs[1])
+            # bbox_outputs of some detectors (e.g., ATSS, FCOS, YOLOv3)
+            # contains an additional element used to adjust scores before NMS
+ if len(bbox_outputs) >= 3:
+ aug_factors.append(bbox_outputs[2])
+
+ # after merging, bboxes will be rescaled to the original image size
+ merged_bboxes, merged_scores = self.merge_aug_bboxes(
+ aug_bboxes, aug_scores, img_metas)
+ merged_factors = torch.cat(aug_factors, dim=0) if aug_factors else None
+ det_bboxes, det_labels = multiclass_nms(
+ merged_bboxes,
+ merged_scores,
+ self.test_cfg.score_thr,
+ self.test_cfg.nms,
+ self.test_cfg.max_per_img,
+ score_factors=merged_factors)
+
+ if rescale:
+ _det_bboxes = det_bboxes
+ else:
+ _det_bboxes = det_bboxes.clone()
+ _det_bboxes[:, :4] *= det_bboxes.new_tensor(
+ img_metas[0][0]['scale_factor'])
+ bbox_results = bbox2result(_det_bboxes, det_labels, self.num_classes)
+ return bbox_results
diff --git a/annotator/uniformer/mmdet/models/dense_heads/embedding_rpn_head.py b/annotator/uniformer/mmdet/models/dense_heads/embedding_rpn_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..200ce8d20c5503f98c5c21f30bb9d00437e25f34
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/dense_heads/embedding_rpn_head.py
@@ -0,0 +1,100 @@
+import torch
+import torch.nn as nn
+
+from mmdet.models.builder import HEADS
+from ...core import bbox_cxcywh_to_xyxy
+
+
+@HEADS.register_module()
+class EmbeddingRPNHead(nn.Module):
+ """RPNHead in the `Sparse R-CNN `_ .
+
+ Unlike traditional RPNHead, this module does not need FPN input, but just
+ decode `init_proposal_bboxes` and expand the first dimension of
+ `init_proposal_bboxes` and `init_proposal_features` to the batch_size.
+
+ Args:
+ num_proposals (int): Number of init_proposals. Default 100.
+ proposal_feature_channel (int): Channel number of
+ init_proposal_feature. Defaults to 256.
+ """
+
+ def __init__(self,
+ num_proposals=100,
+ proposal_feature_channel=256,
+ **kwargs):
+ super(EmbeddingRPNHead, self).__init__()
+ self.num_proposals = num_proposals
+ self.proposal_feature_channel = proposal_feature_channel
+ self._init_layers()
+
+ def _init_layers(self):
+ """Initialize a sparse set of proposal boxes and proposal features."""
+ self.init_proposal_bboxes = nn.Embedding(self.num_proposals, 4)
+ self.init_proposal_features = nn.Embedding(
+ self.num_proposals, self.proposal_feature_channel)
+
+ def init_weights(self):
+ """Initialize the init_proposal_bboxes as normalized.
+
+ [c_x, c_y, w, h], and we initialize it to the size of the entire
+ image.
+ """
+ nn.init.constant_(self.init_proposal_bboxes.weight[:, :2], 0.5)
+ nn.init.constant_(self.init_proposal_bboxes.weight[:, 2:], 1)
+
+ def _decode_init_proposals(self, imgs, img_metas):
+ """Decode init_proposal_bboxes according to the size of images and
+ expand dimension of init_proposal_features to batch_size.
+
+ Args:
+ imgs (list[Tensor]): List of FPN features.
+ img_metas (list[dict]): List of meta-information of
+ images. Need the img_shape to decode the init_proposals.
+
+ Returns:
+ Tuple(Tensor):
+
+ - proposals (Tensor): Decoded proposal bboxes,
+ has shape (batch_size, num_proposals, 4).
+ - init_proposal_features (Tensor): Expanded proposal
+ features, has shape
+ (batch_size, num_proposals, proposal_feature_channel).
+ - imgs_whwh (Tensor): Tensor with shape
+ (batch_size, 4), the dimension means
+ [img_width, img_height, img_width, img_height].
+ """
+ proposals = self.init_proposal_bboxes.weight.clone()
+ proposals = bbox_cxcywh_to_xyxy(proposals)
+ num_imgs = len(imgs[0])
+ imgs_whwh = []
+ for meta in img_metas:
+ h, w, _ = meta['img_shape']
+ imgs_whwh.append(imgs[0].new_tensor([[w, h, w, h]]))
+ imgs_whwh = torch.cat(imgs_whwh, dim=0)
+ imgs_whwh = imgs_whwh[:, None, :]
+
+        # imgs_whwh has shape (batch_size, 1, 4)
+        # The shape of proposals changes from (num_proposals, 4)
+        # to (batch_size, num_proposals, 4)
+ proposals = proposals * imgs_whwh
+
+ init_proposal_features = self.init_proposal_features.weight.clone()
+ init_proposal_features = init_proposal_features[None].expand(
+ num_imgs, *init_proposal_features.size())
+ return proposals, init_proposal_features, imgs_whwh
+
+ def forward_dummy(self, img, img_metas):
+ """Dummy forward function.
+
+ Used in flops calculation.
+ """
+ return self._decode_init_proposals(img, img_metas)
+
+ def forward_train(self, img, img_metas):
+ """Forward function in training stage."""
+ return self._decode_init_proposals(img, img_metas)
+
+ def simple_test_rpn(self, img, img_metas):
+ """Forward function in testing stage."""
+ return self._decode_init_proposals(img, img_metas)
diff --git a/annotator/uniformer/mmdet/models/dense_heads/fcos_head.py b/annotator/uniformer/mmdet/models/dense_heads/fcos_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..905a703507f279ac8d34cff23c99af33c0d5f973
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/dense_heads/fcos_head.py
@@ -0,0 +1,629 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from mmcv.cnn import Scale, normal_init
+from mmcv.runner import force_fp32
+
+from mmdet.core import distance2bbox, multi_apply, multiclass_nms, reduce_mean
+from ..builder import HEADS, build_loss
+from .anchor_free_head import AnchorFreeHead
+
+INF = 1e8
+
+
+@HEADS.register_module()
+class FCOSHead(AnchorFreeHead):
+ """Anchor-free head used in `FCOS `_.
+
+ The FCOS head does not use anchor boxes. Instead bounding boxes are
+ predicted at each pixel and a centerness measure is used to suppress
+ low-quality predictions.
+    Here, norm_on_bbox, centerness_on_reg and dcn_on_last_conv are training
+    tricks used in the official repo, which can bring mAP gains of up to
+    4.9 points. Please see https://github.com/tianzhi0549/FCOS for more
+    detail.
+
+ Args:
+ num_classes (int): Number of categories excluding the background
+ category.
+ in_channels (int): Number of channels in the input feature map.
+ strides (list[int] | list[tuple[int, int]]): Strides of points
+ in multiple feature levels. Default: (4, 8, 16, 32, 64).
+ regress_ranges (tuple[tuple[int, int]]): Regress range of multiple
+ level points.
+ center_sampling (bool): If true, use center sampling. Default: False.
+ center_sample_radius (float): Radius of center sampling. Default: 1.5.
+ norm_on_bbox (bool): If true, normalize the regression targets
+ with FPN strides. Default: False.
+ centerness_on_reg (bool): If true, position centerness on the
+ regress branch. Please refer to https://github.com/tianzhi0549/FCOS/issues/89#issuecomment-516877042.
+ Default: False.
+ conv_bias (bool | str): If specified as `auto`, it will be decided by the
+ norm_cfg. Bias of conv will be set as True if `norm_cfg` is None, otherwise
+ False. Default: "auto".
+ loss_cls (dict): Config of classification loss.
+ loss_bbox (dict): Config of localization loss.
+ loss_centerness (dict): Config of centerness loss.
+ norm_cfg (dict): dictionary to construct and config norm layer.
+ Default: norm_cfg=dict(type='GN', num_groups=32, requires_grad=True).
+
+ Example:
+ >>> self = FCOSHead(11, 7)
+ >>> feats = [torch.rand(1, 7, s, s) for s in [4, 8, 16, 32, 64]]
+ >>> cls_score, bbox_pred, centerness = self.forward(feats)
+ >>> assert len(cls_score) == len(self.scales)
+ """ # noqa: E501
+
+ def __init__(self,
+ num_classes,
+ in_channels,
+ regress_ranges=((-1, 64), (64, 128), (128, 256), (256, 512),
+ (512, INF)),
+ center_sampling=False,
+ center_sample_radius=1.5,
+ norm_on_bbox=False,
+ centerness_on_reg=False,
+ loss_cls=dict(
+ type='FocalLoss',
+ use_sigmoid=True,
+ gamma=2.0,
+ alpha=0.25,
+ loss_weight=1.0),
+ loss_bbox=dict(type='IoULoss', loss_weight=1.0),
+ loss_centerness=dict(
+ type='CrossEntropyLoss',
+ use_sigmoid=True,
+ loss_weight=1.0),
+ norm_cfg=dict(type='GN', num_groups=32, requires_grad=True),
+ **kwargs):
+ self.regress_ranges = regress_ranges
+ self.center_sampling = center_sampling
+ self.center_sample_radius = center_sample_radius
+ self.norm_on_bbox = norm_on_bbox
+ self.centerness_on_reg = centerness_on_reg
+ super().__init__(
+ num_classes,
+ in_channels,
+ loss_cls=loss_cls,
+ loss_bbox=loss_bbox,
+ norm_cfg=norm_cfg,
+ **kwargs)
+ self.loss_centerness = build_loss(loss_centerness)
+
+ def _init_layers(self):
+ """Initialize layers of the head."""
+ super()._init_layers()
+ self.conv_centerness = nn.Conv2d(self.feat_channels, 1, 3, padding=1)
+ self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides])
+
+ def init_weights(self):
+ """Initialize weights of the head."""
+ super().init_weights()
+ normal_init(self.conv_centerness, std=0.01)
+
+ def forward(self, feats):
+ """Forward features from the upstream network.
+
+ Args:
+ feats (tuple[Tensor]): Features from the upstream network, each is
+ a 4D-tensor.
+
+ Returns:
+ tuple:
+ cls_scores (list[Tensor]): Box scores for each scale level, \
+ each is a 4D-tensor, the channel number is \
+ num_points * num_classes.
+ bbox_preds (list[Tensor]): Box energies / deltas for each \
+ scale level, each is a 4D-tensor, the channel number is \
+ num_points * 4.
+ centernesses (list[Tensor]): centerness for each scale level, \
+ each is a 4D-tensor, the channel number is num_points * 1.
+ """
+ return multi_apply(self.forward_single, feats, self.scales,
+ self.strides)
+
+ def forward_single(self, x, scale, stride):
+ """Forward features of a single scale level.
+
+ Args:
+ x (Tensor): FPN feature maps of the specified stride.
+ scale (:obj: `mmcv.cnn.Scale`): Learnable scale module to resize
+ the bbox prediction.
+ stride (int): The corresponding stride for feature maps, only
+ used to normalize the bbox prediction when self.norm_on_bbox
+ is True.
+
+ Returns:
+ tuple: scores for each class, bbox predictions and centerness \
+ predictions of input feature maps.
+ """
+ cls_score, bbox_pred, cls_feat, reg_feat = super().forward_single(x)
+ if self.centerness_on_reg:
+ centerness = self.conv_centerness(reg_feat)
+ else:
+ centerness = self.conv_centerness(cls_feat)
+ # scale the bbox_pred of different level
+ # float to avoid overflow when enabling FP16
+ bbox_pred = scale(bbox_pred).float()
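+        # With norm_on_bbox, the regression targets were divided by the FPN
+        # stride in get_targets, so the ReLU-ed prediction is only multiplied
+        # back by the stride at test time; otherwise the classic exp()
+        # parameterisation from the paper is used.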
+ if self.norm_on_bbox:
+ bbox_pred = F.relu(bbox_pred)
+ if not self.training:
+ bbox_pred *= stride
+ else:
+ bbox_pred = bbox_pred.exp()
+ return cls_score, bbox_pred, centerness
+
+ @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'centernesses'))
+ def loss(self,
+ cls_scores,
+ bbox_preds,
+ centernesses,
+ gt_bboxes,
+ gt_labels,
+ img_metas,
+ gt_bboxes_ignore=None):
+ """Compute loss of the head.
+
+ Args:
+ cls_scores (list[Tensor]): Box scores for each scale level,
+ each is a 4D-tensor, the channel number is
+ num_points * num_classes.
+ bbox_preds (list[Tensor]): Box energies / deltas for each scale
+ level, each is a 4D-tensor, the channel number is
+ num_points * 4.
+ centernesses (list[Tensor]): centerness for each scale level, each
+ is a 4D-tensor, the channel number is num_points * 1.
+ gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
+ shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
+ gt_labels (list[Tensor]): class indices corresponding to each box
+ img_metas (list[dict]): Meta information of each image, e.g.,
+ image size, scaling factor, etc.
+ gt_bboxes_ignore (None | list[Tensor]): specify which bounding
+ boxes can be ignored when computing the loss.
+
+ Returns:
+ dict[str, Tensor]: A dictionary of loss components.
+ """
+ assert len(cls_scores) == len(bbox_preds) == len(centernesses)
+ featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
+ all_level_points = self.get_points(featmap_sizes, bbox_preds[0].dtype,
+ bbox_preds[0].device)
+ labels, bbox_targets = self.get_targets(all_level_points, gt_bboxes,
+ gt_labels)
+
+ num_imgs = cls_scores[0].size(0)
+ # flatten cls_scores, bbox_preds and centerness
+ flatten_cls_scores = [
+ cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels)
+ for cls_score in cls_scores
+ ]
+ flatten_bbox_preds = [
+ bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4)
+ for bbox_pred in bbox_preds
+ ]
+ flatten_centerness = [
+ centerness.permute(0, 2, 3, 1).reshape(-1)
+ for centerness in centernesses
+ ]
+ flatten_cls_scores = torch.cat(flatten_cls_scores)
+ flatten_bbox_preds = torch.cat(flatten_bbox_preds)
+ flatten_centerness = torch.cat(flatten_centerness)
+ flatten_labels = torch.cat(labels)
+ flatten_bbox_targets = torch.cat(bbox_targets)
+ # repeat points to align with bbox_preds
+ flatten_points = torch.cat(
+ [points.repeat(num_imgs, 1) for points in all_level_points])
+
+ # FG cat_id: [0, num_classes -1], BG cat_id: num_classes
+ bg_class_ind = self.num_classes
+ pos_inds = ((flatten_labels >= 0)
+ & (flatten_labels < bg_class_ind)).nonzero().reshape(-1)
+ num_pos = torch.tensor(
+ len(pos_inds), dtype=torch.float, device=bbox_preds[0].device)
+ num_pos = max(reduce_mean(num_pos), 1.0)
+ loss_cls = self.loss_cls(
+ flatten_cls_scores, flatten_labels, avg_factor=num_pos)
+
+ pos_bbox_preds = flatten_bbox_preds[pos_inds]
+ pos_centerness = flatten_centerness[pos_inds]
+
+ if len(pos_inds) > 0:
+ pos_bbox_targets = flatten_bbox_targets[pos_inds]
+ pos_centerness_targets = self.centerness_target(pos_bbox_targets)
+ pos_points = flatten_points[pos_inds]
+ pos_decoded_bbox_preds = distance2bbox(pos_points, pos_bbox_preds)
+ pos_decoded_target_preds = distance2bbox(pos_points,
+ pos_bbox_targets)
+ # centerness weighted iou loss
+ centerness_denorm = max(
+ reduce_mean(pos_centerness_targets.sum().detach()), 1e-6)
+ loss_bbox = self.loss_bbox(
+ pos_decoded_bbox_preds,
+ pos_decoded_target_preds,
+ weight=pos_centerness_targets,
+ avg_factor=centerness_denorm)
+ loss_centerness = self.loss_centerness(
+ pos_centerness, pos_centerness_targets, avg_factor=num_pos)
+ else:
+ loss_bbox = pos_bbox_preds.sum()
+ loss_centerness = pos_centerness.sum()
+
+ return dict(
+ loss_cls=loss_cls,
+ loss_bbox=loss_bbox,
+ loss_centerness=loss_centerness)
+
+ @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'centernesses'))
+ def get_bboxes(self,
+ cls_scores,
+ bbox_preds,
+ centernesses,
+ img_metas,
+ cfg=None,
+ rescale=False,
+ with_nms=True):
+ """Transform network output for a batch into bbox predictions.
+
+ Args:
+ cls_scores (list[Tensor]): Box scores for each scale level
+ with shape (N, num_points * num_classes, H, W).
+ bbox_preds (list[Tensor]): Box energies / deltas for each scale
+ level with shape (N, num_points * 4, H, W).
+ centernesses (list[Tensor]): Centerness for each scale level with
+ shape (N, num_points * 1, H, W).
+ img_metas (list[dict]): Meta information of each image, e.g.,
+ image size, scaling factor, etc.
+ cfg (mmcv.Config | None): Test / postprocessing configuration,
+ if None, test_cfg would be used. Default: None.
+ rescale (bool): If True, return boxes in original image space.
+ Default: False.
+ with_nms (bool): If True, do nms before return boxes.
+ Default: True.
+
+ Returns:
+ list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple.
+                The first item is an (n, 5) tensor, where the 5 columns are
+                (tl_x, tl_y, br_x, br_y, score) and the score is between 0
+                and 1.
+ The shape of the second tensor in the tuple is (n,), and
+ each element represents the class label of the corresponding
+ box.
+ """
+ assert len(cls_scores) == len(bbox_preds)
+ num_levels = len(cls_scores)
+
+ featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
+ mlvl_points = self.get_points(featmap_sizes, bbox_preds[0].dtype,
+ bbox_preds[0].device)
+
+ cls_score_list = [cls_scores[i].detach() for i in range(num_levels)]
+ bbox_pred_list = [bbox_preds[i].detach() for i in range(num_levels)]
+ centerness_pred_list = [
+ centernesses[i].detach() for i in range(num_levels)
+ ]
+ if torch.onnx.is_in_onnx_export():
+ assert len(
+ img_metas
+ ) == 1, 'Only support one input image while in exporting to ONNX'
+ img_shapes = img_metas[0]['img_shape_for_onnx']
+ else:
+ img_shapes = [
+ img_metas[i]['img_shape']
+ for i in range(cls_scores[0].shape[0])
+ ]
+ scale_factors = [
+ img_metas[i]['scale_factor'] for i in range(cls_scores[0].shape[0])
+ ]
+ result_list = self._get_bboxes(cls_score_list, bbox_pred_list,
+ centerness_pred_list, mlvl_points,
+ img_shapes, scale_factors, cfg, rescale,
+ with_nms)
+ return result_list
+
+ def _get_bboxes(self,
+ cls_scores,
+ bbox_preds,
+ centernesses,
+ mlvl_points,
+ img_shapes,
+ scale_factors,
+ cfg,
+ rescale=False,
+ with_nms=True):
+ """Transform outputs for a single batch item into bbox predictions.
+
+ Args:
+ cls_scores (list[Tensor]): Box scores for a single scale level
+ with shape (N, num_points * num_classes, H, W).
+ bbox_preds (list[Tensor]): Box energies / deltas for a single scale
+ level with shape (N, num_points * 4, H, W).
+ centernesses (list[Tensor]): Centerness for a single scale level
+                with shape (N, num_points * 1, H, W).
+ mlvl_points (list[Tensor]): Box reference for a single scale level
+ with shape (num_total_points, 4).
+ img_shapes (list[tuple[int]]): Shape of the input image,
+ list[(height, width, 3)].
+            scale_factors (list[ndarray]): Scale factor of each image,
+                arranged as (w_scale, h_scale, w_scale, h_scale).
+ cfg (mmcv.Config | None): Test / postprocessing configuration,
+ if None, test_cfg would be used.
+ rescale (bool): If True, return boxes in original image space.
+ Default: False.
+ with_nms (bool): If True, do nms before return boxes.
+ Default: True.
+
+ Returns:
+ tuple(Tensor):
+ det_bboxes (Tensor): BBox predictions in shape (n, 5), where
+ the first 4 columns are bounding box positions
+ (tl_x, tl_y, br_x, br_y) and the 5-th column is a score
+ between 0 and 1.
+ det_labels (Tensor): A (n,) tensor where each item is the
+ predicted class label of the corresponding box.
+ """
+ cfg = self.test_cfg if cfg is None else cfg
+ assert len(cls_scores) == len(bbox_preds) == len(mlvl_points)
+ device = cls_scores[0].device
+ batch_size = cls_scores[0].shape[0]
+ # convert to tensor to keep tracing
+ nms_pre_tensor = torch.tensor(
+ cfg.get('nms_pre', -1), device=device, dtype=torch.long)
+ mlvl_bboxes = []
+ mlvl_scores = []
+ mlvl_centerness = []
+ for cls_score, bbox_pred, centerness, points in zip(
+ cls_scores, bbox_preds, centernesses, mlvl_points):
+ assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
+ scores = cls_score.permute(0, 2, 3, 1).reshape(
+ batch_size, -1, self.cls_out_channels).sigmoid()
+ centerness = centerness.permute(0, 2, 3,
+ 1).reshape(batch_size,
+ -1).sigmoid()
+
+ bbox_pred = bbox_pred.permute(0, 2, 3,
+ 1).reshape(batch_size, -1, 4)
+ # Always keep topk op for dynamic input in onnx
+ if nms_pre_tensor > 0 and (torch.onnx.is_in_onnx_export()
+ or scores.shape[-2] > nms_pre_tensor):
+ from torch import _shape_as_tensor
+ # keep shape as tensor and get k
+ num_anchor = _shape_as_tensor(scores)[-2].to(device)
+ nms_pre = torch.where(nms_pre_tensor < num_anchor,
+ nms_pre_tensor, num_anchor)
+
+ max_scores, _ = (scores * centerness[..., None]).max(-1)
+ _, topk_inds = max_scores.topk(nms_pre)
+ points = points[topk_inds, :]
+ batch_inds = torch.arange(batch_size).view(
+ -1, 1).expand_as(topk_inds).long()
+ bbox_pred = bbox_pred[batch_inds, topk_inds, :]
+ scores = scores[batch_inds, topk_inds, :]
+ centerness = centerness[batch_inds, topk_inds]
+
+ bboxes = distance2bbox(points, bbox_pred, max_shape=img_shapes)
+ mlvl_bboxes.append(bboxes)
+ mlvl_scores.append(scores)
+ mlvl_centerness.append(centerness)
+
+ batch_mlvl_bboxes = torch.cat(mlvl_bboxes, dim=1)
+ if rescale:
+ batch_mlvl_bboxes /= batch_mlvl_bboxes.new_tensor(
+ scale_factors).unsqueeze(1)
+ batch_mlvl_scores = torch.cat(mlvl_scores, dim=1)
+ batch_mlvl_centerness = torch.cat(mlvl_centerness, dim=1)
+
+        # Set the max number of boxes to be fed into nms in deployment
+ deploy_nms_pre = cfg.get('deploy_nms_pre', -1)
+ if deploy_nms_pre > 0 and torch.onnx.is_in_onnx_export():
+ batch_mlvl_scores, _ = (
+ batch_mlvl_scores *
+ batch_mlvl_centerness.unsqueeze(2).expand_as(batch_mlvl_scores)
+ ).max(-1)
+ _, topk_inds = batch_mlvl_scores.topk(deploy_nms_pre)
+ batch_inds = torch.arange(batch_mlvl_scores.shape[0]).view(
+ -1, 1).expand_as(topk_inds)
+ batch_mlvl_scores = batch_mlvl_scores[batch_inds, topk_inds, :]
+ batch_mlvl_bboxes = batch_mlvl_bboxes[batch_inds, topk_inds, :]
+ batch_mlvl_centerness = batch_mlvl_centerness[batch_inds,
+ topk_inds]
+
+        # Note that FG labels are set to [0, num_classes - 1] since
+        # mmdet v2.0, while the BG cat_id is num_classes
+ padding = batch_mlvl_scores.new_zeros(batch_size,
+ batch_mlvl_scores.shape[1], 1)
+ batch_mlvl_scores = torch.cat([batch_mlvl_scores, padding], dim=-1)
+
+ if with_nms:
+ det_results = []
+ for (mlvl_bboxes, mlvl_scores,
+ mlvl_centerness) in zip(batch_mlvl_bboxes, batch_mlvl_scores,
+ batch_mlvl_centerness):
+ det_bbox, det_label = multiclass_nms(
+ mlvl_bboxes,
+ mlvl_scores,
+ cfg.score_thr,
+ cfg.nms,
+ cfg.max_per_img,
+ score_factors=mlvl_centerness)
+ det_results.append(tuple([det_bbox, det_label]))
+ else:
+ det_results = [
+ tuple(mlvl_bs)
+ for mlvl_bs in zip(batch_mlvl_bboxes, batch_mlvl_scores,
+ batch_mlvl_centerness)
+ ]
+ return det_results
+
+ def _get_points_single(self,
+ featmap_size,
+ stride,
+ dtype,
+ device,
+ flatten=False):
+ """Get points according to feature map sizes."""
+ y, x = super()._get_points_single(featmap_size, stride, dtype, device)
+ points = torch.stack((x.reshape(-1) * stride, y.reshape(-1) * stride),
+ dim=-1) + stride // 2
+ return points
+
+ def get_targets(self, points, gt_bboxes_list, gt_labels_list):
+ """Compute regression, classification and centerness targets for points
+ in multiple images.
+
+ Args:
+ points (list[Tensor]): Points of each fpn level, each has shape
+ (num_points, 2).
+ gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image,
+ each has shape (num_gt, 4).
+ gt_labels_list (list[Tensor]): Ground truth labels of each box,
+ each has shape (num_gt,).
+
+ Returns:
+ tuple:
+ concat_lvl_labels (list[Tensor]): Labels of each level. \
+ concat_lvl_bbox_targets (list[Tensor]): BBox targets of each \
+ level.
+ """
+ assert len(points) == len(self.regress_ranges)
+ num_levels = len(points)
+ # expand regress ranges to align with points
+ expanded_regress_ranges = [
+ points[i].new_tensor(self.regress_ranges[i])[None].expand_as(
+ points[i]) for i in range(num_levels)
+ ]
+ # concat all levels points and regress ranges
+ concat_regress_ranges = torch.cat(expanded_regress_ranges, dim=0)
+ concat_points = torch.cat(points, dim=0)
+
+ # the number of points per img, per lvl
+ num_points = [center.size(0) for center in points]
+
+ # get labels and bbox_targets of each image
+ labels_list, bbox_targets_list = multi_apply(
+ self._get_target_single,
+ gt_bboxes_list,
+ gt_labels_list,
+ points=concat_points,
+ regress_ranges=concat_regress_ranges,
+ num_points_per_lvl=num_points)
+
+ # split to per img, per level
+ labels_list = [labels.split(num_points, 0) for labels in labels_list]
+ bbox_targets_list = [
+ bbox_targets.split(num_points, 0)
+ for bbox_targets in bbox_targets_list
+ ]
+
+ # concat per level image
+ concat_lvl_labels = []
+ concat_lvl_bbox_targets = []
+ for i in range(num_levels):
+ concat_lvl_labels.append(
+ torch.cat([labels[i] for labels in labels_list]))
+ bbox_targets = torch.cat(
+ [bbox_targets[i] for bbox_targets in bbox_targets_list])
+ if self.norm_on_bbox:
+ bbox_targets = bbox_targets / self.strides[i]
+ concat_lvl_bbox_targets.append(bbox_targets)
+ return concat_lvl_labels, concat_lvl_bbox_targets
+
+ def _get_target_single(self, gt_bboxes, gt_labels, points, regress_ranges,
+ num_points_per_lvl):
+ """Compute regression and classification targets for a single image."""
+ num_points = points.size(0)
+ num_gts = gt_labels.size(0)
+ if num_gts == 0:
+ return gt_labels.new_full((num_points,), self.num_classes), \
+ gt_bboxes.new_zeros((num_points, 4))
+
+ areas = (gt_bboxes[:, 2] - gt_bboxes[:, 0]) * (
+ gt_bboxes[:, 3] - gt_bboxes[:, 1])
+ # TODO: figure out why these two are different
+ # areas = areas[None].expand(num_points, num_gts)
+ areas = areas[None].repeat(num_points, 1)
+ regress_ranges = regress_ranges[:, None, :].expand(
+ num_points, num_gts, 2)
+ gt_bboxes = gt_bboxes[None].expand(num_points, num_gts, 4)
+ xs, ys = points[:, 0], points[:, 1]
+ xs = xs[:, None].expand(num_points, num_gts)
+ ys = ys[:, None].expand(num_points, num_gts)
+
+ left = xs - gt_bboxes[..., 0]
+ right = gt_bboxes[..., 2] - xs
+ top = ys - gt_bboxes[..., 1]
+ bottom = gt_bboxes[..., 3] - ys
+ bbox_targets = torch.stack((left, top, right, bottom), -1)
+
+ if self.center_sampling:
+ # condition1: inside a `center bbox`
+ radius = self.center_sample_radius
+ center_xs = (gt_bboxes[..., 0] + gt_bboxes[..., 2]) / 2
+ center_ys = (gt_bboxes[..., 1] + gt_bboxes[..., 3]) / 2
+ center_gts = torch.zeros_like(gt_bboxes)
+ stride = center_xs.new_zeros(center_xs.shape)
+
+ # project the points on current lvl back to the `original` sizes
+ lvl_begin = 0
+ for lvl_idx, num_points_lvl in enumerate(num_points_per_lvl):
+ lvl_end = lvl_begin + num_points_lvl
+ stride[lvl_begin:lvl_end] = self.strides[lvl_idx] * radius
+ lvl_begin = lvl_end
+
+ x_mins = center_xs - stride
+ y_mins = center_ys - stride
+ x_maxs = center_xs + stride
+ y_maxs = center_ys + stride
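+            # Clip the sampled centre box to the GT box so the positive
+            # region never extends outside the ground-truth bbox.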
+ center_gts[..., 0] = torch.where(x_mins > gt_bboxes[..., 0],
+ x_mins, gt_bboxes[..., 0])
+ center_gts[..., 1] = torch.where(y_mins > gt_bboxes[..., 1],
+ y_mins, gt_bboxes[..., 1])
+ center_gts[..., 2] = torch.where(x_maxs > gt_bboxes[..., 2],
+ gt_bboxes[..., 2], x_maxs)
+ center_gts[..., 3] = torch.where(y_maxs > gt_bboxes[..., 3],
+ gt_bboxes[..., 3], y_maxs)
+
+ cb_dist_left = xs - center_gts[..., 0]
+ cb_dist_right = center_gts[..., 2] - xs
+ cb_dist_top = ys - center_gts[..., 1]
+ cb_dist_bottom = center_gts[..., 3] - ys
+ center_bbox = torch.stack(
+ (cb_dist_left, cb_dist_top, cb_dist_right, cb_dist_bottom), -1)
+ inside_gt_bbox_mask = center_bbox.min(-1)[0] > 0
+ else:
+ # condition1: inside a gt bbox
+ inside_gt_bbox_mask = bbox_targets.min(-1)[0] > 0
+
+ # condition2: limit the regression range for each location
+ max_regress_distance = bbox_targets.max(-1)[0]
+ inside_regress_range = (
+ (max_regress_distance >= regress_ranges[..., 0])
+ & (max_regress_distance <= regress_ranges[..., 1]))
+
+        # if there is still more than one object for a location,
+        # we choose the one with minimal area
+ areas[inside_gt_bbox_mask == 0] = INF
+ areas[inside_regress_range == 0] = INF
+ min_area, min_area_inds = areas.min(dim=1)
+
+ labels = gt_labels[min_area_inds]
+ labels[min_area == INF] = self.num_classes # set as BG
+ bbox_targets = bbox_targets[range(num_points), min_area_inds]
+
+ return labels, bbox_targets
+
+ def centerness_target(self, pos_bbox_targets):
+ """Compute centerness targets.
+
+ Args:
+ pos_bbox_targets (Tensor): BBox targets of positive bboxes in shape
+ (num_pos, 4)
+
+ Returns:
+ Tensor: Centerness target.
+ """
+ # only calculate pos centerness targets, otherwise there may be nan
+ left_right = pos_bbox_targets[:, [0, 2]]
+ top_bottom = pos_bbox_targets[:, [1, 3]]
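+        # centerness = sqrt(min(l, r) / max(l, r) * min(t, b) / max(t, b)),
+        # which equals 1 at the box centre and decays towards the borders.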
+ centerness_targets = (
+ left_right.min(dim=-1)[0] / left_right.max(dim=-1)[0]) * (
+ top_bottom.min(dim=-1)[0] / top_bottom.max(dim=-1)[0])
+ return torch.sqrt(centerness_targets)
diff --git a/annotator/uniformer/mmdet/models/dense_heads/fovea_head.py b/annotator/uniformer/mmdet/models/dense_heads/fovea_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..c8ccea787cba3d092284d4a5e209adaf6521c86a
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/dense_heads/fovea_head.py
@@ -0,0 +1,341 @@
+import torch
+import torch.nn as nn
+from mmcv.cnn import ConvModule, normal_init
+from mmcv.ops import DeformConv2d
+
+from mmdet.core import multi_apply, multiclass_nms
+from ..builder import HEADS
+from .anchor_free_head import AnchorFreeHead
+
+INF = 1e8
+
+
+class FeatureAlign(nn.Module):
+
+ def __init__(self,
+ in_channels,
+ out_channels,
+ kernel_size=3,
+ deform_groups=4):
+ super(FeatureAlign, self).__init__()
+ offset_channels = kernel_size * kernel_size * 2
+ self.conv_offset = nn.Conv2d(
+ 4, deform_groups * offset_channels, 1, bias=False)
+ self.conv_adaption = DeformConv2d(
+ in_channels,
+ out_channels,
+ kernel_size=kernel_size,
+ padding=(kernel_size - 1) // 2,
+ deform_groups=deform_groups)
+ self.relu = nn.ReLU(inplace=True)
+
+ def init_weights(self):
+ normal_init(self.conv_offset, std=0.1)
+ normal_init(self.conv_adaption, std=0.01)
+
+ def forward(self, x, shape):
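+        # `shape` is the 4-channel bbox prediction (exponentiated by the
+        # caller); a 1x1 conv turns it into deformable-conv offsets so the
+        # classification features are sampled from the predicted box extent.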
+ offset = self.conv_offset(shape)
+ x = self.relu(self.conv_adaption(x, offset))
+ return x
+
+
+@HEADS.register_module()
+class FoveaHead(AnchorFreeHead):
+ """FoveaBox: Beyond Anchor-based Object Detector
+ https://arxiv.org/abs/1904.03797
+ """
+
+ def __init__(self,
+ num_classes,
+ in_channels,
+ base_edge_list=(16, 32, 64, 128, 256),
+ scale_ranges=((8, 32), (16, 64), (32, 128), (64, 256), (128,
+ 512)),
+ sigma=0.4,
+ with_deform=False,
+ deform_groups=4,
+ **kwargs):
+ self.base_edge_list = base_edge_list
+ self.scale_ranges = scale_ranges
+ self.sigma = sigma
+ self.with_deform = with_deform
+ self.deform_groups = deform_groups
+ super().__init__(num_classes, in_channels, **kwargs)
+
+ def _init_layers(self):
+ # box branch
+ super()._init_reg_convs()
+ self.conv_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1)
+
+ # cls branch
+ if not self.with_deform:
+ super()._init_cls_convs()
+ self.conv_cls = nn.Conv2d(
+ self.feat_channels, self.cls_out_channels, 3, padding=1)
+ else:
+ self.cls_convs = nn.ModuleList()
+ self.cls_convs.append(
+ ConvModule(
+ self.feat_channels, (self.feat_channels * 4),
+ 3,
+ stride=1,
+ padding=1,
+ conv_cfg=self.conv_cfg,
+ norm_cfg=self.norm_cfg,
+ bias=self.norm_cfg is None))
+ self.cls_convs.append(
+ ConvModule((self.feat_channels * 4), (self.feat_channels * 4),
+ 1,
+ stride=1,
+ padding=0,
+ conv_cfg=self.conv_cfg,
+ norm_cfg=self.norm_cfg,
+ bias=self.norm_cfg is None))
+ self.feature_adaption = FeatureAlign(
+ self.feat_channels,
+ self.feat_channels,
+ kernel_size=3,
+ deform_groups=self.deform_groups)
+ self.conv_cls = nn.Conv2d(
+ int(self.feat_channels * 4),
+ self.cls_out_channels,
+ 3,
+ padding=1)
+
+ def init_weights(self):
+ super().init_weights()
+ if self.with_deform:
+ self.feature_adaption.init_weights()
+
+ def forward_single(self, x):
+ cls_feat = x
+ reg_feat = x
+ for reg_layer in self.reg_convs:
+ reg_feat = reg_layer(reg_feat)
+ bbox_pred = self.conv_reg(reg_feat)
+ if self.with_deform:
+ cls_feat = self.feature_adaption(cls_feat, bbox_pred.exp())
+ for cls_layer in self.cls_convs:
+ cls_feat = cls_layer(cls_feat)
+ cls_score = self.conv_cls(cls_feat)
+ return cls_score, bbox_pred
+
+ def _get_points_single(self, *args, **kwargs):
+ y, x = super()._get_points_single(*args, **kwargs)
+ return y + 0.5, x + 0.5
+
+ def loss(self,
+ cls_scores,
+ bbox_preds,
+ gt_bbox_list,
+ gt_label_list,
+ img_metas,
+ gt_bboxes_ignore=None):
+ assert len(cls_scores) == len(bbox_preds)
+
+ featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
+ points = self.get_points(featmap_sizes, bbox_preds[0].dtype,
+ bbox_preds[0].device)
+ num_imgs = cls_scores[0].size(0)
+ flatten_cls_scores = [
+ cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels)
+ for cls_score in cls_scores
+ ]
+ flatten_bbox_preds = [
+ bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4)
+ for bbox_pred in bbox_preds
+ ]
+ flatten_cls_scores = torch.cat(flatten_cls_scores)
+ flatten_bbox_preds = torch.cat(flatten_bbox_preds)
+ flatten_labels, flatten_bbox_targets = self.get_targets(
+ gt_bbox_list, gt_label_list, featmap_sizes, points)
+
+ # FG cat_id: [0, num_classes -1], BG cat_id: num_classes
+ pos_inds = ((flatten_labels >= 0)
+ & (flatten_labels < self.num_classes)).nonzero().view(-1)
+ num_pos = len(pos_inds)
+
+ loss_cls = self.loss_cls(
+ flatten_cls_scores, flatten_labels, avg_factor=num_pos + num_imgs)
+ if num_pos > 0:
+ pos_bbox_preds = flatten_bbox_preds[pos_inds]
+ pos_bbox_targets = flatten_bbox_targets[pos_inds]
+ pos_weights = pos_bbox_targets.new_zeros(
+ pos_bbox_targets.size()) + 1.0
+ loss_bbox = self.loss_bbox(
+ pos_bbox_preds,
+ pos_bbox_targets,
+ pos_weights,
+ avg_factor=num_pos)
+ else:
+ loss_bbox = torch.tensor(
+ 0,
+ dtype=flatten_bbox_preds.dtype,
+ device=flatten_bbox_preds.device)
+ return dict(loss_cls=loss_cls, loss_bbox=loss_bbox)
+
+ def get_targets(self, gt_bbox_list, gt_label_list, featmap_sizes, points):
+ label_list, bbox_target_list = multi_apply(
+ self._get_target_single,
+ gt_bbox_list,
+ gt_label_list,
+ featmap_size_list=featmap_sizes,
+ point_list=points)
+ flatten_labels = [
+ torch.cat([
+ labels_level_img.flatten() for labels_level_img in labels_level
+ ]) for labels_level in zip(*label_list)
+ ]
+ flatten_bbox_targets = [
+ torch.cat([
+ bbox_targets_level_img.reshape(-1, 4)
+ for bbox_targets_level_img in bbox_targets_level
+ ]) for bbox_targets_level in zip(*bbox_target_list)
+ ]
+ flatten_labels = torch.cat(flatten_labels)
+ flatten_bbox_targets = torch.cat(flatten_bbox_targets)
+ return flatten_labels, flatten_bbox_targets
+
+ def _get_target_single(self,
+ gt_bboxes_raw,
+ gt_labels_raw,
+ featmap_size_list=None,
+ point_list=None):
+
+ gt_areas = torch.sqrt((gt_bboxes_raw[:, 2] - gt_bboxes_raw[:, 0]) *
+ (gt_bboxes_raw[:, 3] - gt_bboxes_raw[:, 1]))
+ label_list = []
+ bbox_target_list = []
+ # for each pyramid, find the cls and box target
+ for base_len, (lower_bound, upper_bound), stride, featmap_size, \
+ (y, x) in zip(self.base_edge_list, self.scale_ranges,
+ self.strides, featmap_size_list, point_list):
+ # FG cat_id: [0, num_classes -1], BG cat_id: num_classes
+ labels = gt_labels_raw.new_zeros(featmap_size) + self.num_classes
+ bbox_targets = gt_bboxes_raw.new(featmap_size[0], featmap_size[1],
+ 4) + 1
+ # scale assignment
+ hit_indices = ((gt_areas >= lower_bound) &
+ (gt_areas <= upper_bound)).nonzero().flatten()
+ if len(hit_indices) == 0:
+ label_list.append(labels)
+ bbox_target_list.append(torch.log(bbox_targets))
+ continue
+ _, hit_index_order = torch.sort(-gt_areas[hit_indices])
+ hit_indices = hit_indices[hit_index_order]
+ gt_bboxes = gt_bboxes_raw[hit_indices, :] / stride
+ gt_labels = gt_labels_raw[hit_indices]
+ half_w = 0.5 * (gt_bboxes[:, 2] - gt_bboxes[:, 0])
+ half_h = 0.5 * (gt_bboxes[:, 3] - gt_bboxes[:, 1])
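+            # The positive ("fovea") area is the GT box shrunk around its
+            # centre by the factor sigma; only the pixels inside it receive
+            # the GT label via the ceil/floor bounds computed next.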
+ # valid fovea area: left, right, top, down
+ pos_left = torch.ceil(
+ gt_bboxes[:, 0] + (1 - self.sigma) * half_w - 0.5).long().\
+ clamp(0, featmap_size[1] - 1)
+ pos_right = torch.floor(
+ gt_bboxes[:, 0] + (1 + self.sigma) * half_w - 0.5).long().\
+ clamp(0, featmap_size[1] - 1)
+ pos_top = torch.ceil(
+ gt_bboxes[:, 1] + (1 - self.sigma) * half_h - 0.5).long().\
+ clamp(0, featmap_size[0] - 1)
+ pos_down = torch.floor(
+ gt_bboxes[:, 1] + (1 + self.sigma) * half_h - 0.5).long().\
+ clamp(0, featmap_size[0] - 1)
+ for px1, py1, px2, py2, label, (gt_x1, gt_y1, gt_x2, gt_y2) in \
+ zip(pos_left, pos_top, pos_right, pos_down, gt_labels,
+ gt_bboxes_raw[hit_indices, :]):
+ labels[py1:py2 + 1, px1:px2 + 1] = label
+ bbox_targets[py1:py2 + 1, px1:px2 + 1, 0] = \
+ (stride * x[py1:py2 + 1, px1:px2 + 1] - gt_x1) / base_len
+ bbox_targets[py1:py2 + 1, px1:px2 + 1, 1] = \
+ (stride * y[py1:py2 + 1, px1:px2 + 1] - gt_y1) / base_len
+ bbox_targets[py1:py2 + 1, px1:px2 + 1, 2] = \
+ (gt_x2 - stride * x[py1:py2 + 1, px1:px2 + 1]) / base_len
+ bbox_targets[py1:py2 + 1, px1:px2 + 1, 3] = \
+ (gt_y2 - stride * y[py1:py2 + 1, px1:px2 + 1]) / base_len
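+            # Side distances are normalised by the level's base edge length,
+            # clamped to [1/16, 16] and stored in log space, matching the
+            # exp() decoding in _get_bboxes_single.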
+ bbox_targets = bbox_targets.clamp(min=1. / 16, max=16.)
+ label_list.append(labels)
+ bbox_target_list.append(torch.log(bbox_targets))
+ return label_list, bbox_target_list
+
+ def get_bboxes(self,
+ cls_scores,
+ bbox_preds,
+ img_metas,
+ cfg=None,
+ rescale=None):
+ assert len(cls_scores) == len(bbox_preds)
+ num_levels = len(cls_scores)
+ featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
+ points = self.get_points(
+ featmap_sizes,
+ bbox_preds[0].dtype,
+ bbox_preds[0].device,
+ flatten=True)
+ result_list = []
+ for img_id in range(len(img_metas)):
+ cls_score_list = [
+ cls_scores[i][img_id].detach() for i in range(num_levels)
+ ]
+ bbox_pred_list = [
+ bbox_preds[i][img_id].detach() for i in range(num_levels)
+ ]
+ img_shape = img_metas[img_id]['img_shape']
+ scale_factor = img_metas[img_id]['scale_factor']
+ det_bboxes = self._get_bboxes_single(cls_score_list,
+ bbox_pred_list, featmap_sizes,
+ points, img_shape,
+ scale_factor, cfg, rescale)
+ result_list.append(det_bboxes)
+ return result_list
+
+ def _get_bboxes_single(self,
+ cls_scores,
+ bbox_preds,
+ featmap_sizes,
+ point_list,
+ img_shape,
+ scale_factor,
+ cfg,
+ rescale=False):
+ cfg = self.test_cfg if cfg is None else cfg
+ assert len(cls_scores) == len(bbox_preds) == len(point_list)
+ det_bboxes = []
+ det_scores = []
+ for cls_score, bbox_pred, featmap_size, stride, base_len, (y, x) \
+ in zip(cls_scores, bbox_preds, featmap_sizes, self.strides,
+ self.base_edge_list, point_list):
+ assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
+ scores = cls_score.permute(1, 2, 0).reshape(
+ -1, self.cls_out_channels).sigmoid()
+ bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4).exp()
+ nms_pre = cfg.get('nms_pre', -1)
+ if (nms_pre > 0) and (scores.shape[0] > nms_pre):
+ max_scores, _ = scores.max(dim=1)
+ _, topk_inds = max_scores.topk(nms_pre)
+ bbox_pred = bbox_pred[topk_inds, :]
+ scores = scores[topk_inds, :]
+ y = y[topk_inds]
+ x = x[topk_inds]
+ x1 = (stride * x - base_len * bbox_pred[:, 0]).\
+ clamp(min=0, max=img_shape[1] - 1)
+ y1 = (stride * y - base_len * bbox_pred[:, 1]).\
+ clamp(min=0, max=img_shape[0] - 1)
+ x2 = (stride * x + base_len * bbox_pred[:, 2]).\
+ clamp(min=0, max=img_shape[1] - 1)
+ y2 = (stride * y + base_len * bbox_pred[:, 3]).\
+ clamp(min=0, max=img_shape[0] - 1)
+ bboxes = torch.stack([x1, y1, x2, y2], -1)
+ det_bboxes.append(bboxes)
+ det_scores.append(scores)
+ det_bboxes = torch.cat(det_bboxes)
+ if rescale:
+ det_bboxes /= det_bboxes.new_tensor(scale_factor)
+ det_scores = torch.cat(det_scores)
+ padding = det_scores.new_zeros(det_scores.shape[0], 1)
+        # note that we set FG labels to [0, num_class-1] since mmdet v2.0
+ # BG cat_id: num_class
+ det_scores = torch.cat([det_scores, padding], dim=1)
+ det_bboxes, det_labels = multiclass_nms(det_bboxes, det_scores,
+ cfg.score_thr, cfg.nms,
+ cfg.max_per_img)
+ return det_bboxes, det_labels
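+
+
+# A minimal, self-contained sketch (illustrative only; the values below are
+# made up) of the target encoding used above: TBLR offsets are normalized by
+# base_len, clamped, and stored in log space, and `_get_bboxes_single`
+# inverts them with exp(). For a box whose offsets stay inside the clamp
+# range, the round trip should recover the ground truth exactly.
+def _fovea_target_round_trip_sketch():
+    import torch
+    stride, base_len = 8.0, 32.0
+    x, y = 12.0, 9.0                                # feature-map point
+    gt = torch.tensor([40.0, 20.0, 160.0, 120.0])   # x1, y1, x2, y2
+    # encode: normalized TBLR offsets, stored in log space
+    t = torch.stack([(stride * x - gt[0]) / base_len,
+                     (stride * y - gt[1]) / base_len,
+                     (gt[2] - stride * x) / base_len,
+                     (gt[3] - stride * y) / base_len]).clamp(1. / 16, 16.).log()
+    # decode: exp() then re-apply stride / base_len, as in _get_bboxes_single
+    d = t.exp()
+    box = torch.stack([stride * x - base_len * d[0],
+                       stride * y - base_len * d[1],
+                       stride * x + base_len * d[2],
+                       stride * y + base_len * d[3]])
+    assert torch.allclose(box, gt)
+    return box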
diff --git a/annotator/uniformer/mmdet/models/dense_heads/free_anchor_retina_head.py b/annotator/uniformer/mmdet/models/dense_heads/free_anchor_retina_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..79879fdc3171b8e34b606b27eb1ceb67f4473e3e
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/dense_heads/free_anchor_retina_head.py
@@ -0,0 +1,270 @@
+import torch
+import torch.nn.functional as F
+
+from mmdet.core import bbox_overlaps
+from ..builder import HEADS
+from .retina_head import RetinaHead
+
+EPS = 1e-12
+
+
+@HEADS.register_module()
+class FreeAnchorRetinaHead(RetinaHead):
+ """FreeAnchor RetinaHead used in https://arxiv.org/abs/1909.02466.
+
+ Args:
+ num_classes (int): Number of categories excluding the background
+ category.
+ in_channels (int): Number of channels in the input feature map.
+ stacked_convs (int): Number of conv layers in cls and reg tower.
+ Default: 4.
+ conv_cfg (dict): dictionary to construct and config conv layer.
+ Default: None.
+ norm_cfg (dict): dictionary to construct and config norm layer.
+ Default: norm_cfg=dict(type='GN', num_groups=32,
+ requires_grad=True).
+        pre_anchor_topk (int): Number of boxes taken into each bag.
+        bbox_thr (float): The threshold of the saturated linear function. It is
+            usually the same as the IoU threshold used in NMS.
+ gamma (float): Gamma parameter in focal loss.
+ alpha (float): Alpha parameter in focal loss.
+ """ # noqa: W605
+
+ def __init__(self,
+ num_classes,
+ in_channels,
+ stacked_convs=4,
+ conv_cfg=None,
+ norm_cfg=None,
+ pre_anchor_topk=50,
+ bbox_thr=0.6,
+ gamma=2.0,
+ alpha=0.5,
+ **kwargs):
+ super(FreeAnchorRetinaHead,
+ self).__init__(num_classes, in_channels, stacked_convs, conv_cfg,
+ norm_cfg, **kwargs)
+
+ self.pre_anchor_topk = pre_anchor_topk
+ self.bbox_thr = bbox_thr
+ self.gamma = gamma
+ self.alpha = alpha
+
+ def loss(self,
+ cls_scores,
+ bbox_preds,
+ gt_bboxes,
+ gt_labels,
+ img_metas,
+ gt_bboxes_ignore=None):
+ """Compute losses of the head.
+
+ Args:
+ cls_scores (list[Tensor]): Box scores for each scale level
+ Has shape (N, num_anchors * num_classes, H, W)
+ bbox_preds (list[Tensor]): Box energies / deltas for each scale
+ level with shape (N, num_anchors * 4, H, W)
+            gt_bboxes (list[Tensor]): Each item is the ground-truth boxes for
+                each image in [tl_x, tl_y, br_x, br_y] format.
+            gt_labels (list[Tensor]): Class indices corresponding to each box.
+ img_metas (list[dict]): Meta information of each image, e.g.,
+ image size, scaling factor, etc.
+ gt_bboxes_ignore (None | list[Tensor]): specify which bounding
+ boxes can be ignored when computing the loss.
+
+ Returns:
+ dict[str, Tensor]: A dictionary of loss components.
+ """
+ featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
+ assert len(featmap_sizes) == len(self.anchor_generator.base_anchors)
+
+ anchor_list, _ = self.get_anchors(featmap_sizes, img_metas)
+ anchors = [torch.cat(anchor) for anchor in anchor_list]
+
+ # concatenate each level
+ cls_scores = [
+ cls.permute(0, 2, 3,
+ 1).reshape(cls.size(0), -1, self.cls_out_channels)
+ for cls in cls_scores
+ ]
+ bbox_preds = [
+ bbox_pred.permute(0, 2, 3, 1).reshape(bbox_pred.size(0), -1, 4)
+ for bbox_pred in bbox_preds
+ ]
+ cls_scores = torch.cat(cls_scores, dim=1)
+ bbox_preds = torch.cat(bbox_preds, dim=1)
+
+ cls_prob = torch.sigmoid(cls_scores)
+ box_prob = []
+ num_pos = 0
+ positive_losses = []
+ for _, (anchors_, gt_labels_, gt_bboxes_, cls_prob_,
+ bbox_preds_) in enumerate(
+ zip(anchors, gt_labels, gt_bboxes, cls_prob, bbox_preds)):
+
+ with torch.no_grad():
+ if len(gt_bboxes_) == 0:
+ image_box_prob = torch.zeros(
+ anchors_.size(0),
+ self.cls_out_channels).type_as(bbox_preds_)
+ else:
+ # box_localization: a_{j}^{loc}, shape: [j, 4]
+ pred_boxes = self.bbox_coder.decode(anchors_, bbox_preds_)
+
+ # object_box_iou: IoU_{ij}^{loc}, shape: [i, j]
+ object_box_iou = bbox_overlaps(gt_bboxes_, pred_boxes)
+
+ # object_box_prob: P{a_{j} -> b_{i}}, shape: [i, j]
+ t1 = self.bbox_thr
+ t2 = object_box_iou.max(
+ dim=1, keepdim=True).values.clamp(min=t1 + 1e-12)
+ object_box_prob = ((object_box_iou - t1) /
+ (t2 - t1)).clamp(
+ min=0, max=1)
+
+ # object_cls_box_prob: P{a_{j} -> b_{i}}, shape: [i, c, j]
+ num_obj = gt_labels_.size(0)
+ indices = torch.stack([
+ torch.arange(num_obj).type_as(gt_labels_), gt_labels_
+ ],
+ dim=0)
+ object_cls_box_prob = torch.sparse_coo_tensor(
+ indices, object_box_prob)
+
+ # image_box_iou: P{a_{j} \in A_{+}}, shape: [c, j]
+ """
+ from "start" to "end" implement:
+ image_box_iou = torch.sparse.max(object_cls_box_prob,
+ dim=0).t()
+
+ """
+ # start
+ box_cls_prob = torch.sparse.sum(
+ object_cls_box_prob, dim=0).to_dense()
+
+ indices = torch.nonzero(box_cls_prob, as_tuple=False).t_()
+ if indices.numel() == 0:
+ image_box_prob = torch.zeros(
+ anchors_.size(0),
+ self.cls_out_channels).type_as(object_box_prob)
+ else:
+ nonzero_box_prob = torch.where(
+ (gt_labels_.unsqueeze(dim=-1) == indices[0]),
+ object_box_prob[:, indices[1]],
+ torch.tensor([
+ 0
+ ]).type_as(object_box_prob)).max(dim=0).values
+
+                        # unmap to shape [j, c]
+ image_box_prob = torch.sparse_coo_tensor(
+ indices.flip([0]),
+ nonzero_box_prob,
+ size=(anchors_.size(0),
+ self.cls_out_channels)).to_dense()
+ # end
+
+ box_prob.append(image_box_prob)
+
+ # construct bags for objects
+ match_quality_matrix = bbox_overlaps(gt_bboxes_, anchors_)
+ _, matched = torch.topk(
+ match_quality_matrix,
+ self.pre_anchor_topk,
+ dim=1,
+ sorted=False)
+ del match_quality_matrix
+
+ # matched_cls_prob: P_{ij}^{cls}
+ matched_cls_prob = torch.gather(
+ cls_prob_[matched], 2,
+ gt_labels_.view(-1, 1, 1).repeat(1, self.pre_anchor_topk,
+ 1)).squeeze(2)
+
+ # matched_box_prob: P_{ij}^{loc}
+ matched_anchors = anchors_[matched]
+ matched_object_targets = self.bbox_coder.encode(
+ matched_anchors,
+ gt_bboxes_.unsqueeze(dim=1).expand_as(matched_anchors))
+ loss_bbox = self.loss_bbox(
+ bbox_preds_[matched],
+ matched_object_targets,
+ reduction_override='none').sum(-1)
+ matched_box_prob = torch.exp(-loss_bbox)
+
+ # positive_losses: {-log( Mean-max(P_{ij}^{cls} * P_{ij}^{loc}) )}
+ num_pos += len(gt_bboxes_)
+ positive_losses.append(
+ self.positive_bag_loss(matched_cls_prob, matched_box_prob))
+ positive_loss = torch.cat(positive_losses).sum() / max(1, num_pos)
+
+ # box_prob: P{a_{j} \in A_{+}}
+ box_prob = torch.stack(box_prob, dim=0)
+
+ # negative_loss:
+ # \sum_{j}{ FL((1 - P{a_{j} \in A_{+}}) * (1 - P_{j}^{bg})) } / n||B||
+ negative_loss = self.negative_bag_loss(cls_prob, box_prob).sum() / max(
+ 1, num_pos * self.pre_anchor_topk)
+
+ # avoid the absence of gradients in regression subnet
+ # when no ground-truth in a batch
+ if num_pos == 0:
+ positive_loss = bbox_preds.sum() * 0
+
+ losses = {
+ 'positive_bag_loss': positive_loss,
+ 'negative_bag_loss': negative_loss
+ }
+ return losses
+
+ def positive_bag_loss(self, matched_cls_prob, matched_box_prob):
+ """Compute positive bag loss.
+
+ :math:`-log( Mean-max(P_{ij}^{cls} * P_{ij}^{loc}) )`.
+
+ :math:`P_{ij}^{cls}`: matched_cls_prob, classification probability of matched samples.
+
+ :math:`P_{ij}^{loc}`: matched_box_prob, box probability of matched samples.
+
+ Args:
+            matched_cls_prob (Tensor): Classification probability of matched
+                samples, in shape (num_gt, pre_anchor_topk).
+ matched_box_prob (Tensor): BBox probability of matched samples,
+ in shape (num_gt, pre_anchor_topk).
+
+ Returns:
+ Tensor: Positive bag loss in shape (num_gt,).
+ """ # noqa: E501, W605
+ # bag_prob = Mean-max(matched_prob)
+ matched_prob = matched_cls_prob * matched_box_prob
+ weight = 1 / torch.clamp(1 - matched_prob, 1e-12, None)
+ weight /= weight.sum(dim=1).unsqueeze(dim=-1)
+ bag_prob = (weight * matched_prob).sum(dim=1)
+ # positive_bag_loss = -self.alpha * log(bag_prob)
+ return self.alpha * F.binary_cross_entropy(
+ bag_prob, torch.ones_like(bag_prob), reduction='none')
+
+ def negative_bag_loss(self, cls_prob, box_prob):
+ """Compute negative bag loss.
+
+ :math:`FL((1 - P_{a_{j} \in A_{+}}) * (1 - P_{j}^{bg}))`.
+
+        :math:`P_{a_{j} \in A_{+}}`: Box probability of matched samples.
+
+ :math:`P_{j}^{bg}`: Classification probability of negative samples.
+
+ Args:
+ cls_prob (Tensor): Classification probability, in shape
+ (num_img, num_anchors, num_classes).
+ box_prob (Tensor): Box probability, in shape
+ (num_img, num_anchors, num_classes).
+
+ Returns:
+ Tensor: Negative bag loss in shape (num_img, num_anchors, num_classes).
+ """ # noqa: E501, W605
+ prob = cls_prob * (1 - box_prob)
+ # There are some cases when neg_prob = 0.
+ # This will cause the neg_prob.log() to be inf without clamp.
+ prob = prob.clamp(min=EPS, max=1 - EPS)
+ negative_bag_loss = prob**self.gamma * F.binary_cross_entropy(
+ prob, torch.zeros_like(prob), reduction='none')
+ return (1 - self.alpha) * negative_bag_loss
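+
+
+# A minimal, self-contained sketch (illustrative only; the shapes and random
+# inputs are made up) of the two bag losses above. `matched_prob` stands for
+# matched_cls_prob * matched_box_prob; the mean-max weighting concentrates
+# the positive bag on its most confident anchors, while the negative term
+# applies a focal-style penalty to cls_prob * (1 - box_prob), as in
+# negative_bag_loss.
+def _free_anchor_bag_loss_sketch(alpha=0.5, gamma=2.0, eps=1e-12):
+    import torch
+    import torch.nn.functional as F
+    matched_prob = torch.rand(3, 5)              # (num_gt, pre_anchor_topk)
+    weight = 1 / torch.clamp(1 - matched_prob, eps, None)
+    weight = weight / weight.sum(dim=1, keepdim=True)
+    bag_prob = (weight * matched_prob).sum(dim=1)
+    positive_loss = alpha * F.binary_cross_entropy(
+        bag_prob, torch.ones_like(bag_prob), reduction='none')
+
+    cls_prob = torch.rand(2, 10, 4)              # (num_img, num_anchors, C)
+    box_prob = torch.rand(2, 10, 4)
+    prob = (cls_prob * (1 - box_prob)).clamp(min=eps, max=1 - eps)
+    negative_loss = (1 - alpha) * prob**gamma * F.binary_cross_entropy(
+        prob, torch.zeros_like(prob), reduction='none')
+    return positive_loss, negative_loss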
diff --git a/annotator/uniformer/mmdet/models/dense_heads/fsaf_head.py b/annotator/uniformer/mmdet/models/dense_heads/fsaf_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..7183efce28596ba106411250f508aec5995fbf60
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/dense_heads/fsaf_head.py
@@ -0,0 +1,422 @@
+import numpy as np
+import torch
+from mmcv.cnn import normal_init
+from mmcv.runner import force_fp32
+
+from mmdet.core import (anchor_inside_flags, images_to_levels, multi_apply,
+ unmap)
+from ..builder import HEADS
+from ..losses.accuracy import accuracy
+from ..losses.utils import weight_reduce_loss
+from .retina_head import RetinaHead
+
+
+@HEADS.register_module()
+class FSAFHead(RetinaHead):
+ """Anchor-free head used in `FSAF `_.
+
+ The head contains two subnetworks. The first classifies anchor boxes and
+ the second regresses deltas for the anchors (num_anchors is 1 for anchor-
+ free methods)
+
+ Args:
+ *args: Same as its base class in :class:`RetinaHead`
+        score_threshold (float, optional): The score threshold used to
+            calculate positive recall. If given, prediction scores lower than
+            this value are counted as incorrect predictions. Defaults to None.
+ **kwargs: Same as its base class in :class:`RetinaHead`
+
+ Example:
+ >>> import torch
+ >>> self = FSAFHead(11, 7)
+ >>> x = torch.rand(1, 7, 32, 32)
+ >>> cls_score, bbox_pred = self.forward_single(x)
+ >>> # Each anchor predicts a score for each class except background
+ >>> cls_per_anchor = cls_score.shape[1] / self.num_anchors
+ >>> box_per_anchor = bbox_pred.shape[1] / self.num_anchors
+ >>> assert cls_per_anchor == self.num_classes
+ >>> assert box_per_anchor == 4
+ """
+
+ def __init__(self, *args, score_threshold=None, **kwargs):
+ super().__init__(*args, **kwargs)
+ self.score_threshold = score_threshold
+
+ def forward_single(self, x):
+ """Forward feature map of a single scale level.
+
+ Args:
+ x (Tensor): Feature map of a single scale level.
+
+ Returns:
+ tuple (Tensor):
+ cls_score (Tensor): Box scores for each scale level
+ Has shape (N, num_points * num_classes, H, W).
+ bbox_pred (Tensor): Box energies / deltas for each scale
+ level with shape (N, num_points * 4, H, W).
+ """
+ cls_score, bbox_pred = super().forward_single(x)
+ # relu: TBLR encoder only accepts positive bbox_pred
+ return cls_score, self.relu(bbox_pred)
+
+ def init_weights(self):
+ """Initialize weights of the head."""
+ super(FSAFHead, self).init_weights()
+        # The positive bias in the self.retina_reg conv prevents the
+        # predicted bboxes from having zero area
+ normal_init(self.retina_reg, std=0.01, bias=0.25)
+
+ def _get_targets_single(self,
+ flat_anchors,
+ valid_flags,
+ gt_bboxes,
+ gt_bboxes_ignore,
+ gt_labels,
+ img_meta,
+ label_channels=1,
+ unmap_outputs=True):
+ """Compute regression and classification targets for anchors in a
+ single image.
+
+        Most of the code is the same as in the base class
+        :obj:`AnchorHead`, except that this method also collects and returns
+        the matched gt index in the image (from 0 to num_gt - 1). If an
+        anchor bbox is not matched to any gt, the corresponding value in
+        pos_gt_inds is -1.
+ """
+ inside_flags = anchor_inside_flags(flat_anchors, valid_flags,
+ img_meta['img_shape'][:2],
+ self.train_cfg.allowed_border)
+ if not inside_flags.any():
+ return (None, ) * 7
+ # Assign gt and sample anchors
+ anchors = flat_anchors[inside_flags.type(torch.bool), :]
+ assign_result = self.assigner.assign(
+ anchors, gt_bboxes, gt_bboxes_ignore,
+ None if self.sampling else gt_labels)
+
+ sampling_result = self.sampler.sample(assign_result, anchors,
+ gt_bboxes)
+
+ num_valid_anchors = anchors.shape[0]
+ bbox_targets = torch.zeros_like(anchors)
+ bbox_weights = torch.zeros_like(anchors)
+ labels = anchors.new_full((num_valid_anchors, ),
+ self.num_classes,
+ dtype=torch.long)
+ label_weights = anchors.new_zeros((num_valid_anchors, label_channels),
+ dtype=torch.float)
+ pos_gt_inds = anchors.new_full((num_valid_anchors, ),
+ -1,
+ dtype=torch.long)
+
+ pos_inds = sampling_result.pos_inds
+ neg_inds = sampling_result.neg_inds
+
+ if len(pos_inds) > 0:
+ if not self.reg_decoded_bbox:
+ pos_bbox_targets = self.bbox_coder.encode(
+ sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes)
+ else:
+ # When the regression loss (e.g. `IouLoss`, `GIouLoss`)
+ # is applied directly on the decoded bounding boxes, both
+ # the predicted boxes and regression targets should be with
+ # absolute coordinate format.
+ pos_bbox_targets = sampling_result.pos_gt_bboxes
+ bbox_targets[pos_inds, :] = pos_bbox_targets
+ bbox_weights[pos_inds, :] = 1.0
+ # The assigned gt_index for each anchor. (0-based)
+ pos_gt_inds[pos_inds] = sampling_result.pos_assigned_gt_inds
+ if gt_labels is None:
+ # Only rpn gives gt_labels as None
+ # Foreground is the first class
+ labels[pos_inds] = 0
+ else:
+ labels[pos_inds] = gt_labels[
+ sampling_result.pos_assigned_gt_inds]
+ if self.train_cfg.pos_weight <= 0:
+ label_weights[pos_inds] = 1.0
+ else:
+ label_weights[pos_inds] = self.train_cfg.pos_weight
+
+ if len(neg_inds) > 0:
+ label_weights[neg_inds] = 1.0
+
+ # shadowed_labels is a tensor composed of tuples
+ # (anchor_inds, class_label) that indicate those anchors lying in the
+ # outer region of a gt or overlapped by another gt with a smaller
+ # area.
+ #
+ # Therefore, only the shadowed labels are ignored for loss calculation.
+ # the key `shadowed_labels` is defined in :obj:`CenterRegionAssigner`
+ shadowed_labels = assign_result.get_extra_property('shadowed_labels')
+ if shadowed_labels is not None and shadowed_labels.numel():
+ if len(shadowed_labels.shape) == 2:
+ idx_, label_ = shadowed_labels[:, 0], shadowed_labels[:, 1]
+ assert (labels[idx_] != label_).all(), \
+ 'One label cannot be both positive and ignored'
+ label_weights[idx_, label_] = 0
+ else:
+ label_weights[shadowed_labels] = 0
+
+ # map up to original set of anchors
+ if unmap_outputs:
+ num_total_anchors = flat_anchors.size(0)
+ labels = unmap(labels, num_total_anchors, inside_flags)
+ label_weights = unmap(label_weights, num_total_anchors,
+ inside_flags)
+ bbox_targets = unmap(bbox_targets, num_total_anchors, inside_flags)
+ bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags)
+ pos_gt_inds = unmap(
+ pos_gt_inds, num_total_anchors, inside_flags, fill=-1)
+
+ return (labels, label_weights, bbox_targets, bbox_weights, pos_inds,
+ neg_inds, sampling_result, pos_gt_inds)
+
+ @force_fp32(apply_to=('cls_scores', 'bbox_preds'))
+ def loss(self,
+ cls_scores,
+ bbox_preds,
+ gt_bboxes,
+ gt_labels,
+ img_metas,
+ gt_bboxes_ignore=None):
+ """Compute loss of the head.
+
+ Args:
+ cls_scores (list[Tensor]): Box scores for each scale level
+ Has shape (N, num_points * num_classes, H, W).
+ bbox_preds (list[Tensor]): Box energies / deltas for each scale
+ level with shape (N, num_points * 4, H, W).
+            gt_bboxes (list[Tensor]): Each item is the ground-truth boxes for
+                each image in [tl_x, tl_y, br_x, br_y] format.
+            gt_labels (list[Tensor]): Class indices corresponding to each box.
+ img_metas (list[dict]): Meta information of each image, e.g.,
+ image size, scaling factor, etc.
+ gt_bboxes_ignore (None | list[Tensor]): specify which bounding
+ boxes can be ignored when computing the loss.
+
+ Returns:
+ dict[str, Tensor]: A dictionary of loss components.
+ """
+ for i in range(len(bbox_preds)): # loop over fpn level
+ # avoid 0 area of the predicted bbox
+ bbox_preds[i] = bbox_preds[i].clamp(min=1e-4)
+ # TODO: It may directly use the base-class loss function.
+ featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
+ assert len(featmap_sizes) == self.anchor_generator.num_levels
+ batch_size = len(gt_bboxes)
+ device = cls_scores[0].device
+ anchor_list, valid_flag_list = self.get_anchors(
+ featmap_sizes, img_metas, device=device)
+ label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1
+ cls_reg_targets = self.get_targets(
+ anchor_list,
+ valid_flag_list,
+ gt_bboxes,
+ img_metas,
+ gt_bboxes_ignore_list=gt_bboxes_ignore,
+ gt_labels_list=gt_labels,
+ label_channels=label_channels)
+ if cls_reg_targets is None:
+ return None
+ (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,
+ num_total_pos, num_total_neg,
+ pos_assigned_gt_inds_list) = cls_reg_targets
+
+ num_gts = np.array(list(map(len, gt_labels)))
+ num_total_samples = (
+ num_total_pos + num_total_neg if self.sampling else num_total_pos)
+ # anchor number of multi levels
+ num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]
+ # concat all level anchors and flags to a single tensor
+ concat_anchor_list = []
+ for i in range(len(anchor_list)):
+ concat_anchor_list.append(torch.cat(anchor_list[i]))
+ all_anchor_list = images_to_levels(concat_anchor_list,
+ num_level_anchors)
+ losses_cls, losses_bbox = multi_apply(
+ self.loss_single,
+ cls_scores,
+ bbox_preds,
+ all_anchor_list,
+ labels_list,
+ label_weights_list,
+ bbox_targets_list,
+ bbox_weights_list,
+ num_total_samples=num_total_samples)
+
+ # `pos_assigned_gt_inds_list` (length: fpn_levels) stores the assigned
+ # gt index of each anchor bbox in each fpn level.
+ cum_num_gts = list(np.cumsum(num_gts)) # length of batch_size
+ for i, assign in enumerate(pos_assigned_gt_inds_list):
+ # loop over fpn levels
+ for j in range(1, batch_size):
+ # loop over batch size
+ # Convert gt indices in each img to those in the batch
+ assign[j][assign[j] >= 0] += int(cum_num_gts[j - 1])
+ pos_assigned_gt_inds_list[i] = assign.flatten()
+ labels_list[i] = labels_list[i].flatten()
+ num_gts = sum(map(len, gt_labels)) # total number of gt in the batch
+ # The unique label index of each gt in the batch
+ label_sequence = torch.arange(num_gts, device=device)
+ # Collect the average loss of each gt in each level
+ with torch.no_grad():
+ loss_levels, = multi_apply(
+ self.collect_loss_level_single,
+ losses_cls,
+ losses_bbox,
+ pos_assigned_gt_inds_list,
+ labels_seq=label_sequence)
+ # Shape: (fpn_levels, num_gts). Loss of each gt at each fpn level
+ loss_levels = torch.stack(loss_levels, dim=0)
+ # Locate the best fpn level for loss back-propagation
+ if loss_levels.numel() == 0: # zero gt
+ argmin = loss_levels.new_empty((num_gts, ), dtype=torch.long)
+ else:
+ _, argmin = loss_levels.min(dim=0)
+
+ # Reweight the loss of each (anchor, label) pair, so that only those
+ # at the best gt level are back-propagated.
+ losses_cls, losses_bbox, pos_inds = multi_apply(
+ self.reweight_loss_single,
+ losses_cls,
+ losses_bbox,
+ pos_assigned_gt_inds_list,
+ labels_list,
+ list(range(len(losses_cls))),
+ min_levels=argmin)
+ num_pos = torch.cat(pos_inds, 0).sum().float()
+ pos_recall = self.calculate_pos_recall(cls_scores, labels_list,
+ pos_inds)
+
+ if num_pos == 0: # No gt
+ avg_factor = num_pos + float(num_total_neg)
+ else:
+ avg_factor = num_pos
+ for i in range(len(losses_cls)):
+ losses_cls[i] /= avg_factor
+ losses_bbox[i] /= avg_factor
+ return dict(
+ loss_cls=losses_cls,
+ loss_bbox=losses_bbox,
+ num_pos=num_pos / batch_size,
+ pos_recall=pos_recall)
+
+ def calculate_pos_recall(self, cls_scores, labels_list, pos_inds):
+ """Calculate positive recall with score threshold.
+
+ Args:
+ cls_scores (list[Tensor]): Classification scores at all fpn levels.
+ Each tensor is in shape (N, num_classes * num_anchors, H, W)
+ labels_list (list[Tensor]): The label that each anchor is assigned
+ to. Shape (N * H * W * num_anchors, )
+ pos_inds (list[Tensor]): List of bool tensors indicating whether
+ the anchor is assigned to a positive label.
+ Shape (N * H * W * num_anchors, )
+
+ Returns:
+ Tensor: A single float number indicating the positive recall.
+ """
+ with torch.no_grad():
+ num_class = self.num_classes
+ scores = [
+ cls.permute(0, 2, 3, 1).reshape(-1, num_class)[pos]
+ for cls, pos in zip(cls_scores, pos_inds)
+ ]
+ labels = [
+ label.reshape(-1)[pos]
+ for label, pos in zip(labels_list, pos_inds)
+ ]
+ scores = torch.cat(scores, dim=0)
+ labels = torch.cat(labels, dim=0)
+ if self.use_sigmoid_cls:
+ scores = scores.sigmoid()
+ else:
+ scores = scores.softmax(dim=1)
+
+ return accuracy(scores, labels, thresh=self.score_threshold)
+
+ def collect_loss_level_single(self, cls_loss, reg_loss, assigned_gt_inds,
+ labels_seq):
+ """Get the average loss in each FPN level w.r.t. each gt label.
+
+ Args:
+ cls_loss (Tensor): Classification loss of each feature map pixel,
+ shape (num_anchor, num_class)
+ reg_loss (Tensor): Regression loss of each feature map pixel,
+ shape (num_anchor, 4)
+            assigned_gt_inds (Tensor): It indicates which gt the prior is
+                assigned to (0-based, -1: no assignment). Shape (num_anchor,).
+            labels_seq (Tensor): The rank of labels. Shape (num_gt,).
+
+        Returns:
+            Tensor: Average loss of each gt in this level, shape (num_gt,).
+ """
+ if len(reg_loss.shape) == 2: # iou loss has shape (num_prior, 4)
+ reg_loss = reg_loss.sum(dim=-1) # sum loss in tblr dims
+ if len(cls_loss.shape) == 2:
+ cls_loss = cls_loss.sum(dim=-1) # sum loss in class dims
+ loss = cls_loss + reg_loss
+ assert loss.size(0) == assigned_gt_inds.size(0)
+ # Default loss value is 1e6 for a layer where no anchor is positive
+ # to ensure it will not be chosen to back-propagate gradient
+ losses_ = loss.new_full(labels_seq.shape, 1e6)
+ for i, l in enumerate(labels_seq):
+ match = assigned_gt_inds == l
+ if match.any():
+ losses_[i] = loss[match].mean()
+ return losses_,
+
+ def reweight_loss_single(self, cls_loss, reg_loss, assigned_gt_inds,
+ labels, level, min_levels):
+ """Reweight loss values at each level.
+
+ Reassign loss values at each level by masking those where the
+ pre-calculated loss is too large. Then return the reduced losses.
+
+ Args:
+ cls_loss (Tensor): Element-wise classification loss.
+ Shape: (num_anchors, num_classes)
+ reg_loss (Tensor): Element-wise regression loss.
+ Shape: (num_anchors, 4)
+ assigned_gt_inds (Tensor): The gt indices that each anchor bbox
+ is assigned to. -1 denotes a negative anchor, otherwise it is the
+ gt index (0-based). Shape: (num_anchors, ),
+ labels (Tensor): Label assigned to anchors. Shape: (num_anchors, ).
+ level (int): The current level index in the pyramid
+ (0-4 for RetinaNet)
+ min_levels (Tensor): The best-matching level for each gt.
+ Shape: (num_gts, ),
+
+ Returns:
+ tuple:
+ - cls_loss: Reduced corrected classification loss. Scalar.
+ - reg_loss: Reduced corrected regression loss. Scalar.
+ - pos_flags (Tensor): Corrected bool tensor indicating the
+ final positive anchors. Shape: (num_anchors, ).
+ """
+ loc_weight = torch.ones_like(reg_loss)
+ cls_weight = torch.ones_like(cls_loss)
+ pos_flags = assigned_gt_inds >= 0 # positive pixel flag
+ pos_indices = torch.nonzero(pos_flags, as_tuple=False).flatten()
+
+ if pos_flags.any(): # pos pixels exist
+ pos_assigned_gt_inds = assigned_gt_inds[pos_flags]
+ zeroing_indices = (min_levels[pos_assigned_gt_inds] != level)
+ neg_indices = pos_indices[zeroing_indices]
+
+ if neg_indices.numel():
+ pos_flags[neg_indices] = 0
+ loc_weight[neg_indices] = 0
+ # Only the weight corresponding to the label is
+ # zeroed out if not selected
+ zeroing_labels = labels[neg_indices]
+ assert (zeroing_labels >= 0).all()
+ cls_weight[neg_indices, zeroing_labels] = 0
+
+ # Weighted loss for both cls and reg loss
+ cls_loss = weight_reduce_loss(cls_loss, cls_weight, reduction='sum')
+ reg_loss = weight_reduce_loss(reg_loss, loc_weight, reduction='sum')
+
+ return cls_loss, reg_loss, pos_flags
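+
+
+# A minimal, self-contained sketch (illustrative only; the loss values are
+# made up) of the online feature selection step above. Given per-(level, gt)
+# losses, each gt keeps only its cheapest pyramid level; anchors matched to
+# that gt at every other level are zeroed out, which is what
+# `reweight_loss_single` does with `min_levels`.
+def _fsaf_level_selection_sketch():
+    import torch
+    loss_levels = torch.tensor([[0.9, 1e6, 0.3],
+                                [0.2, 0.4, 0.5],
+                                [0.7, 0.1, 1e6]])   # (fpn_levels, num_gts)
+    _, min_levels = loss_levels.min(dim=0)          # best level per gt
+    assert min_levels.tolist() == [1, 2, 0]
+    # an anchor at level 0 assigned to gt 0 would therefore be down-weighted
+    level, assigned_gt = 0, 0
+    keep = (min_levels[assigned_gt] == level)
+    assert not bool(keep)
+    return min_levels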
diff --git a/annotator/uniformer/mmdet/models/dense_heads/ga_retina_head.py b/annotator/uniformer/mmdet/models/dense_heads/ga_retina_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..8822d1ca78ee2fa2f304a0649e81274830383533
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/dense_heads/ga_retina_head.py
@@ -0,0 +1,109 @@
+import torch.nn as nn
+from mmcv.cnn import ConvModule, bias_init_with_prob, normal_init
+from mmcv.ops import MaskedConv2d
+
+from ..builder import HEADS
+from .guided_anchor_head import FeatureAdaption, GuidedAnchorHead
+
+
+@HEADS.register_module()
+class GARetinaHead(GuidedAnchorHead):
+ """Guided-Anchor-based RetinaNet head."""
+
+ def __init__(self,
+ num_classes,
+ in_channels,
+ stacked_convs=4,
+ conv_cfg=None,
+ norm_cfg=None,
+ **kwargs):
+ self.stacked_convs = stacked_convs
+ self.conv_cfg = conv_cfg
+ self.norm_cfg = norm_cfg
+ super(GARetinaHead, self).__init__(num_classes, in_channels, **kwargs)
+
+ def _init_layers(self):
+ """Initialize layers of the head."""
+ self.relu = nn.ReLU(inplace=True)
+ self.cls_convs = nn.ModuleList()
+ self.reg_convs = nn.ModuleList()
+ for i in range(self.stacked_convs):
+ chn = self.in_channels if i == 0 else self.feat_channels
+ self.cls_convs.append(
+ ConvModule(
+ chn,
+ self.feat_channels,
+ 3,
+ stride=1,
+ padding=1,
+ conv_cfg=self.conv_cfg,
+ norm_cfg=self.norm_cfg))
+ self.reg_convs.append(
+ ConvModule(
+ chn,
+ self.feat_channels,
+ 3,
+ stride=1,
+ padding=1,
+ conv_cfg=self.conv_cfg,
+ norm_cfg=self.norm_cfg))
+
+ self.conv_loc = nn.Conv2d(self.feat_channels, 1, 1)
+ self.conv_shape = nn.Conv2d(self.feat_channels, self.num_anchors * 2,
+ 1)
+ self.feature_adaption_cls = FeatureAdaption(
+ self.feat_channels,
+ self.feat_channels,
+ kernel_size=3,
+ deform_groups=self.deform_groups)
+ self.feature_adaption_reg = FeatureAdaption(
+ self.feat_channels,
+ self.feat_channels,
+ kernel_size=3,
+ deform_groups=self.deform_groups)
+ self.retina_cls = MaskedConv2d(
+ self.feat_channels,
+ self.num_anchors * self.cls_out_channels,
+ 3,
+ padding=1)
+ self.retina_reg = MaskedConv2d(
+ self.feat_channels, self.num_anchors * 4, 3, padding=1)
+
+ def init_weights(self):
+ """Initialize weights of the layer."""
+ for m in self.cls_convs:
+ normal_init(m.conv, std=0.01)
+ for m in self.reg_convs:
+ normal_init(m.conv, std=0.01)
+
+ self.feature_adaption_cls.init_weights()
+ self.feature_adaption_reg.init_weights()
+
+ bias_cls = bias_init_with_prob(0.01)
+ normal_init(self.conv_loc, std=0.01, bias=bias_cls)
+ normal_init(self.conv_shape, std=0.01)
+ normal_init(self.retina_cls, std=0.01, bias=bias_cls)
+ normal_init(self.retina_reg, std=0.01)
+
+ def forward_single(self, x):
+ """Forward feature map of a single scale level."""
+ cls_feat = x
+ reg_feat = x
+ for cls_conv in self.cls_convs:
+ cls_feat = cls_conv(cls_feat)
+ for reg_conv in self.reg_convs:
+ reg_feat = reg_conv(reg_feat)
+
+ loc_pred = self.conv_loc(cls_feat)
+ shape_pred = self.conv_shape(reg_feat)
+
+ cls_feat = self.feature_adaption_cls(cls_feat, shape_pred)
+ reg_feat = self.feature_adaption_reg(reg_feat, shape_pred)
+
+ if not self.training:
+ mask = loc_pred.sigmoid()[0] >= self.loc_filter_thr
+ else:
+ mask = None
+ cls_score = self.retina_cls(cls_feat, mask)
+ bbox_pred = self.retina_reg(reg_feat, mask)
+ return cls_score, bbox_pred, shape_pred, loc_pred
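+
+
+# A minimal, self-contained sketch (illustrative only; the threshold 0.01 is
+# an assumption mirroring common guided-anchoring configs) of the test-time
+# gating above: a sigmoid on loc_pred plus a threshold yields the boolean
+# mask handed to the MaskedConv2d cls / reg branches.
+def _ga_location_mask_sketch(loc_filter_thr=0.01):
+    import torch
+    loc_pred = torch.randn(1, 1, 32, 32)      # raw location logits
+    mask = loc_pred.sigmoid()[0] >= loc_filter_thr
+    # fraction of spatial positions kept for the masked convolutions
+    keep_ratio = mask.float().mean().item()
+    return mask, keep_ratio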
diff --git a/annotator/uniformer/mmdet/models/dense_heads/ga_rpn_head.py b/annotator/uniformer/mmdet/models/dense_heads/ga_rpn_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..2ec0d4fdd3475bfbd2e541a6e8130b1df9ad861a
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/dense_heads/ga_rpn_head.py
@@ -0,0 +1,171 @@
+import copy
+import warnings
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from mmcv import ConfigDict
+from mmcv.cnn import normal_init
+from mmcv.ops import nms
+
+from ..builder import HEADS
+from .guided_anchor_head import GuidedAnchorHead
+from .rpn_test_mixin import RPNTestMixin
+
+
+@HEADS.register_module()
+class GARPNHead(RPNTestMixin, GuidedAnchorHead):
+ """Guided-Anchor-based RPN head."""
+
+ def __init__(self, in_channels, **kwargs):
+ super(GARPNHead, self).__init__(1, in_channels, **kwargs)
+
+ def _init_layers(self):
+ """Initialize layers of the head."""
+ self.rpn_conv = nn.Conv2d(
+ self.in_channels, self.feat_channels, 3, padding=1)
+ super(GARPNHead, self)._init_layers()
+
+ def init_weights(self):
+ """Initialize weights of the head."""
+ normal_init(self.rpn_conv, std=0.01)
+ super(GARPNHead, self).init_weights()
+
+ def forward_single(self, x):
+ """Forward feature of a single scale level."""
+
+ x = self.rpn_conv(x)
+ x = F.relu(x, inplace=True)
+ (cls_score, bbox_pred, shape_pred,
+ loc_pred) = super(GARPNHead, self).forward_single(x)
+ return cls_score, bbox_pred, shape_pred, loc_pred
+
+ def loss(self,
+ cls_scores,
+ bbox_preds,
+ shape_preds,
+ loc_preds,
+ gt_bboxes,
+ img_metas,
+ gt_bboxes_ignore=None):
+ losses = super(GARPNHead, self).loss(
+ cls_scores,
+ bbox_preds,
+ shape_preds,
+ loc_preds,
+ gt_bboxes,
+ None,
+ img_metas,
+ gt_bboxes_ignore=gt_bboxes_ignore)
+ return dict(
+ loss_rpn_cls=losses['loss_cls'],
+ loss_rpn_bbox=losses['loss_bbox'],
+ loss_anchor_shape=losses['loss_shape'],
+ loss_anchor_loc=losses['loss_loc'])
+
+ def _get_bboxes_single(self,
+ cls_scores,
+ bbox_preds,
+ mlvl_anchors,
+ mlvl_masks,
+ img_shape,
+ scale_factor,
+ cfg,
+ rescale=False):
+ cfg = self.test_cfg if cfg is None else cfg
+
+ cfg = copy.deepcopy(cfg)
+
+ # deprecate arguments warning
+ if 'nms' not in cfg or 'max_num' in cfg or 'nms_thr' in cfg:
+ warnings.warn(
+ 'In rpn_proposal or test_cfg, '
+ 'nms_thr has been moved to a dict named nms as '
+ 'iou_threshold, max_num has been renamed as max_per_img, '
+ 'name of original arguments and the way to specify '
+ 'iou_threshold of NMS will be deprecated.')
+ if 'nms' not in cfg:
+ cfg.nms = ConfigDict(dict(type='nms', iou_threshold=cfg.nms_thr))
+ if 'max_num' in cfg:
+ if 'max_per_img' in cfg:
+                assert cfg.max_num == cfg.max_per_img, f'You ' \
+                    f'set max_num and max_per_img at the same time, ' \
+                    f'but get {cfg.max_num} ' \
+                    f'and {cfg.max_per_img} respectively. ' \
+                    'Please delete max_num which will be deprecated.'
+ else:
+ cfg.max_per_img = cfg.max_num
+ if 'nms_thr' in cfg:
+ assert cfg.nms.iou_threshold == cfg.nms_thr, f'You set ' \
+ f'iou_threshold in nms and ' \
+ f'nms_thr at the same time, but get ' \
+ f'{cfg.nms.iou_threshold} and {cfg.nms_thr}' \
+ f' respectively. Please delete the ' \
+ f'nms_thr which will be deprecated.'
+
+ assert cfg.nms.get('type', 'nms') == 'nms', 'GARPNHead only support ' \
+ 'naive nms.'
+
+ mlvl_proposals = []
+ for idx in range(len(cls_scores)):
+ rpn_cls_score = cls_scores[idx]
+ rpn_bbox_pred = bbox_preds[idx]
+ anchors = mlvl_anchors[idx]
+ mask = mlvl_masks[idx]
+ assert rpn_cls_score.size()[-2:] == rpn_bbox_pred.size()[-2:]
+ # if no location is kept, end.
+ if mask.sum() == 0:
+ continue
+ rpn_cls_score = rpn_cls_score.permute(1, 2, 0)
+ if self.use_sigmoid_cls:
+ rpn_cls_score = rpn_cls_score.reshape(-1)
+ scores = rpn_cls_score.sigmoid()
+ else:
+ rpn_cls_score = rpn_cls_score.reshape(-1, 2)
+                # note that we set FG labels to [0, num_class-1]
+ # since mmdet v2.0
+ # BG cat_id: num_class
+ scores = rpn_cls_score.softmax(dim=1)[:, :-1]
+ # filter scores, bbox_pred w.r.t. mask.
+ # anchors are filtered in get_anchors() beforehand.
+ scores = scores[mask]
+ rpn_bbox_pred = rpn_bbox_pred.permute(1, 2, 0).reshape(-1,
+ 4)[mask, :]
+ if scores.dim() == 0:
+ rpn_bbox_pred = rpn_bbox_pred.unsqueeze(0)
+ anchors = anchors.unsqueeze(0)
+ scores = scores.unsqueeze(0)
+ # filter anchors, bbox_pred, scores w.r.t. scores
+ if cfg.nms_pre > 0 and scores.shape[0] > cfg.nms_pre:
+ _, topk_inds = scores.topk(cfg.nms_pre)
+ rpn_bbox_pred = rpn_bbox_pred[topk_inds, :]
+ anchors = anchors[topk_inds, :]
+ scores = scores[topk_inds]
+ # get proposals w.r.t. anchors and rpn_bbox_pred
+ proposals = self.bbox_coder.decode(
+ anchors, rpn_bbox_pred, max_shape=img_shape)
+ # filter out too small bboxes
+ if cfg.min_bbox_size > 0:
+ w = proposals[:, 2] - proposals[:, 0]
+ h = proposals[:, 3] - proposals[:, 1]
+ valid_inds = torch.nonzero(
+ (w >= cfg.min_bbox_size) & (h >= cfg.min_bbox_size),
+ as_tuple=False).squeeze()
+ proposals = proposals[valid_inds, :]
+ scores = scores[valid_inds]
+ # NMS in current level
+ proposals, _ = nms(proposals, scores, cfg.nms.iou_threshold)
+ proposals = proposals[:cfg.nms_post, :]
+ mlvl_proposals.append(proposals)
+ proposals = torch.cat(mlvl_proposals, 0)
+ if cfg.get('nms_across_levels', False):
+ # NMS across multi levels
+ proposals, _ = nms(proposals[:, :4], proposals[:, -1],
+ cfg.nms.iou_threshold)
+ proposals = proposals[:cfg.max_per_img, :]
+ else:
+ scores = proposals[:, 4]
+ num = min(cfg.max_per_img, proposals.shape[0])
+ _, topk_inds = scores.topk(num)
+ proposals = proposals[topk_inds, :]
+ return proposals
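+
+
+# A minimal, self-contained sketch (illustrative only; the boxes and
+# threshold are made up) of the min_bbox_size filter used above: proposals
+# whose width or height falls below the threshold are dropped before the
+# per-level NMS.
+def _rpn_min_size_filter_sketch(min_bbox_size=4.0):
+    import torch
+    proposals = torch.tensor([[0., 0., 10., 10.],
+                              [5., 5., 7., 30.],    # too narrow (w = 2)
+                              [2., 2., 40., 9.]])
+    scores = torch.tensor([0.9, 0.8, 0.7])
+    w = proposals[:, 2] - proposals[:, 0]
+    h = proposals[:, 3] - proposals[:, 1]
+    valid_inds = torch.nonzero(
+        (w >= min_bbox_size) & (h >= min_bbox_size), as_tuple=False).squeeze()
+    assert valid_inds.tolist() == [0, 2]     # the narrow box is dropped
+    return proposals[valid_inds, :], scores[valid_inds]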
diff --git a/annotator/uniformer/mmdet/models/dense_heads/gfl_head.py b/annotator/uniformer/mmdet/models/dense_heads/gfl_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..961bc92237663ad5343d3d08eb9c0e4e811ada05
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/dense_heads/gfl_head.py
@@ -0,0 +1,647 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from mmcv.cnn import ConvModule, Scale, bias_init_with_prob, normal_init
+from mmcv.runner import force_fp32
+
+from mmdet.core import (anchor_inside_flags, bbox2distance, bbox_overlaps,
+ build_assigner, build_sampler, distance2bbox,
+ images_to_levels, multi_apply, multiclass_nms,
+ reduce_mean, unmap)
+from ..builder import HEADS, build_loss
+from .anchor_head import AnchorHead
+
+
+class Integral(nn.Module):
+ """A fixed layer for calculating integral result from distribution.
+
+    This layer calculates the target location by :math:`sum{P(y_i) * y_i}`,
+    where P(y_i) denotes the softmax vector that represents the discrete
+    distribution and y_i denotes the discrete set, usually
+    {0, 1, 2, ..., reg_max}.
+
+ Args:
+ reg_max (int): The maximal value of the discrete set. Default: 16. You
+ may want to reset it according to your new dataset or related
+ settings.
+ """
+
+ def __init__(self, reg_max=16):
+ super(Integral, self).__init__()
+ self.reg_max = reg_max
+ self.register_buffer('project',
+ torch.linspace(0, self.reg_max, self.reg_max + 1))
+
+ def forward(self, x):
+ """Forward feature from the regression head to get integral result of
+ bounding box location.
+
+ Args:
+ x (Tensor): Features of the regression head, shape (N, 4*(n+1)),
+ n is self.reg_max.
+
+ Returns:
+ x (Tensor): Integral result of box locations, i.e., distance
+ offsets from the box center in four directions, shape (N, 4).
+ """
+ x = F.softmax(x.reshape(-1, self.reg_max + 1), dim=1)
+ x = F.linear(x, self.project.type_as(x)).reshape(-1, 4)
+ return x
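+
+
+# A minimal, self-contained sketch (illustrative only; the bin index and
+# shapes are made up) of the Integral layer above: four discrete
+# distributions over {0, ..., reg_max} are turned into four expected
+# distances, so a distribution peaked on bin k integrates to roughly k.
+def _integral_sketch(reg_max=16):
+    import torch
+    layer = Integral(reg_max=reg_max)
+    logits = torch.full((2, 4 * (reg_max + 1)), -10.0)
+    # put almost all probability mass on bin 5 for each of the 4 sides
+    logits.view(2, 4, reg_max + 1)[:, :, 5] = 10.0
+    distances = layer(logits)                 # shape (2, 4)
+    assert torch.allclose(distances, torch.full((2, 4), 5.0), atol=1e-3)
+    return distances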
+
+
+@HEADS.register_module()
+class GFLHead(AnchorHead):
+ """Generalized Focal Loss: Learning Qualified and Distributed Bounding
+ Boxes for Dense Object Detection.
+
+    The GFL head structure is similar to that of ATSS; however, GFL uses
+    1) a joint representation for classification and localization quality, and
+    2) a flexible General distribution for bounding box locations,
+    which are supervised by
+    Quality Focal Loss (QFL) and Distribution Focal Loss (DFL), respectively.
+
+ https://arxiv.org/abs/2006.04388
+
+ Args:
+ num_classes (int): Number of categories excluding the background
+ category.
+ in_channels (int): Number of channels in the input feature map.
+ stacked_convs (int): Number of conv layers in cls and reg tower.
+ Default: 4.
+ conv_cfg (dict): dictionary to construct and config conv layer.
+ Default: None.
+ norm_cfg (dict): dictionary to construct and config norm layer.
+ Default: dict(type='GN', num_groups=32, requires_grad=True).
+ loss_qfl (dict): Config of Quality Focal Loss (QFL).
+        reg_max (int): Max value of integral set :math:`{0, ..., reg_max}`
+ in QFL setting. Default: 16.
+ Example:
+ >>> self = GFLHead(11, 7)
+ >>> feats = [torch.rand(1, 7, s, s) for s in [4, 8, 16, 32, 64]]
+ >>> cls_quality_score, bbox_pred = self.forward(feats)
+ >>> assert len(cls_quality_score) == len(self.scales)
+ """
+
+ def __init__(self,
+ num_classes,
+ in_channels,
+ stacked_convs=4,
+ conv_cfg=None,
+ norm_cfg=dict(type='GN', num_groups=32, requires_grad=True),
+ loss_dfl=dict(type='DistributionFocalLoss', loss_weight=0.25),
+ reg_max=16,
+ **kwargs):
+ self.stacked_convs = stacked_convs
+ self.conv_cfg = conv_cfg
+ self.norm_cfg = norm_cfg
+ self.reg_max = reg_max
+ super(GFLHead, self).__init__(num_classes, in_channels, **kwargs)
+
+ self.sampling = False
+ if self.train_cfg:
+ self.assigner = build_assigner(self.train_cfg.assigner)
+            # sampling=False (set above), so use PseudoSampler
+ sampler_cfg = dict(type='PseudoSampler')
+ self.sampler = build_sampler(sampler_cfg, context=self)
+
+ self.integral = Integral(self.reg_max)
+ self.loss_dfl = build_loss(loss_dfl)
+
+ def _init_layers(self):
+ """Initialize layers of the head."""
+ self.relu = nn.ReLU(inplace=True)
+ self.cls_convs = nn.ModuleList()
+ self.reg_convs = nn.ModuleList()
+ for i in range(self.stacked_convs):
+ chn = self.in_channels if i == 0 else self.feat_channels
+ self.cls_convs.append(
+ ConvModule(
+ chn,
+ self.feat_channels,
+ 3,
+ stride=1,
+ padding=1,
+ conv_cfg=self.conv_cfg,
+ norm_cfg=self.norm_cfg))
+ self.reg_convs.append(
+ ConvModule(
+ chn,
+ self.feat_channels,
+ 3,
+ stride=1,
+ padding=1,
+ conv_cfg=self.conv_cfg,
+ norm_cfg=self.norm_cfg))
+ assert self.num_anchors == 1, 'anchor free version'
+ self.gfl_cls = nn.Conv2d(
+ self.feat_channels, self.cls_out_channels, 3, padding=1)
+ self.gfl_reg = nn.Conv2d(
+ self.feat_channels, 4 * (self.reg_max + 1), 3, padding=1)
+ self.scales = nn.ModuleList(
+ [Scale(1.0) for _ in self.anchor_generator.strides])
+
+ def init_weights(self):
+ """Initialize weights of the head."""
+ for m in self.cls_convs:
+ normal_init(m.conv, std=0.01)
+ for m in self.reg_convs:
+ normal_init(m.conv, std=0.01)
+ bias_cls = bias_init_with_prob(0.01)
+ normal_init(self.gfl_cls, std=0.01, bias=bias_cls)
+ normal_init(self.gfl_reg, std=0.01)
+
+ def forward(self, feats):
+ """Forward features from the upstream network.
+
+ Args:
+ feats (tuple[Tensor]): Features from the upstream network, each is
+ a 4D-tensor.
+
+ Returns:
+ tuple: Usually a tuple of classification scores and bbox prediction
+ cls_scores (list[Tensor]): Classification and quality (IoU)
+ joint scores for all scale levels, each is a 4D-tensor,
+ the channel number is num_classes.
+ bbox_preds (list[Tensor]): Box distribution logits for all
+ scale levels, each is a 4D-tensor, the channel number is
+ 4*(n+1), n is max value of integral set.
+ """
+ return multi_apply(self.forward_single, feats, self.scales)
+
+ def forward_single(self, x, scale):
+ """Forward feature of a single scale level.
+
+ Args:
+ x (Tensor): Features of a single scale level.
+            scale (:obj:`mmcv.cnn.Scale`): Learnable scale module to resize
+ the bbox prediction.
+
+ Returns:
+ tuple:
+ cls_score (Tensor): Cls and quality joint scores for a single
+                    scale level; the channel number is num_classes.
+ bbox_pred (Tensor): Box distribution logits for a single scale
+ level, the channel number is 4*(n+1), n is max value of
+ integral set.
+ """
+ cls_feat = x
+ reg_feat = x
+ for cls_conv in self.cls_convs:
+ cls_feat = cls_conv(cls_feat)
+ for reg_conv in self.reg_convs:
+ reg_feat = reg_conv(reg_feat)
+ cls_score = self.gfl_cls(cls_feat)
+ bbox_pred = scale(self.gfl_reg(reg_feat)).float()
+ return cls_score, bbox_pred
+
+ def anchor_center(self, anchors):
+ """Get anchor centers from anchors.
+
+ Args:
+ anchors (Tensor): Anchor list with shape (N, 4), "xyxy" format.
+
+ Returns:
+ Tensor: Anchor centers with shape (N, 2), "xy" format.
+ """
+ anchors_cx = (anchors[..., 2] + anchors[..., 0]) / 2
+ anchors_cy = (anchors[..., 3] + anchors[..., 1]) / 2
+ return torch.stack([anchors_cx, anchors_cy], dim=-1)
+
+ def loss_single(self, anchors, cls_score, bbox_pred, labels, label_weights,
+ bbox_targets, stride, num_total_samples):
+ """Compute loss of a single scale level.
+
+ Args:
+ anchors (Tensor): Box reference for each scale level with shape
+ (N, num_total_anchors, 4).
+ cls_score (Tensor): Cls and quality joint scores for each scale
+ level has shape (N, num_classes, H, W).
+ bbox_pred (Tensor): Box distribution logits for each scale
+ level with shape (N, 4*(n+1), H, W), n is max value of integral
+ set.
+ labels (Tensor): Labels of each anchors with shape
+ (N, num_total_anchors).
+ label_weights (Tensor): Label weights of each anchor with shape
+ (N, num_total_anchors)
+            bbox_targets (Tensor): BBox regression targets of each anchor with
+ shape (N, num_total_anchors, 4).
+ stride (tuple): Stride in this scale level.
+ num_total_samples (int): Number of positive samples that is
+ reduced over all GPUs.
+
+ Returns:
+ dict[str, Tensor]: A dictionary of loss components.
+ """
+ assert stride[0] == stride[1], 'h stride is not equal to w stride!'
+ anchors = anchors.reshape(-1, 4)
+ cls_score = cls_score.permute(0, 2, 3,
+ 1).reshape(-1, self.cls_out_channels)
+ bbox_pred = bbox_pred.permute(0, 2, 3,
+ 1).reshape(-1, 4 * (self.reg_max + 1))
+ bbox_targets = bbox_targets.reshape(-1, 4)
+ labels = labels.reshape(-1)
+ label_weights = label_weights.reshape(-1)
+
+ # FG cat_id: [0, num_classes -1], BG cat_id: num_classes
+ bg_class_ind = self.num_classes
+ pos_inds = ((labels >= 0)
+ & (labels < bg_class_ind)).nonzero().squeeze(1)
+ score = label_weights.new_zeros(labels.shape)
+
+ if len(pos_inds) > 0:
+ pos_bbox_targets = bbox_targets[pos_inds]
+ pos_bbox_pred = bbox_pred[pos_inds]
+ pos_anchors = anchors[pos_inds]
+ pos_anchor_centers = self.anchor_center(pos_anchors) / stride[0]
+
+ weight_targets = cls_score.detach().sigmoid()
+ weight_targets = weight_targets.max(dim=1)[0][pos_inds]
+ pos_bbox_pred_corners = self.integral(pos_bbox_pred)
+ pos_decode_bbox_pred = distance2bbox(pos_anchor_centers,
+ pos_bbox_pred_corners)
+ pos_decode_bbox_targets = pos_bbox_targets / stride[0]
+ score[pos_inds] = bbox_overlaps(
+ pos_decode_bbox_pred.detach(),
+ pos_decode_bbox_targets,
+ is_aligned=True)
+ pred_corners = pos_bbox_pred.reshape(-1, self.reg_max + 1)
+ target_corners = bbox2distance(pos_anchor_centers,
+ pos_decode_bbox_targets,
+ self.reg_max).reshape(-1)
+
+ # regression loss
+ loss_bbox = self.loss_bbox(
+ pos_decode_bbox_pred,
+ pos_decode_bbox_targets,
+ weight=weight_targets,
+ avg_factor=1.0)
+
+ # dfl loss
+ loss_dfl = self.loss_dfl(
+ pred_corners,
+ target_corners,
+ weight=weight_targets[:, None].expand(-1, 4).reshape(-1),
+ avg_factor=4.0)
+ else:
+ loss_bbox = bbox_pred.sum() * 0
+ loss_dfl = bbox_pred.sum() * 0
+ weight_targets = bbox_pred.new_tensor(0)
+
+ # cls (qfl) loss
+ loss_cls = self.loss_cls(
+ cls_score, (labels, score),
+ weight=label_weights,
+ avg_factor=num_total_samples)
+
+ return loss_cls, loss_bbox, loss_dfl, weight_targets.sum()
+
+ @force_fp32(apply_to=('cls_scores', 'bbox_preds'))
+ def loss(self,
+ cls_scores,
+ bbox_preds,
+ gt_bboxes,
+ gt_labels,
+ img_metas,
+ gt_bboxes_ignore=None):
+ """Compute losses of the head.
+
+ Args:
+ cls_scores (list[Tensor]): Cls and quality scores for each scale
+ level has shape (N, num_classes, H, W).
+ bbox_preds (list[Tensor]): Box distribution logits for each scale
+ level with shape (N, 4*(n+1), H, W), n is max value of integral
+ set.
+ gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
+ shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
+ gt_labels (list[Tensor]): class indices corresponding to each box
+ img_metas (list[dict]): Meta information of each image, e.g.,
+ image size, scaling factor, etc.
+ gt_bboxes_ignore (list[Tensor] | None): specify which bounding
+ boxes can be ignored when computing the loss.
+
+ Returns:
+ dict[str, Tensor]: A dictionary of loss components.
+ """
+
+ featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
+ assert len(featmap_sizes) == self.anchor_generator.num_levels
+
+ device = cls_scores[0].device
+ anchor_list, valid_flag_list = self.get_anchors(
+ featmap_sizes, img_metas, device=device)
+ label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1
+
+ cls_reg_targets = self.get_targets(
+ anchor_list,
+ valid_flag_list,
+ gt_bboxes,
+ img_metas,
+ gt_bboxes_ignore_list=gt_bboxes_ignore,
+ gt_labels_list=gt_labels,
+ label_channels=label_channels)
+ if cls_reg_targets is None:
+ return None
+
+ (anchor_list, labels_list, label_weights_list, bbox_targets_list,
+ bbox_weights_list, num_total_pos, num_total_neg) = cls_reg_targets
+
+ num_total_samples = reduce_mean(
+ torch.tensor(num_total_pos, dtype=torch.float,
+ device=device)).item()
+ num_total_samples = max(num_total_samples, 1.0)
+
+ losses_cls, losses_bbox, losses_dfl,\
+ avg_factor = multi_apply(
+ self.loss_single,
+ anchor_list,
+ cls_scores,
+ bbox_preds,
+ labels_list,
+ label_weights_list,
+ bbox_targets_list,
+ self.anchor_generator.strides,
+ num_total_samples=num_total_samples)
+
+ avg_factor = sum(avg_factor)
+ avg_factor = reduce_mean(avg_factor).item()
+ losses_bbox = list(map(lambda x: x / avg_factor, losses_bbox))
+ losses_dfl = list(map(lambda x: x / avg_factor, losses_dfl))
+ return dict(
+ loss_cls=losses_cls, loss_bbox=losses_bbox, loss_dfl=losses_dfl)
+
+ def _get_bboxes(self,
+ cls_scores,
+ bbox_preds,
+ mlvl_anchors,
+ img_shapes,
+ scale_factors,
+ cfg,
+ rescale=False,
+ with_nms=True):
+ """Transform outputs for a single batch item into labeled boxes.
+
+ Args:
+ cls_scores (list[Tensor]): Box scores for a single scale level
+ has shape (N, num_classes, H, W).
+ bbox_preds (list[Tensor]): Box distribution logits for a single
+ scale level with shape (N, 4*(n+1), H, W), n is max value of
+ integral set.
+ mlvl_anchors (list[Tensor]): Box reference for a single scale level
+ with shape (num_total_anchors, 4).
+ img_shapes (list[tuple[int]]): Shape of the input image,
+ list[(height, width, 3)].
+            scale_factors (list[ndarray]): Scale factor of the image, arranged
+                as
+ (w_scale, h_scale, w_scale, h_scale).
+ cfg (mmcv.Config | None): Test / postprocessing configuration,
+ if None, test_cfg would be used.
+ rescale (bool): If True, return boxes in original image space.
+ Default: False.
+ with_nms (bool): If True, do nms before return boxes.
+ Default: True.
+
+ Returns:
+ list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple.
+                The first item is an (n, 5) tensor, where the 5 columns are
+                (tl_x, tl_y, br_x, br_y, score) and the score is between
+                0 and 1.
+ The shape of the second tensor in the tuple is (n,), and
+ each element represents the class label of the corresponding
+ box.
+ """
+ cfg = self.test_cfg if cfg is None else cfg
+ assert len(cls_scores) == len(bbox_preds) == len(mlvl_anchors)
+ batch_size = cls_scores[0].shape[0]
+
+ mlvl_bboxes = []
+ mlvl_scores = []
+ for cls_score, bbox_pred, stride, anchors in zip(
+ cls_scores, bbox_preds, self.anchor_generator.strides,
+ mlvl_anchors):
+ assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
+ assert stride[0] == stride[1]
+ scores = cls_score.permute(0, 2, 3, 1).reshape(
+ batch_size, -1, self.cls_out_channels).sigmoid()
+ bbox_pred = bbox_pred.permute(0, 2, 3, 1)
+
+ bbox_pred = self.integral(bbox_pred) * stride[0]
+ bbox_pred = bbox_pred.reshape(batch_size, -1, 4)
+
+ nms_pre = cfg.get('nms_pre', -1)
+ if nms_pre > 0 and scores.shape[1] > nms_pre:
+ max_scores, _ = scores.max(-1)
+ _, topk_inds = max_scores.topk(nms_pre)
+ batch_inds = torch.arange(batch_size).view(
+ -1, 1).expand_as(topk_inds).long()
+ anchors = anchors[topk_inds, :]
+ bbox_pred = bbox_pred[batch_inds, topk_inds, :]
+ scores = scores[batch_inds, topk_inds, :]
+ else:
+ anchors = anchors.expand_as(bbox_pred)
+
+ bboxes = distance2bbox(
+ self.anchor_center(anchors), bbox_pred, max_shape=img_shapes)
+ mlvl_bboxes.append(bboxes)
+ mlvl_scores.append(scores)
+
+ batch_mlvl_bboxes = torch.cat(mlvl_bboxes, dim=1)
+ if rescale:
+ batch_mlvl_bboxes /= batch_mlvl_bboxes.new_tensor(
+ scale_factors).unsqueeze(1)
+
+ batch_mlvl_scores = torch.cat(mlvl_scores, dim=1)
+ # Add a dummy background class to the backend when using sigmoid
+        # note that we set FG labels to [0, num_class-1] since mmdet v2.0
+ # BG cat_id: num_class
+ padding = batch_mlvl_scores.new_zeros(batch_size,
+ batch_mlvl_scores.shape[1], 1)
+ batch_mlvl_scores = torch.cat([batch_mlvl_scores, padding], dim=-1)
+
+ if with_nms:
+ det_results = []
+ for (mlvl_bboxes, mlvl_scores) in zip(batch_mlvl_bboxes,
+ batch_mlvl_scores):
+ det_bbox, det_label = multiclass_nms(mlvl_bboxes, mlvl_scores,
+ cfg.score_thr, cfg.nms,
+ cfg.max_per_img)
+ det_results.append(tuple([det_bbox, det_label]))
+ else:
+ det_results = [
+ tuple(mlvl_bs)
+ for mlvl_bs in zip(batch_mlvl_bboxes, batch_mlvl_scores)
+ ]
+ return det_results
+
+ def get_targets(self,
+ anchor_list,
+ valid_flag_list,
+ gt_bboxes_list,
+ img_metas,
+ gt_bboxes_ignore_list=None,
+ gt_labels_list=None,
+ label_channels=1,
+ unmap_outputs=True):
+ """Get targets for GFL head.
+
+ This method is almost the same as `AnchorHead.get_targets()`. Besides
+ returning the targets as the parent method does, it also returns the
+ anchors as the first element of the returned tuple.
+ """
+ num_imgs = len(img_metas)
+ assert len(anchor_list) == len(valid_flag_list) == num_imgs
+
+ # anchor number of multi levels
+ num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]
+ num_level_anchors_list = [num_level_anchors] * num_imgs
+
+ # concat all level anchors and flags to a single tensor
+ for i in range(num_imgs):
+ assert len(anchor_list[i]) == len(valid_flag_list[i])
+ anchor_list[i] = torch.cat(anchor_list[i])
+ valid_flag_list[i] = torch.cat(valid_flag_list[i])
+
+ # compute targets for each image
+ if gt_bboxes_ignore_list is None:
+ gt_bboxes_ignore_list = [None for _ in range(num_imgs)]
+ if gt_labels_list is None:
+ gt_labels_list = [None for _ in range(num_imgs)]
+ (all_anchors, all_labels, all_label_weights, all_bbox_targets,
+ all_bbox_weights, pos_inds_list, neg_inds_list) = multi_apply(
+ self._get_target_single,
+ anchor_list,
+ valid_flag_list,
+ num_level_anchors_list,
+ gt_bboxes_list,
+ gt_bboxes_ignore_list,
+ gt_labels_list,
+ img_metas,
+ label_channels=label_channels,
+ unmap_outputs=unmap_outputs)
+ # no valid anchors
+ if any([labels is None for labels in all_labels]):
+ return None
+ # sampled anchors of all images
+ num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list])
+ num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list])
+ # split targets to a list w.r.t. multiple levels
+ anchors_list = images_to_levels(all_anchors, num_level_anchors)
+ labels_list = images_to_levels(all_labels, num_level_anchors)
+ label_weights_list = images_to_levels(all_label_weights,
+ num_level_anchors)
+ bbox_targets_list = images_to_levels(all_bbox_targets,
+ num_level_anchors)
+ bbox_weights_list = images_to_levels(all_bbox_weights,
+ num_level_anchors)
+ return (anchors_list, labels_list, label_weights_list,
+ bbox_targets_list, bbox_weights_list, num_total_pos,
+ num_total_neg)
+
+ def _get_target_single(self,
+ flat_anchors,
+ valid_flags,
+ num_level_anchors,
+ gt_bboxes,
+ gt_bboxes_ignore,
+ gt_labels,
+ img_meta,
+ label_channels=1,
+ unmap_outputs=True):
+ """Compute regression, classification targets for anchors in a single
+ image.
+
+ Args:
+ flat_anchors (Tensor): Multi-level anchors of the image, which are
+ concatenated into a single tensor of shape (num_anchors, 4)
+ valid_flags (Tensor): Multi level valid flags of the image,
+ which are concatenated into a single tensor of
+ shape (num_anchors,).
+            num_level_anchors (Tensor): Number of anchors of each scale level.
+ gt_bboxes (Tensor): Ground truth bboxes of the image,
+ shape (num_gts, 4).
+ gt_bboxes_ignore (Tensor): Ground truth bboxes to be
+ ignored, shape (num_ignored_gts, 4).
+ gt_labels (Tensor): Ground truth labels of each box,
+ shape (num_gts,).
+ img_meta (dict): Meta info of the image.
+ label_channels (int): Channel of label.
+ unmap_outputs (bool): Whether to map outputs back to the original
+ set of anchors.
+
+ Returns:
+ tuple: N is the number of total anchors in the image.
+ anchors (Tensor): All anchors in the image with shape (N, 4).
+ labels (Tensor): Labels of all anchors in the image with shape
+ (N,).
+ label_weights (Tensor): Label weights of all anchor in the
+ image with shape (N,).
+ bbox_targets (Tensor): BBox targets of all anchors in the
+ image with shape (N, 4).
+ bbox_weights (Tensor): BBox weights of all anchors in the
+ image with shape (N, 4).
+ pos_inds (Tensor): Indices of positive anchor with shape
+ (num_pos,).
+ neg_inds (Tensor): Indices of negative anchor with shape
+ (num_neg,).
+ """
+ inside_flags = anchor_inside_flags(flat_anchors, valid_flags,
+ img_meta['img_shape'][:2],
+ self.train_cfg.allowed_border)
+ if not inside_flags.any():
+ return (None, ) * 7
+ # assign gt and sample anchors
+ anchors = flat_anchors[inside_flags, :]
+
+ num_level_anchors_inside = self.get_num_level_anchors_inside(
+ num_level_anchors, inside_flags)
+ assign_result = self.assigner.assign(anchors, num_level_anchors_inside,
+ gt_bboxes, gt_bboxes_ignore,
+ gt_labels)
+
+ sampling_result = self.sampler.sample(assign_result, anchors,
+ gt_bboxes)
+
+ num_valid_anchors = anchors.shape[0]
+ bbox_targets = torch.zeros_like(anchors)
+ bbox_weights = torch.zeros_like(anchors)
+ labels = anchors.new_full((num_valid_anchors, ),
+ self.num_classes,
+ dtype=torch.long)
+ label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float)
+
+ pos_inds = sampling_result.pos_inds
+ neg_inds = sampling_result.neg_inds
+ if len(pos_inds) > 0:
+ pos_bbox_targets = sampling_result.pos_gt_bboxes
+ bbox_targets[pos_inds, :] = pos_bbox_targets
+ bbox_weights[pos_inds, :] = 1.0
+ if gt_labels is None:
+ # Only rpn gives gt_labels as None
+ # Foreground is the first class
+ labels[pos_inds] = 0
+ else:
+ labels[pos_inds] = gt_labels[
+ sampling_result.pos_assigned_gt_inds]
+ if self.train_cfg.pos_weight <= 0:
+ label_weights[pos_inds] = 1.0
+ else:
+ label_weights[pos_inds] = self.train_cfg.pos_weight
+ if len(neg_inds) > 0:
+ label_weights[neg_inds] = 1.0
+
+ # map up to original set of anchors
+ if unmap_outputs:
+ num_total_anchors = flat_anchors.size(0)
+ anchors = unmap(anchors, num_total_anchors, inside_flags)
+ labels = unmap(
+ labels, num_total_anchors, inside_flags, fill=self.num_classes)
+ label_weights = unmap(label_weights, num_total_anchors,
+ inside_flags)
+ bbox_targets = unmap(bbox_targets, num_total_anchors, inside_flags)
+ bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags)
+
+ return (anchors, labels, label_weights, bbox_targets, bbox_weights,
+ pos_inds, neg_inds)
+
+ def get_num_level_anchors_inside(self, num_level_anchors, inside_flags):
+ split_inside_flags = torch.split(inside_flags, num_level_anchors)
+ num_level_anchors_inside = [
+ int(flags.sum()) for flags in split_inside_flags
+ ]
+ return num_level_anchors_inside
diff --git a/annotator/uniformer/mmdet/models/dense_heads/guided_anchor_head.py b/annotator/uniformer/mmdet/models/dense_heads/guided_anchor_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..997ebb751ade2ebae3fce335a08c46f596c60913
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/dense_heads/guided_anchor_head.py
@@ -0,0 +1,860 @@
+import torch
+import torch.nn as nn
+from mmcv.cnn import bias_init_with_prob, normal_init
+from mmcv.ops import DeformConv2d, MaskedConv2d
+from mmcv.runner import force_fp32
+
+from mmdet.core import (anchor_inside_flags, build_anchor_generator,
+ build_assigner, build_bbox_coder, build_sampler,
+ calc_region, images_to_levels, multi_apply,
+ multiclass_nms, unmap)
+from ..builder import HEADS, build_loss
+from .anchor_head import AnchorHead
+
+
+class FeatureAdaption(nn.Module):
+ """Feature Adaption Module.
+
+ Feature Adaption Module is implemented based on DCN v1.
+ It uses anchor shape prediction rather than feature map to
+ predict offsets of deform conv layer.
+
+ Args:
+ in_channels (int): Number of channels in the input feature map.
+ out_channels (int): Number of channels in the output feature map.
+ kernel_size (int): Deformable conv kernel size.
+ deform_groups (int): Deformable conv group size.
+ """
+
+ def __init__(self,
+ in_channels,
+ out_channels,
+ kernel_size=3,
+ deform_groups=4):
+ super(FeatureAdaption, self).__init__()
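+        # each of the kernel_size * kernel_size deformable sampling points
+        # needs a 2-d (y, x) offset, predicted from the 2-channel shape map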
+ offset_channels = kernel_size * kernel_size * 2
+ self.conv_offset = nn.Conv2d(
+ 2, deform_groups * offset_channels, 1, bias=False)
+ self.conv_adaption = DeformConv2d(
+ in_channels,
+ out_channels,
+ kernel_size=kernel_size,
+ padding=(kernel_size - 1) // 2,
+ deform_groups=deform_groups)
+ self.relu = nn.ReLU(inplace=True)
+
+ def init_weights(self):
+ normal_init(self.conv_offset, std=0.1)
+ normal_init(self.conv_adaption, std=0.01)
+
+ def forward(self, x, shape):
+ offset = self.conv_offset(shape.detach())
+ x = self.relu(self.conv_adaption(x, offset))
+ return x
+
+
+@HEADS.register_module()
+class GuidedAnchorHead(AnchorHead):
+ """Guided-Anchor-based head (GA-RPN, GA-RetinaNet, etc.).
+
+ This GuidedAnchorHead will predict high-quality feature guided
+ anchors and locations where anchors will be kept in inference.
+ There are mainly 3 categories of bounding-boxes.
+
+    - Sampled 9 pairs for target assignment (approxes).
+    - The square boxes that the predicted anchors are based on (squares).
+    - Guided anchors.
+
+ Please refer to https://arxiv.org/abs/1901.03278 for more details.
+
+ Args:
+ num_classes (int): Number of classes.
+ in_channels (int): Number of channels in the input feature map.
+ feat_channels (int): Number of hidden channels.
+ approx_anchor_generator (dict): Config dict for approx generator
+ square_anchor_generator (dict): Config dict for square generator
+ anchor_coder (dict): Config dict for anchor coder
+ bbox_coder (dict): Config dict for bbox coder
+ reg_decoded_bbox (bool): If true, the regression loss would be
+ applied directly on decoded bounding boxes, converting both
+ the predicted boxes and regression targets to absolute
+ coordinates format. Default False. It should be `True` when
+ using `IoULoss`, `GIoULoss`, or `DIoULoss` in the bbox head.
+        deform_groups (int): Group number of DCN in
+ FeatureAdaption module.
+ loc_filter_thr (float): Threshold to filter out unconcerned regions.
+ loss_loc (dict): Config of location loss.
+ loss_shape (dict): Config of anchor shape loss.
+ loss_cls (dict): Config of classification loss.
+ loss_bbox (dict): Config of bbox regression loss.
+ """
+
+ def __init__(
+ self,
+ num_classes,
+ in_channels,
+ feat_channels=256,
+ approx_anchor_generator=dict(
+ type='AnchorGenerator',
+ octave_base_scale=8,
+ scales_per_octave=3,
+ ratios=[0.5, 1.0, 2.0],
+ strides=[4, 8, 16, 32, 64]),
+ square_anchor_generator=dict(
+ type='AnchorGenerator',
+ ratios=[1.0],
+ scales=[8],
+ strides=[4, 8, 16, 32, 64]),
+ anchor_coder=dict(
+ type='DeltaXYWHBBoxCoder',
+ target_means=[.0, .0, .0, .0],
+ target_stds=[1.0, 1.0, 1.0, 1.0]
+ ),
+ bbox_coder=dict(
+ type='DeltaXYWHBBoxCoder',
+ target_means=[.0, .0, .0, .0],
+ target_stds=[1.0, 1.0, 1.0, 1.0]
+ ),
+ reg_decoded_bbox=False,
+ deform_groups=4,
+ loc_filter_thr=0.01,
+ train_cfg=None,
+ test_cfg=None,
+ loss_loc=dict(
+ type='FocalLoss',
+ use_sigmoid=True,
+ gamma=2.0,
+ alpha=0.25,
+ loss_weight=1.0),
+ loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0),
+ loss_cls=dict(
+ type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
+ loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
+ loss_weight=1.0)): # yapf: disable
+ super(AnchorHead, self).__init__()
+ self.in_channels = in_channels
+ self.num_classes = num_classes
+ self.feat_channels = feat_channels
+ self.deform_groups = deform_groups
+ self.loc_filter_thr = loc_filter_thr
+
+ # build approx_anchor_generator and square_anchor_generator
+ assert (approx_anchor_generator['octave_base_scale'] ==
+ square_anchor_generator['scales'][0])
+ assert (approx_anchor_generator['strides'] ==
+ square_anchor_generator['strides'])
+ self.approx_anchor_generator = build_anchor_generator(
+ approx_anchor_generator)
+ self.square_anchor_generator = build_anchor_generator(
+ square_anchor_generator)
+ self.approxs_per_octave = self.approx_anchor_generator \
+ .num_base_anchors[0]
+
+ self.reg_decoded_bbox = reg_decoded_bbox
+
+ # one anchor per location
+ self.num_anchors = 1
+ self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False)
+ self.loc_focal_loss = loss_loc['type'] in ['FocalLoss']
+ self.sampling = loss_cls['type'] not in ['FocalLoss']
+ self.ga_sampling = train_cfg is not None and hasattr(
+ train_cfg, 'ga_sampler')
+ if self.use_sigmoid_cls:
+ self.cls_out_channels = self.num_classes
+ else:
+ self.cls_out_channels = self.num_classes + 1
+
+ # build bbox_coder
+ self.anchor_coder = build_bbox_coder(anchor_coder)
+ self.bbox_coder = build_bbox_coder(bbox_coder)
+
+ # build losses
+ self.loss_loc = build_loss(loss_loc)
+ self.loss_shape = build_loss(loss_shape)
+ self.loss_cls = build_loss(loss_cls)
+ self.loss_bbox = build_loss(loss_bbox)
+
+ self.train_cfg = train_cfg
+ self.test_cfg = test_cfg
+
+ if self.train_cfg:
+ self.assigner = build_assigner(self.train_cfg.assigner)
+ # use PseudoSampler when sampling is False
+ if self.sampling and hasattr(self.train_cfg, 'sampler'):
+ sampler_cfg = self.train_cfg.sampler
+ else:
+ sampler_cfg = dict(type='PseudoSampler')
+ self.sampler = build_sampler(sampler_cfg, context=self)
+
+ self.ga_assigner = build_assigner(self.train_cfg.ga_assigner)
+ if self.ga_sampling:
+ ga_sampler_cfg = self.train_cfg.ga_sampler
+ else:
+ ga_sampler_cfg = dict(type='PseudoSampler')
+ self.ga_sampler = build_sampler(ga_sampler_cfg, context=self)
+
+ self.fp16_enabled = False
+
+ self._init_layers()
+
+ def _init_layers(self):
+ self.relu = nn.ReLU(inplace=True)
+ self.conv_loc = nn.Conv2d(self.in_channels, 1, 1)
+ self.conv_shape = nn.Conv2d(self.in_channels, self.num_anchors * 2, 1)
+ self.feature_adaption = FeatureAdaption(
+ self.in_channels,
+ self.feat_channels,
+ kernel_size=3,
+ deform_groups=self.deform_groups)
+ self.conv_cls = MaskedConv2d(self.feat_channels,
+ self.num_anchors * self.cls_out_channels,
+ 1)
+ self.conv_reg = MaskedConv2d(self.feat_channels, self.num_anchors * 4,
+ 1)
+
+ def init_weights(self):
+ normal_init(self.conv_cls, std=0.01)
+ normal_init(self.conv_reg, std=0.01)
+
+ bias_cls = bias_init_with_prob(0.01)
+ normal_init(self.conv_loc, std=0.01, bias=bias_cls)
+ normal_init(self.conv_shape, std=0.01)
+
+ self.feature_adaption.init_weights()
+
+ def forward_single(self, x):
+ loc_pred = self.conv_loc(x)
+ shape_pred = self.conv_shape(x)
+ x = self.feature_adaption(x, shape_pred)
+ # masked conv is only used during inference for speed-up
+ if not self.training:
+ mask = loc_pred.sigmoid()[0] >= self.loc_filter_thr
+ else:
+ mask = None
+ cls_score = self.conv_cls(x, mask)
+ bbox_pred = self.conv_reg(x, mask)
+ return cls_score, bbox_pred, shape_pred, loc_pred
+
+ def forward(self, feats):
+ return multi_apply(self.forward_single, feats)
+
+ def get_sampled_approxs(self, featmap_sizes, img_metas, device='cuda'):
+ """Get sampled approxs and inside flags according to feature map sizes.
+
+ Args:
+ featmap_sizes (list[tuple]): Multi-level feature map sizes.
+ img_metas (list[dict]): Image meta info.
+ device (torch.device | str): device for returned tensors
+
+ Returns:
+ tuple: approxes of each image, inside flags of each image
+ """
+ num_imgs = len(img_metas)
+
+ # since feature map sizes of all images are the same, we only compute
+ # approxes for one time
+ multi_level_approxs = self.approx_anchor_generator.grid_anchors(
+ featmap_sizes, device=device)
+ approxs_list = [multi_level_approxs for _ in range(num_imgs)]
+
+ # for each image, we compute inside flags of multi level approxes
+ inside_flag_list = []
+ for img_id, img_meta in enumerate(img_metas):
+ multi_level_flags = []
+ multi_level_approxs = approxs_list[img_id]
+
+ # obtain valid flags for each approx first
+ multi_level_approx_flags = self.approx_anchor_generator \
+ .valid_flags(featmap_sizes,
+ img_meta['pad_shape'],
+ device=device)
+
+ for i, flags in enumerate(multi_level_approx_flags):
+ approxs = multi_level_approxs[i]
+ inside_flags_list = []
+                for j in range(self.approxs_per_octave):
+                    split_valid_flags = flags[j::self.approxs_per_octave]
+                    split_approxs = approxs[j::self.approxs_per_octave, :]
+ inside_flags = anchor_inside_flags(
+ split_approxs, split_valid_flags,
+ img_meta['img_shape'][:2],
+ self.train_cfg.allowed_border)
+ inside_flags_list.append(inside_flags)
+ # inside_flag for a position is true if any anchor in this
+ # position is true
+ inside_flags = (
+ torch.stack(inside_flags_list, 0).sum(dim=0) > 0)
+ multi_level_flags.append(inside_flags)
+ inside_flag_list.append(multi_level_flags)
+ return approxs_list, inside_flag_list
+
+ def get_anchors(self,
+ featmap_sizes,
+ shape_preds,
+ loc_preds,
+ img_metas,
+ use_loc_filter=False,
+ device='cuda'):
+ """Get squares according to feature map sizes and guided anchors.
+
+ Args:
+ featmap_sizes (list[tuple]): Multi-level feature map sizes.
+ shape_preds (list[tensor]): Multi-level shape predictions.
+ loc_preds (list[tensor]): Multi-level location predictions.
+ img_metas (list[dict]): Image meta info.
+ use_loc_filter (bool): Use loc filter or not.
+ device (torch.device | str): device for returned tensors
+
+ Returns:
+ tuple: square approxs of each image, guided anchors of each image,
+ loc masks of each image
+ """
+ num_imgs = len(img_metas)
+ num_levels = len(featmap_sizes)
+
+ # since feature map sizes of all images are the same, we only compute
+ # squares for one time
+ multi_level_squares = self.square_anchor_generator.grid_anchors(
+ featmap_sizes, device=device)
+ squares_list = [multi_level_squares for _ in range(num_imgs)]
+
+ # for each image, we compute multi level guided anchors
+ guided_anchors_list = []
+ loc_mask_list = []
+ for img_id, img_meta in enumerate(img_metas):
+ multi_level_guided_anchors = []
+ multi_level_loc_mask = []
+ for i in range(num_levels):
+ squares = squares_list[img_id][i]
+ shape_pred = shape_preds[i][img_id]
+ loc_pred = loc_preds[i][img_id]
+ guided_anchors, loc_mask = self._get_guided_anchors_single(
+ squares,
+ shape_pred,
+ loc_pred,
+ use_loc_filter=use_loc_filter)
+ multi_level_guided_anchors.append(guided_anchors)
+ multi_level_loc_mask.append(loc_mask)
+ guided_anchors_list.append(multi_level_guided_anchors)
+ loc_mask_list.append(multi_level_loc_mask)
+ return squares_list, guided_anchors_list, loc_mask_list
+
+ def _get_guided_anchors_single(self,
+ squares,
+ shape_pred,
+ loc_pred,
+ use_loc_filter=False):
+ """Get guided anchors and loc masks for a single level.
+
+ Args:
+            squares (tensor): Squares of a single level.
+            shape_pred (tensor): Shape predictions of a single level.
+            loc_pred (tensor): Location predictions of a single level.
+            use_loc_filter (bool): Whether to filter guided anchors by the
+                predicted location scores.
+
+ Returns:
+ tuple: guided anchors, location masks
+ """
+ # calculate location filtering mask
+ loc_pred = loc_pred.sigmoid().detach()
+ if use_loc_filter:
+ loc_mask = loc_pred >= self.loc_filter_thr
+ else:
+ loc_mask = loc_pred >= 0.0
+ mask = loc_mask.permute(1, 2, 0).expand(-1, -1, self.num_anchors)
+ mask = mask.contiguous().view(-1)
+ # calculate guided anchors
+ squares = squares[mask]
+ anchor_deltas = shape_pred.permute(1, 2, 0).contiguous().view(
+ -1, 2).detach()[mask]
+ bbox_deltas = anchor_deltas.new_full(squares.size(), 0)
+ bbox_deltas[:, 2:] = anchor_deltas
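+        # only dw/dh are predicted; dx/dy stay zero, so decoding keeps each
+        # square's center and only adapts its width and height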
+ guided_anchors = self.anchor_coder.decode(
+ squares, bbox_deltas, wh_ratio_clip=1e-6)
+ return guided_anchors, mask
+
+ def ga_loc_targets(self, gt_bboxes_list, featmap_sizes):
+ """Compute location targets for guided anchoring.
+
+ Each feature map is divided into positive, negative and ignore regions.
+ - positive regions: target 1, weight 1
+ - ignore regions: target 0, weight 0
+ - negative regions: target 0, weight 0.1
+
+ Args:
+ gt_bboxes_list (list[Tensor]): Gt bboxes of each image.
+ featmap_sizes (list[tuple]): Multi level sizes of each feature
+ maps.
+
+ Returns:
+ tuple
+ """
+ anchor_scale = self.approx_anchor_generator.octave_base_scale
+ anchor_strides = self.approx_anchor_generator.strides
+ # Currently only supports same stride in x and y direction.
+ for stride in anchor_strides:
+ assert (stride[0] == stride[1])
+ anchor_strides = [stride[0] for stride in anchor_strides]
+
+ center_ratio = self.train_cfg.center_ratio
+ ignore_ratio = self.train_cfg.ignore_ratio
+ img_per_gpu = len(gt_bboxes_list)
+ num_lvls = len(featmap_sizes)
+ r1 = (1 - center_ratio) / 2
+ r2 = (1 - ignore_ratio) / 2
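+        # r1/r2 are the margins handed to calc_region() to shrink each gt box
+        # to its central (positive) region and the larger ignore region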
+ all_loc_targets = []
+ all_loc_weights = []
+ all_ignore_map = []
+ for lvl_id in range(num_lvls):
+ h, w = featmap_sizes[lvl_id]
+ loc_targets = torch.zeros(
+ img_per_gpu,
+ 1,
+ h,
+ w,
+ device=gt_bboxes_list[0].device,
+ dtype=torch.float32)
+ loc_weights = torch.full_like(loc_targets, -1)
+ ignore_map = torch.zeros_like(loc_targets)
+ all_loc_targets.append(loc_targets)
+ all_loc_weights.append(loc_weights)
+ all_ignore_map.append(ignore_map)
+ for img_id in range(img_per_gpu):
+ gt_bboxes = gt_bboxes_list[img_id]
+ scale = torch.sqrt((gt_bboxes[:, 2] - gt_bboxes[:, 0]) *
+ (gt_bboxes[:, 3] - gt_bboxes[:, 1]))
+ min_anchor_size = scale.new_full(
+ (1, ), float(anchor_scale * anchor_strides[0]))
+ # assign gt bboxes to different feature levels w.r.t. their scales
+ target_lvls = torch.floor(
+ torch.log2(scale) - torch.log2(min_anchor_size) + 0.5)
+ target_lvls = target_lvls.clamp(min=0, max=num_lvls - 1).long()
+ for gt_id in range(gt_bboxes.size(0)):
+ lvl = target_lvls[gt_id].item()
+ # rescaled to corresponding feature map
+ gt_ = gt_bboxes[gt_id, :4] / anchor_strides[lvl]
+ # calculate ignore regions
+ ignore_x1, ignore_y1, ignore_x2, ignore_y2 = calc_region(
+ gt_, r2, featmap_sizes[lvl])
+ # calculate positive (center) regions
+ ctr_x1, ctr_y1, ctr_x2, ctr_y2 = calc_region(
+ gt_, r1, featmap_sizes[lvl])
+ all_loc_targets[lvl][img_id, 0, ctr_y1:ctr_y2 + 1,
+ ctr_x1:ctr_x2 + 1] = 1
+ all_loc_weights[lvl][img_id, 0, ignore_y1:ignore_y2 + 1,
+ ignore_x1:ignore_x2 + 1] = 0
+ all_loc_weights[lvl][img_id, 0, ctr_y1:ctr_y2 + 1,
+ ctr_x1:ctr_x2 + 1] = 1
+ # calculate ignore map on nearby low level feature
+ if lvl > 0:
+ d_lvl = lvl - 1
+ # rescaled to corresponding feature map
+ gt_ = gt_bboxes[gt_id, :4] / anchor_strides[d_lvl]
+ ignore_x1, ignore_y1, ignore_x2, ignore_y2 = calc_region(
+ gt_, r2, featmap_sizes[d_lvl])
+ all_ignore_map[d_lvl][img_id, 0, ignore_y1:ignore_y2 + 1,
+ ignore_x1:ignore_x2 + 1] = 1
+ # calculate ignore map on nearby high level feature
+ if lvl < num_lvls - 1:
+ u_lvl = lvl + 1
+ # rescaled to corresponding feature map
+ gt_ = gt_bboxes[gt_id, :4] / anchor_strides[u_lvl]
+ ignore_x1, ignore_y1, ignore_x2, ignore_y2 = calc_region(
+ gt_, r2, featmap_sizes[u_lvl])
+ all_ignore_map[u_lvl][img_id, 0, ignore_y1:ignore_y2 + 1,
+ ignore_x1:ignore_x2 + 1] = 1
+ for lvl_id in range(num_lvls):
+ # ignore negative regions w.r.t. ignore map
+ all_loc_weights[lvl_id][(all_loc_weights[lvl_id] < 0)
+ & (all_ignore_map[lvl_id] > 0)] = 0
+ # set negative regions with weight 0.1
+ all_loc_weights[lvl_id][all_loc_weights[lvl_id] < 0] = 0.1
+ # loc average factor to balance loss
+ loc_avg_factor = sum(
+ [t.size(0) * t.size(-1) * t.size(-2)
+ for t in all_loc_targets]) / 200
+ return all_loc_targets, all_loc_weights, loc_avg_factor
+
+ def _ga_shape_target_single(self,
+ flat_approxs,
+ inside_flags,
+ flat_squares,
+ gt_bboxes,
+ gt_bboxes_ignore,
+ img_meta,
+ unmap_outputs=True):
+ """Compute guided anchoring targets.
+
+ This function returns sampled anchors and gt bboxes directly
+ rather than calculates regression targets.
+
+ Args:
+ flat_approxs (Tensor): flat approxs of a single image,
+ shape (n, 4)
+ inside_flags (Tensor): inside flags of a single image,
+ shape (n, ).
+ flat_squares (Tensor): flat squares of a single image,
+ shape (approxs_per_octave * n, 4)
+            gt_bboxes (Tensor): Ground truth bboxes of a single image.
+            gt_bboxes_ignore (Tensor): Ground truth bboxes to be ignored,
+                shape (num_ignored_gts, 4).
+            img_meta (dict): Meta info of a single image.
+            unmap_outputs (bool): Whether to map outputs back to the original
+                set of anchors.
+
+ Returns:
+ tuple
+ """
+ if not inside_flags.any():
+ return (None, ) * 5
+ # assign gt and sample anchors
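+        # every square anchor owns approxs_per_octave approx anchors, so the
+        # per-square inside flags are expanded before indexing the approxs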
+ expand_inside_flags = inside_flags[:, None].expand(
+ -1, self.approxs_per_octave).reshape(-1)
+ approxs = flat_approxs[expand_inside_flags, :]
+ squares = flat_squares[inside_flags, :]
+
+ assign_result = self.ga_assigner.assign(approxs, squares,
+ self.approxs_per_octave,
+ gt_bboxes, gt_bboxes_ignore)
+ sampling_result = self.ga_sampler.sample(assign_result, squares,
+ gt_bboxes)
+
+ bbox_anchors = torch.zeros_like(squares)
+ bbox_gts = torch.zeros_like(squares)
+ bbox_weights = torch.zeros_like(squares)
+
+ pos_inds = sampling_result.pos_inds
+ neg_inds = sampling_result.neg_inds
+ if len(pos_inds) > 0:
+ bbox_anchors[pos_inds, :] = sampling_result.pos_bboxes
+ bbox_gts[pos_inds, :] = sampling_result.pos_gt_bboxes
+ bbox_weights[pos_inds, :] = 1.0
+
+ # map up to original set of anchors
+ if unmap_outputs:
+ num_total_anchors = flat_squares.size(0)
+ bbox_anchors = unmap(bbox_anchors, num_total_anchors, inside_flags)
+ bbox_gts = unmap(bbox_gts, num_total_anchors, inside_flags)
+ bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags)
+
+ return (bbox_anchors, bbox_gts, bbox_weights, pos_inds, neg_inds)
+
+ def ga_shape_targets(self,
+ approx_list,
+ inside_flag_list,
+ square_list,
+ gt_bboxes_list,
+ img_metas,
+ gt_bboxes_ignore_list=None,
+ unmap_outputs=True):
+ """Compute guided anchoring targets.
+
+ Args:
+ approx_list (list[list]): Multi level approxs of each image.
+ inside_flag_list (list[list]): Multi level inside flags of each
+ image.
+ square_list (list[list]): Multi level squares of each image.
+ gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image.
+ img_metas (list[dict]): Meta info of each image.
+ gt_bboxes_ignore_list (list[Tensor]): ignore list of gt bboxes.
+ unmap_outputs (bool): unmap outputs or not.
+
+ Returns:
+ tuple
+ """
+ num_imgs = len(img_metas)
+ assert len(approx_list) == len(inside_flag_list) == len(
+ square_list) == num_imgs
+ # anchor number of multi levels
+ num_level_squares = [squares.size(0) for squares in square_list[0]]
+ # concat all level anchors and flags to a single tensor
+ inside_flag_flat_list = []
+ approx_flat_list = []
+ square_flat_list = []
+ for i in range(num_imgs):
+ assert len(square_list[i]) == len(inside_flag_list[i])
+ inside_flag_flat_list.append(torch.cat(inside_flag_list[i]))
+ approx_flat_list.append(torch.cat(approx_list[i]))
+ square_flat_list.append(torch.cat(square_list[i]))
+
+ # compute targets for each image
+ if gt_bboxes_ignore_list is None:
+ gt_bboxes_ignore_list = [None for _ in range(num_imgs)]
+ (all_bbox_anchors, all_bbox_gts, all_bbox_weights, pos_inds_list,
+ neg_inds_list) = multi_apply(
+ self._ga_shape_target_single,
+ approx_flat_list,
+ inside_flag_flat_list,
+ square_flat_list,
+ gt_bboxes_list,
+ gt_bboxes_ignore_list,
+ img_metas,
+ unmap_outputs=unmap_outputs)
+ # no valid anchors
+ if any([bbox_anchors is None for bbox_anchors in all_bbox_anchors]):
+ return None
+ # sampled anchors of all images
+ num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list])
+ num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list])
+ # split targets to a list w.r.t. multiple levels
+ bbox_anchors_list = images_to_levels(all_bbox_anchors,
+ num_level_squares)
+ bbox_gts_list = images_to_levels(all_bbox_gts, num_level_squares)
+ bbox_weights_list = images_to_levels(all_bbox_weights,
+ num_level_squares)
+ return (bbox_anchors_list, bbox_gts_list, bbox_weights_list,
+ num_total_pos, num_total_neg)
+
+ def loss_shape_single(self, shape_pred, bbox_anchors, bbox_gts,
+ anchor_weights, anchor_total_num):
+ shape_pred = shape_pred.permute(0, 2, 3, 1).contiguous().view(-1, 2)
+ bbox_anchors = bbox_anchors.contiguous().view(-1, 4)
+ bbox_gts = bbox_gts.contiguous().view(-1, 4)
+ anchor_weights = anchor_weights.contiguous().view(-1, 4)
+ bbox_deltas = bbox_anchors.new_full(bbox_anchors.size(), 0)
+ bbox_deltas[:, 2:] += shape_pred
+ # filter out negative samples to speed-up weighted_bounded_iou_loss
+ inds = torch.nonzero(
+ anchor_weights[:, 0] > 0, as_tuple=False).squeeze(1)
+ bbox_deltas_ = bbox_deltas[inds]
+ bbox_anchors_ = bbox_anchors[inds]
+ bbox_gts_ = bbox_gts[inds]
+ anchor_weights_ = anchor_weights[inds]
+ pred_anchors_ = self.anchor_coder.decode(
+ bbox_anchors_, bbox_deltas_, wh_ratio_clip=1e-6)
+ loss_shape = self.loss_shape(
+ pred_anchors_,
+ bbox_gts_,
+ anchor_weights_,
+ avg_factor=anchor_total_num)
+ return loss_shape
+
+ def loss_loc_single(self, loc_pred, loc_target, loc_weight,
+ loc_avg_factor):
+ loss_loc = self.loss_loc(
+ loc_pred.reshape(-1, 1),
+ loc_target.reshape(-1).long(),
+ loc_weight.reshape(-1),
+ avg_factor=loc_avg_factor)
+ return loss_loc
+
+ @force_fp32(
+ apply_to=('cls_scores', 'bbox_preds', 'shape_preds', 'loc_preds'))
+ def loss(self,
+ cls_scores,
+ bbox_preds,
+ shape_preds,
+ loc_preds,
+ gt_bboxes,
+ gt_labels,
+ img_metas,
+ gt_bboxes_ignore=None):
+ featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
+ assert len(featmap_sizes) == self.approx_anchor_generator.num_levels
+
+ device = cls_scores[0].device
+
+ # get loc targets
+ loc_targets, loc_weights, loc_avg_factor = self.ga_loc_targets(
+ gt_bboxes, featmap_sizes)
+
+ # get sampled approxes
+ approxs_list, inside_flag_list = self.get_sampled_approxs(
+ featmap_sizes, img_metas, device=device)
+ # get squares and guided anchors
+ squares_list, guided_anchors_list, _ = self.get_anchors(
+ featmap_sizes, shape_preds, loc_preds, img_metas, device=device)
+
+ # get shape targets
+ shape_targets = self.ga_shape_targets(approxs_list, inside_flag_list,
+ squares_list, gt_bboxes,
+ img_metas)
+ if shape_targets is None:
+ return None
+ (bbox_anchors_list, bbox_gts_list, anchor_weights_list, anchor_fg_num,
+ anchor_bg_num) = shape_targets
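+        # with the pseudo sampler only foreground anchors are counted; with a
+        # real ga_sampler both fg and bg enter the shape-loss average factor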
+ anchor_total_num = (
+ anchor_fg_num if not self.ga_sampling else anchor_fg_num +
+ anchor_bg_num)
+
+ # get anchor targets
+ label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1
+ cls_reg_targets = self.get_targets(
+ guided_anchors_list,
+ inside_flag_list,
+ gt_bboxes,
+ img_metas,
+ gt_bboxes_ignore_list=gt_bboxes_ignore,
+ gt_labels_list=gt_labels,
+ label_channels=label_channels)
+ if cls_reg_targets is None:
+ return None
+ (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,
+ num_total_pos, num_total_neg) = cls_reg_targets
+ num_total_samples = (
+ num_total_pos + num_total_neg if self.sampling else num_total_pos)
+
+ # anchor number of multi levels
+ num_level_anchors = [
+ anchors.size(0) for anchors in guided_anchors_list[0]
+ ]
+ # concat all level anchors to a single tensor
+ concat_anchor_list = []
+ for i in range(len(guided_anchors_list)):
+ concat_anchor_list.append(torch.cat(guided_anchors_list[i]))
+ all_anchor_list = images_to_levels(concat_anchor_list,
+ num_level_anchors)
+
+ # get classification and bbox regression losses
+ losses_cls, losses_bbox = multi_apply(
+ self.loss_single,
+ cls_scores,
+ bbox_preds,
+ all_anchor_list,
+ labels_list,
+ label_weights_list,
+ bbox_targets_list,
+ bbox_weights_list,
+ num_total_samples=num_total_samples)
+
+ # get anchor location loss
+ losses_loc = []
+ for i in range(len(loc_preds)):
+ loss_loc = self.loss_loc_single(
+ loc_preds[i],
+ loc_targets[i],
+ loc_weights[i],
+ loc_avg_factor=loc_avg_factor)
+ losses_loc.append(loss_loc)
+
+ # get anchor shape loss
+ losses_shape = []
+ for i in range(len(shape_preds)):
+ loss_shape = self.loss_shape_single(
+ shape_preds[i],
+ bbox_anchors_list[i],
+ bbox_gts_list[i],
+ anchor_weights_list[i],
+ anchor_total_num=anchor_total_num)
+ losses_shape.append(loss_shape)
+
+ return dict(
+ loss_cls=losses_cls,
+ loss_bbox=losses_bbox,
+ loss_shape=losses_shape,
+ loss_loc=losses_loc)
+
+ @force_fp32(
+ apply_to=('cls_scores', 'bbox_preds', 'shape_preds', 'loc_preds'))
+ def get_bboxes(self,
+ cls_scores,
+ bbox_preds,
+ shape_preds,
+ loc_preds,
+ img_metas,
+ cfg=None,
+ rescale=False):
+ assert len(cls_scores) == len(bbox_preds) == len(shape_preds) == len(
+ loc_preds)
+ num_levels = len(cls_scores)
+ featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
+ device = cls_scores[0].device
+ # get guided anchors
+ _, guided_anchors, loc_masks = self.get_anchors(
+ featmap_sizes,
+ shape_preds,
+ loc_preds,
+ img_metas,
+ use_loc_filter=not self.training,
+ device=device)
+ result_list = []
+ for img_id in range(len(img_metas)):
+ cls_score_list = [
+ cls_scores[i][img_id].detach() for i in range(num_levels)
+ ]
+ bbox_pred_list = [
+ bbox_preds[i][img_id].detach() for i in range(num_levels)
+ ]
+ guided_anchor_list = [
+ guided_anchors[img_id][i].detach() for i in range(num_levels)
+ ]
+ loc_mask_list = [
+ loc_masks[img_id][i].detach() for i in range(num_levels)
+ ]
+ img_shape = img_metas[img_id]['img_shape']
+ scale_factor = img_metas[img_id]['scale_factor']
+ proposals = self._get_bboxes_single(cls_score_list, bbox_pred_list,
+ guided_anchor_list,
+ loc_mask_list, img_shape,
+ scale_factor, cfg, rescale)
+ result_list.append(proposals)
+ return result_list
+
+ def _get_bboxes_single(self,
+ cls_scores,
+ bbox_preds,
+ mlvl_anchors,
+ mlvl_masks,
+ img_shape,
+ scale_factor,
+ cfg,
+ rescale=False):
+ cfg = self.test_cfg if cfg is None else cfg
+ assert len(cls_scores) == len(bbox_preds) == len(mlvl_anchors)
+ mlvl_bboxes = []
+ mlvl_scores = []
+ for cls_score, bbox_pred, anchors, mask in zip(cls_scores, bbox_preds,
+ mlvl_anchors,
+ mlvl_masks):
+ assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
+ # if no location is kept, end.
+ if mask.sum() == 0:
+ continue
+ # reshape scores and bbox_pred
+ cls_score = cls_score.permute(1, 2,
+ 0).reshape(-1, self.cls_out_channels)
+ if self.use_sigmoid_cls:
+ scores = cls_score.sigmoid()
+ else:
+ scores = cls_score.softmax(-1)
+ bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4)
+ # filter scores, bbox_pred w.r.t. mask.
+ # anchors are filtered in get_anchors() beforehand.
+ scores = scores[mask, :]
+ bbox_pred = bbox_pred[mask, :]
+ if scores.dim() == 0:
+ anchors = anchors.unsqueeze(0)
+ scores = scores.unsqueeze(0)
+ bbox_pred = bbox_pred.unsqueeze(0)
+ # filter anchors, bbox_pred, scores w.r.t. scores
+ nms_pre = cfg.get('nms_pre', -1)
+ if nms_pre > 0 and scores.shape[0] > nms_pre:
+ if self.use_sigmoid_cls:
+ max_scores, _ = scores.max(dim=1)
+ else:
+ # remind that we set FG labels to [0, num_class-1]
+ # since mmdet v2.0
+ # BG cat_id: num_class
+ max_scores, _ = scores[:, :-1].max(dim=1)
+ _, topk_inds = max_scores.topk(nms_pre)
+ anchors = anchors[topk_inds, :]
+ bbox_pred = bbox_pred[topk_inds, :]
+ scores = scores[topk_inds, :]
+ bboxes = self.bbox_coder.decode(
+ anchors, bbox_pred, max_shape=img_shape)
+ mlvl_bboxes.append(bboxes)
+ mlvl_scores.append(scores)
+ mlvl_bboxes = torch.cat(mlvl_bboxes)
+ if rescale:
+ mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor)
+ mlvl_scores = torch.cat(mlvl_scores)
+ if self.use_sigmoid_cls:
+ # Add a dummy background class to the backend when using sigmoid
+ # remind that we set FG labels to [0, num_class-1] since mmdet v2.0
+ # BG cat_id: num_class
+ padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1)
+ mlvl_scores = torch.cat([mlvl_scores, padding], dim=1)
+ # multi class NMS
+ det_bboxes, det_labels = multiclass_nms(mlvl_bboxes, mlvl_scores,
+ cfg.score_thr, cfg.nms,
+ cfg.max_per_img)
+ return det_bboxes, det_labels
diff --git a/annotator/uniformer/mmdet/models/dense_heads/ld_head.py b/annotator/uniformer/mmdet/models/dense_heads/ld_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..501e1f7befa086f0b2f818531807411fc383d7bd
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/dense_heads/ld_head.py
@@ -0,0 +1,261 @@
+import torch
+from mmcv.runner import force_fp32
+
+from mmdet.core import (bbox2distance, bbox_overlaps, distance2bbox,
+ multi_apply, reduce_mean)
+from ..builder import HEADS, build_loss
+from .gfl_head import GFLHead
+
+
+@HEADS.register_module()
+class LDHead(GFLHead):
+    """Localization Distillation (LD) head.
+
+    It utilizes the learned bbox distributions to transfer the localization
+    dark knowledge from the teacher model to the student. Original paper:
+    `Localization Distillation for Object Detection`_.
+
+ Args:
+ num_classes (int): Number of categories excluding the background
+ category.
+ in_channels (int): Number of channels in the input feature map.
+ loss_ld (dict): Config of Localization Distillation Loss (LD),
+ T is the temperature for distillation.
+ """
+
+ def __init__(self,
+ num_classes,
+ in_channels,
+ loss_ld=dict(
+ type='LocalizationDistillationLoss',
+ loss_weight=0.25,
+ T=10),
+ **kwargs):
+
+ super(LDHead, self).__init__(num_classes, in_channels, **kwargs)
+ self.loss_ld = build_loss(loss_ld)
+
+ def loss_single(self, anchors, cls_score, bbox_pred, labels, label_weights,
+ bbox_targets, stride, soft_targets, num_total_samples):
+ """Compute loss of a single scale level.
+
+ Args:
+ anchors (Tensor): Box reference for each scale level with shape
+ (N, num_total_anchors, 4).
+ cls_score (Tensor): Cls and quality joint scores for each scale
+ level has shape (N, num_classes, H, W).
+ bbox_pred (Tensor): Box distribution logits for each scale
+                level with shape (N, 4*(n+1), H, W), where n is the max value
+                of the integral set.
+ labels (Tensor): Labels of each anchors with shape
+ (N, num_total_anchors).
+ label_weights (Tensor): Label weights of each anchor with shape
+ (N, num_total_anchors)
+            bbox_targets (Tensor): BBox regression targets of each anchor with
+                shape (N, num_total_anchors, 4).
+            stride (tuple): Stride in this scale level.
+            soft_targets (Tensor): Box distribution logits predicted by the
+                teacher model, with the same shape as ``bbox_pred``.
+ num_total_samples (int): Number of positive samples that is
+ reduced over all GPUs.
+
+ Returns:
+            tuple[Tensor]: loss_cls, loss_bbox, loss_dfl, loss_ld and the
+                summed weight targets.
+ """
+ assert stride[0] == stride[1], 'h stride is not equal to w stride!'
+ anchors = anchors.reshape(-1, 4)
+ cls_score = cls_score.permute(0, 2, 3,
+ 1).reshape(-1, self.cls_out_channels)
+ bbox_pred = bbox_pred.permute(0, 2, 3,
+ 1).reshape(-1, 4 * (self.reg_max + 1))
+ soft_targets = soft_targets.permute(0, 2, 3,
+ 1).reshape(-1,
+ 4 * (self.reg_max + 1))
+
+ bbox_targets = bbox_targets.reshape(-1, 4)
+ labels = labels.reshape(-1)
+ label_weights = label_weights.reshape(-1)
+
+ # FG cat_id: [0, num_classes -1], BG cat_id: num_classes
+ bg_class_ind = self.num_classes
+ pos_inds = ((labels >= 0)
+ & (labels < bg_class_ind)).nonzero().squeeze(1)
+ score = label_weights.new_zeros(labels.shape)
+
+ if len(pos_inds) > 0:
+ pos_bbox_targets = bbox_targets[pos_inds]
+ pos_bbox_pred = bbox_pred[pos_inds]
+ pos_anchors = anchors[pos_inds]
+ pos_anchor_centers = self.anchor_center(pos_anchors) / stride[0]
+
+ weight_targets = cls_score.detach().sigmoid()
+ weight_targets = weight_targets.max(dim=1)[0][pos_inds]
+ pos_bbox_pred_corners = self.integral(pos_bbox_pred)
+ pos_decode_bbox_pred = distance2bbox(pos_anchor_centers,
+ pos_bbox_pred_corners)
+ pos_decode_bbox_targets = pos_bbox_targets / stride[0]
+ score[pos_inds] = bbox_overlaps(
+ pos_decode_bbox_pred.detach(),
+ pos_decode_bbox_targets,
+ is_aligned=True)
+ pred_corners = pos_bbox_pred.reshape(-1, self.reg_max + 1)
+ pos_soft_targets = soft_targets[pos_inds]
+ soft_corners = pos_soft_targets.reshape(-1, self.reg_max + 1)
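+            # LD uses the teacher's flattened per-edge distribution logits
+            # (soft corners) as soft targets for the student's pred_corners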
+
+ target_corners = bbox2distance(pos_anchor_centers,
+ pos_decode_bbox_targets,
+ self.reg_max).reshape(-1)
+
+ # regression loss
+ loss_bbox = self.loss_bbox(
+ pos_decode_bbox_pred,
+ pos_decode_bbox_targets,
+ weight=weight_targets,
+ avg_factor=1.0)
+
+ # dfl loss
+ loss_dfl = self.loss_dfl(
+ pred_corners,
+ target_corners,
+ weight=weight_targets[:, None].expand(-1, 4).reshape(-1),
+ avg_factor=4.0)
+
+ # ld loss
+ loss_ld = self.loss_ld(
+ pred_corners,
+ soft_corners,
+ weight=weight_targets[:, None].expand(-1, 4).reshape(-1),
+ avg_factor=4.0)
+
+ else:
+ loss_ld = bbox_pred.sum() * 0
+ loss_bbox = bbox_pred.sum() * 0
+ loss_dfl = bbox_pred.sum() * 0
+ weight_targets = bbox_pred.new_tensor(0)
+
+ # cls (qfl) loss
+ loss_cls = self.loss_cls(
+ cls_score, (labels, score),
+ weight=label_weights,
+ avg_factor=num_total_samples)
+
+ return loss_cls, loss_bbox, loss_dfl, loss_ld, weight_targets.sum()
+
+ def forward_train(self,
+ x,
+ out_teacher,
+ img_metas,
+ gt_bboxes,
+ gt_labels=None,
+ gt_bboxes_ignore=None,
+ proposal_cfg=None,
+ **kwargs):
+ """
+ Args:
+ x (list[Tensor]): Features from FPN.
+ img_metas (list[dict]): Meta information of each image, e.g.,
+ image size, scaling factor, etc.
+ gt_bboxes (Tensor): Ground truth bboxes of the image,
+ shape (num_gts, 4).
+ gt_labels (Tensor): Ground truth labels of each box,
+ shape (num_gts,).
+ gt_bboxes_ignore (Tensor): Ground truth bboxes to be
+ ignored, shape (num_ignored_gts, 4).
+ proposal_cfg (mmcv.Config): Test / postprocessing configuration,
+ if None, test_cfg would be used
+
+ Returns:
+ tuple[dict, list]: The loss components and proposals of each image.
+
+ - losses (dict[str, Tensor]): A dictionary of loss components.
+ - proposal_list (list[Tensor]): Proposals of each image.
+ """
+ outs = self(x)
+ soft_target = out_teacher[1]
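+        # the second element of the teacher's outputs holds its bbox
+        # distribution logits, which act as the LD soft targets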
+ if gt_labels is None:
+ loss_inputs = outs + (gt_bboxes, soft_target, img_metas)
+ else:
+ loss_inputs = outs + (gt_bboxes, gt_labels, soft_target, img_metas)
+ losses = self.loss(*loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore)
+ if proposal_cfg is None:
+ return losses
+ else:
+ proposal_list = self.get_bboxes(*outs, img_metas, cfg=proposal_cfg)
+ return losses, proposal_list
+
+ @force_fp32(apply_to=('cls_scores', 'bbox_preds'))
+ def loss(self,
+ cls_scores,
+ bbox_preds,
+ gt_bboxes,
+ gt_labels,
+ soft_target,
+ img_metas,
+ gt_bboxes_ignore=None):
+ """Compute losses of the head.
+
+ Args:
+ cls_scores (list[Tensor]): Cls and quality scores for each scale
+ level has shape (N, num_classes, H, W).
+ bbox_preds (list[Tensor]): Box distribution logits for each scale
+                level with shape (N, 4*(n+1), H, W), where n is the max value
+                of the integral set.
+ gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
+ shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
+            gt_labels (list[Tensor]): Class indices corresponding to each box.
+            soft_target (list[Tensor]): Box distribution logits predicted by
+                the teacher model for each scale level.
+ img_metas (list[dict]): Meta information of each image, e.g.,
+ image size, scaling factor, etc.
+ gt_bboxes_ignore (list[Tensor] | None): specify which bounding
+ boxes can be ignored when computing the loss.
+
+ Returns:
+ dict[str, Tensor]: A dictionary of loss components.
+ """
+
+ featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
+ assert len(featmap_sizes) == self.anchor_generator.num_levels
+
+ device = cls_scores[0].device
+ anchor_list, valid_flag_list = self.get_anchors(
+ featmap_sizes, img_metas, device=device)
+ label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1
+
+ cls_reg_targets = self.get_targets(
+ anchor_list,
+ valid_flag_list,
+ gt_bboxes,
+ img_metas,
+ gt_bboxes_ignore_list=gt_bboxes_ignore,
+ gt_labels_list=gt_labels,
+ label_channels=label_channels)
+ if cls_reg_targets is None:
+ return None
+
+ (anchor_list, labels_list, label_weights_list, bbox_targets_list,
+ bbox_weights_list, num_total_pos, num_total_neg) = cls_reg_targets
+
+ num_total_samples = reduce_mean(
+ torch.tensor(num_total_pos, dtype=torch.float,
+ device=device)).item()
+ num_total_samples = max(num_total_samples, 1.0)
+
+ losses_cls, losses_bbox, losses_dfl, losses_ld, \
+ avg_factor = multi_apply(
+ self.loss_single,
+ anchor_list,
+ cls_scores,
+ bbox_preds,
+ labels_list,
+ label_weights_list,
+ bbox_targets_list,
+ self.anchor_generator.strides,
+ soft_target,
+ num_total_samples=num_total_samples)
+
+ avg_factor = sum(avg_factor) + 1e-6
+ avg_factor = reduce_mean(avg_factor).item()
+ losses_bbox = [x / avg_factor for x in losses_bbox]
+ losses_dfl = [x / avg_factor for x in losses_dfl]
+ return dict(
+ loss_cls=losses_cls,
+ loss_bbox=losses_bbox,
+ loss_dfl=losses_dfl,
+ loss_ld=losses_ld)
diff --git a/annotator/uniformer/mmdet/models/dense_heads/nasfcos_head.py b/annotator/uniformer/mmdet/models/dense_heads/nasfcos_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..994ce0455e1982110f237b3958a81394c319bb47
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/dense_heads/nasfcos_head.py
@@ -0,0 +1,75 @@
+import copy
+
+import torch.nn as nn
+from mmcv.cnn import (ConvModule, Scale, bias_init_with_prob,
+ caffe2_xavier_init, normal_init)
+
+from mmdet.models.dense_heads.fcos_head import FCOSHead
+from ..builder import HEADS
+
+
+@HEADS.register_module()
+class NASFCOSHead(FCOSHead):
+ """Anchor-free head used in `NASFCOS `_.
+
+    It is quite similar to the FCOS head, except for the searched structure of
+    the classification and bbox regression branches, where a structure of
+    "dconv3x3, conv3x3, dconv3x3, conv1x1" is utilized instead.
+ """
+
+ def _init_layers(self):
+ """Initialize layers of the head."""
+ dconv3x3_config = dict(
+ type='DCNv2',
+ kernel_size=3,
+ use_bias=True,
+ deform_groups=2,
+ padding=1)
+ conv3x3_config = dict(type='Conv', kernel_size=3, padding=1)
+ conv1x1_config = dict(type='Conv', kernel_size=1)
+
+ self.arch_config = [
+ dconv3x3_config, conv3x3_config, dconv3x3_config, conv1x1_config
+ ]
+ self.cls_convs = nn.ModuleList()
+ self.reg_convs = nn.ModuleList()
+ for i, op_ in enumerate(self.arch_config):
+ op = copy.deepcopy(op_)
+ chn = self.in_channels if i == 0 else self.feat_channels
+ assert isinstance(op, dict)
+ use_bias = op.pop('use_bias', False)
+ padding = op.pop('padding', 0)
+ kernel_size = op.pop('kernel_size')
+ module = ConvModule(
+ chn,
+ self.feat_channels,
+ kernel_size,
+ stride=1,
+ padding=padding,
+ norm_cfg=self.norm_cfg,
+ bias=use_bias,
+ conv_cfg=op)
+
+ self.cls_convs.append(copy.deepcopy(module))
+ self.reg_convs.append(copy.deepcopy(module))
+
+ self.conv_cls = nn.Conv2d(
+ self.feat_channels, self.cls_out_channels, 3, padding=1)
+ self.conv_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1)
+ self.conv_centerness = nn.Conv2d(self.feat_channels, 1, 3, padding=1)
+
+ self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides])
+
+ def init_weights(self):
+ """Initialize weights of the head."""
+ # retinanet_bias_init
+ bias_cls = bias_init_with_prob(0.01)
+ normal_init(self.conv_reg, std=0.01)
+ normal_init(self.conv_centerness, std=0.01)
+ normal_init(self.conv_cls, std=0.01, bias=bias_cls)
+
+ for branch in [self.cls_convs, self.reg_convs]:
+ for module in branch.modules():
+ if isinstance(module, ConvModule) \
+ and isinstance(module.conv, nn.Conv2d):
+ caffe2_xavier_init(module.conv)
diff --git a/annotator/uniformer/mmdet/models/dense_heads/paa_head.py b/annotator/uniformer/mmdet/models/dense_heads/paa_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..e067b0121cf8b8230c0c9c6b8cfd41f56be4e298
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/dense_heads/paa_head.py
@@ -0,0 +1,671 @@
+import numpy as np
+import torch
+from mmcv.runner import force_fp32
+
+from mmdet.core import multi_apply, multiclass_nms
+from mmdet.core.bbox.iou_calculators import bbox_overlaps
+from mmdet.models import HEADS
+from mmdet.models.dense_heads import ATSSHead
+
+EPS = 1e-12
+try:
+ import sklearn.mixture as skm
+except ImportError:
+ skm = None
+
+
+def levels_to_images(mlvl_tensor):
+ """Concat multi-level feature maps by image.
+
+ [feature_level0, feature_level1...] -> [feature_image0, feature_image1...]
+    Convert the shape of each element in mlvl_tensor from (N, C, H, W) to
+    (N, H*W, C), then split each element into N tensors of shape (H*W, C), and
+    concat the tensors belonging to the same image across all levels along the
+    first dimension.
+
+ Args:
+ mlvl_tensor (list[torch.Tensor]): list of Tensor which collect from
+ corresponding level. Each element is of shape (N, C, H, W)
+
+ Returns:
+ list[torch.Tensor]: A list that contains N tensors and each tensor is
+ of shape (num_elements, C)
+ """
+ batch_size = mlvl_tensor[0].size(0)
+ batch_list = [[] for _ in range(batch_size)]
+ channels = mlvl_tensor[0].size(1)
+ for t in mlvl_tensor:
+ t = t.permute(0, 2, 3, 1)
+ t = t.view(batch_size, -1, channels).contiguous()
+ for img in range(batch_size):
+ batch_list[img].append(t[img])
+ return [torch.cat(item, 0) for item in batch_list]
+
+
+@HEADS.register_module()
+class PAAHead(ATSSHead):
+ """Head of PAAAssignment: Probabilistic Anchor Assignment with IoU
+ Prediction for Object Detection.
+
+ Code is modified from the `official github repo
+ `_.
+
+ More details can be found in the `paper
+ `_ .
+
+ Args:
+ topk (int): Select topk samples with smallest loss in
+ each level.
+ score_voting (bool): Whether to use score voting in post-process.
+        covariance_type (str): String describing the type of covariance
+            parameters to be used in :class:`sklearn.mixture.GaussianMixture`.
+ It must be one of:
+
+ - 'full': each component has its own general covariance matrix
+ - 'tied': all components share the same general covariance matrix
+ - 'diag': each component has its own diagonal covariance matrix
+ - 'spherical': each component has its own single variance
+ Default: 'diag'. From 'full' to 'spherical', the gmm fitting
+ process is faster yet the performance could be influenced. For most
+ cases, 'diag' should be a good choice.
+ """
+
+ def __init__(self,
+ *args,
+ topk=9,
+ score_voting=True,
+ covariance_type='diag',
+ **kwargs):
+ # topk used in paa reassign process
+ self.topk = topk
+ self.with_score_voting = score_voting
+ self.covariance_type = covariance_type
+ super(PAAHead, self).__init__(*args, **kwargs)
+
+ @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'iou_preds'))
+ def loss(self,
+ cls_scores,
+ bbox_preds,
+ iou_preds,
+ gt_bboxes,
+ gt_labels,
+ img_metas,
+ gt_bboxes_ignore=None):
+ """Compute losses of the head.
+
+ Args:
+ cls_scores (list[Tensor]): Box scores for each scale level
+ Has shape (N, num_anchors * num_classes, H, W)
+ bbox_preds (list[Tensor]): Box energies / deltas for each scale
+ level with shape (N, num_anchors * 4, H, W)
+ iou_preds (list[Tensor]): iou_preds for each scale
+ level with shape (N, num_anchors * 1, H, W)
+ gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
+ shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
+ gt_labels (list[Tensor]): class indices corresponding to each box
+ img_metas (list[dict]): Meta information of each image, e.g.,
+ image size, scaling factor, etc.
+ gt_bboxes_ignore (list[Tensor] | None): Specify which bounding
+                boxes can be ignored when computing the loss.
+
+ Returns:
+            dict[str, Tensor]: A dictionary of loss components.
+ """
+
+ featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
+ assert len(featmap_sizes) == self.anchor_generator.num_levels
+
+ device = cls_scores[0].device
+ anchor_list, valid_flag_list = self.get_anchors(
+ featmap_sizes, img_metas, device=device)
+ label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1
+ cls_reg_targets = self.get_targets(
+ anchor_list,
+ valid_flag_list,
+ gt_bboxes,
+ img_metas,
+ gt_bboxes_ignore_list=gt_bboxes_ignore,
+ gt_labels_list=gt_labels,
+ label_channels=label_channels,
+ )
+ (labels, labels_weight, bboxes_target, bboxes_weight, pos_inds,
+ pos_gt_index) = cls_reg_targets
+ cls_scores = levels_to_images(cls_scores)
+ cls_scores = [
+ item.reshape(-1, self.cls_out_channels) for item in cls_scores
+ ]
+ bbox_preds = levels_to_images(bbox_preds)
+ bbox_preds = [item.reshape(-1, 4) for item in bbox_preds]
+ iou_preds = levels_to_images(iou_preds)
+ iou_preds = [item.reshape(-1, 1) for item in iou_preds]
+ pos_losses_list, = multi_apply(self.get_pos_loss, anchor_list,
+ cls_scores, bbox_preds, labels,
+ labels_weight, bboxes_target,
+ bboxes_weight, pos_inds)
+
+ with torch.no_grad():
+ reassign_labels, reassign_label_weight, \
+ reassign_bbox_weights, num_pos = multi_apply(
+ self.paa_reassign,
+ pos_losses_list,
+ labels,
+ labels_weight,
+ bboxes_weight,
+ pos_inds,
+ pos_gt_index,
+ anchor_list)
+ num_pos = sum(num_pos)
+ # convert all tensor list to a flatten tensor
+ cls_scores = torch.cat(cls_scores, 0).view(-1, cls_scores[0].size(-1))
+ bbox_preds = torch.cat(bbox_preds, 0).view(-1, bbox_preds[0].size(-1))
+ iou_preds = torch.cat(iou_preds, 0).view(-1, iou_preds[0].size(-1))
+ labels = torch.cat(reassign_labels, 0).view(-1)
+ flatten_anchors = torch.cat(
+ [torch.cat(item, 0) for item in anchor_list])
+ labels_weight = torch.cat(reassign_label_weight, 0).view(-1)
+ bboxes_target = torch.cat(bboxes_target,
+ 0).view(-1, bboxes_target[0].size(-1))
+
+ pos_inds_flatten = ((labels >= 0)
+ &
+ (labels < self.num_classes)).nonzero().reshape(-1)
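+        # positives are anchors whose (reassigned) label is a foreground
+        # class; background anchors carry label == num_classes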
+
+ losses_cls = self.loss_cls(
+ cls_scores,
+ labels,
+ labels_weight,
+ avg_factor=max(num_pos, len(img_metas))) # avoid num_pos=0
+ if num_pos:
+ pos_bbox_pred = self.bbox_coder.decode(
+ flatten_anchors[pos_inds_flatten],
+ bbox_preds[pos_inds_flatten])
+ pos_bbox_target = bboxes_target[pos_inds_flatten]
+ iou_target = bbox_overlaps(
+ pos_bbox_pred.detach(), pos_bbox_target, is_aligned=True)
+ losses_iou = self.loss_centerness(
+ iou_preds[pos_inds_flatten],
+ iou_target.unsqueeze(-1),
+ avg_factor=num_pos)
+ losses_bbox = self.loss_bbox(
+ pos_bbox_pred,
+ pos_bbox_target,
+ iou_target.clamp(min=EPS),
+ avg_factor=iou_target.sum())
+ else:
+ losses_iou = iou_preds.sum() * 0
+ losses_bbox = bbox_preds.sum() * 0
+
+ return dict(
+ loss_cls=losses_cls, loss_bbox=losses_bbox, loss_iou=losses_iou)
+
+ def get_pos_loss(self, anchors, cls_score, bbox_pred, label, label_weight,
+ bbox_target, bbox_weight, pos_inds):
+ """Calculate loss of all potential positive samples obtained from first
+ match process.
+
+ Args:
+ anchors (list[Tensor]): Anchors of each scale.
+ cls_score (Tensor): Box scores of single image with shape
+ (num_anchors, num_classes)
+ bbox_pred (Tensor): Box energies / deltas of single image
+ with shape (num_anchors, 4)
+ label (Tensor): classification target of each anchor with
+ shape (num_anchors,)
+ label_weight (Tensor): Classification loss weight of each
+ anchor with shape (num_anchors).
+ bbox_target (dict): Regression target of each anchor with
+ shape (num_anchors, 4).
+ bbox_weight (Tensor): Bbox weight of each anchor with shape
+ (num_anchors, 4).
+ pos_inds (Tensor): Index of all positive samples got from
+ first assign process.
+
+ Returns:
+ Tensor: Losses of all positive samples in single image.
+ """
+ if not len(pos_inds):
+ return cls_score.new([]),
+ anchors_all_level = torch.cat(anchors, 0)
+ pos_scores = cls_score[pos_inds]
+ pos_bbox_pred = bbox_pred[pos_inds]
+ pos_label = label[pos_inds]
+ pos_label_weight = label_weight[pos_inds]
+ pos_bbox_target = bbox_target[pos_inds]
+ pos_bbox_weight = bbox_weight[pos_inds]
+ pos_anchors = anchors_all_level[pos_inds]
+ pos_bbox_pred = self.bbox_coder.decode(pos_anchors, pos_bbox_pred)
+
+ # to keep loss dimension
+ loss_cls = self.loss_cls(
+ pos_scores,
+ pos_label,
+ pos_label_weight,
+ avg_factor=self.loss_cls.loss_weight,
+ reduction_override='none')
+
+ loss_bbox = self.loss_bbox(
+ pos_bbox_pred,
+ pos_bbox_target,
+ pos_bbox_weight,
+ avg_factor=self.loss_cls.loss_weight,
+ reduction_override='none')
+
+ loss_cls = loss_cls.sum(-1)
+ pos_loss = loss_bbox + loss_cls
+ return pos_loss,
+
+ def paa_reassign(self, pos_losses, label, label_weight, bbox_weight,
+ pos_inds, pos_gt_inds, anchors):
+ """Fit loss to GMM distribution and separate positive, ignore, negative
+ samples again with GMM model.
+
+ Args:
+ pos_losses (Tensor): Losses of all positive samples in
+ single image.
+ label (Tensor): classification target of each anchor with
+ shape (num_anchors,)
+ label_weight (Tensor): Classification loss weight of each
+ anchor with shape (num_anchors).
+ bbox_weight (Tensor): Bbox weight of each anchor with shape
+ (num_anchors, 4).
+ pos_inds (Tensor): Index of all positive samples got from
+ first assign process.
+ pos_gt_inds (Tensor): Gt_index of all positive samples got
+ from first assign process.
+ anchors (list[Tensor]): Anchors of each scale.
+
+ Returns:
+ tuple: Usually returns a tuple containing learning targets.
+
+ - label (Tensor): classification target of each anchor after
+ paa assign, with shape (num_anchors,)
+ - label_weight (Tensor): Classification loss weight of each
+ anchor after paa assign, with shape (num_anchors).
+ - bbox_weight (Tensor): Bbox weight of each anchor with shape
+ (num_anchors, 4).
+ - num_pos (int): The number of positive samples after paa
+ assign.
+ """
+ if not len(pos_inds):
+ return label, label_weight, bbox_weight, 0
+ label = label.clone()
+ label_weight = label_weight.clone()
+ bbox_weight = bbox_weight.clone()
+ num_gt = pos_gt_inds.max() + 1
+ num_level = len(anchors)
+ num_anchors_each_level = [item.size(0) for item in anchors]
+ num_anchors_each_level.insert(0, 0)
+ inds_level_interval = np.cumsum(num_anchors_each_level)
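+        # cumulative anchor counts give each pyramid level's [start, end)
+        # index range inside the flattened anchor list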
+ pos_level_mask = []
+ for i in range(num_level):
+ mask = (pos_inds >= inds_level_interval[i]) & (
+ pos_inds < inds_level_interval[i + 1])
+ pos_level_mask.append(mask)
+ pos_inds_after_paa = [label.new_tensor([])]
+ ignore_inds_after_paa = [label.new_tensor([])]
+ for gt_ind in range(num_gt):
+ pos_inds_gmm = []
+ pos_loss_gmm = []
+ gt_mask = pos_gt_inds == gt_ind
+ for level in range(num_level):
+ level_mask = pos_level_mask[level]
+ level_gt_mask = level_mask & gt_mask
+ value, topk_inds = pos_losses[level_gt_mask].topk(
+ min(level_gt_mask.sum(), self.topk), largest=False)
+ pos_inds_gmm.append(pos_inds[level_gt_mask][topk_inds])
+ pos_loss_gmm.append(value)
+ pos_inds_gmm = torch.cat(pos_inds_gmm)
+ pos_loss_gmm = torch.cat(pos_loss_gmm)
+ # fix gmm need at least two sample
+ if len(pos_inds_gmm) < 2:
+ continue
+ device = pos_inds_gmm.device
+ pos_loss_gmm, sort_inds = pos_loss_gmm.sort()
+ pos_inds_gmm = pos_inds_gmm[sort_inds]
+ pos_loss_gmm = pos_loss_gmm.view(-1, 1).cpu().numpy()
+ min_loss, max_loss = pos_loss_gmm.min(), pos_loss_gmm.max()
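+            # initialise a 2-component GMM at the loss extremes: component 0
+            # at the low-loss (positive-like) mode, component 1 at the
+            # high-loss (negative-like) mode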
+ means_init = np.array([min_loss, max_loss]).reshape(2, 1)
+ weights_init = np.array([0.5, 0.5])
+ precisions_init = np.array([1.0, 1.0]).reshape(2, 1, 1) # full
+ if self.covariance_type == 'spherical':
+ precisions_init = precisions_init.reshape(2)
+ elif self.covariance_type == 'diag':
+ precisions_init = precisions_init.reshape(2, 1)
+ elif self.covariance_type == 'tied':
+ precisions_init = np.array([[1.0]])
+ if skm is None:
+                raise ImportError('Please run "pip install scikit-learn" '
+                                  'to install scikit-learn first.')
+ gmm = skm.GaussianMixture(
+ 2,
+ weights_init=weights_init,
+ means_init=means_init,
+ precisions_init=precisions_init,
+ covariance_type=self.covariance_type)
+ gmm.fit(pos_loss_gmm)
+ gmm_assignment = gmm.predict(pos_loss_gmm)
+ scores = gmm.score_samples(pos_loss_gmm)
+ gmm_assignment = torch.from_numpy(gmm_assignment).to(device)
+ scores = torch.from_numpy(scores).to(device)
+
+ pos_inds_temp, ignore_inds_temp = self.gmm_separation_scheme(
+ gmm_assignment, scores, pos_inds_gmm)
+ pos_inds_after_paa.append(pos_inds_temp)
+ ignore_inds_after_paa.append(ignore_inds_temp)
+
+ pos_inds_after_paa = torch.cat(pos_inds_after_paa)
+ ignore_inds_after_paa = torch.cat(ignore_inds_after_paa)
+ reassign_mask = (pos_inds.unsqueeze(1) != pos_inds_after_paa).all(1)
+ reassign_ids = pos_inds[reassign_mask]
+ label[reassign_ids] = self.num_classes
+ label_weight[ignore_inds_after_paa] = 0
+ bbox_weight[reassign_ids] = 0
+ num_pos = len(pos_inds_after_paa)
+ return label, label_weight, bbox_weight, num_pos
+
+ def gmm_separation_scheme(self, gmm_assignment, scores, pos_inds_gmm):
+ """A general separation scheme for gmm model.
+
+ It separates a GMM distribution of candidate samples into three
+ parts, 0 1 and uncertain areas, and you can implement other
+ separation schemes by rewriting this function.
+
+ Args:
+ gmm_assignment (Tensor): The prediction of GMM which is of shape
+ (num_samples,). The 0/1 value indicates the distribution
+ that each sample comes from.
+ scores (Tensor): The probability of sample coming from the
+ fit GMM distribution. The tensor is of shape (num_samples,).
+ pos_inds_gmm (Tensor): All the indexes of samples which are used
+ to fit GMM model. The tensor is of shape (num_samples,)
+
+ Returns:
+ tuple[Tensor]: The indices of positive and ignored samples.
+
+ - pos_inds_temp (Tensor): Indices of positive samples.
+ - ignore_inds_temp (Tensor): Indices of ignore samples.
+ """
+ # The implementation is (c) in Fig.3 in origin paper instead of (b).
+ # You can refer to issues such as
+ # https://github.com/kkhoot/PAA/issues/8 and
+ # https://github.com/kkhoot/PAA/issues/9.
+ fgs = gmm_assignment == 0
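+        # component 0 was initialised at the low-loss mode, so samples
+        # assigned to it are the candidate positives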
+ pos_inds_temp = fgs.new_tensor([], dtype=torch.long)
+ ignore_inds_temp = fgs.new_tensor([], dtype=torch.long)
+ if fgs.nonzero().numel():
+ _, pos_thr_ind = scores[fgs].topk(1)
+ pos_inds_temp = pos_inds_gmm[fgs][:pos_thr_ind + 1]
+ ignore_inds_temp = pos_inds_gmm.new_tensor([])
+ return pos_inds_temp, ignore_inds_temp
+
+ def get_targets(
+ self,
+ anchor_list,
+ valid_flag_list,
+ gt_bboxes_list,
+ img_metas,
+ gt_bboxes_ignore_list=None,
+ gt_labels_list=None,
+ label_channels=1,
+ unmap_outputs=True,
+ ):
+ """Get targets for PAA head.
+
+        This method is almost the same as `AnchorHead.get_targets()`. We
+        directly return the results of `_get_targets_single` instead of
+        mapping them to levels with the `images_to_levels` function.
+
+ Args:
+ anchor_list (list[list[Tensor]]): Multi level anchors of each
+ image. The outer list indicates images, and the inner list
+ corresponds to feature levels of the image. Each element of
+ the inner list is a tensor of shape (num_anchors, 4).
+ valid_flag_list (list[list[Tensor]]): Multi level valid flags of
+ each image. The outer list indicates images, and the inner list
+ corresponds to feature levels of the image. Each element of
+ the inner list is a tensor of shape (num_anchors, )
+ gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image.
+ img_metas (list[dict]): Meta info of each image.
+ gt_bboxes_ignore_list (list[Tensor]): Ground truth bboxes to be
+ ignored.
+ gt_labels_list (list[Tensor]): Ground truth labels of each box.
+ label_channels (int): Channel of label.
+ unmap_outputs (bool): Whether to map outputs back to the original
+ set of anchors.
+
+ Returns:
+ tuple: Usually returns a tuple containing learning targets.
+
+ - labels (list[Tensor]): Labels of all anchors, each with
+ shape (num_anchors,).
+ - label_weights (list[Tensor]): Label weights of all anchors,
+ each with shape (num_anchors,).
+ - bbox_targets (list[Tensor]): BBox targets of all anchors,
+ each with shape (num_anchors, 4).
+ - bbox_weights (list[Tensor]): BBox weights of all anchors,
+ each with shape (num_anchors, 4).
+ - pos_inds (list[Tensor]): Indices of positive samples among
+ all anchors.
+ - gt_inds (list[Tensor]): Assigned gt indices of positive
+ samples among all anchors.
+ """
+
+ num_imgs = len(img_metas)
+ assert len(anchor_list) == len(valid_flag_list) == num_imgs
+ concat_anchor_list = []
+ concat_valid_flag_list = []
+ for i in range(num_imgs):
+ assert len(anchor_list[i]) == len(valid_flag_list[i])
+ concat_anchor_list.append(torch.cat(anchor_list[i]))
+ concat_valid_flag_list.append(torch.cat(valid_flag_list[i]))
+
+ # compute targets for each image
+ if gt_bboxes_ignore_list is None:
+ gt_bboxes_ignore_list = [None for _ in range(num_imgs)]
+ if gt_labels_list is None:
+ gt_labels_list = [None for _ in range(num_imgs)]
+ results = multi_apply(
+ self._get_targets_single,
+ concat_anchor_list,
+ concat_valid_flag_list,
+ gt_bboxes_list,
+ gt_bboxes_ignore_list,
+ gt_labels_list,
+ img_metas,
+ label_channels=label_channels,
+ unmap_outputs=unmap_outputs)
+
+ (labels, label_weights, bbox_targets, bbox_weights, valid_pos_inds,
+ valid_neg_inds, sampling_result) = results
+
+ # Due to the valid flags of anchors, we have to calculate the real
+ # pos_inds in the original anchor set.
+ pos_inds = []
+ for i, single_labels in enumerate(labels):
+ pos_mask = (0 <= single_labels) & (
+ single_labels < self.num_classes)
+ pos_inds.append(pos_mask.nonzero().view(-1))
+
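+ # Illustrative sketch (hypothetical values): with num_classes=80 and
+ # single_labels = tensor([80, 2, 80, 57]), background anchors carry the
+ # label 80, so pos_mask is [False, True, False, True] and the recovered
+ # pos_inds for that image is tensor([1, 3]).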
+ gt_inds = [item.pos_assigned_gt_inds for item in sampling_result]
+ return (labels, label_weights, bbox_targets, bbox_weights, pos_inds,
+ gt_inds)
+
+ def _get_targets_single(self,
+ flat_anchors,
+ valid_flags,
+ gt_bboxes,
+ gt_bboxes_ignore,
+ gt_labels,
+ img_meta,
+ label_channels=1,
+ unmap_outputs=True):
+ """Compute regression and classification targets for anchors in a
+ single image.
+
+ This method is the same as `AnchorHead._get_targets_single()`.
+ """
+ assert unmap_outputs, 'We must map outputs back to the original ' \
+ 'set of anchors in PAAHead'
+ return super(ATSSHead, self)._get_targets_single(
+ flat_anchors,
+ valid_flags,
+ gt_bboxes,
+ gt_bboxes_ignore,
+ gt_labels,
+ img_meta,
+ label_channels=1,
+ unmap_outputs=True)
+
+ def _get_bboxes(self,
+ cls_scores,
+ bbox_preds,
+ iou_preds,
+ mlvl_anchors,
+ img_shapes,
+ scale_factors,
+ cfg,
+ rescale=False,
+ with_nms=True):
+ """Transform outputs for a single batch item into labeled boxes.
+
+ This method is almost the same as `ATSSHead._get_bboxes()`.
+ We use sqrt(iou_preds * cls_scores) in the NMS process instead of just
+ cls_scores. Besides, score voting is used when ``score_voting``
+ is set to True.
+ """
+ assert with_nms, 'PAA only supports "with_nms=True" now'
+ assert len(cls_scores) == len(bbox_preds) == len(mlvl_anchors)
+ batch_size = cls_scores[0].shape[0]
+
+ mlvl_bboxes = []
+ mlvl_scores = []
+ mlvl_iou_preds = []
+ for cls_score, bbox_pred, iou_preds, anchors in zip(
+ cls_scores, bbox_preds, iou_preds, mlvl_anchors):
+ assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
+
+ scores = cls_score.permute(0, 2, 3, 1).reshape(
+ batch_size, -1, self.cls_out_channels).sigmoid()
+ bbox_pred = bbox_pred.permute(0, 2, 3,
+ 1).reshape(batch_size, -1, 4)
+ iou_preds = iou_preds.permute(0, 2, 3, 1).reshape(batch_size,
+ -1).sigmoid()
+
+ nms_pre = cfg.get('nms_pre', -1)
+ if nms_pre > 0 and scores.shape[1] > nms_pre:
+ max_scores, _ = (scores * iou_preds[..., None]).sqrt().max(-1)
+ _, topk_inds = max_scores.topk(nms_pre)
+ batch_inds = torch.arange(batch_size).view(
+ -1, 1).expand_as(topk_inds).long()
+ anchors = anchors[topk_inds, :]
+ bbox_pred = bbox_pred[batch_inds, topk_inds, :]
+ scores = scores[batch_inds, topk_inds, :]
+ iou_preds = iou_preds[batch_inds, topk_inds]
+ else:
+ anchors = anchors.expand_as(bbox_pred)
+
+ bboxes = self.bbox_coder.decode(
+ anchors, bbox_pred, max_shape=img_shapes)
+ mlvl_bboxes.append(bboxes)
+ mlvl_scores.append(scores)
+ mlvl_iou_preds.append(iou_preds)
+
+ batch_mlvl_bboxes = torch.cat(mlvl_bboxes, dim=1)
+ if rescale:
+ batch_mlvl_bboxes /= batch_mlvl_bboxes.new_tensor(
+ scale_factors).unsqueeze(1)
+ batch_mlvl_scores = torch.cat(mlvl_scores, dim=1)
+ # Add a dummy background class to the backend when using sigmoid
+ # remind that we set FG labels to [0, num_class-1] since mmdet v2.0
+ # BG cat_id: num_class
+ padding = batch_mlvl_scores.new_zeros(batch_size,
+ batch_mlvl_scores.shape[1], 1)
+ batch_mlvl_scores = torch.cat([batch_mlvl_scores, padding], dim=-1)
+ batch_mlvl_iou_preds = torch.cat(mlvl_iou_preds, dim=1)
+ batch_mlvl_nms_scores = (batch_mlvl_scores *
+ batch_mlvl_iou_preds[..., None]).sqrt()
+
+ det_results = []
+ for (mlvl_bboxes, mlvl_scores) in zip(batch_mlvl_bboxes,
+ batch_mlvl_nms_scores):
+ det_bbox, det_label = multiclass_nms(
+ mlvl_bboxes,
+ mlvl_scores,
+ cfg.score_thr,
+ cfg.nms,
+ cfg.max_per_img,
+ score_factors=None)
+ if self.with_score_voting and len(det_bbox) > 0:
+ det_bbox, det_label = self.score_voting(
+ det_bbox, det_label, mlvl_bboxes, mlvl_scores,
+ cfg.score_thr)
+ det_results.append(tuple([det_bbox, det_label]))
+
+ return det_results
+
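+ # A quick numeric sketch of the joint score used above (hypothetical
+ # numbers): a location with classification score 0.81 and predicted IoU
+ # 0.49 enters NMS with sqrt(0.81 * 0.49) = 0.63, so the ranking blends
+ # classification confidence with localization quality rather than relying
+ # on the classification score alone.
+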
+ def score_voting(self, det_bboxes, det_labels, mlvl_bboxes,
+ mlvl_nms_scores, score_thr):
+ """Implementation of score voting method works on each remaining boxes
+ after NMS procedure.
+
+ Args:
+ det_bboxes (Tensor): Remaining boxes after NMS procedure,
+ with shape (k, 5), each dimension means
+ (x1, y1, x2, y2, score).
+ det_labels (Tensor): The labels of remaining boxes, with shape
+ (k, 1). Labels are 0-based.
+ mlvl_bboxes (Tensor): All boxes before the NMS procedure,
+ with shape (num_anchors, 4).
+ mlvl_nms_scores (Tensor): The scores of all boxes which are used
+ in the NMS procedure, with shape (num_anchors, num_class).
+ score_thr (float): The score threshold of bboxes.
+
+ Returns:
+ tuple: Usually returns a tuple containing voting results.
+
+ - det_bboxes_voted (Tensor): Remaining boxes after
+ score voting procedure, with shape (k, 5), each
+ dimension means (x1, y1, x2, y2, score).
+ - det_labels_voted (Tensor): Label of remaining bboxes
+ after voting, with shape (num_anchors,).
+ """
+ candidate_mask = mlvl_nms_scores > score_thr
+ candidate_mask_nonzeros = candidate_mask.nonzero()
+ candidate_inds = candidate_mask_nonzeros[:, 0]
+ candidate_labels = candidate_mask_nonzeros[:, 1]
+ candidate_bboxes = mlvl_bboxes[candidate_inds]
+ candidate_scores = mlvl_nms_scores[candidate_mask]
+ det_bboxes_voted = []
+ det_labels_voted = []
+ for cls in range(self.cls_out_channels):
+ candidate_cls_mask = candidate_labels == cls
+ if not candidate_cls_mask.any():
+ continue
+ candidate_cls_scores = candidate_scores[candidate_cls_mask]
+ candidate_cls_bboxes = candidate_bboxes[candidate_cls_mask]
+ det_cls_mask = det_labels == cls
+ det_cls_bboxes = det_bboxes[det_cls_mask].view(
+ -1, det_bboxes.size(-1))
+ det_candidate_ious = bbox_overlaps(det_cls_bboxes[:, :4],
+ candidate_cls_bboxes)
+ for det_ind in range(len(det_cls_bboxes)):
+ single_det_ious = det_candidate_ious[det_ind]
+ pos_ious_mask = single_det_ious > 0.01
+ pos_ious = single_det_ious[pos_ious_mask]
+ pos_bboxes = candidate_cls_bboxes[pos_ious_mask]
+ pos_scores = candidate_cls_scores[pos_ious_mask]
+ pis = (torch.exp(-(1 - pos_ious)**2 / 0.025) *
+ pos_scores)[:, None]
+ voted_box = torch.sum(
+ pis * pos_bboxes, dim=0) / torch.sum(
+ pis, dim=0)
+ voted_score = det_cls_bboxes[det_ind][-1:][None, :]
+ det_bboxes_voted.append(
+ torch.cat((voted_box[None, :], voted_score), dim=1))
+ det_labels_voted.append(cls)
+
+ det_bboxes_voted = torch.cat(det_bboxes_voted, dim=0)
+ det_labels_voted = det_labels.new_tensor(det_labels_voted)
+ return det_bboxes_voted, det_labels_voted
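+
+ # A minimal sketch of the voting weight used above (hypothetical numbers):
+ # a candidate box with IoU 0.9 to a detection and NMS score 0.8 contributes
+ #   pi = exp(-(1 - 0.9) ** 2 / 0.025) * 0.8 ~= 0.67 * 0.8 ~= 0.54,
+ # while a candidate with IoU 0.5 is down-weighted by exp(-10) ~= 4.5e-5,
+ # so the voted box is effectively a weighted average of near-duplicates of
+ # the detection only.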
diff --git a/annotator/uniformer/mmdet/models/dense_heads/pisa_retinanet_head.py b/annotator/uniformer/mmdet/models/dense_heads/pisa_retinanet_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..bd87b9aeb07e05ff94b444ac8999eca3f616711a
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/dense_heads/pisa_retinanet_head.py
@@ -0,0 +1,154 @@
+import torch
+from mmcv.runner import force_fp32
+
+from mmdet.core import images_to_levels
+from ..builder import HEADS
+from ..losses import carl_loss, isr_p
+from .retina_head import RetinaHead
+
+
+@HEADS.register_module()
+class PISARetinaHead(RetinaHead):
+ """PISA Retinanet Head.
+
+ The head owns the same structure with Retinanet Head, but differs in two
+ aspects:
+ 1. Importance-based Sample Reweighting Positive (ISR-P) is applied to
+ change the positive loss weights.
+ 2. Classification-aware regression loss is adopted as a third loss.
+ """
+
+ @force_fp32(apply_to=('cls_scores', 'bbox_preds'))
+ def loss(self,
+ cls_scores,
+ bbox_preds,
+ gt_bboxes,
+ gt_labels,
+ img_metas,
+ gt_bboxes_ignore=None):
+ """Compute losses of the head.
+
+ Args:
+ cls_scores (list[Tensor]): Box scores for each scale level
+ Has shape (N, num_anchors * num_classes, H, W)
+ bbox_preds (list[Tensor]): Box energies / deltas for each scale
+ level with shape (N, num_anchors * 4, H, W)
+ gt_bboxes (list[Tensor]): Ground truth bboxes of each image
+ with shape (num_obj, 4).
+ gt_labels (list[Tensor]): Ground truth labels of each image
+ with shape (num_obj,).
+ img_metas (list[dict]): Meta information of each image, e.g.,
+ image size, scaling factor, etc.
+ gt_bboxes_ignore (list[Tensor]): Ignored gt bboxes of each image.
+ Default: None.
+
+ Returns:
+ dict: Loss dict, comprising classification loss, regression loss and
+ CARL loss.
+ """
+ featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
+ assert len(featmap_sizes) == self.anchor_generator.num_levels
+
+ device = cls_scores[0].device
+
+ anchor_list, valid_flag_list = self.get_anchors(
+ featmap_sizes, img_metas, device=device)
+ label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1
+ cls_reg_targets = self.get_targets(
+ anchor_list,
+ valid_flag_list,
+ gt_bboxes,
+ img_metas,
+ gt_bboxes_ignore_list=gt_bboxes_ignore,
+ gt_labels_list=gt_labels,
+ label_channels=label_channels,
+ return_sampling_results=True)
+ if cls_reg_targets is None:
+ return None
+ (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,
+ num_total_pos, num_total_neg, sampling_results_list) = cls_reg_targets
+ num_total_samples = (
+ num_total_pos + num_total_neg if self.sampling else num_total_pos)
+
+ # anchor number of multi levels
+ num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]
+ # concat all level anchors and flags to a single tensor
+ concat_anchor_list = []
+ for i in range(len(anchor_list)):
+ concat_anchor_list.append(torch.cat(anchor_list[i]))
+ all_anchor_list = images_to_levels(concat_anchor_list,
+ num_level_anchors)
+
+ num_imgs = len(img_metas)
+ flatten_cls_scores = [
+ cls_score.permute(0, 2, 3, 1).reshape(num_imgs, -1, label_channels)
+ for cls_score in cls_scores
+ ]
+ flatten_cls_scores = torch.cat(
+ flatten_cls_scores, dim=1).reshape(-1,
+ flatten_cls_scores[0].size(-1))
+ flatten_bbox_preds = [
+ bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, 4)
+ for bbox_pred in bbox_preds
+ ]
+ flatten_bbox_preds = torch.cat(
+ flatten_bbox_preds, dim=1).view(-1, flatten_bbox_preds[0].size(-1))
+ flatten_labels = torch.cat(labels_list, dim=1).reshape(-1)
+ flatten_label_weights = torch.cat(
+ label_weights_list, dim=1).reshape(-1)
+ flatten_anchors = torch.cat(all_anchor_list, dim=1).reshape(-1, 4)
+ flatten_bbox_targets = torch.cat(
+ bbox_targets_list, dim=1).reshape(-1, 4)
+ flatten_bbox_weights = torch.cat(
+ bbox_weights_list, dim=1).reshape(-1, 4)
+
+ # Apply ISR-P
+ isr_cfg = self.train_cfg.get('isr', None)
+ if isr_cfg is not None:
+ all_targets = (flatten_labels, flatten_label_weights,
+ flatten_bbox_targets, flatten_bbox_weights)
+ with torch.no_grad():
+ all_targets = isr_p(
+ flatten_cls_scores,
+ flatten_bbox_preds,
+ all_targets,
+ flatten_anchors,
+ sampling_results_list,
+ bbox_coder=self.bbox_coder,
+ loss_cls=self.loss_cls,
+ num_class=self.num_classes,
+ **self.train_cfg.isr)
+ (flatten_labels, flatten_label_weights, flatten_bbox_targets,
+ flatten_bbox_weights) = all_targets
+
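+ # Both the ISR-P branch above and the CARL branch below are optional and
+ # are read from self.train_cfg. A hypothetical sketch of the expected keys
+ # (values are only illustrative, not the reference PISA settings):
+ #   train_cfg = dict(..., isr=dict(k=2.0, bias=0.0),
+ #                    carl=dict(k=1.0, bias=0.2))
+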
+ # For convenience we compute the loss once instead of separating it by
+ # FPN level, so that we don't need to separate the weights by level
+ # again. The result should be the same.
+ losses_cls = self.loss_cls(
+ flatten_cls_scores,
+ flatten_labels,
+ flatten_label_weights,
+ avg_factor=num_total_samples)
+ losses_bbox = self.loss_bbox(
+ flatten_bbox_preds,
+ flatten_bbox_targets,
+ flatten_bbox_weights,
+ avg_factor=num_total_samples)
+ loss_dict = dict(loss_cls=losses_cls, loss_bbox=losses_bbox)
+
+ # CARL Loss
+ carl_cfg = self.train_cfg.get('carl', None)
+ if carl_cfg is not None:
+ loss_carl = carl_loss(
+ flatten_cls_scores,
+ flatten_labels,
+ flatten_bbox_preds,
+ flatten_bbox_targets,
+ self.loss_bbox,
+ **self.train_cfg.carl,
+ avg_factor=num_total_pos,
+ sigmoid=True,
+ num_class=self.num_classes)
+ loss_dict.update(loss_carl)
+
+ return loss_dict
diff --git a/annotator/uniformer/mmdet/models/dense_heads/pisa_ssd_head.py b/annotator/uniformer/mmdet/models/dense_heads/pisa_ssd_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..90ef3c83ed62d8346c8daef01f18ad7bd236623c
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/dense_heads/pisa_ssd_head.py
@@ -0,0 +1,139 @@
+import torch
+
+from mmdet.core import multi_apply
+from ..builder import HEADS
+from ..losses import CrossEntropyLoss, SmoothL1Loss, carl_loss, isr_p
+from .ssd_head import SSDHead
+
+
+# TODO: add loss evaluator for SSD
+@HEADS.register_module()
+class PISASSDHead(SSDHead):
+
+ def loss(self,
+ cls_scores,
+ bbox_preds,
+ gt_bboxes,
+ gt_labels,
+ img_metas,
+ gt_bboxes_ignore=None):
+ """Compute losses of the head.
+
+ Args:
+ cls_scores (list[Tensor]): Box scores for each scale level
+ Has shape (N, num_anchors * num_classes, H, W)
+ bbox_preds (list[Tensor]): Box energies / deltas for each scale
+ level with shape (N, num_anchors * 4, H, W)
+ gt_bboxes (list[Tensor]): Ground truth bboxes of each image
+ with shape (num_obj, 4).
+ gt_labels (list[Tensor]): Ground truth labels of each image
+ with shape (num_obj,).
+ img_metas (list[dict]): Meta information of each image, e.g.,
+ image size, scaling factor, etc.
+ gt_bboxes_ignore (list[Tensor]): Ignored gt bboxes of each image.
+ Default: None.
+
+ Returns:
+ dict: Loss dict, comprising classification loss, regression loss and
+ CARL loss.
+ """
+ featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
+ assert len(featmap_sizes) == self.anchor_generator.num_levels
+
+ device = cls_scores[0].device
+
+ anchor_list, valid_flag_list = self.get_anchors(
+ featmap_sizes, img_metas, device=device)
+ cls_reg_targets = self.get_targets(
+ anchor_list,
+ valid_flag_list,
+ gt_bboxes,
+ img_metas,
+ gt_bboxes_ignore_list=gt_bboxes_ignore,
+ gt_labels_list=gt_labels,
+ label_channels=1,
+ unmap_outputs=False,
+ return_sampling_results=True)
+ if cls_reg_targets is None:
+ return None
+ (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,
+ num_total_pos, num_total_neg, sampling_results_list) = cls_reg_targets
+
+ num_images = len(img_metas)
+ all_cls_scores = torch.cat([
+ s.permute(0, 2, 3, 1).reshape(
+ num_images, -1, self.cls_out_channels) for s in cls_scores
+ ], 1)
+ all_labels = torch.cat(labels_list, -1).view(num_images, -1)
+ all_label_weights = torch.cat(label_weights_list,
+ -1).view(num_images, -1)
+ all_bbox_preds = torch.cat([
+ b.permute(0, 2, 3, 1).reshape(num_images, -1, 4)
+ for b in bbox_preds
+ ], -2)
+ all_bbox_targets = torch.cat(bbox_targets_list,
+ -2).view(num_images, -1, 4)
+ all_bbox_weights = torch.cat(bbox_weights_list,
+ -2).view(num_images, -1, 4)
+
+ # concat all level anchors to a single tensor
+ all_anchors = []
+ for i in range(num_images):
+ all_anchors.append(torch.cat(anchor_list[i]))
+
+ isr_cfg = self.train_cfg.get('isr', None)
+ all_targets = (all_labels.view(-1), all_label_weights.view(-1),
+ all_bbox_targets.view(-1,
+ 4), all_bbox_weights.view(-1, 4))
+ # apply ISR-P
+ if isr_cfg is not None:
+ all_targets = isr_p(
+ all_cls_scores.view(-1, all_cls_scores.size(-1)),
+ all_bbox_preds.view(-1, 4),
+ all_targets,
+ torch.cat(all_anchors),
+ sampling_results_list,
+ loss_cls=CrossEntropyLoss(),
+ bbox_coder=self.bbox_coder,
+ **self.train_cfg.isr,
+ num_class=self.num_classes)
+ (new_labels, new_label_weights, new_bbox_targets,
+ new_bbox_weights) = all_targets
+ all_labels = new_labels.view(all_labels.shape)
+ all_label_weights = new_label_weights.view(all_label_weights.shape)
+ all_bbox_targets = new_bbox_targets.view(all_bbox_targets.shape)
+ all_bbox_weights = new_bbox_weights.view(all_bbox_weights.shape)
+
+ # add CARL loss
+ carl_loss_cfg = self.train_cfg.get('carl', None)
+ if carl_loss_cfg is not None:
+ loss_carl = carl_loss(
+ all_cls_scores.view(-1, all_cls_scores.size(-1)),
+ all_targets[0],
+ all_bbox_preds.view(-1, 4),
+ all_targets[2],
+ SmoothL1Loss(beta=1.),
+ **self.train_cfg.carl,
+ avg_factor=num_total_pos,
+ num_class=self.num_classes)
+
+ # check NaN and Inf
+ assert torch.isfinite(all_cls_scores).all().item(), \
+ 'classification scores become infinite or NaN!'
+ assert torch.isfinite(all_bbox_preds).all().item(), \
+ 'bbox predictions become infinite or NaN!'
+
+ losses_cls, losses_bbox = multi_apply(
+ self.loss_single,
+ all_cls_scores,
+ all_bbox_preds,
+ all_anchors,
+ all_labels,
+ all_label_weights,
+ all_bbox_targets,
+ all_bbox_weights,
+ num_total_samples=num_total_pos)
+ loss_dict = dict(loss_cls=losses_cls, loss_bbox=losses_bbox)
+ if carl_loss_cfg is not None:
+ loss_dict.update(loss_carl)
+ return loss_dict
diff --git a/annotator/uniformer/mmdet/models/dense_heads/reppoints_head.py b/annotator/uniformer/mmdet/models/dense_heads/reppoints_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..499cc4f71c968704a40ab2bb7a6b22dd079d82de
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/dense_heads/reppoints_head.py
@@ -0,0 +1,763 @@
+import numpy as np
+import torch
+import torch.nn as nn
+from mmcv.cnn import ConvModule, bias_init_with_prob, normal_init
+from mmcv.ops import DeformConv2d
+
+from mmdet.core import (PointGenerator, build_assigner, build_sampler,
+ images_to_levels, multi_apply, multiclass_nms, unmap)
+from ..builder import HEADS, build_loss
+from .anchor_free_head import AnchorFreeHead
+
+
+@HEADS.register_module()
+class RepPointsHead(AnchorFreeHead):
+ """RepPoint head.
+
+ Args:
+ point_feat_channels (int): Number of channels of points features.
+ gradient_mul (float): The multiplier to gradients from
+ points refinement and recognition.
+ point_strides (Iterable): Points strides.
+ point_base_scale (int): Bbox scale for assigning labels.
+ loss_cls (dict): Config of classification loss.
+ loss_bbox_init (dict): Config of initial points loss.
+ loss_bbox_refine (dict): Config of points loss in refinement.
+ use_grid_points (bool): If True, a bounding box representation is
+ used and the RepPoints are represented as grid points on the
+ bounding box.
+ center_init (bool): Whether to use center point assignment.
+ transform_method (str): The method used to transform RepPoints
+ to a bbox.
+ """ # noqa: W605
+
+ def __init__(self,
+ num_classes,
+ in_channels,
+ point_feat_channels=256,
+ num_points=9,
+ gradient_mul=0.1,
+ point_strides=[8, 16, 32, 64, 128],
+ point_base_scale=4,
+ loss_cls=dict(
+ type='FocalLoss',
+ use_sigmoid=True,
+ gamma=2.0,
+ alpha=0.25,
+ loss_weight=1.0),
+ loss_bbox_init=dict(
+ type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=0.5),
+ loss_bbox_refine=dict(
+ type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0),
+ use_grid_points=False,
+ center_init=True,
+ transform_method='moment',
+ moment_mul=0.01,
+ **kwargs):
+ self.num_points = num_points
+ self.point_feat_channels = point_feat_channels
+ self.use_grid_points = use_grid_points
+ self.center_init = center_init
+
+ # we use deform conv to extract points features
+ self.dcn_kernel = int(np.sqrt(num_points))
+ self.dcn_pad = int((self.dcn_kernel - 1) / 2)
+ assert self.dcn_kernel * self.dcn_kernel == num_points, \
+ 'The points number should be a square number.'
+ assert self.dcn_kernel % 2 == 1, \
+ 'The points number should be an odd square number.'
+ dcn_base = np.arange(-self.dcn_pad,
+ self.dcn_pad + 1).astype(np.float64)
+ dcn_base_y = np.repeat(dcn_base, self.dcn_kernel)
+ dcn_base_x = np.tile(dcn_base, self.dcn_kernel)
+ dcn_base_offset = np.stack([dcn_base_y, dcn_base_x], axis=1).reshape(
+ (-1))
+ self.dcn_base_offset = torch.tensor(dcn_base_offset).view(1, -1, 1, 1)
+
+ super().__init__(num_classes, in_channels, loss_cls=loss_cls, **kwargs)
+
+ self.gradient_mul = gradient_mul
+ self.point_base_scale = point_base_scale
+ self.point_strides = point_strides
+ self.point_generators = [PointGenerator() for _ in self.point_strides]
+
+ self.sampling = loss_cls['type'] not in ['FocalLoss']
+ if self.train_cfg:
+ self.init_assigner = build_assigner(self.train_cfg.init.assigner)
+ self.refine_assigner = build_assigner(
+ self.train_cfg.refine.assigner)
+ # use PseudoSampler when sampling is False
+ if self.sampling and hasattr(self.train_cfg, 'sampler'):
+ sampler_cfg = self.train_cfg.sampler
+ else:
+ sampler_cfg = dict(type='PseudoSampler')
+ self.sampler = build_sampler(sampler_cfg, context=self)
+ self.transform_method = transform_method
+ if self.transform_method == 'moment':
+ self.moment_transfer = nn.Parameter(
+ data=torch.zeros(2), requires_grad=True)
+ self.moment_mul = moment_mul
+
+ self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False)
+ if self.use_sigmoid_cls:
+ self.cls_out_channels = self.num_classes
+ else:
+ self.cls_out_channels = self.num_classes + 1
+ self.loss_bbox_init = build_loss(loss_bbox_init)
+ self.loss_bbox_refine = build_loss(loss_bbox_refine)
+
+ def _init_layers(self):
+ """Initialize layers of the head."""
+ self.relu = nn.ReLU(inplace=True)
+ self.cls_convs = nn.ModuleList()
+ self.reg_convs = nn.ModuleList()
+ for i in range(self.stacked_convs):
+ chn = self.in_channels if i == 0 else self.feat_channels
+ self.cls_convs.append(
+ ConvModule(
+ chn,
+ self.feat_channels,
+ 3,
+ stride=1,
+ padding=1,
+ conv_cfg=self.conv_cfg,
+ norm_cfg=self.norm_cfg))
+ self.reg_convs.append(
+ ConvModule(
+ chn,
+ self.feat_channels,
+ 3,
+ stride=1,
+ padding=1,
+ conv_cfg=self.conv_cfg,
+ norm_cfg=self.norm_cfg))
+ pts_out_dim = 4 if self.use_grid_points else 2 * self.num_points
+ self.reppoints_cls_conv = DeformConv2d(self.feat_channels,
+ self.point_feat_channels,
+ self.dcn_kernel, 1,
+ self.dcn_pad)
+ self.reppoints_cls_out = nn.Conv2d(self.point_feat_channels,
+ self.cls_out_channels, 1, 1, 0)
+ self.reppoints_pts_init_conv = nn.Conv2d(self.feat_channels,
+ self.point_feat_channels, 3,
+ 1, 1)
+ self.reppoints_pts_init_out = nn.Conv2d(self.point_feat_channels,
+ pts_out_dim, 1, 1, 0)
+ self.reppoints_pts_refine_conv = DeformConv2d(self.feat_channels,
+ self.point_feat_channels,
+ self.dcn_kernel, 1,
+ self.dcn_pad)
+ self.reppoints_pts_refine_out = nn.Conv2d(self.point_feat_channels,
+ pts_out_dim, 1, 1, 0)
+
+ def init_weights(self):
+ """Initialize weights of the head."""
+ for m in self.cls_convs:
+ normal_init(m.conv, std=0.01)
+ for m in self.reg_convs:
+ normal_init(m.conv, std=0.01)
+ bias_cls = bias_init_with_prob(0.01)
+ normal_init(self.reppoints_cls_conv, std=0.01)
+ normal_init(self.reppoints_cls_out, std=0.01, bias=bias_cls)
+ normal_init(self.reppoints_pts_init_conv, std=0.01)
+ normal_init(self.reppoints_pts_init_out, std=0.01)
+ normal_init(self.reppoints_pts_refine_conv, std=0.01)
+ normal_init(self.reppoints_pts_refine_out, std=0.01)
+
+ def points2bbox(self, pts, y_first=True):
+ """Converting the points set into bounding box.
+
+ :param pts: the input points sets (fields), each points
+ set (fields) is represented as 2n scalar.
+ :param y_first: if y_first=True, the point set is represented as
+ [y1, x1, y2, x2 ... yn, xn], otherwise the point set is
+ represented as [x1, y1, x2, y2 ... xn, yn].
+ :return: each points set is converting to a bbox [x1, y1, x2, y2].
+ """
+ pts_reshape = pts.view(pts.shape[0], -1, 2, *pts.shape[2:])
+ pts_y = pts_reshape[:, :, 0, ...] if y_first else pts_reshape[:, :, 1,
+ ...]
+ pts_x = pts_reshape[:, :, 1, ...] if y_first else pts_reshape[:, :, 0,
+ ...]
+ if self.transform_method == 'minmax':
+ bbox_left = pts_x.min(dim=1, keepdim=True)[0]
+ bbox_right = pts_x.max(dim=1, keepdim=True)[0]
+ bbox_up = pts_y.min(dim=1, keepdim=True)[0]
+ bbox_bottom = pts_y.max(dim=1, keepdim=True)[0]
+ bbox = torch.cat([bbox_left, bbox_up, bbox_right, bbox_bottom],
+ dim=1)
+ elif self.transform_method == 'partial_minmax':
+ pts_y = pts_y[:, :4, ...]
+ pts_x = pts_x[:, :4, ...]
+ bbox_left = pts_x.min(dim=1, keepdim=True)[0]
+ bbox_right = pts_x.max(dim=1, keepdim=True)[0]
+ bbox_up = pts_y.min(dim=1, keepdim=True)[0]
+ bbox_bottom = pts_y.max(dim=1, keepdim=True)[0]
+ bbox = torch.cat([bbox_left, bbox_up, bbox_right, bbox_bottom],
+ dim=1)
+ elif self.transform_method == 'moment':
+ pts_y_mean = pts_y.mean(dim=1, keepdim=True)
+ pts_x_mean = pts_x.mean(dim=1, keepdim=True)
+ pts_y_std = torch.std(pts_y - pts_y_mean, dim=1, keepdim=True)
+ pts_x_std = torch.std(pts_x - pts_x_mean, dim=1, keepdim=True)
+ moment_transfer = (self.moment_transfer * self.moment_mul) + (
+ self.moment_transfer.detach() * (1 - self.moment_mul))
+ moment_width_transfer = moment_transfer[0]
+ moment_height_transfer = moment_transfer[1]
+ half_width = pts_x_std * torch.exp(moment_width_transfer)
+ half_height = pts_y_std * torch.exp(moment_height_transfer)
+ bbox = torch.cat([
+ pts_x_mean - half_width, pts_y_mean - half_height,
+ pts_x_mean + half_width, pts_y_mean + half_height
+ ],
+ dim=1)
+ else:
+ raise NotImplementedError
+ return bbox
+
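+ # A tiny sketch of the 'minmax' transform above (hypothetical points): for
+ # a single point set {(1, 4), (3, 2), (2, 6)} given as (x, y) pairs, the
+ # resulting bbox is [min_x, min_y, max_x, max_y] = [1, 2, 3, 6]; the
+ # 'moment' branch instead builds the box around the points' mean using
+ # their (exponentially scaled) standard deviation as the half size.
+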
+ def gen_grid_from_reg(self, reg, previous_boxes):
+ """Base on the previous bboxes and regression values, we compute the
+ regressed bboxes and generate the grids on the bboxes.
+
+ :param reg: the regression value to previous bboxes.
+ :param previous_boxes: previous bboxes.
+ :return: generate grids on the regressed bboxes.
+ """
+ b, _, h, w = reg.shape
+ bxy = (previous_boxes[:, :2, ...] + previous_boxes[:, 2:, ...]) / 2.
+ bwh = (previous_boxes[:, 2:, ...] -
+ previous_boxes[:, :2, ...]).clamp(min=1e-6)
+ grid_topleft = bxy + bwh * reg[:, :2, ...] - 0.5 * bwh * torch.exp(
+ reg[:, 2:, ...])
+ grid_wh = bwh * torch.exp(reg[:, 2:, ...])
+ grid_left = grid_topleft[:, [0], ...]
+ grid_top = grid_topleft[:, [1], ...]
+ grid_width = grid_wh[:, [0], ...]
+ grid_height = grid_wh[:, [1], ...]
+ interval = torch.linspace(0., 1., self.dcn_kernel).view(
+ 1, self.dcn_kernel, 1, 1).type_as(reg)
+ grid_x = grid_left + grid_width * interval
+ grid_x = grid_x.unsqueeze(1).repeat(1, self.dcn_kernel, 1, 1, 1)
+ grid_x = grid_x.view(b, -1, h, w)
+ grid_y = grid_top + grid_height * interval
+ grid_y = grid_y.unsqueeze(2).repeat(1, 1, self.dcn_kernel, 1, 1)
+ grid_y = grid_y.view(b, -1, h, w)
+ grid_yx = torch.stack([grid_y, grid_x], dim=2)
+ grid_yx = grid_yx.view(b, -1, h, w)
+ regressed_bbox = torch.cat([
+ grid_left, grid_top, grid_left + grid_width, grid_top + grid_height
+ ], 1)
+ return grid_yx, regressed_bbox
+
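+ # A minimal numeric sketch of the grid generation above (hypothetical
+ # values): for a previous box [0, 0, 4, 4] (center (2, 2), size (4, 4))
+ # and reg = [0, 0, 0, 0], the size stays 4 * exp(0) = 4 and the top-left
+ # becomes (2, 2) + (4, 4) * 0 - 0.5 * (4, 4) = (0, 0), i.e. the box is
+ # unchanged, and grid_yx spreads a dcn_kernel x dcn_kernel lattice of
+ # points evenly over it via torch.linspace(0., 1., dcn_kernel).
+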
+ def forward(self, feats):
+ return multi_apply(self.forward_single, feats)
+
+ def forward_single(self, x):
+ """Forward feature map of a single FPN level."""
+ dcn_base_offset = self.dcn_base_offset.type_as(x)
+ # If we use center_init, the initial RepPoints are taken from the
+ # center points. If we use the bounding box representation, the
+ # initial RepPoints come from a regular grid placed on a pre-defined
+ # bbox.
+ if self.use_grid_points or not self.center_init:
+ scale = self.point_base_scale / 2
+ points_init = dcn_base_offset / dcn_base_offset.max() * scale
+ bbox_init = x.new_tensor([-scale, -scale, scale,
+ scale]).view(1, 4, 1, 1)
+ else:
+ points_init = 0
+ cls_feat = x
+ pts_feat = x
+ for cls_conv in self.cls_convs:
+ cls_feat = cls_conv(cls_feat)
+ for reg_conv in self.reg_convs:
+ pts_feat = reg_conv(pts_feat)
+ # initialize reppoints
+ pts_out_init = self.reppoints_pts_init_out(
+ self.relu(self.reppoints_pts_init_conv(pts_feat)))
+ if self.use_grid_points:
+ pts_out_init, bbox_out_init = self.gen_grid_from_reg(
+ pts_out_init, bbox_init.detach())
+ else:
+ pts_out_init = pts_out_init + points_init
+ # refine and classify reppoints
+ pts_out_init_grad_mul = (1 - self.gradient_mul) * pts_out_init.detach(
+ ) + self.gradient_mul * pts_out_init
+ dcn_offset = pts_out_init_grad_mul - dcn_base_offset
+ cls_out = self.reppoints_cls_out(
+ self.relu(self.reppoints_cls_conv(cls_feat, dcn_offset)))
+ pts_out_refine = self.reppoints_pts_refine_out(
+ self.relu(self.reppoints_pts_refine_conv(pts_feat, dcn_offset)))
+ if self.use_grid_points:
+ pts_out_refine, bbox_out_refine = self.gen_grid_from_reg(
+ pts_out_refine, bbox_out_init.detach())
+ else:
+ pts_out_refine = pts_out_refine + pts_out_init.detach()
+ return cls_out, pts_out_init, pts_out_refine
+
+ def get_points(self, featmap_sizes, img_metas, device):
+ """Get points according to feature map sizes.
+
+ Args:
+ featmap_sizes (list[tuple]): Multi-level feature map sizes.
+ img_metas (list[dict]): Image meta info.
+
+ Returns:
+ tuple: points of each image, valid flags of each image
+ """
+ num_imgs = len(img_metas)
+ num_levels = len(featmap_sizes)
+
+ # since the feature map sizes of all images are the same, we only
+ # compute the point centers once
+ multi_level_points = []
+ for i in range(num_levels):
+ points = self.point_generators[i].grid_points(
+ featmap_sizes[i], self.point_strides[i], device)
+ multi_level_points.append(points)
+ points_list = [[point.clone() for point in multi_level_points]
+ for _ in range(num_imgs)]
+
+ # for each image, we compute valid flags of multi level grids
+ valid_flag_list = []
+ for img_id, img_meta in enumerate(img_metas):
+ multi_level_flags = []
+ for i in range(num_levels):
+ point_stride = self.point_strides[i]
+ feat_h, feat_w = featmap_sizes[i]
+ h, w = img_meta['pad_shape'][:2]
+ valid_feat_h = min(int(np.ceil(h / point_stride)), feat_h)
+ valid_feat_w = min(int(np.ceil(w / point_stride)), feat_w)
+ flags = self.point_generators[i].valid_flags(
+ (feat_h, feat_w), (valid_feat_h, valid_feat_w), device)
+ multi_level_flags.append(flags)
+ valid_flag_list.append(multi_level_flags)
+
+ return points_list, valid_flag_list
+
+ def centers_to_bboxes(self, point_list):
+ """Get bboxes according to center points.
+
+ Only used in :class:`MaxIoUAssigner`.
+ """
+ bbox_list = []
+ for i_img, point in enumerate(point_list):
+ bbox = []
+ for i_lvl in range(len(self.point_strides)):
+ scale = self.point_base_scale * self.point_strides[i_lvl] * 0.5
+ bbox_shift = torch.Tensor([-scale, -scale, scale,
+ scale]).view(1, 4).type_as(point[0])
+ bbox_center = torch.cat(
+ [point[i_lvl][:, :2], point[i_lvl][:, :2]], dim=1)
+ bbox.append(bbox_center + bbox_shift)
+ bbox_list.append(bbox)
+ return bbox_list
+
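+ # Illustrative sketch (hypothetical numbers): with point_base_scale=4 and
+ # a level stride of 8, scale = 4 * 8 * 0.5 = 16, so a center point at
+ # (32, 32) becomes the pseudo bbox [16, 16, 48, 48] that MaxIoUAssigner
+ # can match against ground truth boxes.
+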
+ def offset_to_pts(self, center_list, pred_list):
+ """Change from point offset to point coordinate."""
+ pts_list = []
+ for i_lvl in range(len(self.point_strides)):
+ pts_lvl = []
+ for i_img in range(len(center_list)):
+ pts_center = center_list[i_img][i_lvl][:, :2].repeat(
+ 1, self.num_points)
+ pts_shift = pred_list[i_lvl][i_img]
+ yx_pts_shift = pts_shift.permute(1, 2, 0).view(
+ -1, 2 * self.num_points)
+ y_pts_shift = yx_pts_shift[..., 0::2]
+ x_pts_shift = yx_pts_shift[..., 1::2]
+ xy_pts_shift = torch.stack([x_pts_shift, y_pts_shift], -1)
+ xy_pts_shift = xy_pts_shift.view(*yx_pts_shift.shape[:-1], -1)
+ pts = xy_pts_shift * self.point_strides[i_lvl] + pts_center
+ pts_lvl.append(pts)
+ pts_lvl = torch.stack(pts_lvl, 0)
+ pts_list.append(pts_lvl)
+ return pts_list
+
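+ # A small decoding sketch (hypothetical numbers): for a level with stride
+ # 8, a point center at (16, 24) and a predicted (y, x) offset pair
+ # (0.5, -1.0), the decoded coordinate is
+ #   (x, y) = (-1.0 * 8 + 16, 0.5 * 8 + 24) = (8.0, 28.0),
+ # i.e. offsets are expressed in units of the level stride relative to the
+ # point center, with the (y, x) prediction order swapped to (x, y).
+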
+ def _point_target_single(self,
+ flat_proposals,
+ valid_flags,
+ gt_bboxes,
+ gt_bboxes_ignore,
+ gt_labels,
+ label_channels=1,
+ stage='init',
+ unmap_outputs=True):
+ inside_flags = valid_flags
+ if not inside_flags.any():
+ return (None, ) * 7
+ # assign gt and sample proposals
+ proposals = flat_proposals[inside_flags, :]
+
+ if stage == 'init':
+ assigner = self.init_assigner
+ pos_weight = self.train_cfg.init.pos_weight
+ else:
+ assigner = self.refine_assigner
+ pos_weight = self.train_cfg.refine.pos_weight
+ assign_result = assigner.assign(proposals, gt_bboxes, gt_bboxes_ignore,
+ None if self.sampling else gt_labels)
+ sampling_result = self.sampler.sample(assign_result, proposals,
+ gt_bboxes)
+
+ num_valid_proposals = proposals.shape[0]
+ bbox_gt = proposals.new_zeros([num_valid_proposals, 4])
+ pos_proposals = torch.zeros_like(proposals)
+ proposals_weights = proposals.new_zeros([num_valid_proposals, 4])
+ labels = proposals.new_full((num_valid_proposals, ),
+ self.num_classes,
+ dtype=torch.long)
+ label_weights = proposals.new_zeros(
+ num_valid_proposals, dtype=torch.float)
+
+ pos_inds = sampling_result.pos_inds
+ neg_inds = sampling_result.neg_inds
+ if len(pos_inds) > 0:
+ pos_gt_bboxes = sampling_result.pos_gt_bboxes
+ bbox_gt[pos_inds, :] = pos_gt_bboxes
+ pos_proposals[pos_inds, :] = proposals[pos_inds, :]
+ proposals_weights[pos_inds, :] = 1.0
+ if gt_labels is None:
+ # Only rpn gives gt_labels as None
+ # Foreground is the first class
+ labels[pos_inds] = 0
+ else:
+ labels[pos_inds] = gt_labels[
+ sampling_result.pos_assigned_gt_inds]
+ if pos_weight <= 0:
+ label_weights[pos_inds] = 1.0
+ else:
+ label_weights[pos_inds] = pos_weight
+ if len(neg_inds) > 0:
+ label_weights[neg_inds] = 1.0
+
+ # map up to original set of proposals
+ if unmap_outputs:
+ num_total_proposals = flat_proposals.size(0)
+ labels = unmap(labels, num_total_proposals, inside_flags)
+ label_weights = unmap(label_weights, num_total_proposals,
+ inside_flags)
+ bbox_gt = unmap(bbox_gt, num_total_proposals, inside_flags)
+ pos_proposals = unmap(pos_proposals, num_total_proposals,
+ inside_flags)
+ proposals_weights = unmap(proposals_weights, num_total_proposals,
+ inside_flags)
+
+ return (labels, label_weights, bbox_gt, pos_proposals,
+ proposals_weights, pos_inds, neg_inds)
+
+ def get_targets(self,
+ proposals_list,
+ valid_flag_list,
+ gt_bboxes_list,
+ img_metas,
+ gt_bboxes_ignore_list=None,
+ gt_labels_list=None,
+ stage='init',
+ label_channels=1,
+ unmap_outputs=True):
+ """Compute corresponding GT box and classification targets for
+ proposals.
+
+ Args:
+ proposals_list (list[list]): Multi level points/bboxes of each
+ image.
+ valid_flag_list (list[list]): Multi level valid flags of each
+ image.
+ gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image.
+ img_metas (list[dict]): Meta info of each image.
+ gt_bboxes_ignore_list (list[Tensor]): Ground truth bboxes to be
+ ignored.
+ gt_labels_list (list[Tensor]): Ground truth labels of each box.
+ stage (str): `init` or `refine`. Whether to generate targets for
+ the init stage or the refine stage.
+ label_channels (int): Channel of label.
+ unmap_outputs (bool): Whether to map outputs back to the original
+ set of anchors.
+
+ Returns:
+ tuple:
+ - labels_list (list[Tensor]): Labels of each level.
+ - label_weights_list (list[Tensor]): Label weights of each level. # noqa: E501
+ - bbox_gt_list (list[Tensor]): Ground truth bbox of each level.
+ - proposal_list (list[Tensor]): Proposals(points/bboxes) of each level. # noqa: E501
+ - proposal_weights_list (list[Tensor]): Proposal weights of each level. # noqa: E501
+ - num_total_pos (int): Number of positive samples in all images. # noqa: E501
+ - num_total_neg (int): Number of negative samples in all images. # noqa: E501
+ """
+ assert stage in ['init', 'refine']
+ num_imgs = len(img_metas)
+ assert len(proposals_list) == len(valid_flag_list) == num_imgs
+
+ # points number of multi levels
+ num_level_proposals = [points.size(0) for points in proposals_list[0]]
+
+ # concat all level points and flags to a single tensor
+ for i in range(num_imgs):
+ assert len(proposals_list[i]) == len(valid_flag_list[i])
+ proposals_list[i] = torch.cat(proposals_list[i])
+ valid_flag_list[i] = torch.cat(valid_flag_list[i])
+
+ # compute targets for each image
+ if gt_bboxes_ignore_list is None:
+ gt_bboxes_ignore_list = [None for _ in range(num_imgs)]
+ if gt_labels_list is None:
+ gt_labels_list = [None for _ in range(num_imgs)]
+ (all_labels, all_label_weights, all_bbox_gt, all_proposals,
+ all_proposal_weights, pos_inds_list, neg_inds_list) = multi_apply(
+ self._point_target_single,
+ proposals_list,
+ valid_flag_list,
+ gt_bboxes_list,
+ gt_bboxes_ignore_list,
+ gt_labels_list,
+ stage=stage,
+ label_channels=label_channels,
+ unmap_outputs=unmap_outputs)
+ # no valid points
+ if any([labels is None for labels in all_labels]):
+ return None
+ # sampled points of all images
+ num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list])
+ num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list])
+ labels_list = images_to_levels(all_labels, num_level_proposals)
+ label_weights_list = images_to_levels(all_label_weights,
+ num_level_proposals)
+ bbox_gt_list = images_to_levels(all_bbox_gt, num_level_proposals)
+ proposals_list = images_to_levels(all_proposals, num_level_proposals)
+ proposal_weights_list = images_to_levels(all_proposal_weights,
+ num_level_proposals)
+ return (labels_list, label_weights_list, bbox_gt_list, proposals_list,
+ proposal_weights_list, num_total_pos, num_total_neg)
+
+ def loss_single(self, cls_score, pts_pred_init, pts_pred_refine, labels,
+ label_weights, bbox_gt_init, bbox_weights_init,
+ bbox_gt_refine, bbox_weights_refine, stride,
+ num_total_samples_init, num_total_samples_refine):
+ # classification loss
+ labels = labels.reshape(-1)
+ label_weights = label_weights.reshape(-1)
+ cls_score = cls_score.permute(0, 2, 3,
+ 1).reshape(-1, self.cls_out_channels)
+ cls_score = cls_score.contiguous()
+ loss_cls = self.loss_cls(
+ cls_score,
+ labels,
+ label_weights,
+ avg_factor=num_total_samples_refine)
+
+ # points loss
+ bbox_gt_init = bbox_gt_init.reshape(-1, 4)
+ bbox_weights_init = bbox_weights_init.reshape(-1, 4)
+ bbox_pred_init = self.points2bbox(
+ pts_pred_init.reshape(-1, 2 * self.num_points), y_first=False)
+ bbox_gt_refine = bbox_gt_refine.reshape(-1, 4)
+ bbox_weights_refine = bbox_weights_refine.reshape(-1, 4)
+ bbox_pred_refine = self.points2bbox(
+ pts_pred_refine.reshape(-1, 2 * self.num_points), y_first=False)
+ normalize_term = self.point_base_scale * stride
+ loss_pts_init = self.loss_bbox_init(
+ bbox_pred_init / normalize_term,
+ bbox_gt_init / normalize_term,
+ bbox_weights_init,
+ avg_factor=num_total_samples_init)
+ loss_pts_refine = self.loss_bbox_refine(
+ bbox_pred_refine / normalize_term,
+ bbox_gt_refine / normalize_term,
+ bbox_weights_refine,
+ avg_factor=num_total_samples_refine)
+ return loss_cls, loss_pts_init, loss_pts_refine
+
+ def loss(self,
+ cls_scores,
+ pts_preds_init,
+ pts_preds_refine,
+ gt_bboxes,
+ gt_labels,
+ img_metas,
+ gt_bboxes_ignore=None):
+ featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
+ assert len(featmap_sizes) == len(self.point_generators)
+ device = cls_scores[0].device
+ label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1
+
+ # target for initial stage
+ center_list, valid_flag_list = self.get_points(featmap_sizes,
+ img_metas, device)
+ pts_coordinate_preds_init = self.offset_to_pts(center_list,
+ pts_preds_init)
+ if self.train_cfg.init.assigner['type'] == 'PointAssigner':
+ # Assign target for center list
+ candidate_list = center_list
+ else:
+ # transform center list to bbox list and
+ # assign target for bbox list
+ bbox_list = self.centers_to_bboxes(center_list)
+ candidate_list = bbox_list
+ cls_reg_targets_init = self.get_targets(
+ candidate_list,
+ valid_flag_list,
+ gt_bboxes,
+ img_metas,
+ gt_bboxes_ignore_list=gt_bboxes_ignore,
+ gt_labels_list=gt_labels,
+ stage='init',
+ label_channels=label_channels)
+ (*_, bbox_gt_list_init, candidate_list_init, bbox_weights_list_init,
+ num_total_pos_init, num_total_neg_init) = cls_reg_targets_init
+ num_total_samples_init = (
+ num_total_pos_init +
+ num_total_neg_init if self.sampling else num_total_pos_init)
+
+ # target for refinement stage
+ center_list, valid_flag_list = self.get_points(featmap_sizes,
+ img_metas, device)
+ pts_coordinate_preds_refine = self.offset_to_pts(
+ center_list, pts_preds_refine)
+ bbox_list = []
+ for i_img, center in enumerate(center_list):
+ bbox = []
+ for i_lvl in range(len(pts_preds_refine)):
+ bbox_preds_init = self.points2bbox(
+ pts_preds_init[i_lvl].detach())
+ bbox_shift = bbox_preds_init * self.point_strides[i_lvl]
+ bbox_center = torch.cat(
+ [center[i_lvl][:, :2], center[i_lvl][:, :2]], dim=1)
+ bbox.append(bbox_center +
+ bbox_shift[i_img].permute(1, 2, 0).reshape(-1, 4))
+ bbox_list.append(bbox)
+ cls_reg_targets_refine = self.get_targets(
+ bbox_list,
+ valid_flag_list,
+ gt_bboxes,
+ img_metas,
+ gt_bboxes_ignore_list=gt_bboxes_ignore,
+ gt_labels_list=gt_labels,
+ stage='refine',
+ label_channels=label_channels)
+ (labels_list, label_weights_list, bbox_gt_list_refine,
+ candidate_list_refine, bbox_weights_list_refine, num_total_pos_refine,
+ num_total_neg_refine) = cls_reg_targets_refine
+ num_total_samples_refine = (
+ num_total_pos_refine +
+ num_total_neg_refine if self.sampling else num_total_pos_refine)
+
+ # compute loss
+ losses_cls, losses_pts_init, losses_pts_refine = multi_apply(
+ self.loss_single,
+ cls_scores,
+ pts_coordinate_preds_init,
+ pts_coordinate_preds_refine,
+ labels_list,
+ label_weights_list,
+ bbox_gt_list_init,
+ bbox_weights_list_init,
+ bbox_gt_list_refine,
+ bbox_weights_list_refine,
+ self.point_strides,
+ num_total_samples_init=num_total_samples_init,
+ num_total_samples_refine=num_total_samples_refine)
+ loss_dict_all = {
+ 'loss_cls': losses_cls,
+ 'loss_pts_init': losses_pts_init,
+ 'loss_pts_refine': losses_pts_refine
+ }
+ return loss_dict_all
+
+ def get_bboxes(self,
+ cls_scores,
+ pts_preds_init,
+ pts_preds_refine,
+ img_metas,
+ cfg=None,
+ rescale=False,
+ with_nms=True):
+ assert len(cls_scores) == len(pts_preds_refine)
+ device = cls_scores[0].device
+ bbox_preds_refine = [
+ self.points2bbox(pts_pred_refine)
+ for pts_pred_refine in pts_preds_refine
+ ]
+ num_levels = len(cls_scores)
+ mlvl_points = [
+ self.point_generators[i].grid_points(cls_scores[i].size()[-2:],
+ self.point_strides[i], device)
+ for i in range(num_levels)
+ ]
+ result_list = []
+ for img_id in range(len(img_metas)):
+ cls_score_list = [
+ cls_scores[i][img_id].detach() for i in range(num_levels)
+ ]
+ bbox_pred_list = [
+ bbox_preds_refine[i][img_id].detach()
+ for i in range(num_levels)
+ ]
+ img_shape = img_metas[img_id]['img_shape']
+ scale_factor = img_metas[img_id]['scale_factor']
+ proposals = self._get_bboxes_single(cls_score_list, bbox_pred_list,
+ mlvl_points, img_shape,
+ scale_factor, cfg, rescale,
+ with_nms)
+ result_list.append(proposals)
+ return result_list
+
+ def _get_bboxes_single(self,
+ cls_scores,
+ bbox_preds,
+ mlvl_points,
+ img_shape,
+ scale_factor,
+ cfg,
+ rescale=False,
+ with_nms=True):
+ cfg = self.test_cfg if cfg is None else cfg
+ assert len(cls_scores) == len(bbox_preds) == len(mlvl_points)
+ mlvl_bboxes = []
+ mlvl_scores = []
+ for i_lvl, (cls_score, bbox_pred, points) in enumerate(
+ zip(cls_scores, bbox_preds, mlvl_points)):
+ assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
+ cls_score = cls_score.permute(1, 2,
+ 0).reshape(-1, self.cls_out_channels)
+ if self.use_sigmoid_cls:
+ scores = cls_score.sigmoid()
+ else:
+ scores = cls_score.softmax(-1)
+ bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4)
+ nms_pre = cfg.get('nms_pre', -1)
+ if nms_pre > 0 and scores.shape[0] > nms_pre:
+ if self.use_sigmoid_cls:
+ max_scores, _ = scores.max(dim=1)
+ else:
+ # remind that we set FG labels to [0, num_class-1]
+ # since mmdet v2.0
+ # BG cat_id: num_class
+ max_scores, _ = scores[:, :-1].max(dim=1)
+ _, topk_inds = max_scores.topk(nms_pre)
+ points = points[topk_inds, :]
+ bbox_pred = bbox_pred[topk_inds, :]
+ scores = scores[topk_inds, :]
+ bbox_pos_center = torch.cat([points[:, :2], points[:, :2]], dim=1)
+ bboxes = bbox_pred * self.point_strides[i_lvl] + bbox_pos_center
+ x1 = bboxes[:, 0].clamp(min=0, max=img_shape[1])
+ y1 = bboxes[:, 1].clamp(min=0, max=img_shape[0])
+ x2 = bboxes[:, 2].clamp(min=0, max=img_shape[1])
+ y2 = bboxes[:, 3].clamp(min=0, max=img_shape[0])
+ bboxes = torch.stack([x1, y1, x2, y2], dim=-1)
+ mlvl_bboxes.append(bboxes)
+ mlvl_scores.append(scores)
+ mlvl_bboxes = torch.cat(mlvl_bboxes)
+ if rescale:
+ mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor)
+ mlvl_scores = torch.cat(mlvl_scores)
+ if self.use_sigmoid_cls:
+ # Add a dummy background class to the backend when using sigmoid
+ # remind that we set FG labels to [0, num_class-1] since mmdet v2.0
+ # BG cat_id: num_class
+ padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1)
+ mlvl_scores = torch.cat([mlvl_scores, padding], dim=1)
+ if with_nms:
+ det_bboxes, det_labels = multiclass_nms(mlvl_bboxes, mlvl_scores,
+ cfg.score_thr, cfg.nms,
+ cfg.max_per_img)
+ return det_bboxes, det_labels
+ else:
+ return mlvl_bboxes, mlvl_scores
diff --git a/annotator/uniformer/mmdet/models/dense_heads/retina_head.py b/annotator/uniformer/mmdet/models/dense_heads/retina_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..b12416fa8332f02b9a04bbfc7926f6d13875e61b
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/dense_heads/retina_head.py
@@ -0,0 +1,114 @@
+import torch.nn as nn
+from mmcv.cnn import ConvModule, bias_init_with_prob, normal_init
+
+from ..builder import HEADS
+from .anchor_head import AnchorHead
+
+
+@HEADS.register_module()
+class RetinaHead(AnchorHead):
+ r"""An anchor-based head used in `RetinaNet
+ <https://arxiv.org/abs/1708.02002>`_.
+
+ The head contains two subnetworks. The first classifies anchor boxes and
+ the second regresses deltas for the anchors.
+
+ Example:
+ >>> import torch
+ >>> self = RetinaHead(11, 7)
+ >>> x = torch.rand(1, 7, 32, 32)
+ >>> cls_score, bbox_pred = self.forward_single(x)
+ >>> # Each anchor predicts a score for each class except background
+ >>> cls_per_anchor = cls_score.shape[1] / self.num_anchors
+ >>> box_per_anchor = bbox_pred.shape[1] / self.num_anchors
+ >>> assert cls_per_anchor == (self.num_classes)
+ >>> assert box_per_anchor == 4
+ """
+
+ def __init__(self,
+ num_classes,
+ in_channels,
+ stacked_convs=4,
+ conv_cfg=None,
+ norm_cfg=None,
+ anchor_generator=dict(
+ type='AnchorGenerator',
+ octave_base_scale=4,
+ scales_per_octave=3,
+ ratios=[0.5, 1.0, 2.0],
+ strides=[8, 16, 32, 64, 128]),
+ **kwargs):
+ self.stacked_convs = stacked_convs
+ self.conv_cfg = conv_cfg
+ self.norm_cfg = norm_cfg
+ super(RetinaHead, self).__init__(
+ num_classes,
+ in_channels,
+ anchor_generator=anchor_generator,
+ **kwargs)
+
+ def _init_layers(self):
+ """Initialize layers of the head."""
+ self.relu = nn.ReLU(inplace=True)
+ self.cls_convs = nn.ModuleList()
+ self.reg_convs = nn.ModuleList()
+ for i in range(self.stacked_convs):
+ chn = self.in_channels if i == 0 else self.feat_channels
+ self.cls_convs.append(
+ ConvModule(
+ chn,
+ self.feat_channels,
+ 3,
+ stride=1,
+ padding=1,
+ conv_cfg=self.conv_cfg,
+ norm_cfg=self.norm_cfg))
+ self.reg_convs.append(
+ ConvModule(
+ chn,
+ self.feat_channels,
+ 3,
+ stride=1,
+ padding=1,
+ conv_cfg=self.conv_cfg,
+ norm_cfg=self.norm_cfg))
+ self.retina_cls = nn.Conv2d(
+ self.feat_channels,
+ self.num_anchors * self.cls_out_channels,
+ 3,
+ padding=1)
+ self.retina_reg = nn.Conv2d(
+ self.feat_channels, self.num_anchors * 4, 3, padding=1)
+
+ def init_weights(self):
+ """Initialize weights of the head."""
+ for m in self.cls_convs:
+ normal_init(m.conv, std=0.01)
+ for m in self.reg_convs:
+ normal_init(m.conv, std=0.01)
+ bias_cls = bias_init_with_prob(0.01)
+ normal_init(self.retina_cls, std=0.01, bias=bias_cls)
+ normal_init(self.retina_reg, std=0.01)
+
+ def forward_single(self, x):
+ """Forward feature of a single scale level.
+
+ Args:
+ x (Tensor): Features of a single scale level.
+
+ Returns:
+ tuple:
+ cls_score (Tensor): Cls scores for a single scale level,
+ the channel number is num_anchors * num_classes.
+ bbox_pred (Tensor): Box energies / deltas for a single scale
+ level, the channel number is num_anchors * 4.
+ """
+ cls_feat = x
+ reg_feat = x
+ for cls_conv in self.cls_convs:
+ cls_feat = cls_conv(cls_feat)
+ for reg_conv in self.reg_convs:
+ reg_feat = reg_conv(reg_feat)
+ cls_score = self.retina_cls(cls_feat)
+ bbox_pred = self.retina_reg(reg_feat)
+ return cls_score, bbox_pred
diff --git a/annotator/uniformer/mmdet/models/dense_heads/retina_sepbn_head.py b/annotator/uniformer/mmdet/models/dense_heads/retina_sepbn_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..6b8ce7f0104b90af4b128e0f245473a1c0219fcd
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/dense_heads/retina_sepbn_head.py
@@ -0,0 +1,113 @@
+import torch.nn as nn
+from mmcv.cnn import ConvModule, bias_init_with_prob, normal_init
+
+from ..builder import HEADS
+from .anchor_head import AnchorHead
+
+
+@HEADS.register_module()
+class RetinaSepBNHead(AnchorHead):
+ """"RetinaHead with separate BN.
+
+ In RetinaHead, conv/norm layers are shared across different FPN levels,
+ while in RetinaSepBNHead, conv layers are shared across different FPN
+ levels, but BN layers are separated.
+ """
+
+ def __init__(self,
+ num_classes,
+ num_ins,
+ in_channels,
+ stacked_convs=4,
+ conv_cfg=None,
+ norm_cfg=None,
+ **kwargs):
+ self.stacked_convs = stacked_convs
+ self.conv_cfg = conv_cfg
+ self.norm_cfg = norm_cfg
+ self.num_ins = num_ins
+ super(RetinaSepBNHead, self).__init__(num_classes, in_channels,
+ **kwargs)
+
+ def _init_layers(self):
+ """Initialize layers of the head."""
+ self.relu = nn.ReLU(inplace=True)
+ self.cls_convs = nn.ModuleList()
+ self.reg_convs = nn.ModuleList()
+ for i in range(self.num_ins):
+ cls_convs = nn.ModuleList()
+ reg_convs = nn.ModuleList()
+ for i in range(self.stacked_convs):
+ chn = self.in_channels if i == 0 else self.feat_channels
+ cls_convs.append(
+ ConvModule(
+ chn,
+ self.feat_channels,
+ 3,
+ stride=1,
+ padding=1,
+ conv_cfg=self.conv_cfg,
+ norm_cfg=self.norm_cfg))
+ reg_convs.append(
+ ConvModule(
+ chn,
+ self.feat_channels,
+ 3,
+ stride=1,
+ padding=1,
+ conv_cfg=self.conv_cfg,
+ norm_cfg=self.norm_cfg))
+ self.cls_convs.append(cls_convs)
+ self.reg_convs.append(reg_convs)
+ for i in range(self.stacked_convs):
+ for j in range(1, self.num_ins):
+ self.cls_convs[j][i].conv = self.cls_convs[0][i].conv
+ self.reg_convs[j][i].conv = self.reg_convs[0][i].conv
+ self.retina_cls = nn.Conv2d(
+ self.feat_channels,
+ self.num_anchors * self.cls_out_channels,
+ 3,
+ padding=1)
+ self.retina_reg = nn.Conv2d(
+ self.feat_channels, self.num_anchors * 4, 3, padding=1)
+
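+ # A minimal sanity sketch of the sharing above (assuming a constructed
+ # head with a BN norm_cfg): conv weights are shared across FPN levels
+ # while the BN layers are not, e.g.
+ #   head.cls_convs[1][0].conv is head.cls_convs[0][0].conv    # True
+ #   head.cls_convs[1][0].norm is head.cls_convs[0][0].norm    # False
+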
+ def init_weights(self):
+ """Initialize weights of the head."""
+ for m in self.cls_convs[0]:
+ normal_init(m.conv, std=0.01)
+ for m in self.reg_convs[0]:
+ normal_init(m.conv, std=0.01)
+ bias_cls = bias_init_with_prob(0.01)
+ normal_init(self.retina_cls, std=0.01, bias=bias_cls)
+ normal_init(self.retina_reg, std=0.01)
+
+ def forward(self, feats):
+ """Forward features from the upstream network.
+
+ Args:
+ feats (tuple[Tensor]): Features from the upstream network, each is
+ a 4D-tensor.
+
+ Returns:
+ tuple: Usually a tuple of classification scores and bbox predictions.
+ cls_scores (list[Tensor]): Classification scores for all scale
+ levels, each is a 4D-tensor, the channel number is
+ num_anchors * num_classes.
+ bbox_preds (list[Tensor]): Box energies / deltas for all scale
+ levels, each is a 4D-tensor, the channel number is
+ num_anchors * 4.
+ """
+ cls_scores = []
+ bbox_preds = []
+ for i, x in enumerate(feats):
+ cls_feat = x
+ reg_feat = x
+ for cls_conv in self.cls_convs[i]:
+ cls_feat = cls_conv(cls_feat)
+ for reg_conv in self.reg_convs[i]:
+ reg_feat = reg_conv(reg_feat)
+ cls_score = self.retina_cls(cls_feat)
+ bbox_pred = self.retina_reg(reg_feat)
+ cls_scores.append(cls_score)
+ bbox_preds.append(bbox_pred)
+ return cls_scores, bbox_preds
diff --git a/annotator/uniformer/mmdet/models/dense_heads/rpn_head.py b/annotator/uniformer/mmdet/models/dense_heads/rpn_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..a888cb8c188ca6fe63045b6230266553fbe8c996
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/dense_heads/rpn_head.py
@@ -0,0 +1,236 @@
+import copy
+import warnings
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from mmcv import ConfigDict
+from mmcv.cnn import normal_init
+from mmcv.ops import batched_nms
+
+from ..builder import HEADS
+from .anchor_head import AnchorHead
+from .rpn_test_mixin import RPNTestMixin
+
+
+@HEADS.register_module()
+class RPNHead(RPNTestMixin, AnchorHead):
+ """RPN head.
+
+ Args:
+ in_channels (int): Number of channels in the input feature map.
+ """ # noqa: W605
+
+ def __init__(self, in_channels, **kwargs):
+ super(RPNHead, self).__init__(1, in_channels, **kwargs)
+
+ def _init_layers(self):
+ """Initialize layers of the head."""
+ self.rpn_conv = nn.Conv2d(
+ self.in_channels, self.feat_channels, 3, padding=1)
+ self.rpn_cls = nn.Conv2d(self.feat_channels,
+ self.num_anchors * self.cls_out_channels, 1)
+ self.rpn_reg = nn.Conv2d(self.feat_channels, self.num_anchors * 4, 1)
+
+ def init_weights(self):
+ """Initialize weights of the head."""
+ normal_init(self.rpn_conv, std=0.01)
+ normal_init(self.rpn_cls, std=0.01)
+ normal_init(self.rpn_reg, std=0.01)
+
+ def forward_single(self, x):
+ """Forward feature map of a single scale level."""
+ x = self.rpn_conv(x)
+ x = F.relu(x, inplace=True)
+ rpn_cls_score = self.rpn_cls(x)
+ rpn_bbox_pred = self.rpn_reg(x)
+ return rpn_cls_score, rpn_bbox_pred
+
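+ # Shape sketch (hypothetical sizes): with feat_channels=256, num_anchors=3
+ # and a sigmoid classifier (cls_out_channels=1), an input x of shape
+ # (N, 256, H, W) yields rpn_cls_score of shape (N, 3 * 1, H, W) and
+ # rpn_bbox_pred of shape (N, 3 * 4, H, W).
+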
+ def loss(self,
+ cls_scores,
+ bbox_preds,
+ gt_bboxes,
+ img_metas,
+ gt_bboxes_ignore=None):
+ """Compute losses of the head.
+
+ Args:
+ cls_scores (list[Tensor]): Box scores for each scale level
+ Has shape (N, num_anchors * num_classes, H, W)
+ bbox_preds (list[Tensor]): Box energies / deltas for each scale
+ level with shape (N, num_anchors * 4, H, W)
+ gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
+ shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
+ img_metas (list[dict]): Meta information of each image, e.g.,
+ image size, scaling factor, etc.
+ gt_bboxes_ignore (None | list[Tensor]): specify which bounding
+ boxes can be ignored when computing the loss.
+
+ Returns:
+ dict[str, Tensor]: A dictionary of loss components.
+ """
+ losses = super(RPNHead, self).loss(
+ cls_scores,
+ bbox_preds,
+ gt_bboxes,
+ None,
+ img_metas,
+ gt_bboxes_ignore=gt_bboxes_ignore)
+ return dict(
+ loss_rpn_cls=losses['loss_cls'], loss_rpn_bbox=losses['loss_bbox'])
+
+ def _get_bboxes(self,
+ cls_scores,
+ bbox_preds,
+ mlvl_anchors,
+ img_shapes,
+ scale_factors,
+ cfg,
+ rescale=False):
+ """Transform outputs for a single batch item into bbox predictions.
+
+ Args:
+ cls_scores (list[Tensor]): Box scores for each scale level
+ Has shape (N, num_anchors * num_classes, H, W).
+ bbox_preds (list[Tensor]): Box energies / deltas for each scale
+ level with shape (N, num_anchors * 4, H, W).
+ mlvl_anchors (list[Tensor]): Box reference for each scale level
+ with shape (num_total_anchors, 4).
+ img_shapes (list[tuple[int]]): Shape of the input image,
+ (height, width, 3).
+ scale_factors (list[ndarray]): Scale factor of the image arranged as
+ (w_scale, h_scale, w_scale, h_scale).
+ cfg (mmcv.Config): Test / postprocessing configuration,
+ if None, test_cfg would be used.
+ rescale (bool): If True, return boxes in original image space.
+
+ Returns:
+ list[tuple[Tensor, Tensor]]: Each item in result_list is a 2-tuple.
+ The first item is an (n, 5) tensor, where the first 4 columns
+ are bounding box positions (tl_x, tl_y, br_x, br_y) and the
+ 5-th column is a score between 0 and 1. The second item is an
+ (n,) tensor where each item is the predicted class label of the
+ corresponding box.
+ """
+ cfg = self.test_cfg if cfg is None else cfg
+ cfg = copy.deepcopy(cfg)
+ # bboxes from different level should be independent during NMS,
+ # level_ids are used as labels for batched NMS to separate them
+ level_ids = []
+ mlvl_scores = []
+ mlvl_bbox_preds = []
+ mlvl_valid_anchors = []
+ batch_size = cls_scores[0].shape[0]
+ nms_pre_tensor = torch.tensor(
+ cfg.nms_pre, device=cls_scores[0].device, dtype=torch.long)
+ for idx in range(len(cls_scores)):
+ rpn_cls_score = cls_scores[idx]
+ rpn_bbox_pred = bbox_preds[idx]
+ assert rpn_cls_score.size()[-2:] == rpn_bbox_pred.size()[-2:]
+ rpn_cls_score = rpn_cls_score.permute(0, 2, 3, 1)
+ if self.use_sigmoid_cls:
+ rpn_cls_score = rpn_cls_score.reshape(batch_size, -1)
+ scores = rpn_cls_score.sigmoid()
+ else:
+ rpn_cls_score = rpn_cls_score.reshape(batch_size, -1, 2)
+ # We set FG labels to [0, num_class-1] and BG label to
+ # num_class in RPN head since mmdet v2.5, which is unified to
+ # be consistent with other head since mmdet v2.0. In mmdet v2.0
+ # to v2.4 we keep BG label as 0 and FG label as 1 in rpn head.
+ scores = rpn_cls_score.softmax(-1)[..., 0]
+ rpn_bbox_pred = rpn_bbox_pred.permute(0, 2, 3, 1).reshape(
+ batch_size, -1, 4)
+ anchors = mlvl_anchors[idx]
+ anchors = anchors.expand_as(rpn_bbox_pred)
+ if nms_pre_tensor > 0:
+ # sort is faster than topk
+ # _, topk_inds = scores.topk(cfg.nms_pre)
+ # keep topk op for dynamic k in onnx model
+ if torch.onnx.is_in_onnx_export():
+ # sort op will be converted to TopK in onnx
+ # and k<=3480 in TensorRT
+ scores_shape = torch._shape_as_tensor(scores)
+ nms_pre = torch.where(scores_shape[1] < nms_pre_tensor,
+ scores_shape[1], nms_pre_tensor)
+ _, topk_inds = scores.topk(nms_pre)
+ batch_inds = torch.arange(batch_size).view(
+ -1, 1).expand_as(topk_inds)
+ scores = scores[batch_inds, topk_inds]
+ rpn_bbox_pred = rpn_bbox_pred[batch_inds, topk_inds, :]
+ anchors = anchors[batch_inds, topk_inds, :]
+
+ elif scores.shape[-1] > cfg.nms_pre:
+ ranked_scores, rank_inds = scores.sort(descending=True)
+ topk_inds = rank_inds[:, :cfg.nms_pre]
+ scores = ranked_scores[:, :cfg.nms_pre]
+ batch_inds = torch.arange(batch_size).view(
+ -1, 1).expand_as(topk_inds)
+ rpn_bbox_pred = rpn_bbox_pred[batch_inds, topk_inds, :]
+ anchors = anchors[batch_inds, topk_inds, :]
+
+ mlvl_scores.append(scores)
+ mlvl_bbox_preds.append(rpn_bbox_pred)
+ mlvl_valid_anchors.append(anchors)
+ level_ids.append(
+ scores.new_full((
+ batch_size,
+ scores.size(1),
+ ),
+ idx,
+ dtype=torch.long))
+
+ batch_mlvl_scores = torch.cat(mlvl_scores, dim=1)
+ batch_mlvl_anchors = torch.cat(mlvl_valid_anchors, dim=1)
+ batch_mlvl_rpn_bbox_pred = torch.cat(mlvl_bbox_preds, dim=1)
+ batch_mlvl_proposals = self.bbox_coder.decode(
+ batch_mlvl_anchors, batch_mlvl_rpn_bbox_pred, max_shape=img_shapes)
+ batch_mlvl_ids = torch.cat(level_ids, dim=1)
+
+ # deprecate arguments warning
+ if 'nms' not in cfg or 'max_num' in cfg or 'nms_thr' in cfg:
+ warnings.warn(
+ 'In rpn_proposal or test_cfg, '
+ 'nms_thr has been moved to a dict named nms as '
+                'iou_threshold, max_num has been renamed as max_per_img; '
+                'the original argument names and the old way of specifying '
+                'iou_threshold for NMS will be deprecated.')
+ if 'nms' not in cfg:
+ cfg.nms = ConfigDict(dict(type='nms', iou_threshold=cfg.nms_thr))
+ if 'max_num' in cfg:
+ if 'max_per_img' in cfg:
+ assert cfg.max_num == cfg.max_per_img, f'You ' \
+ f'set max_num and ' \
+ f'max_per_img at the same time, but get {cfg.max_num} ' \
+                    f'and {cfg.max_per_img} respectively. ' \
+                    'Please delete max_num, which will be deprecated.'
+ else:
+ cfg.max_per_img = cfg.max_num
+ if 'nms_thr' in cfg:
+ assert cfg.nms.iou_threshold == cfg.nms_thr, f'You set' \
+ f' iou_threshold in nms and ' \
+ f'nms_thr at the same time, but get' \
+ f' {cfg.nms.iou_threshold} and {cfg.nms_thr}' \
+ f' respectively. Please delete the nms_thr ' \
+ f'which will be deprecated.'
+
+ result_list = []
+ for (mlvl_proposals, mlvl_scores,
+ mlvl_ids) in zip(batch_mlvl_proposals, batch_mlvl_scores,
+ batch_mlvl_ids):
+ # Skip nonzero op while exporting to ONNX
+ if cfg.min_bbox_size > 0 and (not torch.onnx.is_in_onnx_export()):
+ w = mlvl_proposals[:, 2] - mlvl_proposals[:, 0]
+ h = mlvl_proposals[:, 3] - mlvl_proposals[:, 1]
+ valid_ind = torch.nonzero(
+ (w >= cfg.min_bbox_size)
+ & (h >= cfg.min_bbox_size),
+ as_tuple=False).squeeze()
+ if valid_ind.sum().item() != len(mlvl_proposals):
+ mlvl_proposals = mlvl_proposals[valid_ind, :]
+ mlvl_scores = mlvl_scores[valid_ind]
+ mlvl_ids = mlvl_ids[valid_ind]
+
+ dets, keep = batched_nms(mlvl_proposals, mlvl_scores, mlvl_ids,
+ cfg.nms)
+ result_list.append(dets[:cfg.max_per_img])
+ return result_list
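+
+
+# Editorial usage sketch (not part of the upstream code, shapes assumed): the
+# proposals from all FPN levels are suppressed jointly, but `batched_nms` gets
+# the level index as the "class id", so boxes only suppress boxes from the same
+# level, mirroring per-level NMS. Conceptually:
+#
+#   >>> from mmcv.ops import batched_nms
+#   >>> xy = torch.rand(100, 2) * 100
+#   >>> boxes = torch.cat([xy, xy + torch.rand(100, 2) * 20], dim=1)  # (n, 4)
+#   >>> scores = torch.rand(100)                                      # (n,)
+#   >>> level_ids = torch.randint(0, 5, (100,))                       # FPN level of each box
+#   >>> dets, keep = batched_nms(boxes, scores, level_ids,
+#   ...                          dict(type='nms', iou_threshold=0.7))
+#   >>> proposals = dets[:1000]   # top `max_per_img` proposals, (k, 5) with scores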
diff --git a/annotator/uniformer/mmdet/models/dense_heads/rpn_test_mixin.py b/annotator/uniformer/mmdet/models/dense_heads/rpn_test_mixin.py
new file mode 100644
index 0000000000000000000000000000000000000000..4ce5c66f82595f496e6e55719c1caee75150d568
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/dense_heads/rpn_test_mixin.py
@@ -0,0 +1,59 @@
+import sys
+
+from mmdet.core import merge_aug_proposals
+
+if sys.version_info >= (3, 7):
+ from mmdet.utils.contextmanagers import completed
+
+
+class RPNTestMixin(object):
+ """Test methods of RPN."""
+
+ if sys.version_info >= (3, 7):
+
+ async def async_simple_test_rpn(self, x, img_metas):
+ sleep_interval = self.test_cfg.pop('async_sleep_interval', 0.025)
+ async with completed(
+ __name__, 'rpn_head_forward',
+ sleep_interval=sleep_interval):
+ rpn_outs = self(x)
+
+ proposal_list = self.get_bboxes(*rpn_outs, img_metas)
+ return proposal_list
+
+ def simple_test_rpn(self, x, img_metas):
+ """Test without augmentation.
+
+ Args:
+ x (tuple[Tensor]): Features from the upstream network, each is
+ a 4D-tensor.
+ img_metas (list[dict]): Meta info of each image.
+
+ Returns:
+ list[Tensor]: Proposals of each image.
+ """
+ rpn_outs = self(x)
+ proposal_list = self.get_bboxes(*rpn_outs, img_metas)
+ return proposal_list
+
+ def aug_test_rpn(self, feats, img_metas):
+ samples_per_gpu = len(img_metas[0])
+ aug_proposals = [[] for _ in range(samples_per_gpu)]
+ for x, img_meta in zip(feats, img_metas):
+ proposal_list = self.simple_test_rpn(x, img_meta)
+ for i, proposals in enumerate(proposal_list):
+ aug_proposals[i].append(proposals)
+ # reorganize the order of 'img_metas' to match the dimensions
+ # of 'aug_proposals'
+ aug_img_metas = []
+ for i in range(samples_per_gpu):
+ aug_img_meta = []
+ for j in range(len(img_metas)):
+ aug_img_meta.append(img_metas[j][i])
+ aug_img_metas.append(aug_img_meta)
+ # after merging, proposals will be rescaled to the original image size
+ merged_proposals = [
+ merge_aug_proposals(proposals, aug_img_meta, self.test_cfg)
+ for proposals, aug_img_meta in zip(aug_proposals, aug_img_metas)
+ ]
+ return merged_proposals
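+
+
+# Editorial note: `img_metas` is indexed as [augmentation][image] while
+# `aug_proposals` is built as [image][augmentation], so the nested loop in
+# `aug_test_rpn` is a transpose, equivalent to
+#
+#   aug_img_metas = [list(metas) for metas in zip(*img_metas)]
+#
+# after which each image's proposals and metas line up for
+# `merge_aug_proposals`.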
diff --git a/annotator/uniformer/mmdet/models/dense_heads/sabl_retina_head.py b/annotator/uniformer/mmdet/models/dense_heads/sabl_retina_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..4211622cb8b4fe807230a89bcaab8f4f1681bfc0
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/dense_heads/sabl_retina_head.py
@@ -0,0 +1,621 @@
+import numpy as np
+import torch
+import torch.nn as nn
+from mmcv.cnn import ConvModule, bias_init_with_prob, normal_init
+from mmcv.runner import force_fp32
+
+from mmdet.core import (build_anchor_generator, build_assigner,
+ build_bbox_coder, build_sampler, images_to_levels,
+ multi_apply, multiclass_nms, unmap)
+from ..builder import HEADS, build_loss
+from .base_dense_head import BaseDenseHead
+from .guided_anchor_head import GuidedAnchorHead
+
+
+@HEADS.register_module()
+class SABLRetinaHead(BaseDenseHead):
+ """Side-Aware Boundary Localization (SABL) for RetinaNet.
+
+ The anchor generation, assigning and sampling in SABLRetinaHead
+    are the same as in GuidedAnchorHead for guided anchoring.
+
+ Please refer to https://arxiv.org/abs/1912.04260 for more details.
+
+ Args:
+ num_classes (int): Number of classes.
+ in_channels (int): Number of channels in the input feature map.
+ stacked_convs (int): Number of Convs for classification \
+ and regression branches. Defaults to 4.
+ feat_channels (int): Number of hidden channels. \
+ Defaults to 256.
+ approx_anchor_generator (dict): Config dict for approx generator.
+ square_anchor_generator (dict): Config dict for square generator.
+ conv_cfg (dict): Config dict for ConvModule. Defaults to None.
+ norm_cfg (dict): Config dict for Norm Layer. Defaults to None.
+ bbox_coder (dict): Config dict for bbox coder.
+ reg_decoded_bbox (bool): If true, the regression loss would be
+ applied directly on decoded bounding boxes, converting both
+ the predicted boxes and regression targets to absolute
+ coordinates format. Default False. It should be `True` when
+ using `IoULoss`, `GIoULoss`, or `DIoULoss` in the bbox head.
+ train_cfg (dict): Training config of SABLRetinaHead.
+ test_cfg (dict): Testing config of SABLRetinaHead.
+ loss_cls (dict): Config of classification loss.
+ loss_bbox_cls (dict): Config of classification loss for bbox branch.
+ loss_bbox_reg (dict): Config of regression loss for bbox branch.
+ """
+
+ def __init__(self,
+ num_classes,
+ in_channels,
+ stacked_convs=4,
+ feat_channels=256,
+ approx_anchor_generator=dict(
+ type='AnchorGenerator',
+ octave_base_scale=4,
+ scales_per_octave=3,
+ ratios=[0.5, 1.0, 2.0],
+ strides=[8, 16, 32, 64, 128]),
+ square_anchor_generator=dict(
+ type='AnchorGenerator',
+ ratios=[1.0],
+ scales=[4],
+ strides=[8, 16, 32, 64, 128]),
+ conv_cfg=None,
+ norm_cfg=None,
+ bbox_coder=dict(
+ type='BucketingBBoxCoder',
+ num_buckets=14,
+ scale_factor=3.0),
+ reg_decoded_bbox=False,
+ train_cfg=None,
+ test_cfg=None,
+ loss_cls=dict(
+ type='FocalLoss',
+ use_sigmoid=True,
+ gamma=2.0,
+ alpha=0.25,
+ loss_weight=1.0),
+ loss_bbox_cls=dict(
+ type='CrossEntropyLoss',
+ use_sigmoid=True,
+ loss_weight=1.5),
+ loss_bbox_reg=dict(
+ type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.5)):
+ super(SABLRetinaHead, self).__init__()
+ self.in_channels = in_channels
+ self.num_classes = num_classes
+ self.feat_channels = feat_channels
+ self.num_buckets = bbox_coder['num_buckets']
+ self.side_num = int(np.ceil(self.num_buckets / 2))
+
+ assert (approx_anchor_generator['octave_base_scale'] ==
+ square_anchor_generator['scales'][0])
+ assert (approx_anchor_generator['strides'] ==
+ square_anchor_generator['strides'])
+
+ self.approx_anchor_generator = build_anchor_generator(
+ approx_anchor_generator)
+ self.square_anchor_generator = build_anchor_generator(
+ square_anchor_generator)
+ self.approxs_per_octave = (
+ self.approx_anchor_generator.num_base_anchors[0])
+
+ # one anchor per location
+ self.num_anchors = 1
+ self.stacked_convs = stacked_convs
+ self.conv_cfg = conv_cfg
+ self.norm_cfg = norm_cfg
+
+ self.reg_decoded_bbox = reg_decoded_bbox
+
+ self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False)
+ self.sampling = loss_cls['type'] not in [
+ 'FocalLoss', 'GHMC', 'QualityFocalLoss'
+ ]
+ if self.use_sigmoid_cls:
+ self.cls_out_channels = num_classes
+ else:
+ self.cls_out_channels = num_classes + 1
+
+ self.bbox_coder = build_bbox_coder(bbox_coder)
+ self.loss_cls = build_loss(loss_cls)
+ self.loss_bbox_cls = build_loss(loss_bbox_cls)
+ self.loss_bbox_reg = build_loss(loss_bbox_reg)
+
+ self.train_cfg = train_cfg
+ self.test_cfg = test_cfg
+
+ if self.train_cfg:
+ self.assigner = build_assigner(self.train_cfg.assigner)
+ # use PseudoSampler when sampling is False
+ if self.sampling and hasattr(self.train_cfg, 'sampler'):
+ sampler_cfg = self.train_cfg.sampler
+ else:
+ sampler_cfg = dict(type='PseudoSampler')
+ self.sampler = build_sampler(sampler_cfg, context=self)
+
+ self.fp16_enabled = False
+ self._init_layers()
+
+ def _init_layers(self):
+ self.relu = nn.ReLU(inplace=True)
+ self.cls_convs = nn.ModuleList()
+ self.reg_convs = nn.ModuleList()
+ for i in range(self.stacked_convs):
+ chn = self.in_channels if i == 0 else self.feat_channels
+ self.cls_convs.append(
+ ConvModule(
+ chn,
+ self.feat_channels,
+ 3,
+ stride=1,
+ padding=1,
+ conv_cfg=self.conv_cfg,
+ norm_cfg=self.norm_cfg))
+ self.reg_convs.append(
+ ConvModule(
+ chn,
+ self.feat_channels,
+ 3,
+ stride=1,
+ padding=1,
+ conv_cfg=self.conv_cfg,
+ norm_cfg=self.norm_cfg))
+ self.retina_cls = nn.Conv2d(
+ self.feat_channels, self.cls_out_channels, 3, padding=1)
+ self.retina_bbox_reg = nn.Conv2d(
+ self.feat_channels, self.side_num * 4, 3, padding=1)
+ self.retina_bbox_cls = nn.Conv2d(
+ self.feat_channels, self.side_num * 4, 3, padding=1)
+
+ def init_weights(self):
+ for m in self.cls_convs:
+ normal_init(m.conv, std=0.01)
+ for m in self.reg_convs:
+ normal_init(m.conv, std=0.01)
+ bias_cls = bias_init_with_prob(0.01)
+ normal_init(self.retina_cls, std=0.01, bias=bias_cls)
+ normal_init(self.retina_bbox_reg, std=0.01)
+ normal_init(self.retina_bbox_cls, std=0.01)
+
+ def forward_single(self, x):
+ cls_feat = x
+ reg_feat = x
+ for cls_conv in self.cls_convs:
+ cls_feat = cls_conv(cls_feat)
+ for reg_conv in self.reg_convs:
+ reg_feat = reg_conv(reg_feat)
+ cls_score = self.retina_cls(cls_feat)
+ bbox_cls_pred = self.retina_bbox_cls(reg_feat)
+ bbox_reg_pred = self.retina_bbox_reg(reg_feat)
+ bbox_pred = (bbox_cls_pred, bbox_reg_pred)
+ return cls_score, bbox_pred
+
+ def forward(self, feats):
+ return multi_apply(self.forward_single, feats)
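+    # Editorial note: `multi_apply` maps `forward_single` over all FPN levels
+    # and transposes the results, so `forward` returns a tuple of
+    # (cls_scores, bbox_preds) where each element is a per-level list; roughly:
+    #   outs = [self.forward_single(f) for f in feats]
+    #   cls_scores, bbox_preds = map(list, zip(*outs))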
+
+ def get_anchors(self, featmap_sizes, img_metas, device='cuda'):
+ """Get squares according to feature map sizes and guided anchors.
+
+ Args:
+ featmap_sizes (list[tuple]): Multi-level feature map sizes.
+ img_metas (list[dict]): Image meta info.
+ device (torch.device | str): device for returned tensors
+
+ Returns:
+ tuple: square approxs of each image
+ """
+ num_imgs = len(img_metas)
+
+        # since feature map sizes of all images are the same, we only compute
+        # squares once
+ multi_level_squares = self.square_anchor_generator.grid_anchors(
+ featmap_sizes, device=device)
+ squares_list = [multi_level_squares for _ in range(num_imgs)]
+
+ return squares_list
+
+ def get_target(self,
+ approx_list,
+ inside_flag_list,
+ square_list,
+ gt_bboxes_list,
+ img_metas,
+ gt_bboxes_ignore_list=None,
+ gt_labels_list=None,
+ label_channels=None,
+ sampling=True,
+ unmap_outputs=True):
+ """Compute bucketing targets.
+ Args:
+ approx_list (list[list]): Multi level approxs of each image.
+ inside_flag_list (list[list]): Multi level inside flags of each
+ image.
+ square_list (list[list]): Multi level squares of each image.
+ gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image.
+ img_metas (list[dict]): Meta info of each image.
+            gt_bboxes_ignore_list (list[Tensor]): Ignore list of gt bboxes.
+            gt_labels_list (list[Tensor]): Gt labels of each image.
+ label_channels (int): Channel of label.
+ sampling (bool): Sample Anchors or not.
+ unmap_outputs (bool): unmap outputs or not.
+
+ Returns:
+ tuple: Returns a tuple containing learning targets.
+
+ - labels_list (list[Tensor]): Labels of each level.
+ - label_weights_list (list[Tensor]): Label weights of each \
+ level.
+ - bbox_cls_targets_list (list[Tensor]): BBox cls targets of \
+ each level.
+ - bbox_cls_weights_list (list[Tensor]): BBox cls weights of \
+ each level.
+ - bbox_reg_targets_list (list[Tensor]): BBox reg targets of \
+ each level.
+ - bbox_reg_weights_list (list[Tensor]): BBox reg weights of \
+ each level.
+ - num_total_pos (int): Number of positive samples in all \
+ images.
+ - num_total_neg (int): Number of negative samples in all \
+ images.
+ """
+ num_imgs = len(img_metas)
+ assert len(approx_list) == len(inside_flag_list) == len(
+ square_list) == num_imgs
+ # anchor number of multi levels
+ num_level_squares = [squares.size(0) for squares in square_list[0]]
+ # concat all level anchors and flags to a single tensor
+ inside_flag_flat_list = []
+ approx_flat_list = []
+ square_flat_list = []
+ for i in range(num_imgs):
+ assert len(square_list[i]) == len(inside_flag_list[i])
+ inside_flag_flat_list.append(torch.cat(inside_flag_list[i]))
+ approx_flat_list.append(torch.cat(approx_list[i]))
+ square_flat_list.append(torch.cat(square_list[i]))
+
+ # compute targets for each image
+ if gt_bboxes_ignore_list is None:
+ gt_bboxes_ignore_list = [None for _ in range(num_imgs)]
+ if gt_labels_list is None:
+ gt_labels_list = [None for _ in range(num_imgs)]
+ (all_labels, all_label_weights, all_bbox_cls_targets,
+ all_bbox_cls_weights, all_bbox_reg_targets, all_bbox_reg_weights,
+ pos_inds_list, neg_inds_list) = multi_apply(
+ self._get_target_single,
+ approx_flat_list,
+ inside_flag_flat_list,
+ square_flat_list,
+ gt_bboxes_list,
+ gt_bboxes_ignore_list,
+ gt_labels_list,
+ img_metas,
+ label_channels=label_channels,
+ sampling=sampling,
+ unmap_outputs=unmap_outputs)
+ # no valid anchors
+ if any([labels is None for labels in all_labels]):
+ return None
+ # sampled anchors of all images
+ num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list])
+ num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list])
+ # split targets to a list w.r.t. multiple levels
+ labels_list = images_to_levels(all_labels, num_level_squares)
+ label_weights_list = images_to_levels(all_label_weights,
+ num_level_squares)
+ bbox_cls_targets_list = images_to_levels(all_bbox_cls_targets,
+ num_level_squares)
+ bbox_cls_weights_list = images_to_levels(all_bbox_cls_weights,
+ num_level_squares)
+ bbox_reg_targets_list = images_to_levels(all_bbox_reg_targets,
+ num_level_squares)
+ bbox_reg_weights_list = images_to_levels(all_bbox_reg_weights,
+ num_level_squares)
+ return (labels_list, label_weights_list, bbox_cls_targets_list,
+ bbox_cls_weights_list, bbox_reg_targets_list,
+ bbox_reg_weights_list, num_total_pos, num_total_neg)
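+    # Editorial note: targets are computed per image on the flattened anchor
+    # set and then regrouped by `images_to_levels` into one tensor per feature
+    # level (with the image dimension stacked), which is the layout consumed
+    # by `loss_single` when `multi_apply` iterates over levels in `loss`.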
+
+ def _get_target_single(self,
+ flat_approxs,
+ inside_flags,
+ flat_squares,
+ gt_bboxes,
+ gt_bboxes_ignore,
+ gt_labels,
+ img_meta,
+ label_channels=None,
+ sampling=True,
+ unmap_outputs=True):
+ """Compute regression and classification targets for anchors in a
+ single image.
+
+ Args:
+ flat_approxs (Tensor): flat approxs of a single image,
+ shape (n, 4)
+ inside_flags (Tensor): inside flags of a single image,
+ shape (n, ).
+ flat_squares (Tensor): flat squares of a single image,
+ shape (approxs_per_octave * n, 4)
+ gt_bboxes (Tensor): Ground truth bboxes of a single image, \
+ shape (num_gts, 4).
+ gt_bboxes_ignore (Tensor): Ground truth bboxes to be
+ ignored, shape (num_ignored_gts, 4).
+ gt_labels (Tensor): Ground truth labels of each box,
+ shape (num_gts,).
+ img_meta (dict): Meta info of the image.
+ label_channels (int): Channel of label.
+ sampling (bool): Sample Anchors or not.
+ unmap_outputs (bool): unmap outputs or not.
+
+ Returns:
+ tuple:
+
+ - labels_list (Tensor): Labels in a single image
+ - label_weights (Tensor): Label weights in a single image
+ - bbox_cls_targets (Tensor): BBox cls targets in a single image
+ - bbox_cls_weights (Tensor): BBox cls weights in a single image
+ - bbox_reg_targets (Tensor): BBox reg targets in a single image
+ - bbox_reg_weights (Tensor): BBox reg weights in a single image
+                - pos_inds (Tensor): Indices of positive anchors \
+                    in a single image
+                - neg_inds (Tensor): Indices of negative anchors \
+                    in a single image
+ """
+ if not inside_flags.any():
+ return (None, ) * 8
+ # assign gt and sample anchors
+ expand_inside_flags = inside_flags[:, None].expand(
+ -1, self.approxs_per_octave).reshape(-1)
+ approxs = flat_approxs[expand_inside_flags, :]
+ squares = flat_squares[inside_flags, :]
+
+ assign_result = self.assigner.assign(approxs, squares,
+ self.approxs_per_octave,
+ gt_bboxes, gt_bboxes_ignore)
+ sampling_result = self.sampler.sample(assign_result, squares,
+ gt_bboxes)
+
+ num_valid_squares = squares.shape[0]
+ bbox_cls_targets = squares.new_zeros(
+ (num_valid_squares, self.side_num * 4))
+ bbox_cls_weights = squares.new_zeros(
+ (num_valid_squares, self.side_num * 4))
+ bbox_reg_targets = squares.new_zeros(
+ (num_valid_squares, self.side_num * 4))
+ bbox_reg_weights = squares.new_zeros(
+ (num_valid_squares, self.side_num * 4))
+ labels = squares.new_full((num_valid_squares, ),
+ self.num_classes,
+ dtype=torch.long)
+ label_weights = squares.new_zeros(num_valid_squares, dtype=torch.float)
+
+ pos_inds = sampling_result.pos_inds
+ neg_inds = sampling_result.neg_inds
+ if len(pos_inds) > 0:
+ (pos_bbox_reg_targets, pos_bbox_reg_weights, pos_bbox_cls_targets,
+ pos_bbox_cls_weights) = self.bbox_coder.encode(
+ sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes)
+
+ bbox_cls_targets[pos_inds, :] = pos_bbox_cls_targets
+ bbox_reg_targets[pos_inds, :] = pos_bbox_reg_targets
+ bbox_cls_weights[pos_inds, :] = pos_bbox_cls_weights
+ bbox_reg_weights[pos_inds, :] = pos_bbox_reg_weights
+ if gt_labels is None:
+ # Only rpn gives gt_labels as None
+ # Foreground is the first class
+ labels[pos_inds] = 0
+ else:
+ labels[pos_inds] = gt_labels[
+ sampling_result.pos_assigned_gt_inds]
+ if self.train_cfg.pos_weight <= 0:
+ label_weights[pos_inds] = 1.0
+ else:
+ label_weights[pos_inds] = self.train_cfg.pos_weight
+ if len(neg_inds) > 0:
+ label_weights[neg_inds] = 1.0
+
+ # map up to original set of anchors
+ if unmap_outputs:
+ num_total_anchors = flat_squares.size(0)
+ labels = unmap(
+ labels, num_total_anchors, inside_flags, fill=self.num_classes)
+ label_weights = unmap(label_weights, num_total_anchors,
+ inside_flags)
+ bbox_cls_targets = unmap(bbox_cls_targets, num_total_anchors,
+ inside_flags)
+ bbox_cls_weights = unmap(bbox_cls_weights, num_total_anchors,
+ inside_flags)
+ bbox_reg_targets = unmap(bbox_reg_targets, num_total_anchors,
+ inside_flags)
+ bbox_reg_weights = unmap(bbox_reg_weights, num_total_anchors,
+ inside_flags)
+ return (labels, label_weights, bbox_cls_targets, bbox_cls_weights,
+ bbox_reg_targets, bbox_reg_weights, pos_inds, neg_inds)
+
+ def loss_single(self, cls_score, bbox_pred, labels, label_weights,
+ bbox_cls_targets, bbox_cls_weights, bbox_reg_targets,
+ bbox_reg_weights, num_total_samples):
+ # classification loss
+ labels = labels.reshape(-1)
+ label_weights = label_weights.reshape(-1)
+ cls_score = cls_score.permute(0, 2, 3,
+ 1).reshape(-1, self.cls_out_channels)
+ loss_cls = self.loss_cls(
+ cls_score, labels, label_weights, avg_factor=num_total_samples)
+ # regression loss
+ bbox_cls_targets = bbox_cls_targets.reshape(-1, self.side_num * 4)
+ bbox_cls_weights = bbox_cls_weights.reshape(-1, self.side_num * 4)
+ bbox_reg_targets = bbox_reg_targets.reshape(-1, self.side_num * 4)
+ bbox_reg_weights = bbox_reg_weights.reshape(-1, self.side_num * 4)
+ (bbox_cls_pred, bbox_reg_pred) = bbox_pred
+ bbox_cls_pred = bbox_cls_pred.permute(0, 2, 3, 1).reshape(
+ -1, self.side_num * 4)
+ bbox_reg_pred = bbox_reg_pred.permute(0, 2, 3, 1).reshape(
+ -1, self.side_num * 4)
+ loss_bbox_cls = self.loss_bbox_cls(
+ bbox_cls_pred,
+ bbox_cls_targets.long(),
+ bbox_cls_weights,
+ avg_factor=num_total_samples * 4 * self.side_num)
+ loss_bbox_reg = self.loss_bbox_reg(
+ bbox_reg_pred,
+ bbox_reg_targets,
+ bbox_reg_weights,
+ avg_factor=num_total_samples * 4 * self.bbox_coder.offset_topk)
+ return loss_cls, loss_bbox_cls, loss_bbox_reg
+
+ @force_fp32(apply_to=('cls_scores', 'bbox_preds'))
+ def loss(self,
+ cls_scores,
+ bbox_preds,
+ gt_bboxes,
+ gt_labels,
+ img_metas,
+ gt_bboxes_ignore=None):
+ featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
+ assert len(featmap_sizes) == self.approx_anchor_generator.num_levels
+
+ device = cls_scores[0].device
+
+ # get sampled approxes
+ approxs_list, inside_flag_list = GuidedAnchorHead.get_sampled_approxs(
+ self, featmap_sizes, img_metas, device=device)
+
+ square_list = self.get_anchors(featmap_sizes, img_metas, device=device)
+
+ label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1
+
+ cls_reg_targets = self.get_target(
+ approxs_list,
+ inside_flag_list,
+ square_list,
+ gt_bboxes,
+ img_metas,
+ gt_bboxes_ignore_list=gt_bboxes_ignore,
+ gt_labels_list=gt_labels,
+ label_channels=label_channels,
+ sampling=self.sampling)
+ if cls_reg_targets is None:
+ return None
+ (labels_list, label_weights_list, bbox_cls_targets_list,
+ bbox_cls_weights_list, bbox_reg_targets_list, bbox_reg_weights_list,
+ num_total_pos, num_total_neg) = cls_reg_targets
+ num_total_samples = (
+ num_total_pos + num_total_neg if self.sampling else num_total_pos)
+ losses_cls, losses_bbox_cls, losses_bbox_reg = multi_apply(
+ self.loss_single,
+ cls_scores,
+ bbox_preds,
+ labels_list,
+ label_weights_list,
+ bbox_cls_targets_list,
+ bbox_cls_weights_list,
+ bbox_reg_targets_list,
+ bbox_reg_weights_list,
+ num_total_samples=num_total_samples)
+ return dict(
+ loss_cls=losses_cls,
+ loss_bbox_cls=losses_bbox_cls,
+ loss_bbox_reg=losses_bbox_reg)
+
+ @force_fp32(apply_to=('cls_scores', 'bbox_preds'))
+ def get_bboxes(self,
+ cls_scores,
+ bbox_preds,
+ img_metas,
+ cfg=None,
+ rescale=False):
+ assert len(cls_scores) == len(bbox_preds)
+ num_levels = len(cls_scores)
+ featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
+
+ device = cls_scores[0].device
+ mlvl_anchors = self.get_anchors(
+ featmap_sizes, img_metas, device=device)
+ result_list = []
+ for img_id in range(len(img_metas)):
+ cls_score_list = [
+ cls_scores[i][img_id].detach() for i in range(num_levels)
+ ]
+ bbox_cls_pred_list = [
+ bbox_preds[i][0][img_id].detach() for i in range(num_levels)
+ ]
+ bbox_reg_pred_list = [
+ bbox_preds[i][1][img_id].detach() for i in range(num_levels)
+ ]
+ img_shape = img_metas[img_id]['img_shape']
+ scale_factor = img_metas[img_id]['scale_factor']
+ proposals = self.get_bboxes_single(cls_score_list,
+ bbox_cls_pred_list,
+ bbox_reg_pred_list,
+ mlvl_anchors[img_id], img_shape,
+ scale_factor, cfg, rescale)
+ result_list.append(proposals)
+ return result_list
+
+ def get_bboxes_single(self,
+ cls_scores,
+ bbox_cls_preds,
+ bbox_reg_preds,
+ mlvl_anchors,
+ img_shape,
+ scale_factor,
+ cfg,
+ rescale=False):
+ cfg = self.test_cfg if cfg is None else cfg
+ mlvl_bboxes = []
+ mlvl_scores = []
+ mlvl_confids = []
+ assert len(cls_scores) == len(bbox_cls_preds) == len(
+ bbox_reg_preds) == len(mlvl_anchors)
+ for cls_score, bbox_cls_pred, bbox_reg_pred, anchors in zip(
+ cls_scores, bbox_cls_preds, bbox_reg_preds, mlvl_anchors):
+ assert cls_score.size()[-2:] == bbox_cls_pred.size(
+            )[-2:] == bbox_reg_pred.size()[-2:]
+ cls_score = cls_score.permute(1, 2,
+ 0).reshape(-1, self.cls_out_channels)
+ if self.use_sigmoid_cls:
+ scores = cls_score.sigmoid()
+ else:
+ scores = cls_score.softmax(-1)
+ bbox_cls_pred = bbox_cls_pred.permute(1, 2, 0).reshape(
+ -1, self.side_num * 4)
+ bbox_reg_pred = bbox_reg_pred.permute(1, 2, 0).reshape(
+ -1, self.side_num * 4)
+ nms_pre = cfg.get('nms_pre', -1)
+ if nms_pre > 0 and scores.shape[0] > nms_pre:
+ if self.use_sigmoid_cls:
+ max_scores, _ = scores.max(dim=1)
+ else:
+ max_scores, _ = scores[:, :-1].max(dim=1)
+ _, topk_inds = max_scores.topk(nms_pre)
+ anchors = anchors[topk_inds, :]
+ bbox_cls_pred = bbox_cls_pred[topk_inds, :]
+ bbox_reg_pred = bbox_reg_pred[topk_inds, :]
+ scores = scores[topk_inds, :]
+ bbox_preds = [
+ bbox_cls_pred.contiguous(),
+ bbox_reg_pred.contiguous()
+ ]
+ bboxes, confids = self.bbox_coder.decode(
+ anchors.contiguous(), bbox_preds, max_shape=img_shape)
+ mlvl_bboxes.append(bboxes)
+ mlvl_scores.append(scores)
+ mlvl_confids.append(confids)
+ mlvl_bboxes = torch.cat(mlvl_bboxes)
+ if rescale:
+ mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor)
+ mlvl_scores = torch.cat(mlvl_scores)
+ mlvl_confids = torch.cat(mlvl_confids)
+ if self.use_sigmoid_cls:
+ padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1)
+ mlvl_scores = torch.cat([mlvl_scores, padding], dim=1)
+ det_bboxes, det_labels = multiclass_nms(
+ mlvl_bboxes,
+ mlvl_scores,
+ cfg.score_thr,
+ cfg.nms,
+ cfg.max_per_img,
+ score_factors=mlvl_confids)
+ return det_bboxes, det_labels
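+
+
+# Editorial sketch (not part of the upstream code): with sigmoid
+# classification there is no background column, but `multiclass_nms` expects
+# the background score in the last column, hence the zero padding above:
+#
+#   >>> scores = torch.rand(1000, 80)                    # per-class sigmoid scores
+#   >>> padding = scores.new_zeros(scores.shape[0], 1)
+#   >>> scores = torch.cat([scores, padding], dim=1)     # (1000, 81), background last
+#
+# `multiclass_nms` then ignores that last column when selecting foreground
+# detections.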
diff --git a/annotator/uniformer/mmdet/models/dense_heads/ssd_head.py b/annotator/uniformer/mmdet/models/dense_heads/ssd_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..145622b64e3f0b3f7f518fc61a2a01348ebfa4f3
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/dense_heads/ssd_head.py
@@ -0,0 +1,265 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from mmcv.cnn import xavier_init
+from mmcv.runner import force_fp32
+
+from mmdet.core import (build_anchor_generator, build_assigner,
+ build_bbox_coder, build_sampler, multi_apply)
+from ..builder import HEADS
+from ..losses import smooth_l1_loss
+from .anchor_head import AnchorHead
+
+
+# TODO: add loss evaluator for SSD
+@HEADS.register_module()
+class SSDHead(AnchorHead):
+ """SSD head used in https://arxiv.org/abs/1512.02325.
+
+ Args:
+ num_classes (int): Number of categories excluding the background
+ category.
+ in_channels (int): Number of channels in the input feature map.
+ anchor_generator (dict): Config dict for anchor generator
+ bbox_coder (dict): Config of bounding box coder.
+ reg_decoded_bbox (bool): If true, the regression loss would be
+ applied directly on decoded bounding boxes, converting both
+ the predicted boxes and regression targets to absolute
+ coordinates format. Default False. It should be `True` when
+ using `IoULoss`, `GIoULoss`, or `DIoULoss` in the bbox head.
+ train_cfg (dict): Training config of anchor head.
+ test_cfg (dict): Testing config of anchor head.
+ """ # noqa: W605
+
+ def __init__(self,
+ num_classes=80,
+ in_channels=(512, 1024, 512, 256, 256, 256),
+ anchor_generator=dict(
+ type='SSDAnchorGenerator',
+ scale_major=False,
+ input_size=300,
+ strides=[8, 16, 32, 64, 100, 300],
+ ratios=([2], [2, 3], [2, 3], [2, 3], [2], [2]),
+ basesize_ratio_range=(0.1, 0.9)),
+ bbox_coder=dict(
+ type='DeltaXYWHBBoxCoder',
+ clip_border=True,
+ target_means=[.0, .0, .0, .0],
+ target_stds=[1.0, 1.0, 1.0, 1.0],
+ ),
+ reg_decoded_bbox=False,
+ train_cfg=None,
+ test_cfg=None):
+ super(AnchorHead, self).__init__()
+ self.num_classes = num_classes
+ self.in_channels = in_channels
+ self.cls_out_channels = num_classes + 1 # add background class
+ self.anchor_generator = build_anchor_generator(anchor_generator)
+ num_anchors = self.anchor_generator.num_base_anchors
+
+ reg_convs = []
+ cls_convs = []
+ for i in range(len(in_channels)):
+ reg_convs.append(
+ nn.Conv2d(
+ in_channels[i],
+ num_anchors[i] * 4,
+ kernel_size=3,
+ padding=1))
+ cls_convs.append(
+ nn.Conv2d(
+ in_channels[i],
+ num_anchors[i] * (num_classes + 1),
+ kernel_size=3,
+ padding=1))
+ self.reg_convs = nn.ModuleList(reg_convs)
+ self.cls_convs = nn.ModuleList(cls_convs)
+
+ self.bbox_coder = build_bbox_coder(bbox_coder)
+ self.reg_decoded_bbox = reg_decoded_bbox
+ self.use_sigmoid_cls = False
+ self.cls_focal_loss = False
+ self.train_cfg = train_cfg
+ self.test_cfg = test_cfg
+        # set sampling=False for anchor_target
+ self.sampling = False
+ if self.train_cfg:
+ self.assigner = build_assigner(self.train_cfg.assigner)
+ # SSD sampling=False so use PseudoSampler
+ sampler_cfg = dict(type='PseudoSampler')
+ self.sampler = build_sampler(sampler_cfg, context=self)
+ self.fp16_enabled = False
+
+ def init_weights(self):
+ """Initialize weights of the head."""
+ for m in self.modules():
+ if isinstance(m, nn.Conv2d):
+ xavier_init(m, distribution='uniform', bias=0)
+
+ def forward(self, feats):
+ """Forward features from the upstream network.
+
+ Args:
+ feats (tuple[Tensor]): Features from the upstream network, each is
+ a 4D-tensor.
+
+ Returns:
+ tuple:
+ cls_scores (list[Tensor]): Classification scores for all scale
+ levels, each is a 4D-tensor, the channels number is
+ num_anchors * num_classes.
+ bbox_preds (list[Tensor]): Box energies / deltas for all scale
+ levels, each is a 4D-tensor, the channels number is
+ num_anchors * 4.
+ """
+ cls_scores = []
+ bbox_preds = []
+ for feat, reg_conv, cls_conv in zip(feats, self.reg_convs,
+ self.cls_convs):
+ cls_scores.append(cls_conv(feat))
+ bbox_preds.append(reg_conv(feat))
+ return cls_scores, bbox_preds
+
+ def loss_single(self, cls_score, bbox_pred, anchor, labels, label_weights,
+ bbox_targets, bbox_weights, num_total_samples):
+ """Compute loss of a single image.
+
+ Args:
+            cls_score (Tensor): Box scores for each image.
+                Has shape (num_total_anchors, num_classes).
+ bbox_pred (Tensor): Box energies / deltas for each image
+ level with shape (num_total_anchors, 4).
+            anchor (Tensor): Box reference for each scale level with shape
+ (num_total_anchors, 4).
+ labels (Tensor): Labels of each anchors with shape
+ (num_total_anchors,).
+ label_weights (Tensor): Label weights of each anchor with shape
+ (num_total_anchors,)
+            bbox_targets (Tensor): BBox regression targets of each anchor with
+ shape (num_total_anchors, 4).
+ bbox_weights (Tensor): BBox regression loss weights of each anchor
+ with shape (num_total_anchors, 4).
+            num_total_samples (int): If sampling, the number of total samples
+                equals the number of total anchors; otherwise, it is the number
+                of positive anchors.
+
+ Returns:
+            tuple[Tensor, Tensor]: Classification loss and bbox regression loss.
+ """
+
+ loss_cls_all = F.cross_entropy(
+ cls_score, labels, reduction='none') * label_weights
+ # FG cat_id: [0, num_classes -1], BG cat_id: num_classes
+ pos_inds = ((labels >= 0) &
+ (labels < self.num_classes)).nonzero().reshape(-1)
+ neg_inds = (labels == self.num_classes).nonzero().view(-1)
+
+ num_pos_samples = pos_inds.size(0)
+ num_neg_samples = self.train_cfg.neg_pos_ratio * num_pos_samples
+ if num_neg_samples > neg_inds.size(0):
+ num_neg_samples = neg_inds.size(0)
+ topk_loss_cls_neg, _ = loss_cls_all[neg_inds].topk(num_neg_samples)
+ loss_cls_pos = loss_cls_all[pos_inds].sum()
+ loss_cls_neg = topk_loss_cls_neg.sum()
+ loss_cls = (loss_cls_pos + loss_cls_neg) / num_total_samples
+
+ if self.reg_decoded_bbox:
+ # When the regression loss (e.g. `IouLoss`, `GIouLoss`)
+ # is applied directly on the decoded bounding boxes, it
+ # decodes the already encoded coordinates to absolute format.
+ bbox_pred = self.bbox_coder.decode(anchor, bbox_pred)
+
+ loss_bbox = smooth_l1_loss(
+ bbox_pred,
+ bbox_targets,
+ bbox_weights,
+ beta=self.train_cfg.smoothl1_beta,
+ avg_factor=num_total_samples)
+ return loss_cls[None], loss_bbox
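+    # Editorial sketch of the hard negative mining above (illustrative values,
+    # not the real config): only the hardest negatives, up to `neg_pos_ratio`
+    # times the number of positives, contribute to the classification loss.
+    # In isolation, normalizing by the number of positives of this image:
+    #   >>> loss_all = torch.rand(8732)                        # per-anchor CE loss
+    #   >>> pos = torch.randperm(8732)[:20]                    # 20 positive anchors
+    #   >>> neg_mask = torch.ones(8732, dtype=torch.bool)
+    #   >>> neg_mask[pos] = False
+    #   >>> neg = neg_mask.nonzero().view(-1)
+    #   >>> k = min(3 * pos.numel(), neg.numel())              # neg_pos_ratio = 3
+    #   >>> hard_neg, _ = loss_all[neg].topk(k)
+    #   >>> loss_cls = (loss_all[pos].sum() + hard_neg.sum()) / pos.numel()
+    # In the head itself the denominator is `num_total_samples`, the number of
+    # positives across the whole batch.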
+
+ @force_fp32(apply_to=('cls_scores', 'bbox_preds'))
+ def loss(self,
+ cls_scores,
+ bbox_preds,
+ gt_bboxes,
+ gt_labels,
+ img_metas,
+ gt_bboxes_ignore=None):
+ """Compute losses of the head.
+
+ Args:
+            cls_scores (list[Tensor]): Box scores for each scale level.
+                Has shape (N, num_anchors * num_classes, H, W).
+ bbox_preds (list[Tensor]): Box energies / deltas for each scale
+ level with shape (N, num_anchors * 4, H, W)
+            gt_bboxes (list[Tensor]): Each item is the ground truth boxes for
+                each image in [tl_x, tl_y, br_x, br_y] format.
+            gt_labels (list[Tensor]): Class indices corresponding to each box.
+ img_metas (list[dict]): Meta information of each image, e.g.,
+ image size, scaling factor, etc.
+ gt_bboxes_ignore (None | list[Tensor]): specify which bounding
+ boxes can be ignored when computing the loss.
+
+ Returns:
+ dict[str, Tensor]: A dictionary of loss components.
+ """
+ featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
+ assert len(featmap_sizes) == self.anchor_generator.num_levels
+
+ device = cls_scores[0].device
+
+ anchor_list, valid_flag_list = self.get_anchors(
+ featmap_sizes, img_metas, device=device)
+ cls_reg_targets = self.get_targets(
+ anchor_list,
+ valid_flag_list,
+ gt_bboxes,
+ img_metas,
+ gt_bboxes_ignore_list=gt_bboxes_ignore,
+ gt_labels_list=gt_labels,
+ label_channels=1,
+ unmap_outputs=False)
+ if cls_reg_targets is None:
+ return None
+ (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,
+ num_total_pos, num_total_neg) = cls_reg_targets
+
+ num_images = len(img_metas)
+ all_cls_scores = torch.cat([
+ s.permute(0, 2, 3, 1).reshape(
+ num_images, -1, self.cls_out_channels) for s in cls_scores
+ ], 1)
+ all_labels = torch.cat(labels_list, -1).view(num_images, -1)
+ all_label_weights = torch.cat(label_weights_list,
+ -1).view(num_images, -1)
+ all_bbox_preds = torch.cat([
+ b.permute(0, 2, 3, 1).reshape(num_images, -1, 4)
+ for b in bbox_preds
+ ], -2)
+ all_bbox_targets = torch.cat(bbox_targets_list,
+ -2).view(num_images, -1, 4)
+ all_bbox_weights = torch.cat(bbox_weights_list,
+ -2).view(num_images, -1, 4)
+
+ # concat all level anchors to a single tensor
+ all_anchors = []
+ for i in range(num_images):
+ all_anchors.append(torch.cat(anchor_list[i]))
+
+ # check NaN and Inf
+ assert torch.isfinite(all_cls_scores).all().item(), \
+ 'classification scores become infinite or NaN!'
+ assert torch.isfinite(all_bbox_preds).all().item(), \
+            'bbox predictions become infinite or NaN!'
+
+ losses_cls, losses_bbox = multi_apply(
+ self.loss_single,
+ all_cls_scores,
+ all_bbox_preds,
+ all_anchors,
+ all_labels,
+ all_label_weights,
+ all_bbox_targets,
+ all_bbox_weights,
+ num_total_samples=num_total_pos)
+ return dict(loss_cls=losses_cls, loss_bbox=losses_bbox)
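+
+
+# Editorial note: unlike most anchor heads, `SSDHead.loss` first concatenates
+# predictions and targets across levels into per-image tensors of shape
+# (num_images, total_anchors, ...); `multi_apply` then iterates over the image
+# dimension, so `loss_single` and its hard negative mining run independently
+# for every image in the batch.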
diff --git a/annotator/uniformer/mmdet/models/dense_heads/transformer_head.py b/annotator/uniformer/mmdet/models/dense_heads/transformer_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..820fd069fcca295f6102f0d27366158a8c640249
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/dense_heads/transformer_head.py
@@ -0,0 +1,654 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from mmcv.cnn import Conv2d, Linear, build_activation_layer
+from mmcv.runner import force_fp32
+
+from mmdet.core import (bbox_cxcywh_to_xyxy, bbox_xyxy_to_cxcywh,
+ build_assigner, build_sampler, multi_apply,
+ reduce_mean)
+from mmdet.models.utils import (FFN, build_positional_encoding,
+ build_transformer)
+from ..builder import HEADS, build_loss
+from .anchor_free_head import AnchorFreeHead
+
+
+@HEADS.register_module()
+class TransformerHead(AnchorFreeHead):
+ """Implements the DETR transformer head.
+
+    See `End-to-End Object Detection with Transformers
+    <https://arxiv.org/abs/2005.12872>`_ for details.
+
+ Args:
+ num_classes (int): Number of categories excluding the background.
+ in_channels (int): Number of channels in the input feature map.
+ num_fcs (int, optional): Number of fully-connected layers used in
+ `FFN`, which is then used for the regression head. Default 2.
+ transformer (dict, optional): Config for transformer.
+ positional_encoding (dict, optional): Config for position encoding.
+ loss_cls (dict, optional): Config of the classification loss.
+ Default `CrossEntropyLoss`.
+ loss_bbox (dict, optional): Config of the regression loss.
+ Default `L1Loss`.
+ loss_iou (dict, optional): Config of the regression iou loss.
+ Default `GIoULoss`.
+        train_cfg (dict, optional): Training config of transformer head.
+ test_cfg (dict, optional): Testing config of transformer head.
+
+ Example:
+ >>> import torch
+ >>> self = TransformerHead(80, 2048)
+ >>> x = torch.rand(1, 2048, 32, 32)
+ >>> mask = torch.ones(1, 32, 32).to(x.dtype)
+ >>> mask[:, :16, :15] = 0
+ >>> all_cls_scores, all_bbox_preds = self(x, mask)
+ """
+
+ def __init__(self,
+ num_classes,
+ in_channels,
+ num_fcs=2,
+ transformer=dict(
+ type='Transformer',
+ embed_dims=256,
+ num_heads=8,
+ num_encoder_layers=6,
+ num_decoder_layers=6,
+ feedforward_channels=2048,
+ dropout=0.1,
+ act_cfg=dict(type='ReLU', inplace=True),
+ norm_cfg=dict(type='LN'),
+ num_fcs=2,
+ pre_norm=False,
+ return_intermediate_dec=True),
+ positional_encoding=dict(
+ type='SinePositionalEncoding',
+ num_feats=128,
+ normalize=True),
+ loss_cls=dict(
+ type='CrossEntropyLoss',
+ bg_cls_weight=0.1,
+ use_sigmoid=False,
+ loss_weight=1.0,
+ class_weight=1.0),
+ loss_bbox=dict(type='L1Loss', loss_weight=5.0),
+ loss_iou=dict(type='GIoULoss', loss_weight=2.0),
+ train_cfg=dict(
+ assigner=dict(
+ type='HungarianAssigner',
+ cls_cost=dict(type='ClassificationCost', weight=1.),
+ reg_cost=dict(type='BBoxL1Cost', weight=5.0),
+ iou_cost=dict(
+ type='IoUCost', iou_mode='giou', weight=2.0))),
+ test_cfg=dict(max_per_img=100),
+ **kwargs):
+ # NOTE here use `AnchorFreeHead` instead of `TransformerHead`,
+ # since it brings inconvenience when the initialization of
+ # `AnchorFreeHead` is called.
+ super(AnchorFreeHead, self).__init__()
+ use_sigmoid_cls = loss_cls.get('use_sigmoid', False)
+ assert not use_sigmoid_cls, 'setting use_sigmoid_cls as True is ' \
+ 'not supported in DETR, since background is needed for the ' \
+ 'matching process.'
+ assert 'embed_dims' in transformer \
+ and 'num_feats' in positional_encoding
+ num_feats = positional_encoding['num_feats']
+ embed_dims = transformer['embed_dims']
+ assert num_feats * 2 == embed_dims, 'embed_dims should' \
+ f' be exactly 2 times of num_feats. Found {embed_dims}' \
+ f' and {num_feats}.'
+ assert test_cfg is not None and 'max_per_img' in test_cfg
+
+ class_weight = loss_cls.get('class_weight', None)
+ if class_weight is not None:
+ assert isinstance(class_weight, float), 'Expected ' \
+ 'class_weight to have type float. Found ' \
+ f'{type(class_weight)}.'
+            # NOTE following the official DETR repo, bg_cls_weight means
+ # relative classification weight of the no-object class.
+ bg_cls_weight = loss_cls.get('bg_cls_weight', class_weight)
+ assert isinstance(bg_cls_weight, float), 'Expected ' \
+ 'bg_cls_weight to have type float. Found ' \
+ f'{type(bg_cls_weight)}.'
+ class_weight = torch.ones(num_classes + 1) * class_weight
+            # set background class as the last index
+ class_weight[num_classes] = bg_cls_weight
+ loss_cls.update({'class_weight': class_weight})
+ if 'bg_cls_weight' in loss_cls:
+ loss_cls.pop('bg_cls_weight')
+ self.bg_cls_weight = bg_cls_weight
+
+ if train_cfg:
+ assert 'assigner' in train_cfg, 'assigner should be provided '\
+ 'when train_cfg is set.'
+ assigner = train_cfg['assigner']
+ assert loss_cls['loss_weight'] == assigner['cls_cost']['weight'], \
+                'The classification weight for loss and matcher should be ' \
+                'exactly the same.'
+ assert loss_bbox['loss_weight'] == assigner['reg_cost'][
+ 'weight'], 'The regression L1 weight for loss and matcher ' \
+ 'should be exactly the same.'
+ assert loss_iou['loss_weight'] == assigner['iou_cost']['weight'], \
+                'The regression iou weight for loss and matcher should be ' \
+ 'exactly the same.'
+ self.assigner = build_assigner(assigner)
+ # DETR sampling=False, so use PseudoSampler
+ sampler_cfg = dict(type='PseudoSampler')
+ self.sampler = build_sampler(sampler_cfg, context=self)
+ self.num_classes = num_classes
+ self.cls_out_channels = num_classes + 1
+ self.in_channels = in_channels
+ self.num_fcs = num_fcs
+ self.train_cfg = train_cfg
+ self.test_cfg = test_cfg
+ self.use_sigmoid_cls = use_sigmoid_cls
+ self.embed_dims = embed_dims
+ self.num_query = test_cfg['max_per_img']
+ self.fp16_enabled = False
+ self.loss_cls = build_loss(loss_cls)
+ self.loss_bbox = build_loss(loss_bbox)
+ self.loss_iou = build_loss(loss_iou)
+ self.act_cfg = transformer.get('act_cfg',
+ dict(type='ReLU', inplace=True))
+ self.activate = build_activation_layer(self.act_cfg)
+ self.positional_encoding = build_positional_encoding(
+ positional_encoding)
+ self.transformer = build_transformer(transformer)
+ self._init_layers()
+
+ def _init_layers(self):
+ """Initialize layers of the transformer head."""
+ self.input_proj = Conv2d(
+ self.in_channels, self.embed_dims, kernel_size=1)
+ self.fc_cls = Linear(self.embed_dims, self.cls_out_channels)
+ self.reg_ffn = FFN(
+ self.embed_dims,
+ self.embed_dims,
+ self.num_fcs,
+ self.act_cfg,
+ dropout=0.0,
+ add_residual=False)
+ self.fc_reg = Linear(self.embed_dims, 4)
+ self.query_embedding = nn.Embedding(self.num_query, self.embed_dims)
+
+ def init_weights(self, distribution='uniform'):
+ """Initialize weights of the transformer head."""
+ # The initialization for transformer is important
+ self.transformer.init_weights()
+
+ def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
+ missing_keys, unexpected_keys, error_msgs):
+ """load checkpoints."""
+ # NOTE here use `AnchorFreeHead` instead of `TransformerHead`,
+ # since `AnchorFreeHead._load_from_state_dict` should not be
+ # called here. Invoking the default `Module._load_from_state_dict`
+ # is enough.
+ super(AnchorFreeHead,
+ self)._load_from_state_dict(state_dict, prefix, local_metadata,
+ strict, missing_keys,
+ unexpected_keys, error_msgs)
+
+ def forward(self, feats, img_metas):
+ """Forward function.
+
+ Args:
+ feats (tuple[Tensor]): Features from the upstream network, each is
+ a 4D-tensor.
+ img_metas (list[dict]): List of image information.
+
+ Returns:
+ tuple[list[Tensor], list[Tensor]]: Outputs for all scale levels.
+
+ - all_cls_scores_list (list[Tensor]): Classification scores \
+ for each scale level. Each is a 4D-tensor with shape \
+ [nb_dec, bs, num_query, cls_out_channels]. Note \
+                    `cls_out_channels` should include background.
+ - all_bbox_preds_list (list[Tensor]): Sigmoid regression \
+ outputs for each scale level. Each is a 4D-tensor with \
+ normalized coordinate format (cx, cy, w, h) and shape \
+ [nb_dec, bs, num_query, 4].
+ """
+ num_levels = len(feats)
+ img_metas_list = [img_metas for _ in range(num_levels)]
+ return multi_apply(self.forward_single, feats, img_metas_list)
+
+ def forward_single(self, x, img_metas):
+ """"Forward function for a single feature level.
+
+ Args:
+ x (Tensor): Input feature from backbone's single stage, shape
+ [bs, c, h, w].
+ img_metas (list[dict]): List of image information.
+
+ Returns:
+ all_cls_scores (Tensor): Outputs from the classification head,
+ shape [nb_dec, bs, num_query, cls_out_channels]. Note
+                cls_out_channels should include background.
+ all_bbox_preds (Tensor): Sigmoid outputs from the regression
+ head with normalized coordinate format (cx, cy, w, h).
+ Shape [nb_dec, bs, num_query, 4].
+ """
+        # construct binary masks which are used for the transformer.
+        # NOTE following the official DETR repo, non-zero values represent
+        # ignored positions, while zero values mean valid positions.
+ batch_size = x.size(0)
+ input_img_h, input_img_w = img_metas[0]['batch_input_shape']
+ masks = x.new_ones((batch_size, input_img_h, input_img_w))
+ for img_id in range(batch_size):
+ img_h, img_w, _ = img_metas[img_id]['img_shape']
+ masks[img_id, :img_h, :img_w] = 0
+
+ x = self.input_proj(x)
+ # interpolate masks to have the same spatial shape with x
+ masks = F.interpolate(
+ masks.unsqueeze(1), size=x.shape[-2:]).to(torch.bool).squeeze(1)
+ # position encoding
+ pos_embed = self.positional_encoding(masks) # [bs, embed_dim, h, w]
+ # outs_dec: [nb_dec, bs, num_query, embed_dim]
+ outs_dec, _ = self.transformer(x, masks, self.query_embedding.weight,
+ pos_embed)
+
+ all_cls_scores = self.fc_cls(outs_dec)
+ all_bbox_preds = self.fc_reg(self.activate(
+ self.reg_ffn(outs_dec))).sigmoid()
+ return all_cls_scores, all_bbox_preds
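+    # Editorial sketch of the padding mask built above (sizes are made up):
+    # inside the padded batch canvas, valid pixels are 0 and padded pixels are
+    # 1, and the mask is then resized to the feature resolution:
+    #   >>> x = torch.rand(1, 256, 25, 38)              # backbone feature
+    #   >>> masks = x.new_ones((1, 800, 1216))          # batch_input_shape
+    #   >>> masks[0, :780, :1190] = 0                   # this image's img_shape
+    #   >>> masks = F.interpolate(masks.unsqueeze(1),
+    #   ...                       size=x.shape[-2:]).to(torch.bool).squeeze(1)
+    #   >>> masks.shape                                 # torch.Size([1, 25, 38])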
+
+ @force_fp32(apply_to=('all_cls_scores_list', 'all_bbox_preds_list'))
+ def loss(self,
+ all_cls_scores_list,
+ all_bbox_preds_list,
+ gt_bboxes_list,
+ gt_labels_list,
+ img_metas,
+ gt_bboxes_ignore=None):
+ """"Loss function.
+
+ Only outputs from the last feature level are used for computing
+ losses by default.
+
+ Args:
+ all_cls_scores_list (list[Tensor]): Classification outputs
+ for each feature level. Each is a 4D-tensor with shape
+ [nb_dec, bs, num_query, cls_out_channels].
+ all_bbox_preds_list (list[Tensor]): Sigmoid regression
+ outputs for each feature level. Each is a 4D-tensor with
+ normalized coordinate format (cx, cy, w, h) and shape
+ [nb_dec, bs, num_query, 4].
+ gt_bboxes_list (list[Tensor]): Ground truth bboxes for each image
+ with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
+ gt_labels_list (list[Tensor]): Ground truth class indices for each
+ image with shape (num_gts, ).
+ img_metas (list[dict]): List of image meta information.
+ gt_bboxes_ignore (list[Tensor], optional): Bounding boxes
+ which can be ignored for each image. Default None.
+
+ Returns:
+ dict[str, Tensor]: A dictionary of loss components.
+ """
+        # NOTE by default only the outputs from the last feature scale are used.
+ all_cls_scores = all_cls_scores_list[-1]
+ all_bbox_preds = all_bbox_preds_list[-1]
+ assert gt_bboxes_ignore is None, \
+            'Only supports gt_bboxes_ignore being set to None.'
+
+ num_dec_layers = len(all_cls_scores)
+ all_gt_bboxes_list = [gt_bboxes_list for _ in range(num_dec_layers)]
+ all_gt_labels_list = [gt_labels_list for _ in range(num_dec_layers)]
+ all_gt_bboxes_ignore_list = [
+ gt_bboxes_ignore for _ in range(num_dec_layers)
+ ]
+ img_metas_list = [img_metas for _ in range(num_dec_layers)]
+
+ losses_cls, losses_bbox, losses_iou = multi_apply(
+ self.loss_single, all_cls_scores, all_bbox_preds,
+ all_gt_bboxes_list, all_gt_labels_list, img_metas_list,
+ all_gt_bboxes_ignore_list)
+
+ loss_dict = dict()
+ # loss from the last decoder layer
+ loss_dict['loss_cls'] = losses_cls[-1]
+ loss_dict['loss_bbox'] = losses_bbox[-1]
+ loss_dict['loss_iou'] = losses_iou[-1]
+ # loss from other decoder layers
+ num_dec_layer = 0
+ for loss_cls_i, loss_bbox_i, loss_iou_i in zip(losses_cls[:-1],
+ losses_bbox[:-1],
+ losses_iou[:-1]):
+ loss_dict[f'd{num_dec_layer}.loss_cls'] = loss_cls_i
+ loss_dict[f'd{num_dec_layer}.loss_bbox'] = loss_bbox_i
+ loss_dict[f'd{num_dec_layer}.loss_iou'] = loss_iou_i
+ num_dec_layer += 1
+ return loss_dict
+
+ def loss_single(self,
+ cls_scores,
+ bbox_preds,
+ gt_bboxes_list,
+ gt_labels_list,
+ img_metas,
+ gt_bboxes_ignore_list=None):
+ """"Loss function for outputs from a single decoder layer of a single
+ feature level.
+
+ Args:
+ cls_scores (Tensor): Box score logits from a single decoder layer
+ for all images. Shape [bs, num_query, cls_out_channels].
+ bbox_preds (Tensor): Sigmoid outputs from a single decoder layer
+ for all images, with normalized coordinate (cx, cy, w, h) and
+ shape [bs, num_query, 4].
+ gt_bboxes_list (list[Tensor]): Ground truth bboxes for each image
+ with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
+ gt_labels_list (list[Tensor]): Ground truth class indices for each
+ image with shape (num_gts, ).
+ img_metas (list[dict]): List of image meta information.
+ gt_bboxes_ignore_list (list[Tensor], optional): Bounding
+ boxes which can be ignored for each image. Default None.
+
+ Returns:
+            tuple[Tensor]: A tuple of loss_cls, loss_bbox and loss_iou for
+                outputs from a single decoder layer.
+ """
+ num_imgs = cls_scores.size(0)
+ cls_scores_list = [cls_scores[i] for i in range(num_imgs)]
+ bbox_preds_list = [bbox_preds[i] for i in range(num_imgs)]
+ cls_reg_targets = self.get_targets(cls_scores_list, bbox_preds_list,
+ gt_bboxes_list, gt_labels_list,
+ img_metas, gt_bboxes_ignore_list)
+ (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,
+ num_total_pos, num_total_neg) = cls_reg_targets
+ labels = torch.cat(labels_list, 0)
+ label_weights = torch.cat(label_weights_list, 0)
+ bbox_targets = torch.cat(bbox_targets_list, 0)
+ bbox_weights = torch.cat(bbox_weights_list, 0)
+
+ # classification loss
+ cls_scores = cls_scores.reshape(-1, self.cls_out_channels)
+ # construct weighted avg_factor to match with the official DETR repo
+ cls_avg_factor = num_total_pos * 1.0 + \
+ num_total_neg * self.bg_cls_weight
+ loss_cls = self.loss_cls(
+ cls_scores, labels, label_weights, avg_factor=cls_avg_factor)
+
+        # Compute the average number of gt boxes across all GPUs, for
+ # normalization purposes
+ num_total_pos = loss_cls.new_tensor([num_total_pos])
+ num_total_pos = torch.clamp(reduce_mean(num_total_pos), min=1).item()
+
+        # construct factors used to rescale bboxes
+ factors = []
+ for img_meta, bbox_pred in zip(img_metas, bbox_preds):
+ img_h, img_w, _ = img_meta['img_shape']
+ factor = bbox_pred.new_tensor([img_w, img_h, img_w,
+ img_h]).unsqueeze(0).repeat(
+ bbox_pred.size(0), 1)
+ factors.append(factor)
+ factors = torch.cat(factors, 0)
+
+        # DETR regresses the relative position of boxes (cxcywh) in the image,
+        # thus the learning target is normalized by the image size. So here
+        # we need to re-scale them back for calculating the IoU loss.
+ bbox_preds = bbox_preds.reshape(-1, 4)
+ bboxes = bbox_cxcywh_to_xyxy(bbox_preds) * factors
+ bboxes_gt = bbox_cxcywh_to_xyxy(bbox_targets) * factors
+
+        # regression IoU loss, GIoU loss by default
+ loss_iou = self.loss_iou(
+ bboxes, bboxes_gt, bbox_weights, avg_factor=num_total_pos)
+
+ # regression L1 loss
+ loss_bbox = self.loss_bbox(
+ bbox_preds, bbox_targets, bbox_weights, avg_factor=num_total_pos)
+ return loss_cls, loss_bbox, loss_iou
+
+ def get_targets(self,
+ cls_scores_list,
+ bbox_preds_list,
+ gt_bboxes_list,
+ gt_labels_list,
+ img_metas,
+ gt_bboxes_ignore_list=None):
+ """"Compute regression and classification targets for a batch image.
+
+ Outputs from a single decoder layer of a single feature level are used.
+
+ Args:
+ cls_scores_list (list[Tensor]): Box score logits from a single
+ decoder layer for each image with shape [num_query,
+ cls_out_channels].
+ bbox_preds_list (list[Tensor]): Sigmoid outputs from a single
+ decoder layer for each image, with normalized coordinate
+ (cx, cy, w, h) and shape [num_query, 4].
+ gt_bboxes_list (list[Tensor]): Ground truth bboxes for each image
+ with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
+ gt_labels_list (list[Tensor]): Ground truth class indices for each
+ image with shape (num_gts, ).
+ img_metas (list[dict]): List of image meta information.
+ gt_bboxes_ignore_list (list[Tensor], optional): Bounding
+ boxes which can be ignored for each image. Default None.
+
+ Returns:
+ tuple: a tuple containing the following targets.
+
+ - labels_list (list[Tensor]): Labels for all images.
+ - label_weights_list (list[Tensor]): Label weights for all \
+ images.
+ - bbox_targets_list (list[Tensor]): BBox targets for all \
+ images.
+ - bbox_weights_list (list[Tensor]): BBox weights for all \
+ images.
+ - num_total_pos (int): Number of positive samples in all \
+ images.
+ - num_total_neg (int): Number of negative samples in all \
+ images.
+ """
+ assert gt_bboxes_ignore_list is None, \
+            'Only supports gt_bboxes_ignore being set to None.'
+ num_imgs = len(cls_scores_list)
+ gt_bboxes_ignore_list = [
+ gt_bboxes_ignore_list for _ in range(num_imgs)
+ ]
+
+ (labels_list, label_weights_list, bbox_targets_list,
+ bbox_weights_list, pos_inds_list, neg_inds_list) = multi_apply(
+ self._get_target_single, cls_scores_list, bbox_preds_list,
+ gt_bboxes_list, gt_labels_list, img_metas, gt_bboxes_ignore_list)
+ num_total_pos = sum((inds.numel() for inds in pos_inds_list))
+ num_total_neg = sum((inds.numel() for inds in neg_inds_list))
+ return (labels_list, label_weights_list, bbox_targets_list,
+ bbox_weights_list, num_total_pos, num_total_neg)
+
+ def _get_target_single(self,
+ cls_score,
+ bbox_pred,
+ gt_bboxes,
+ gt_labels,
+ img_meta,
+ gt_bboxes_ignore=None):
+ """"Compute regression and classification targets for one image.
+
+ Outputs from a single decoder layer of a single feature level are used.
+
+ Args:
+ cls_score (Tensor): Box score logits from a single decoder layer
+ for one image. Shape [num_query, cls_out_channels].
+ bbox_pred (Tensor): Sigmoid outputs from a single decoder layer
+ for one image, with normalized coordinate (cx, cy, w, h) and
+ shape [num_query, 4].
+ gt_bboxes (Tensor): Ground truth bboxes for one image with
+ shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
+ gt_labels (Tensor): Ground truth class indices for one image
+ with shape (num_gts, ).
+ img_meta (dict): Meta information for one image.
+ gt_bboxes_ignore (Tensor, optional): Bounding boxes
+ which can be ignored. Default None.
+
+ Returns:
+ tuple[Tensor]: a tuple containing the following for one image.
+
+ - labels (Tensor): Labels of each image.
+                - label_weights (Tensor): Label weights of each image.
+ - bbox_targets (Tensor): BBox targets of each image.
+ - bbox_weights (Tensor): BBox weights of each image.
+ - pos_inds (Tensor): Sampled positive indices for each image.
+ - neg_inds (Tensor): Sampled negative indices for each image.
+ """
+
+ num_bboxes = bbox_pred.size(0)
+ # assigner and sampler
+ assign_result = self.assigner.assign(bbox_pred, cls_score, gt_bboxes,
+ gt_labels, img_meta,
+ gt_bboxes_ignore)
+ sampling_result = self.sampler.sample(assign_result, bbox_pred,
+ gt_bboxes)
+ pos_inds = sampling_result.pos_inds
+ neg_inds = sampling_result.neg_inds
+
+ # label targets
+ labels = gt_bboxes.new_full((num_bboxes, ),
+ self.num_classes,
+ dtype=torch.long)
+ labels[pos_inds] = gt_labels[sampling_result.pos_assigned_gt_inds]
+ label_weights = gt_bboxes.new_ones(num_bboxes)
+
+ # bbox targets
+ bbox_targets = torch.zeros_like(bbox_pred)
+ bbox_weights = torch.zeros_like(bbox_pred)
+ bbox_weights[pos_inds] = 1.0
+ img_h, img_w, _ = img_meta['img_shape']
+
+        # DETR regresses the relative position of boxes (cxcywh) in the image.
+        # Thus the learning target should be normalized by the image size, and
+        # the box format should be converted from the default x1y1x2y2 to cxcywh.
+ factor = bbox_pred.new_tensor([img_w, img_h, img_w,
+ img_h]).unsqueeze(0)
+ pos_gt_bboxes_normalized = sampling_result.pos_gt_bboxes / factor
+ pos_gt_bboxes_targets = bbox_xyxy_to_cxcywh(pos_gt_bboxes_normalized)
+ bbox_targets[pos_inds] = pos_gt_bboxes_targets
+ return (labels, label_weights, bbox_targets, bbox_weights, pos_inds,
+ neg_inds)
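+    # Editorial sketch of the target normalization above (image size is made
+    # up): matched GT boxes are divided by the image size and converted from
+    # x1y1x2y2 to cxcywh so they live in the same normalized space as the
+    # sigmoid box predictions:
+    #   >>> gt = torch.tensor([[100., 150., 300., 350.]])     # x1, y1, x2, y2
+    #   >>> factor = gt.new_tensor([640., 480., 640., 480.])  # img_w, img_h, img_w, img_h
+    #   >>> bbox_xyxy_to_cxcywh(gt / factor)
+    #   tensor([[0.3125, 0.5208, 0.3125, 0.4167]])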
+
+    # Overwritten because img_metas are needed as inputs for bbox_head.
+ def forward_train(self,
+ x,
+ img_metas,
+ gt_bboxes,
+ gt_labels=None,
+ gt_bboxes_ignore=None,
+ proposal_cfg=None,
+ **kwargs):
+ """Forward function for training mode.
+
+ Args:
+ x (list[Tensor]): Features from backbone.
+ img_metas (list[dict]): Meta information of each image, e.g.,
+ image size, scaling factor, etc.
+ gt_bboxes (Tensor): Ground truth bboxes of the image,
+ shape (num_gts, 4).
+ gt_labels (Tensor): Ground truth labels of each box,
+ shape (num_gts,).
+ gt_bboxes_ignore (Tensor): Ground truth bboxes to be
+ ignored, shape (num_ignored_gts, 4).
+ proposal_cfg (mmcv.Config): Test / postprocessing configuration,
+ if None, test_cfg would be used.
+
+ Returns:
+ dict[str, Tensor]: A dictionary of loss components.
+ """
+ assert proposal_cfg is None, '"proposal_cfg" must be None'
+ outs = self(x, img_metas)
+ if gt_labels is None:
+ loss_inputs = outs + (gt_bboxes, img_metas)
+ else:
+ loss_inputs = outs + (gt_bboxes, gt_labels, img_metas)
+ losses = self.loss(*loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore)
+ return losses
+
+ @force_fp32(apply_to=('all_cls_scores_list', 'all_bbox_preds_list'))
+ def get_bboxes(self,
+ all_cls_scores_list,
+ all_bbox_preds_list,
+ img_metas,
+ rescale=False):
+ """Transform network outputs for a batch into bbox predictions.
+
+ Args:
+ all_cls_scores_list (list[Tensor]): Classification outputs
+ for each feature level. Each is a 4D-tensor with shape
+ [nb_dec, bs, num_query, cls_out_channels].
+ all_bbox_preds_list (list[Tensor]): Sigmoid regression
+ outputs for each feature level. Each is a 4D-tensor with
+ normalized coordinate format (cx, cy, w, h) and shape
+ [nb_dec, bs, num_query, 4].
+ img_metas (list[dict]): Meta information of each image.
+ rescale (bool, optional): If True, return boxes in original
+ image space. Default False.
+
+ Returns:
+            list[list[Tensor, Tensor]]: Each item in result_list is a 2-tuple. \
+ The first item is an (n, 5) tensor, where the first 4 columns \
+ are bounding box positions (tl_x, tl_y, br_x, br_y) and the \
+ 5-th column is a score between 0 and 1. The second item is a \
+ (n,) tensor where each item is the predicted class label of \
+ the corresponding box.
+ """
+        # NOTE: by default only the outputs from the last feature level and
+        # the last decoder layer are used.
+ cls_scores = all_cls_scores_list[-1][-1]
+ bbox_preds = all_bbox_preds_list[-1][-1]
+
+ result_list = []
+ for img_id in range(len(img_metas)):
+ cls_score = cls_scores[img_id]
+ bbox_pred = bbox_preds[img_id]
+ img_shape = img_metas[img_id]['img_shape']
+ scale_factor = img_metas[img_id]['scale_factor']
+ proposals = self._get_bboxes_single(cls_score, bbox_pred,
+ img_shape, scale_factor,
+ rescale)
+ result_list.append(proposals)
+ return result_list
+
+ def _get_bboxes_single(self,
+ cls_score,
+ bbox_pred,
+ img_shape,
+ scale_factor,
+ rescale=False):
+ """Transform outputs from the last decoder layer into bbox predictions
+ for each image.
+
+ Args:
+ cls_score (Tensor): Box score logits from the last decoder layer
+ for each image. Shape [num_query, cls_out_channels].
+ bbox_pred (Tensor): Sigmoid outputs from the last decoder layer
+ for each image, with coordinate format (cx, cy, w, h) and
+ shape [num_query, 4].
+ img_shape (tuple[int]): Shape of input image, (height, width, 3).
+            scale_factor (ndarray, optional): Scale factor of the image arranged
+ as (w_scale, h_scale, w_scale, h_scale).
+ rescale (bool, optional): If True, return boxes in original image
+ space. Default False.
+
+ Returns:
+ tuple[Tensor]: Results of detected bboxes and labels.
+
+ - det_bboxes: Predicted bboxes with shape [num_query, 5], \
+ where the first 4 columns are bounding box positions \
+ (tl_x, tl_y, br_x, br_y) and the 5-th column are scores \
+ between 0 and 1.
+ - det_labels: Predicted labels of the corresponding box with \
+ shape [num_query].
+ """
+ assert len(cls_score) == len(bbox_pred)
+ # exclude background
+ scores, det_labels = F.softmax(cls_score, dim=-1)[..., :-1].max(-1)
+ det_bboxes = bbox_cxcywh_to_xyxy(bbox_pred)
+ det_bboxes[:, 0::2] = det_bboxes[:, 0::2] * img_shape[1]
+ det_bboxes[:, 1::2] = det_bboxes[:, 1::2] * img_shape[0]
+ det_bboxes[:, 0::2].clamp_(min=0, max=img_shape[1])
+ det_bboxes[:, 1::2].clamp_(min=0, max=img_shape[0])
+ if rescale:
+ det_bboxes /= det_bboxes.new_tensor(scale_factor)
+ det_bboxes = torch.cat((det_bboxes, scores.unsqueeze(1)), -1)
+ return det_bboxes, det_labels
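+
+
+def _example_decode_cxcywh_prediction():
+    """A minimal sketch (hypothetical helper, not part of the original file):
+    decode one normalized (cx, cy, w, h) prediction into clamped pixel-space
+    (x1, y1, x2, y2), mirroring the steps in `_get_bboxes_single` above."""
+    import torch
+    img_h, img_w = 480, 640
+    bbox_pred = torch.tensor([[0.5, 0.5, 0.4, 0.3]])  # normalized cxcywh
+    cx, cy, w, h = bbox_pred.unbind(-1)
+    # same conversion as bbox_cxcywh_to_xyxy
+    boxes = torch.stack(
+        [cx - w / 2, cy - h / 2, cx + w / 2, cy + h / 2], dim=-1)
+    boxes[:, 0::2] = (boxes[:, 0::2] * img_w).clamp(min=0, max=img_w)
+    boxes[:, 1::2] = (boxes[:, 1::2] * img_h).clamp(min=0, max=img_h)
+    return boxes  # tensor([[192., 168., 448., 312.]])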
diff --git a/annotator/uniformer/mmdet/models/dense_heads/vfnet_head.py b/annotator/uniformer/mmdet/models/dense_heads/vfnet_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..7243bb62893839568ec51928d88a5ad40b02a66c
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/dense_heads/vfnet_head.py
@@ -0,0 +1,794 @@
+import numpy as np
+import torch
+import torch.nn as nn
+from mmcv.cnn import ConvModule, Scale, bias_init_with_prob, normal_init
+from mmcv.ops import DeformConv2d
+from mmcv.runner import force_fp32
+
+from mmdet.core import (bbox2distance, bbox_overlaps, build_anchor_generator,
+ build_assigner, build_sampler, distance2bbox,
+ multi_apply, multiclass_nms, reduce_mean)
+from ..builder import HEADS, build_loss
+from .atss_head import ATSSHead
+from .fcos_head import FCOSHead
+
+INF = 1e8
+
+
+@HEADS.register_module()
+class VFNetHead(ATSSHead, FCOSHead):
+ """Head of `VarifocalNet (VFNet): An IoU-aware Dense Object
+ Detector.`_.
+
+ The VFNet predicts IoU-aware classification scores which mix the
+ object presence confidence and object localization accuracy as the
+ detection score. It is built on the FCOS architecture and uses ATSS
+ for defining positive/negative training examples. The VFNet is trained
+    with Varifocal Loss and employs star-shaped deformable convolution to
+ extract features for a bbox.
+
+ Args:
+ num_classes (int): Number of categories excluding the background
+ category.
+ in_channels (int): Number of channels in the input feature map.
+ regress_ranges (tuple[tuple[int, int]]): Regress range of multiple
+ level points.
+ center_sampling (bool): If true, use center sampling. Default: False.
+ center_sample_radius (float): Radius of center sampling. Default: 1.5.
+ sync_num_pos (bool): If true, synchronize the number of positive
+ examples across GPUs. Default: True
+ gradient_mul (float): The multiplier to gradients from bbox refinement
+ and recognition. Default: 0.1.
+ bbox_norm_type (str): The bbox normalization type, 'reg_denom' or
+ 'stride'. Default: reg_denom
+ loss_cls_fl (dict): Config of focal loss.
+ use_vfl (bool): If true, use varifocal loss for training.
+ Default: True.
+ loss_cls (dict): Config of varifocal loss.
+ loss_bbox (dict): Config of localization loss, GIoU Loss.
+        loss_bbox_refine (dict): Config of localization refinement loss,
+            GIoU Loss.
+ norm_cfg (dict): dictionary to construct and config norm layer.
+ Default: norm_cfg=dict(type='GN', num_groups=32,
+ requires_grad=True).
+ use_atss (bool): If true, use ATSS to define positive/negative
+ examples. Default: True.
+ anchor_generator (dict): Config of anchor generator for ATSS.
+
+ Example:
+ >>> self = VFNetHead(11, 7)
+ >>> feats = [torch.rand(1, 7, s, s) for s in [4, 8, 16, 32, 64]]
+        >>> cls_score, bbox_pred, bbox_pred_refine = self.forward(feats)
+ >>> assert len(cls_score) == len(self.scales)
+ """ # noqa: E501
+
+ def __init__(self,
+ num_classes,
+ in_channels,
+ regress_ranges=((-1, 64), (64, 128), (128, 256), (256, 512),
+ (512, INF)),
+ center_sampling=False,
+ center_sample_radius=1.5,
+ sync_num_pos=True,
+ gradient_mul=0.1,
+ bbox_norm_type='reg_denom',
+ loss_cls_fl=dict(
+ type='FocalLoss',
+ use_sigmoid=True,
+ gamma=2.0,
+ alpha=0.25,
+ loss_weight=1.0),
+ use_vfl=True,
+ loss_cls=dict(
+ type='VarifocalLoss',
+ use_sigmoid=True,
+ alpha=0.75,
+ gamma=2.0,
+ iou_weighted=True,
+ loss_weight=1.0),
+ loss_bbox=dict(type='GIoULoss', loss_weight=1.5),
+ loss_bbox_refine=dict(type='GIoULoss', loss_weight=2.0),
+ norm_cfg=dict(type='GN', num_groups=32, requires_grad=True),
+ use_atss=True,
+ anchor_generator=dict(
+ type='AnchorGenerator',
+ ratios=[1.0],
+ octave_base_scale=8,
+ scales_per_octave=1,
+ center_offset=0.0,
+ strides=[8, 16, 32, 64, 128]),
+ **kwargs):
+ # dcn base offsets, adapted from reppoints_head.py
+ self.num_dconv_points = 9
+ self.dcn_kernel = int(np.sqrt(self.num_dconv_points))
+ self.dcn_pad = int((self.dcn_kernel - 1) / 2)
+ dcn_base = np.arange(-self.dcn_pad,
+ self.dcn_pad + 1).astype(np.float64)
+ dcn_base_y = np.repeat(dcn_base, self.dcn_kernel)
+ dcn_base_x = np.tile(dcn_base, self.dcn_kernel)
+ dcn_base_offset = np.stack([dcn_base_y, dcn_base_x], axis=1).reshape(
+ (-1))
+ self.dcn_base_offset = torch.tensor(dcn_base_offset).view(1, -1, 1, 1)
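+        # With the default 3x3 kernel this gives nine (dy, dx) pairs laid out
+        # row by row: (-1,-1), (-1,0), (-1,1), (0,-1), (0,0), (0,1),
+        # (1,-1), (1,0), (1,1), i.e. the regular grid that `star_dcn_offset`
+        # later deforms into a star over the predicted box.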
+
+ super(FCOSHead, self).__init__(
+ num_classes, in_channels, norm_cfg=norm_cfg, **kwargs)
+ self.regress_ranges = regress_ranges
+ self.reg_denoms = [
+ regress_range[-1] for regress_range in regress_ranges
+ ]
+ self.reg_denoms[-1] = self.reg_denoms[-2] * 2
+ self.center_sampling = center_sampling
+ self.center_sample_radius = center_sample_radius
+ self.sync_num_pos = sync_num_pos
+ self.bbox_norm_type = bbox_norm_type
+ self.gradient_mul = gradient_mul
+ self.use_vfl = use_vfl
+ if self.use_vfl:
+ self.loss_cls = build_loss(loss_cls)
+ else:
+ self.loss_cls = build_loss(loss_cls_fl)
+ self.loss_bbox = build_loss(loss_bbox)
+ self.loss_bbox_refine = build_loss(loss_bbox_refine)
+
+ # for getting ATSS targets
+ self.use_atss = use_atss
+ self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False)
+ self.anchor_generator = build_anchor_generator(anchor_generator)
+ self.anchor_center_offset = anchor_generator['center_offset']
+ self.num_anchors = self.anchor_generator.num_base_anchors[0]
+ self.sampling = False
+ if self.train_cfg:
+ self.assigner = build_assigner(self.train_cfg.assigner)
+ sampler_cfg = dict(type='PseudoSampler')
+ self.sampler = build_sampler(sampler_cfg, context=self)
+
+ def _init_layers(self):
+ """Initialize layers of the head."""
+ super(FCOSHead, self)._init_cls_convs()
+ super(FCOSHead, self)._init_reg_convs()
+ self.relu = nn.ReLU(inplace=True)
+ self.vfnet_reg_conv = ConvModule(
+ self.feat_channels,
+ self.feat_channels,
+ 3,
+ stride=1,
+ padding=1,
+ conv_cfg=self.conv_cfg,
+ norm_cfg=self.norm_cfg,
+ bias=self.conv_bias)
+ self.vfnet_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1)
+ self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides])
+
+ self.vfnet_reg_refine_dconv = DeformConv2d(
+ self.feat_channels,
+ self.feat_channels,
+ self.dcn_kernel,
+ 1,
+ padding=self.dcn_pad)
+ self.vfnet_reg_refine = nn.Conv2d(self.feat_channels, 4, 3, padding=1)
+ self.scales_refine = nn.ModuleList([Scale(1.0) for _ in self.strides])
+
+ self.vfnet_cls_dconv = DeformConv2d(
+ self.feat_channels,
+ self.feat_channels,
+ self.dcn_kernel,
+ 1,
+ padding=self.dcn_pad)
+ self.vfnet_cls = nn.Conv2d(
+ self.feat_channels, self.cls_out_channels, 3, padding=1)
+
+ def init_weights(self):
+ """Initialize weights of the head."""
+ for m in self.cls_convs:
+ if isinstance(m.conv, nn.Conv2d):
+ normal_init(m.conv, std=0.01)
+ for m in self.reg_convs:
+ if isinstance(m.conv, nn.Conv2d):
+ normal_init(m.conv, std=0.01)
+ normal_init(self.vfnet_reg_conv.conv, std=0.01)
+ normal_init(self.vfnet_reg, std=0.01)
+ normal_init(self.vfnet_reg_refine_dconv, std=0.01)
+ normal_init(self.vfnet_reg_refine, std=0.01)
+ normal_init(self.vfnet_cls_dconv, std=0.01)
+ bias_cls = bias_init_with_prob(0.01)
+ normal_init(self.vfnet_cls, std=0.01, bias=bias_cls)
+
+ def forward(self, feats):
+ """Forward features from the upstream network.
+
+ Args:
+ feats (tuple[Tensor]): Features from the upstream network, each is
+ a 4D-tensor.
+
+ Returns:
+ tuple:
+ cls_scores (list[Tensor]): Box iou-aware scores for each scale
+ level, each is a 4D-tensor, the channel number is
+ num_points * num_classes.
+ bbox_preds (list[Tensor]): Box offsets for each
+ scale level, each is a 4D-tensor, the channel number is
+ num_points * 4.
+ bbox_preds_refine (list[Tensor]): Refined Box offsets for
+ each scale level, each is a 4D-tensor, the channel
+ number is num_points * 4.
+ """
+ return multi_apply(self.forward_single, feats, self.scales,
+ self.scales_refine, self.strides, self.reg_denoms)
+
+ def forward_single(self, x, scale, scale_refine, stride, reg_denom):
+ """Forward features of a single scale level.
+
+ Args:
+ x (Tensor): FPN feature maps of the specified stride.
+            scale (:obj:`mmcv.cnn.Scale`): Learnable scale module to resize
+                the bbox prediction.
+            scale_refine (:obj:`mmcv.cnn.Scale`): Learnable scale module to
+ resize the refined bbox prediction.
+ stride (int): The corresponding stride for feature maps,
+ used to normalize the bbox prediction when
+ bbox_norm_type = 'stride'.
+ reg_denom (int): The corresponding regression range for feature
+ maps, only used to normalize the bbox prediction when
+ bbox_norm_type = 'reg_denom'.
+
+ Returns:
+ tuple: iou-aware cls scores for each box, bbox predictions and
+ refined bbox predictions of input feature maps.
+ """
+ cls_feat = x
+ reg_feat = x
+
+ for cls_layer in self.cls_convs:
+ cls_feat = cls_layer(cls_feat)
+
+ for reg_layer in self.reg_convs:
+ reg_feat = reg_layer(reg_feat)
+
+        # predict the bbox offsets for this level
+ reg_feat_init = self.vfnet_reg_conv(reg_feat)
+ if self.bbox_norm_type == 'reg_denom':
+ bbox_pred = scale(
+ self.vfnet_reg(reg_feat_init)).float().exp() * reg_denom
+ elif self.bbox_norm_type == 'stride':
+ bbox_pred = scale(
+ self.vfnet_reg(reg_feat_init)).float().exp() * stride
+ else:
+ raise NotImplementedError
+
+ # compute star deformable convolution offsets
+        # convert dcn_offset to reg_feat.dtype so that VFNet can be
+ # trained with FP16
+ dcn_offset = self.star_dcn_offset(bbox_pred, self.gradient_mul,
+ stride).to(reg_feat.dtype)
+
+ # refine the bbox_pred
+ reg_feat = self.relu(self.vfnet_reg_refine_dconv(reg_feat, dcn_offset))
+ bbox_pred_refine = scale_refine(
+ self.vfnet_reg_refine(reg_feat)).float().exp()
+ bbox_pred_refine = bbox_pred_refine * bbox_pred.detach()
+
+ # predict the iou-aware cls score
+ cls_feat = self.relu(self.vfnet_cls_dconv(cls_feat, dcn_offset))
+ cls_score = self.vfnet_cls(cls_feat)
+
+ return cls_score, bbox_pred, bbox_pred_refine
+
+ def star_dcn_offset(self, bbox_pred, gradient_mul, stride):
+ """Compute the star deformable conv offsets.
+
+ Args:
+ bbox_pred (Tensor): Predicted bbox distance offsets (l, r, t, b).
+ gradient_mul (float): Gradient multiplier.
+ stride (int): The corresponding stride for feature maps,
+ used to project the bbox onto the feature map.
+
+ Returns:
+ dcn_offsets (Tensor): The offsets for deformable convolution.
+ """
+ dcn_base_offset = self.dcn_base_offset.type_as(bbox_pred)
+ bbox_pred_grad_mul = (1 - gradient_mul) * bbox_pred.detach() + \
+ gradient_mul * bbox_pred
+ # map to the feature map scale
+ bbox_pred_grad_mul = bbox_pred_grad_mul / stride
+ N, C, H, W = bbox_pred.size()
+
+ x1 = bbox_pred_grad_mul[:, 0, :, :]
+ y1 = bbox_pred_grad_mul[:, 1, :, :]
+ x2 = bbox_pred_grad_mul[:, 2, :, :]
+ y2 = bbox_pred_grad_mul[:, 3, :, :]
+ bbox_pred_grad_mul_offset = bbox_pred.new_zeros(
+ N, 2 * self.num_dconv_points, H, W)
+ bbox_pred_grad_mul_offset[:, 0, :, :] = -1.0 * y1 # -y1
+ bbox_pred_grad_mul_offset[:, 1, :, :] = -1.0 * x1 # -x1
+ bbox_pred_grad_mul_offset[:, 2, :, :] = -1.0 * y1 # -y1
+ bbox_pred_grad_mul_offset[:, 4, :, :] = -1.0 * y1 # -y1
+ bbox_pred_grad_mul_offset[:, 5, :, :] = x2 # x2
+ bbox_pred_grad_mul_offset[:, 7, :, :] = -1.0 * x1 # -x1
+ bbox_pred_grad_mul_offset[:, 11, :, :] = x2 # x2
+ bbox_pred_grad_mul_offset[:, 12, :, :] = y2 # y2
+ bbox_pred_grad_mul_offset[:, 13, :, :] = -1.0 * x1 # -x1
+ bbox_pred_grad_mul_offset[:, 14, :, :] = y2 # y2
+ bbox_pred_grad_mul_offset[:, 16, :, :] = y2 # y2
+ bbox_pred_grad_mul_offset[:, 17, :, :] = x2 # x2
+ dcn_offset = bbox_pred_grad_mul_offset - dcn_base_offset
+
+ return dcn_offset
+
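+    # A minimal sketch (hypothetical helper, not part of the original head) of
+    # the star layout produced by `star_dcn_offset` for a single location:
+    # with predicted distances (l, t, r, b) from the point, the nine
+    # deformable-conv sampling points, measured from the centre location,
+    # land on the box corners, the edge midpoints and the centre itself.
+    @staticmethod
+    def _example_star_points():
+        """Illustrative sketch only; distances are already in feature-map
+        units (i.e. divided by the stride)."""
+        l, t, r, b = 2.0, 1.0, 3.0, 2.0
+        # (dy, dx) displacements of the nine star points from the centre,
+        # in the same order the offset channels are filled above.
+        star = [(-t, -l), (-t, 0.0), (-t, r),
+                (0.0, -l), (0.0, 0.0), (0.0, r),
+                (b, -l), (b, 0.0), (b, r)]
+        return star
+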
+ @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'bbox_preds_refine'))
+ def loss(self,
+ cls_scores,
+ bbox_preds,
+ bbox_preds_refine,
+ gt_bboxes,
+ gt_labels,
+ img_metas,
+ gt_bboxes_ignore=None):
+ """Compute loss of the head.
+
+ Args:
+ cls_scores (list[Tensor]): Box iou-aware scores for each scale
+ level, each is a 4D-tensor, the channel number is
+ num_points * num_classes.
+ bbox_preds (list[Tensor]): Box offsets for each
+ scale level, each is a 4D-tensor, the channel number is
+ num_points * 4.
+ bbox_preds_refine (list[Tensor]): Refined Box offsets for
+ each scale level, each is a 4D-tensor, the channel
+ number is num_points * 4.
+ gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
+ shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
+            gt_labels (list[Tensor]): Class indices corresponding to each box.
+ img_metas (list[dict]): Meta information of each image, e.g.,
+ image size, scaling factor, etc.
+ gt_bboxes_ignore (None | list[Tensor]): specify which bounding
+ boxes can be ignored when computing the loss.
+ Default: None.
+
+ Returns:
+ dict[str, Tensor]: A dictionary of loss components.
+ """
+ assert len(cls_scores) == len(bbox_preds) == len(bbox_preds_refine)
+ featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
+ all_level_points = self.get_points(featmap_sizes, bbox_preds[0].dtype,
+ bbox_preds[0].device)
+ labels, label_weights, bbox_targets, bbox_weights = self.get_targets(
+ cls_scores, all_level_points, gt_bboxes, gt_labels, img_metas,
+ gt_bboxes_ignore)
+
+ num_imgs = cls_scores[0].size(0)
+ # flatten cls_scores, bbox_preds and bbox_preds_refine
+ flatten_cls_scores = [
+ cls_score.permute(0, 2, 3,
+ 1).reshape(-1,
+ self.cls_out_channels).contiguous()
+ for cls_score in cls_scores
+ ]
+ flatten_bbox_preds = [
+ bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4).contiguous()
+ for bbox_pred in bbox_preds
+ ]
+ flatten_bbox_preds_refine = [
+ bbox_pred_refine.permute(0, 2, 3, 1).reshape(-1, 4).contiguous()
+ for bbox_pred_refine in bbox_preds_refine
+ ]
+ flatten_cls_scores = torch.cat(flatten_cls_scores)
+ flatten_bbox_preds = torch.cat(flatten_bbox_preds)
+ flatten_bbox_preds_refine = torch.cat(flatten_bbox_preds_refine)
+ flatten_labels = torch.cat(labels)
+ flatten_bbox_targets = torch.cat(bbox_targets)
+ # repeat points to align with bbox_preds
+ flatten_points = torch.cat(
+ [points.repeat(num_imgs, 1) for points in all_level_points])
+
+ # FG cat_id: [0, num_classes - 1], BG cat_id: num_classes
+ bg_class_ind = self.num_classes
+ pos_inds = torch.where(
+ ((flatten_labels >= 0) & (flatten_labels < bg_class_ind)) > 0)[0]
+ num_pos = len(pos_inds)
+
+ pos_bbox_preds = flatten_bbox_preds[pos_inds]
+ pos_bbox_preds_refine = flatten_bbox_preds_refine[pos_inds]
+ pos_labels = flatten_labels[pos_inds]
+
+ # sync num_pos across all gpus
+ if self.sync_num_pos:
+ num_pos_avg_per_gpu = reduce_mean(
+ pos_inds.new_tensor(num_pos).float()).item()
+ num_pos_avg_per_gpu = max(num_pos_avg_per_gpu, 1.0)
+ else:
+ num_pos_avg_per_gpu = num_pos
+
+ if num_pos > 0:
+ pos_bbox_targets = flatten_bbox_targets[pos_inds]
+ pos_points = flatten_points[pos_inds]
+
+ pos_decoded_bbox_preds = distance2bbox(pos_points, pos_bbox_preds)
+ pos_decoded_target_preds = distance2bbox(pos_points,
+ pos_bbox_targets)
+ iou_targets_ini = bbox_overlaps(
+ pos_decoded_bbox_preds,
+ pos_decoded_target_preds.detach(),
+ is_aligned=True).clamp(min=1e-6)
+ bbox_weights_ini = iou_targets_ini.clone().detach()
+ iou_targets_ini_avg_per_gpu = reduce_mean(
+ bbox_weights_ini.sum()).item()
+ bbox_avg_factor_ini = max(iou_targets_ini_avg_per_gpu, 1.0)
+ loss_bbox = self.loss_bbox(
+ pos_decoded_bbox_preds,
+ pos_decoded_target_preds.detach(),
+ weight=bbox_weights_ini,
+ avg_factor=bbox_avg_factor_ini)
+
+ pos_decoded_bbox_preds_refine = \
+ distance2bbox(pos_points, pos_bbox_preds_refine)
+ iou_targets_rf = bbox_overlaps(
+ pos_decoded_bbox_preds_refine,
+ pos_decoded_target_preds.detach(),
+ is_aligned=True).clamp(min=1e-6)
+ bbox_weights_rf = iou_targets_rf.clone().detach()
+ iou_targets_rf_avg_per_gpu = reduce_mean(
+ bbox_weights_rf.sum()).item()
+ bbox_avg_factor_rf = max(iou_targets_rf_avg_per_gpu, 1.0)
+ loss_bbox_refine = self.loss_bbox_refine(
+ pos_decoded_bbox_preds_refine,
+ pos_decoded_target_preds.detach(),
+ weight=bbox_weights_rf,
+ avg_factor=bbox_avg_factor_rf)
+
+ # build IoU-aware cls_score targets
+ if self.use_vfl:
+ pos_ious = iou_targets_rf.clone().detach()
+ cls_iou_targets = torch.zeros_like(flatten_cls_scores)
+ cls_iou_targets[pos_inds, pos_labels] = pos_ious
+ else:
+ loss_bbox = pos_bbox_preds.sum() * 0
+ loss_bbox_refine = pos_bbox_preds_refine.sum() * 0
+ if self.use_vfl:
+ cls_iou_targets = torch.zeros_like(flatten_cls_scores)
+
+ if self.use_vfl:
+ loss_cls = self.loss_cls(
+ flatten_cls_scores,
+ cls_iou_targets,
+ avg_factor=num_pos_avg_per_gpu)
+ else:
+ loss_cls = self.loss_cls(
+ flatten_cls_scores,
+ flatten_labels,
+ weight=label_weights,
+ avg_factor=num_pos_avg_per_gpu)
+
+ return dict(
+ loss_cls=loss_cls,
+ loss_bbox=loss_bbox,
+ loss_bbox_rf=loss_bbox_refine)
+
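+    # A minimal sketch (hypothetical helper, not part of the original head) of
+    # how the IoU-aware classification target used by the varifocal loss is
+    # assembled in `loss` above: zeros everywhere, except that each positive
+    # location stores its IoU at the channel of its ground-truth class.
+    @staticmethod
+    def _example_cls_iou_targets():
+        """Illustrative sketch only; 4 locations, 3 classes."""
+        import torch
+        num_points, num_classes = 4, 3
+        cls_iou_targets = torch.zeros(num_points, num_classes)
+        pos_inds = torch.tensor([0, 2])       # positive locations
+        pos_labels = torch.tensor([1, 0])     # their gt classes
+        pos_ious = torch.tensor([0.8, 0.6])   # IoU of refined box vs. gt
+        cls_iou_targets[pos_inds, pos_labels] = pos_ious
+        return cls_iou_targets
+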
+ @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'bbox_preds_refine'))
+ def get_bboxes(self,
+ cls_scores,
+ bbox_preds,
+ bbox_preds_refine,
+ img_metas,
+ cfg=None,
+ rescale=None,
+ with_nms=True):
+ """Transform network outputs for a batch into bbox predictions.
+
+ Args:
+ cls_scores (list[Tensor]): Box iou-aware scores for each scale
+ level with shape (N, num_points * num_classes, H, W).
+ bbox_preds (list[Tensor]): Box offsets for each scale
+ level with shape (N, num_points * 4, H, W).
+ bbox_preds_refine (list[Tensor]): Refined Box offsets for
+ each scale level with shape (N, num_points * 4, H, W).
+ img_metas (list[dict]): Meta information of each image, e.g.,
+ image size, scaling factor, etc.
+ cfg (mmcv.Config): Test / postprocessing configuration,
+ if None, test_cfg would be used. Default: None.
+ rescale (bool): If True, return boxes in original image space.
+ Default: False.
+ with_nms (bool): If True, do nms before returning boxes.
+ Default: True.
+
+ Returns:
+            list[tuple[Tensor, Tensor]]: Each item in result_list is a 2-tuple.
+ The first item is an (n, 5) tensor, where the first 4 columns
+ are bounding box positions (tl_x, tl_y, br_x, br_y) and the
+ 5-th column is a score between 0 and 1. The second item is a
+ (n,) tensor where each item is the predicted class label of
+ the corresponding box.
+ """
+ assert len(cls_scores) == len(bbox_preds) == len(bbox_preds_refine)
+ num_levels = len(cls_scores)
+
+ featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
+ mlvl_points = self.get_points(featmap_sizes, bbox_preds[0].dtype,
+ bbox_preds[0].device)
+ result_list = []
+ for img_id in range(len(img_metas)):
+ cls_score_list = [
+ cls_scores[i][img_id].detach() for i in range(num_levels)
+ ]
+ bbox_pred_list = [
+ bbox_preds_refine[i][img_id].detach()
+ for i in range(num_levels)
+ ]
+ img_shape = img_metas[img_id]['img_shape']
+ scale_factor = img_metas[img_id]['scale_factor']
+ det_bboxes = self._get_bboxes_single(cls_score_list,
+ bbox_pred_list, mlvl_points,
+ img_shape, scale_factor, cfg,
+ rescale, with_nms)
+ result_list.append(det_bboxes)
+ return result_list
+
+ def _get_bboxes_single(self,
+ cls_scores,
+ bbox_preds,
+ mlvl_points,
+ img_shape,
+ scale_factor,
+ cfg,
+ rescale=False,
+ with_nms=True):
+ """Transform outputs for a single batch item into bbox predictions.
+
+ Args:
+ cls_scores (list[Tensor]): Box iou-aware scores for a single scale
+ level with shape (num_points * num_classes, H, W).
+ bbox_preds (list[Tensor]): Box offsets for a single scale
+ level with shape (num_points * 4, H, W).
+ mlvl_points (list[Tensor]): Box reference for a single scale level
+ with shape (num_total_points, 4).
+ img_shape (tuple[int]): Shape of the input image,
+ (height, width, 3).
+            scale_factor (ndarray): Scale factor of the image arranged as
+ (w_scale, h_scale, w_scale, h_scale).
+ cfg (mmcv.Config | None): Test / postprocessing configuration,
+ if None, test_cfg would be used.
+ rescale (bool): If True, return boxes in original image space.
+ Default: False.
+ with_nms (bool): If True, do nms before returning boxes.
+ Default: True.
+
+ Returns:
+ tuple(Tensor):
+ det_bboxes (Tensor): BBox predictions in shape (n, 5), where
+ the first 4 columns are bounding box positions
+ (tl_x, tl_y, br_x, br_y) and the 5-th column is a score
+ between 0 and 1.
+ det_labels (Tensor): A (n,) tensor where each item is the
+ predicted class label of the corresponding box.
+ """
+ cfg = self.test_cfg if cfg is None else cfg
+ assert len(cls_scores) == len(bbox_preds) == len(mlvl_points)
+ mlvl_bboxes = []
+ mlvl_scores = []
+ for cls_score, bbox_pred, points in zip(cls_scores, bbox_preds,
+ mlvl_points):
+ assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
+ scores = cls_score.permute(1, 2, 0).reshape(
+ -1, self.cls_out_channels).contiguous().sigmoid()
+ bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4).contiguous()
+
+ nms_pre = cfg.get('nms_pre', -1)
+ if 0 < nms_pre < scores.shape[0]:
+ max_scores, _ = scores.max(dim=1)
+ _, topk_inds = max_scores.topk(nms_pre)
+ points = points[topk_inds, :]
+ bbox_pred = bbox_pred[topk_inds, :]
+ scores = scores[topk_inds, :]
+ bboxes = distance2bbox(points, bbox_pred, max_shape=img_shape)
+ mlvl_bboxes.append(bboxes)
+ mlvl_scores.append(scores)
+ mlvl_bboxes = torch.cat(mlvl_bboxes)
+ if rescale:
+ mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor)
+ mlvl_scores = torch.cat(mlvl_scores)
+ padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1)
+        # note that we set FG labels to [0, num_class-1] since mmdet v2.0
+ # BG cat_id: num_class
+ mlvl_scores = torch.cat([mlvl_scores, padding], dim=1)
+ if with_nms:
+ det_bboxes, det_labels = multiclass_nms(mlvl_bboxes, mlvl_scores,
+ cfg.score_thr, cfg.nms,
+ cfg.max_per_img)
+ return det_bboxes, det_labels
+ else:
+ return mlvl_bboxes, mlvl_scores
+
+ def _get_points_single(self,
+ featmap_size,
+ stride,
+ dtype,
+ device,
+ flatten=False):
+ """Get points according to feature map sizes."""
+ h, w = featmap_size
+ x_range = torch.arange(
+ 0, w * stride, stride, dtype=dtype, device=device)
+ y_range = torch.arange(
+ 0, h * stride, stride, dtype=dtype, device=device)
+ y, x = torch.meshgrid(y_range, x_range)
+ # to be compatible with anchor points in ATSS
+ if self.use_atss:
+ points = torch.stack(
+ (x.reshape(-1), y.reshape(-1)), dim=-1) + \
+ stride * self.anchor_center_offset
+ else:
+ points = torch.stack(
+ (x.reshape(-1), y.reshape(-1)), dim=-1) + stride // 2
+ return points
+
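+    # A minimal sketch (hypothetical helper, not part of the original head) of
+    # the point grid built by `_get_points_single` for a tiny feature map,
+    # using the ATSS anchor-centre convention (offset = stride * center_offset).
+    @staticmethod
+    def _example_point_grid():
+        """Illustrative sketch only; 2x3 feature map, stride 8, offset 0."""
+        import torch
+        h, w, stride, center_offset = 2, 3, 8, 0.0
+        x_range = torch.arange(0, w * stride, stride, dtype=torch.float32)
+        y_range = torch.arange(0, h * stride, stride, dtype=torch.float32)
+        y, x = torch.meshgrid(y_range, x_range)
+        points = torch.stack(
+            (x.reshape(-1), y.reshape(-1)), dim=-1) + stride * center_offset
+        # points: (0, 0), (8, 0), (16, 0), (0, 8), (8, 8), (16, 8)
+        return points
+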
+ def get_targets(self, cls_scores, mlvl_points, gt_bboxes, gt_labels,
+ img_metas, gt_bboxes_ignore):
+ """A wrapper for computing ATSS and FCOS targets for points in multiple
+ images.
+
+ Args:
+ cls_scores (list[Tensor]): Box iou-aware scores for each scale
+ level with shape (N, num_points * num_classes, H, W).
+ mlvl_points (list[Tensor]): Points of each fpn level, each has
+ shape (num_points, 2).
+ gt_bboxes (list[Tensor]): Ground truth bboxes of each image,
+ each has shape (num_gt, 4).
+ gt_labels (list[Tensor]): Ground truth labels of each box,
+ each has shape (num_gt,).
+ img_metas (list[dict]): Meta information of each image, e.g.,
+ image size, scaling factor, etc.
+ gt_bboxes_ignore (None | Tensor): Ground truth bboxes to be
+ ignored, shape (num_ignored_gts, 4).
+
+ Returns:
+ tuple:
+ labels_list (list[Tensor]): Labels of each level.
+ label_weights (Tensor/None): Label weights of all levels.
+ bbox_targets_list (list[Tensor]): Regression targets of each
+ level, (l, t, r, b).
+ bbox_weights (Tensor/None): Bbox weights of all levels.
+ """
+ if self.use_atss:
+ return self.get_atss_targets(cls_scores, mlvl_points, gt_bboxes,
+ gt_labels, img_metas,
+ gt_bboxes_ignore)
+ else:
+ self.norm_on_bbox = False
+ return self.get_fcos_targets(mlvl_points, gt_bboxes, gt_labels)
+
+ def _get_target_single(self, *args, **kwargs):
+ """Avoid ambiguity in multiple inheritance."""
+ if self.use_atss:
+ return ATSSHead._get_target_single(self, *args, **kwargs)
+ else:
+ return FCOSHead._get_target_single(self, *args, **kwargs)
+
+ def get_fcos_targets(self, points, gt_bboxes_list, gt_labels_list):
+ """Compute FCOS regression and classification targets for points in
+ multiple images.
+
+ Args:
+ points (list[Tensor]): Points of each fpn level, each has shape
+ (num_points, 2).
+ gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image,
+ each has shape (num_gt, 4).
+ gt_labels_list (list[Tensor]): Ground truth labels of each box,
+ each has shape (num_gt,).
+
+ Returns:
+ tuple:
+ labels (list[Tensor]): Labels of each level.
+ label_weights: None, to be compatible with ATSS targets.
+ bbox_targets (list[Tensor]): BBox targets of each level.
+ bbox_weights: None, to be compatible with ATSS targets.
+ """
+ labels, bbox_targets = FCOSHead.get_targets(self, points,
+ gt_bboxes_list,
+ gt_labels_list)
+ label_weights = None
+ bbox_weights = None
+ return labels, label_weights, bbox_targets, bbox_weights
+
+ def get_atss_targets(self,
+ cls_scores,
+ mlvl_points,
+ gt_bboxes,
+ gt_labels,
+ img_metas,
+ gt_bboxes_ignore=None):
+ """A wrapper for computing ATSS targets for points in multiple images.
+
+ Args:
+ cls_scores (list[Tensor]): Box iou-aware scores for each scale
+ level with shape (N, num_points * num_classes, H, W).
+ mlvl_points (list[Tensor]): Points of each fpn level, each has
+ shape (num_points, 2).
+ gt_bboxes (list[Tensor]): Ground truth bboxes of each image,
+ each has shape (num_gt, 4).
+ gt_labels (list[Tensor]): Ground truth labels of each box,
+ each has shape (num_gt,).
+ img_metas (list[dict]): Meta information of each image, e.g.,
+ image size, scaling factor, etc.
+ gt_bboxes_ignore (None | Tensor): Ground truth bboxes to be
+ ignored, shape (num_ignored_gts, 4). Default: None.
+
+ Returns:
+ tuple:
+ labels_list (list[Tensor]): Labels of each level.
+ label_weights (Tensor): Label weights of all levels.
+ bbox_targets_list (list[Tensor]): Regression targets of each
+ level, (l, t, r, b).
+ bbox_weights (Tensor): Bbox weights of all levels.
+ """
+ featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
+ assert len(featmap_sizes) == self.anchor_generator.num_levels
+
+ device = cls_scores[0].device
+ anchor_list, valid_flag_list = self.get_anchors(
+ featmap_sizes, img_metas, device=device)
+ label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1
+
+ cls_reg_targets = ATSSHead.get_targets(
+ self,
+ anchor_list,
+ valid_flag_list,
+ gt_bboxes,
+ img_metas,
+ gt_bboxes_ignore_list=gt_bboxes_ignore,
+ gt_labels_list=gt_labels,
+ label_channels=label_channels,
+ unmap_outputs=True)
+ if cls_reg_targets is None:
+ return None
+
+ (anchor_list, labels_list, label_weights_list, bbox_targets_list,
+ bbox_weights_list, num_total_pos, num_total_neg) = cls_reg_targets
+
+ bbox_targets_list = [
+ bbox_targets.reshape(-1, 4) for bbox_targets in bbox_targets_list
+ ]
+
+ num_imgs = len(img_metas)
+ # transform bbox_targets (x1, y1, x2, y2) into (l, t, r, b) format
+ bbox_targets_list = self.transform_bbox_targets(
+ bbox_targets_list, mlvl_points, num_imgs)
+
+ labels_list = [labels.reshape(-1) for labels in labels_list]
+ label_weights_list = [
+ label_weights.reshape(-1) for label_weights in label_weights_list
+ ]
+ bbox_weights_list = [
+ bbox_weights.reshape(-1) for bbox_weights in bbox_weights_list
+ ]
+ label_weights = torch.cat(label_weights_list)
+ bbox_weights = torch.cat(bbox_weights_list)
+ return labels_list, label_weights, bbox_targets_list, bbox_weights
+
+ def transform_bbox_targets(self, decoded_bboxes, mlvl_points, num_imgs):
+ """Transform bbox_targets (x1, y1, x2, y2) into (l, t, r, b) format.
+
+ Args:
+ decoded_bboxes (list[Tensor]): Regression targets of each level,
+ in the form of (x1, y1, x2, y2).
+ mlvl_points (list[Tensor]): Points of each fpn level, each has
+ shape (num_points, 2).
+ num_imgs (int): the number of images in a batch.
+
+ Returns:
+ bbox_targets (list[Tensor]): Regression targets of each level in
+ the form of (l, t, r, b).
+ """
+ # TODO: Re-implemented in Class PointCoder
+ assert len(decoded_bboxes) == len(mlvl_points)
+ num_levels = len(decoded_bboxes)
+ mlvl_points = [points.repeat(num_imgs, 1) for points in mlvl_points]
+ bbox_targets = []
+ for i in range(num_levels):
+ bbox_target = bbox2distance(mlvl_points[i], decoded_bboxes[i])
+ bbox_targets.append(bbox_target)
+
+ return bbox_targets
+
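+    # A minimal sketch (hypothetical helper, not part of the original head) of
+    # the (x1, y1, x2, y2) -> (l, t, r, b) conversion that
+    # `transform_bbox_targets` delegates to `bbox2distance`: each target
+    # becomes the distances from the point to the four box sides.
+    @staticmethod
+    def _example_bbox_to_distance():
+        """Illustrative sketch only; one point and one decoded box."""
+        import torch
+        point = torch.tensor([[50., 40.]])             # (cx, cy)
+        bbox = torch.tensor([[30., 10., 90., 100.]])   # (x1, y1, x2, y2)
+        left = point[:, 0] - bbox[:, 0]
+        top = point[:, 1] - bbox[:, 1]
+        right = bbox[:, 2] - point[:, 0]
+        bottom = bbox[:, 3] - point[:, 1]
+        # result: [[20., 30., 40., 60.]]
+        return torch.stack([left, top, right, bottom], -1)
+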
+ def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
+ missing_keys, unexpected_keys, error_msgs):
+        """Override the method in the parent class to avoid changing the
+        parameters' names."""
+ pass
diff --git a/annotator/uniformer/mmdet/models/dense_heads/yolact_head.py b/annotator/uniformer/mmdet/models/dense_heads/yolact_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..10d311f94ee99e1bf65ee3e5827f1699c28a23e3
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/dense_heads/yolact_head.py
@@ -0,0 +1,943 @@
+import numpy as np
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from mmcv.cnn import ConvModule, xavier_init
+from mmcv.runner import force_fp32
+
+from mmdet.core import build_sampler, fast_nms, images_to_levels, multi_apply
+from ..builder import HEADS, build_loss
+from .anchor_head import AnchorHead
+
+
+@HEADS.register_module()
+class YOLACTHead(AnchorHead):
+ """YOLACT box head used in https://arxiv.org/abs/1904.02689.
+
+ Note that YOLACT head is a light version of RetinaNet head.
+ Four differences are described as follows:
+
+ 1. YOLACT box head has three-times fewer anchors.
+ 2. YOLACT box head shares the convs for box and cls branches.
+ 3. YOLACT box head uses OHEM instead of Focal loss.
+ 4. YOLACT box head predicts a set of mask coefficients for each box.
+
+ Args:
+ num_classes (int): Number of categories excluding the background
+ category.
+ in_channels (int): Number of channels in the input feature map.
+ anchor_generator (dict): Config dict for anchor generator
+ loss_cls (dict): Config of classification loss.
+ loss_bbox (dict): Config of localization loss.
+ num_head_convs (int): Number of the conv layers shared by
+ box and cls branches.
+ num_protos (int): Number of the mask coefficients.
+ use_ohem (bool): If true, ``loss_single_OHEM`` will be used for
+ cls loss calculation. If false, ``loss_single`` will be used.
+ conv_cfg (dict): Dictionary to construct and config conv layer.
+ norm_cfg (dict): Dictionary to construct and config norm layer.
+ """
+
+ def __init__(self,
+ num_classes,
+ in_channels,
+ anchor_generator=dict(
+ type='AnchorGenerator',
+ octave_base_scale=3,
+ scales_per_octave=1,
+ ratios=[0.5, 1.0, 2.0],
+ strides=[8, 16, 32, 64, 128]),
+ loss_cls=dict(
+ type='CrossEntropyLoss',
+ use_sigmoid=False,
+ reduction='none',
+ loss_weight=1.0),
+ loss_bbox=dict(
+ type='SmoothL1Loss', beta=1.0, loss_weight=1.5),
+ num_head_convs=1,
+ num_protos=32,
+ use_ohem=True,
+ conv_cfg=None,
+ norm_cfg=None,
+ **kwargs):
+ self.num_head_convs = num_head_convs
+ self.num_protos = num_protos
+ self.use_ohem = use_ohem
+ self.conv_cfg = conv_cfg
+ self.norm_cfg = norm_cfg
+ super(YOLACTHead, self).__init__(
+ num_classes,
+ in_channels,
+ loss_cls=loss_cls,
+ loss_bbox=loss_bbox,
+ anchor_generator=anchor_generator,
+ **kwargs)
+ if self.use_ohem:
+ sampler_cfg = dict(type='PseudoSampler')
+ self.sampler = build_sampler(sampler_cfg, context=self)
+ self.sampling = False
+
+ def _init_layers(self):
+ """Initialize layers of the head."""
+ self.relu = nn.ReLU(inplace=True)
+ self.head_convs = nn.ModuleList()
+ for i in range(self.num_head_convs):
+ chn = self.in_channels if i == 0 else self.feat_channels
+ self.head_convs.append(
+ ConvModule(
+ chn,
+ self.feat_channels,
+ 3,
+ stride=1,
+ padding=1,
+ conv_cfg=self.conv_cfg,
+ norm_cfg=self.norm_cfg))
+ self.conv_cls = nn.Conv2d(
+ self.feat_channels,
+ self.num_anchors * self.cls_out_channels,
+ 3,
+ padding=1)
+ self.conv_reg = nn.Conv2d(
+ self.feat_channels, self.num_anchors * 4, 3, padding=1)
+ self.conv_coeff = nn.Conv2d(
+ self.feat_channels,
+ self.num_anchors * self.num_protos,
+ 3,
+ padding=1)
+
+ def init_weights(self):
+ """Initialize weights of the head."""
+ for m in self.head_convs:
+ xavier_init(m.conv, distribution='uniform', bias=0)
+ xavier_init(self.conv_cls, distribution='uniform', bias=0)
+ xavier_init(self.conv_reg, distribution='uniform', bias=0)
+ xavier_init(self.conv_coeff, distribution='uniform', bias=0)
+
+ def forward_single(self, x):
+ """Forward feature of a single scale level.
+
+ Args:
+ x (Tensor): Features of a single scale level.
+
+ Returns:
+ tuple:
+                cls_score (Tensor): Cls scores for a single scale level, \
+                    the channel number is num_anchors * num_classes.
+                bbox_pred (Tensor): Box energies / deltas for a single scale \
+                    level, the channel number is num_anchors * 4.
+                coeff_pred (Tensor): Mask coefficients for a single scale \
+                    level, the channel number is num_anchors * num_protos.
+ """
+ for head_conv in self.head_convs:
+ x = head_conv(x)
+ cls_score = self.conv_cls(x)
+ bbox_pred = self.conv_reg(x)
+ coeff_pred = self.conv_coeff(x).tanh()
+ return cls_score, bbox_pred, coeff_pred
+
+ @force_fp32(apply_to=('cls_scores', 'bbox_preds'))
+ def loss(self,
+ cls_scores,
+ bbox_preds,
+ gt_bboxes,
+ gt_labels,
+ img_metas,
+ gt_bboxes_ignore=None):
+ """A combination of the func:``AnchorHead.loss`` and
+ func:``SSDHead.loss``.
+
+ When ``self.use_ohem == True``, it functions like ``SSDHead.loss``,
+ otherwise, it follows ``AnchorHead.loss``. Besides, it additionally
+ returns ``sampling_results``.
+
+ Args:
+ cls_scores (list[Tensor]): Box scores for each scale level
+ Has shape (N, num_anchors * num_classes, H, W)
+ bbox_preds (list[Tensor]): Box energies / deltas for each scale
+ level with shape (N, num_anchors * 4, H, W)
+ gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
+ shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
+ gt_labels (list[Tensor]): Class indices corresponding to each box
+ img_metas (list[dict]): Meta information of each image, e.g.,
+ image size, scaling factor, etc.
+ gt_bboxes_ignore (None | list[Tensor]): Specify which bounding
+ boxes can be ignored when computing the loss. Default: None
+
+ Returns:
+ tuple:
+ dict[str, Tensor]: A dictionary of loss components.
+ List[:obj:``SamplingResult``]: Sampler results for each image.
+ """
+ featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
+ assert len(featmap_sizes) == self.anchor_generator.num_levels
+
+ device = cls_scores[0].device
+
+ anchor_list, valid_flag_list = self.get_anchors(
+ featmap_sizes, img_metas, device=device)
+ label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1
+ cls_reg_targets = self.get_targets(
+ anchor_list,
+ valid_flag_list,
+ gt_bboxes,
+ img_metas,
+ gt_bboxes_ignore_list=gt_bboxes_ignore,
+ gt_labels_list=gt_labels,
+ label_channels=label_channels,
+ unmap_outputs=not self.use_ohem,
+ return_sampling_results=True)
+ if cls_reg_targets is None:
+ return None
+ (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,
+ num_total_pos, num_total_neg, sampling_results) = cls_reg_targets
+
+ if self.use_ohem:
+ num_images = len(img_metas)
+ all_cls_scores = torch.cat([
+ s.permute(0, 2, 3, 1).reshape(
+ num_images, -1, self.cls_out_channels) for s in cls_scores
+ ], 1)
+ all_labels = torch.cat(labels_list, -1).view(num_images, -1)
+ all_label_weights = torch.cat(label_weights_list,
+ -1).view(num_images, -1)
+ all_bbox_preds = torch.cat([
+ b.permute(0, 2, 3, 1).reshape(num_images, -1, 4)
+ for b in bbox_preds
+ ], -2)
+ all_bbox_targets = torch.cat(bbox_targets_list,
+ -2).view(num_images, -1, 4)
+ all_bbox_weights = torch.cat(bbox_weights_list,
+ -2).view(num_images, -1, 4)
+
+ # concat all level anchors to a single tensor
+ all_anchors = []
+ for i in range(num_images):
+ all_anchors.append(torch.cat(anchor_list[i]))
+
+ # check NaN and Inf
+ assert torch.isfinite(all_cls_scores).all().item(), \
+ 'classification scores become infinite or NaN!'
+ assert torch.isfinite(all_bbox_preds).all().item(), \
+                'bbox predictions become infinite or NaN!'
+
+ losses_cls, losses_bbox = multi_apply(
+ self.loss_single_OHEM,
+ all_cls_scores,
+ all_bbox_preds,
+ all_anchors,
+ all_labels,
+ all_label_weights,
+ all_bbox_targets,
+ all_bbox_weights,
+ num_total_samples=num_total_pos)
+ else:
+ num_total_samples = (
+ num_total_pos +
+ num_total_neg if self.sampling else num_total_pos)
+
+ # anchor number of multi levels
+ num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]
+ # concat all level anchors and flags to a single tensor
+ concat_anchor_list = []
+ for i in range(len(anchor_list)):
+ concat_anchor_list.append(torch.cat(anchor_list[i]))
+ all_anchor_list = images_to_levels(concat_anchor_list,
+ num_level_anchors)
+ losses_cls, losses_bbox = multi_apply(
+ self.loss_single,
+ cls_scores,
+ bbox_preds,
+ all_anchor_list,
+ labels_list,
+ label_weights_list,
+ bbox_targets_list,
+ bbox_weights_list,
+ num_total_samples=num_total_samples)
+
+ return dict(
+ loss_cls=losses_cls, loss_bbox=losses_bbox), sampling_results
+
+ def loss_single_OHEM(self, cls_score, bbox_pred, anchors, labels,
+ label_weights, bbox_targets, bbox_weights,
+ num_total_samples):
+        """See func:``SSDHead.loss``."""
+ loss_cls_all = self.loss_cls(cls_score, labels, label_weights)
+
+ # FG cat_id: [0, num_classes -1], BG cat_id: num_classes
+ pos_inds = ((labels >= 0) & (labels < self.num_classes)).nonzero(
+ as_tuple=False).reshape(-1)
+ neg_inds = (labels == self.num_classes).nonzero(
+ as_tuple=False).view(-1)
+
+ num_pos_samples = pos_inds.size(0)
+ if num_pos_samples == 0:
+ num_neg_samples = neg_inds.size(0)
+ else:
+ num_neg_samples = self.train_cfg.neg_pos_ratio * num_pos_samples
+ if num_neg_samples > neg_inds.size(0):
+ num_neg_samples = neg_inds.size(0)
+ topk_loss_cls_neg, _ = loss_cls_all[neg_inds].topk(num_neg_samples)
+ loss_cls_pos = loss_cls_all[pos_inds].sum()
+ loss_cls_neg = topk_loss_cls_neg.sum()
+ loss_cls = (loss_cls_pos + loss_cls_neg) / num_total_samples
+ if self.reg_decoded_bbox:
+ # When the regression loss (e.g. `IouLoss`, `GIouLoss`)
+ # is applied directly on the decoded bounding boxes, it
+ # decodes the already encoded coordinates to absolute format.
+ bbox_pred = self.bbox_coder.decode(anchors, bbox_pred)
+ loss_bbox = self.loss_bbox(
+ bbox_pred,
+ bbox_targets,
+ bbox_weights,
+ avg_factor=num_total_samples)
+ return loss_cls[None], loss_bbox
+
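+    # A minimal sketch (hypothetical helper, not part of the original head) of
+    # the hard-negative mining step in `loss_single_OHEM` above: keep every
+    # positive anchor and only the `neg_pos_ratio` hardest negatives (largest
+    # classification loss).
+    @staticmethod
+    def _example_ohem_selection():
+        """Illustrative sketch only; 6 anchors, 2 positives, 3:1 ratio capped
+        by the number of available negatives."""
+        import torch
+        loss_cls_all = torch.tensor([0.2, 1.5, 0.1, 0.9, 0.05, 0.7])
+        pos_inds = torch.tensor([1, 3])
+        neg_inds = torch.tensor([0, 2, 4, 5])
+        neg_pos_ratio = 3
+        num_neg = min(neg_pos_ratio * pos_inds.numel(), neg_inds.numel())
+        topk_neg_loss, _ = loss_cls_all[neg_inds].topk(num_neg)
+        loss_cls = loss_cls_all[pos_inds].sum() + topk_neg_loss.sum()
+        return loss_cls  # 1.5 + 0.9 + (0.7 + 0.2 + 0.1 + 0.05) = 3.45
+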
+ @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'coeff_preds'))
+ def get_bboxes(self,
+ cls_scores,
+ bbox_preds,
+ coeff_preds,
+ img_metas,
+ cfg=None,
+ rescale=False):
+        """Similar to func:``AnchorHead.get_bboxes``, but additionally
+ processes coeff_preds.
+
+ Args:
+ cls_scores (list[Tensor]): Box scores for each scale level
+ with shape (N, num_anchors * num_classes, H, W)
+ bbox_preds (list[Tensor]): Box energies / deltas for each scale
+ level with shape (N, num_anchors * 4, H, W)
+ coeff_preds (list[Tensor]): Mask coefficients for each scale
+ level with shape (N, num_anchors * num_protos, H, W)
+ img_metas (list[dict]): Meta information of each image, e.g.,
+ image size, scaling factor, etc.
+ cfg (mmcv.Config | None): Test / postprocessing configuration,
+ if None, test_cfg would be used
+ rescale (bool): If True, return boxes in original image space.
+ Default: False.
+
+ Returns:
+ list[tuple[Tensor, Tensor, Tensor]]: Each item in result_list is
+ a 3-tuple. The first item is an (n, 5) tensor, where the
+ first 4 columns are bounding box positions
+ (tl_x, tl_y, br_x, br_y) and the 5-th column is a score
+ between 0 and 1. The second item is an (n,) tensor where each
+ item is the predicted class label of the corresponding box.
+ The third item is an (n, num_protos) tensor where each item
+ is the predicted mask coefficients of instance inside the
+ corresponding box.
+ """
+ assert len(cls_scores) == len(bbox_preds)
+ num_levels = len(cls_scores)
+
+ device = cls_scores[0].device
+ featmap_sizes = [cls_scores[i].shape[-2:] for i in range(num_levels)]
+ mlvl_anchors = self.anchor_generator.grid_anchors(
+ featmap_sizes, device=device)
+
+ det_bboxes = []
+ det_labels = []
+ det_coeffs = []
+ for img_id in range(len(img_metas)):
+ cls_score_list = [
+ cls_scores[i][img_id].detach() for i in range(num_levels)
+ ]
+ bbox_pred_list = [
+ bbox_preds[i][img_id].detach() for i in range(num_levels)
+ ]
+ coeff_pred_list = [
+ coeff_preds[i][img_id].detach() for i in range(num_levels)
+ ]
+ img_shape = img_metas[img_id]['img_shape']
+ scale_factor = img_metas[img_id]['scale_factor']
+ bbox_res = self._get_bboxes_single(cls_score_list, bbox_pred_list,
+ coeff_pred_list, mlvl_anchors,
+ img_shape, scale_factor, cfg,
+ rescale)
+ det_bboxes.append(bbox_res[0])
+ det_labels.append(bbox_res[1])
+ det_coeffs.append(bbox_res[2])
+ return det_bboxes, det_labels, det_coeffs
+
+ def _get_bboxes_single(self,
+ cls_score_list,
+ bbox_pred_list,
+ coeff_preds_list,
+ mlvl_anchors,
+ img_shape,
+ scale_factor,
+ cfg,
+ rescale=False):
+        """Similar to func:``AnchorHead._get_bboxes_single``, but
+ additionally processes coeff_preds_list and uses fast NMS instead of
+ traditional NMS.
+
+ Args:
+ cls_score_list (list[Tensor]): Box scores for a single scale level
+ Has shape (num_anchors * num_classes, H, W).
+ bbox_pred_list (list[Tensor]): Box energies / deltas for a single
+ scale level with shape (num_anchors * 4, H, W).
+ coeff_preds_list (list[Tensor]): Mask coefficients for a single
+ scale level with shape (num_anchors * num_protos, H, W).
+ mlvl_anchors (list[Tensor]): Box reference for a single scale level
+ with shape (num_total_anchors, 4).
+ img_shape (tuple[int]): Shape of the input image,
+ (height, width, 3).
+            scale_factor (ndarray): Scale factor of the image arranged as
+ (w_scale, h_scale, w_scale, h_scale).
+ cfg (mmcv.Config): Test / postprocessing configuration,
+ if None, test_cfg would be used.
+ rescale (bool): If True, return boxes in original image space.
+
+ Returns:
+ tuple[Tensor, Tensor, Tensor]: The first item is an (n, 5) tensor,
+ where the first 4 columns are bounding box positions
+ (tl_x, tl_y, br_x, br_y) and the 5-th column is a score between
+ 0 and 1. The second item is an (n,) tensor where each item is
+ the predicted class label of the corresponding box. The third
+ item is an (n, num_protos) tensor where each item is the
+ predicted mask coefficients of instance inside the
+ corresponding box.
+ """
+ cfg = self.test_cfg if cfg is None else cfg
+ assert len(cls_score_list) == len(bbox_pred_list) == len(mlvl_anchors)
+ mlvl_bboxes = []
+ mlvl_scores = []
+ mlvl_coeffs = []
+ for cls_score, bbox_pred, coeff_pred, anchors in \
+ zip(cls_score_list, bbox_pred_list,
+ coeff_preds_list, mlvl_anchors):
+ assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
+ cls_score = cls_score.permute(1, 2,
+ 0).reshape(-1, self.cls_out_channels)
+ if self.use_sigmoid_cls:
+ scores = cls_score.sigmoid()
+ else:
+ scores = cls_score.softmax(-1)
+ bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4)
+ coeff_pred = coeff_pred.permute(1, 2,
+ 0).reshape(-1, self.num_protos)
+ nms_pre = cfg.get('nms_pre', -1)
+ if nms_pre > 0 and scores.shape[0] > nms_pre:
+ # Get maximum scores for foreground classes.
+ if self.use_sigmoid_cls:
+ max_scores, _ = scores.max(dim=1)
+ else:
+                    # note that we set FG labels to [0, num_class-1]
+ # since mmdet v2.0
+ # BG cat_id: num_class
+ max_scores, _ = scores[:, :-1].max(dim=1)
+ _, topk_inds = max_scores.topk(nms_pre)
+ anchors = anchors[topk_inds, :]
+ bbox_pred = bbox_pred[topk_inds, :]
+ scores = scores[topk_inds, :]
+ coeff_pred = coeff_pred[topk_inds, :]
+ bboxes = self.bbox_coder.decode(
+ anchors, bbox_pred, max_shape=img_shape)
+ mlvl_bboxes.append(bboxes)
+ mlvl_scores.append(scores)
+ mlvl_coeffs.append(coeff_pred)
+ mlvl_bboxes = torch.cat(mlvl_bboxes)
+ if rescale:
+ mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor)
+ mlvl_scores = torch.cat(mlvl_scores)
+ mlvl_coeffs = torch.cat(mlvl_coeffs)
+ if self.use_sigmoid_cls:
+ # Add a dummy background class to the backend when using sigmoid
+            # note that we set FG labels to [0, num_class-1] since mmdet v2.0
+ # BG cat_id: num_class
+ padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1)
+ mlvl_scores = torch.cat([mlvl_scores, padding], dim=1)
+ det_bboxes, det_labels, det_coeffs = fast_nms(mlvl_bboxes, mlvl_scores,
+ mlvl_coeffs,
+ cfg.score_thr,
+ cfg.iou_thr, cfg.top_k,
+ cfg.max_per_img)
+ return det_bboxes, det_labels, det_coeffs
+
+
+@HEADS.register_module()
+class YOLACTSegmHead(nn.Module):
+ """YOLACT segmentation head used in https://arxiv.org/abs/1904.02689.
+
+ Apply a semantic segmentation loss on feature space using layers that are
+ only evaluated during training to increase performance with no speed
+ penalty.
+
+ Args:
+ in_channels (int): Number of channels in the input feature map.
+ num_classes (int): Number of categories excluding the background
+ category.
+ loss_segm (dict): Config of semantic segmentation loss.
+ """
+
+ def __init__(self,
+ num_classes,
+ in_channels=256,
+ loss_segm=dict(
+ type='CrossEntropyLoss',
+ use_sigmoid=True,
+ loss_weight=1.0)):
+ super(YOLACTSegmHead, self).__init__()
+ self.in_channels = in_channels
+ self.num_classes = num_classes
+ self.loss_segm = build_loss(loss_segm)
+ self._init_layers()
+ self.fp16_enabled = False
+
+ def _init_layers(self):
+ """Initialize layers of the head."""
+ self.segm_conv = nn.Conv2d(
+ self.in_channels, self.num_classes, kernel_size=1)
+
+ def init_weights(self):
+ """Initialize weights of the head."""
+ xavier_init(self.segm_conv, distribution='uniform')
+
+ def forward(self, x):
+ """Forward feature from the upstream network.
+
+ Args:
+ x (Tensor): Feature from the upstream network, which is
+ a 4D-tensor.
+
+ Returns:
+ Tensor: Predicted semantic segmentation map with shape
+ (N, num_classes, H, W).
+ """
+ return self.segm_conv(x)
+
+ @force_fp32(apply_to=('segm_pred', ))
+ def loss(self, segm_pred, gt_masks, gt_labels):
+ """Compute loss of the head.
+
+ Args:
+ segm_pred (list[Tensor]): Predicted semantic segmentation map
+ with shape (N, num_classes, H, W).
+ gt_masks (list[Tensor]): Ground truth masks for each image with
+                the same shape as the input image.
+ gt_labels (list[Tensor]): Class indices corresponding to each box.
+
+ Returns:
+ dict[str, Tensor]: A dictionary of loss components.
+ """
+ loss_segm = []
+ num_imgs, num_classes, mask_h, mask_w = segm_pred.size()
+ for idx in range(num_imgs):
+ cur_segm_pred = segm_pred[idx]
+ cur_gt_masks = gt_masks[idx].float()
+ cur_gt_labels = gt_labels[idx]
+ segm_targets = self.get_targets(cur_segm_pred, cur_gt_masks,
+ cur_gt_labels)
+ if segm_targets is None:
+ loss = self.loss_segm(cur_segm_pred,
+ torch.zeros_like(cur_segm_pred),
+ torch.zeros_like(cur_segm_pred))
+ else:
+ loss = self.loss_segm(
+ cur_segm_pred,
+ segm_targets,
+ avg_factor=num_imgs * mask_h * mask_w)
+ loss_segm.append(loss)
+ return dict(loss_segm=loss_segm)
+
+ def get_targets(self, segm_pred, gt_masks, gt_labels):
+ """Compute semantic segmentation targets for each image.
+
+ Args:
+ segm_pred (Tensor): Predicted semantic segmentation map
+ with shape (num_classes, H, W).
+ gt_masks (Tensor): Ground truth masks for each image with
+                the same shape as the input image.
+ gt_labels (Tensor): Class indices corresponding to each box.
+
+ Returns:
+ Tensor: Semantic segmentation targets with shape
+ (num_classes, H, W).
+ """
+ if gt_masks.size(0) == 0:
+ return None
+ num_classes, mask_h, mask_w = segm_pred.size()
+ with torch.no_grad():
+ downsampled_masks = F.interpolate(
+ gt_masks.unsqueeze(0), (mask_h, mask_w),
+ mode='bilinear',
+ align_corners=False).squeeze(0)
+ downsampled_masks = downsampled_masks.gt(0.5).float()
+ segm_targets = torch.zeros_like(segm_pred, requires_grad=False)
+ for obj_idx in range(downsampled_masks.size(0)):
+ segm_targets[gt_labels[obj_idx] - 1] = torch.max(
+ segm_targets[gt_labels[obj_idx] - 1],
+ downsampled_masks[obj_idx])
+ return segm_targets
+
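+    # A minimal sketch (hypothetical helper, not part of the original head) of
+    # the target construction in `get_targets` above: instance masks are
+    # downsampled to the prediction size, binarised at 0.5 and merged with an
+    # element-wise max into their class channel.
+    @staticmethod
+    def _example_segm_target_merge():
+        """Illustrative sketch only; two instances of the same class merged
+        into one 4x4 class channel."""
+        import torch
+        import torch.nn.functional as F
+        gt_masks = torch.zeros(2, 8, 8)
+        gt_masks[0, :4, :4] = 1.  # instance 1, top-left quadrant
+        gt_masks[1, 4:, 4:] = 1.  # instance 2, bottom-right quadrant
+        down = F.interpolate(
+            gt_masks.unsqueeze(0), (4, 4), mode='bilinear',
+            align_corners=False).squeeze(0).gt(0.5).float()
+        class_channel = torch.max(down[0], down[1])
+        return class_channel  # 1s in the top-left and bottom-right 2x2 blocks
+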
+
+@HEADS.register_module()
+class YOLACTProtonet(nn.Module):
+ """YOLACT mask head used in https://arxiv.org/abs/1904.02689.
+
+ This head outputs the mask prototypes for YOLACT.
+
+ Args:
+ in_channels (int): Number of channels in the input feature map.
+ proto_channels (tuple[int]): Output channels of protonet convs.
+ proto_kernel_sizes (tuple[int]): Kernel sizes of protonet convs.
+        include_last_relu (bool): Whether to keep the last ReLU of protonet.
+ num_protos (int): Number of prototypes.
+ num_classes (int): Number of categories excluding the background
+ category.
+ loss_mask_weight (float): Reweight the mask loss by this factor.
+ max_masks_to_train (int): Maximum number of masks to train for
+ each image.
+ """
+
+ def __init__(self,
+ num_classes,
+ in_channels=256,
+ proto_channels=(256, 256, 256, None, 256, 32),
+ proto_kernel_sizes=(3, 3, 3, -2, 3, 1),
+ include_last_relu=True,
+ num_protos=32,
+ loss_mask_weight=1.0,
+ max_masks_to_train=100):
+ super(YOLACTProtonet, self).__init__()
+ self.in_channels = in_channels
+ self.proto_channels = proto_channels
+ self.proto_kernel_sizes = proto_kernel_sizes
+ self.include_last_relu = include_last_relu
+ self.protonet = self._init_layers()
+
+ self.loss_mask_weight = loss_mask_weight
+ self.num_protos = num_protos
+ self.num_classes = num_classes
+ self.max_masks_to_train = max_masks_to_train
+ self.fp16_enabled = False
+
+ def _init_layers(self):
+ """A helper function to take a config setting and turn it into a
+ network."""
+ # Possible patterns:
+ # ( 256, 3) -> conv
+ # ( 256,-2) -> deconv
+ # (None,-2) -> bilinear interpolate
+ in_channels = self.in_channels
+ protonets = nn.ModuleList()
+ for num_channels, kernel_size in zip(self.proto_channels,
+ self.proto_kernel_sizes):
+ if kernel_size > 0:
+ layer = nn.Conv2d(
+ in_channels,
+ num_channels,
+ kernel_size,
+ padding=kernel_size // 2)
+ else:
+ if num_channels is None:
+ layer = InterpolateModule(
+ scale_factor=-kernel_size,
+ mode='bilinear',
+ align_corners=False)
+ else:
+ layer = nn.ConvTranspose2d(
+ in_channels,
+ num_channels,
+ -kernel_size,
+ padding=kernel_size // 2)
+ protonets.append(layer)
+ protonets.append(nn.ReLU(inplace=True))
+ in_channels = num_channels if num_channels is not None \
+ else in_channels
+ if not self.include_last_relu:
+ protonets = protonets[:-1]
+ return nn.Sequential(*protonets)
+
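+    # With the default configuration above, proto_channels=(256, 256, 256,
+    # None, 256, 32) and proto_kernel_sizes=(3, 3, 3, -2, 3, 1) expand to
+    # three 3x3 convs, a 2x bilinear upsample, another 3x3 conv and a final
+    # 1x1 conv producing the 32 prototype channels, with an inplace ReLU
+    # appended after every layer (include_last_relu keeps the final one).
+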
+ def init_weights(self):
+ """Initialize weights of the head."""
+ for m in self.protonet:
+ if isinstance(m, nn.Conv2d):
+ xavier_init(m, distribution='uniform')
+
+ def forward(self, x, coeff_pred, bboxes, img_meta, sampling_results=None):
+ """Forward feature from the upstream network to get prototypes and
+        linearly combine the prototypes, using mask coefficients, into
+ instance masks. Finally, crop the instance masks with given bboxes.
+
+ Args:
+ x (Tensor): Feature from the upstream network, which is
+ a 4D-tensor.
+ coeff_pred (list[Tensor]): Mask coefficients for each scale
+ level with shape (N, num_anchors * num_protos, H, W).
+ bboxes (list[Tensor]): Box used for cropping with shape
+ (N, num_anchors * 4, H, W). During training, they are
+ ground truth boxes. During testing, they are predicted
+ boxes.
+ img_meta (list[dict]): Meta information of each image, e.g.,
+ image size, scaling factor, etc.
+ sampling_results (List[:obj:``SamplingResult``]): Sampler results
+ for each image.
+
+ Returns:
+ list[Tensor]: Predicted instance segmentation masks.
+ """
+ prototypes = self.protonet(x)
+ prototypes = prototypes.permute(0, 2, 3, 1).contiguous()
+
+ num_imgs = x.size(0)
+ # Training state
+ if self.training:
+ coeff_pred_list = []
+ for coeff_pred_per_level in coeff_pred:
+ coeff_pred_per_level = \
+ coeff_pred_per_level.permute(0, 2, 3, 1)\
+ .reshape(num_imgs, -1, self.num_protos)
+ coeff_pred_list.append(coeff_pred_per_level)
+ coeff_pred = torch.cat(coeff_pred_list, dim=1)
+
+ mask_pred_list = []
+ for idx in range(num_imgs):
+ cur_prototypes = prototypes[idx]
+ cur_coeff_pred = coeff_pred[idx]
+ cur_bboxes = bboxes[idx]
+ cur_img_meta = img_meta[idx]
+
+ # Testing state
+ if not self.training:
+ bboxes_for_cropping = cur_bboxes
+ else:
+ cur_sampling_results = sampling_results[idx]
+ pos_assigned_gt_inds = \
+ cur_sampling_results.pos_assigned_gt_inds
+ bboxes_for_cropping = cur_bboxes[pos_assigned_gt_inds].clone()
+ pos_inds = cur_sampling_results.pos_inds
+ cur_coeff_pred = cur_coeff_pred[pos_inds]
+
+ # Linearly combine the prototypes with the mask coefficients
+ mask_pred = cur_prototypes @ cur_coeff_pred.t()
+ mask_pred = torch.sigmoid(mask_pred)
+
+ h, w = cur_img_meta['img_shape'][:2]
+ bboxes_for_cropping[:, 0] /= w
+ bboxes_for_cropping[:, 1] /= h
+ bboxes_for_cropping[:, 2] /= w
+ bboxes_for_cropping[:, 3] /= h
+
+ mask_pred = self.crop(mask_pred, bboxes_for_cropping)
+ mask_pred = mask_pred.permute(2, 0, 1).contiguous()
+ mask_pred_list.append(mask_pred)
+ return mask_pred_list
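+
+    # Sketch of the linear-combination step above (added comment; the sizes
+    # are assumptions for illustration, e.g. 32 prototypes and 5 detections):
+    #
+    #   prototypes: (H, W, 32), coefficients: (5, 32)
+    #   masks = torch.sigmoid(prototypes @ coefficients.t())  # -> (H, W, 5)
+    #
+    # i.e. every instance mask is a per-pixel weighted sum of the shared
+    # prototype maps, squashed into (0, 1) by the sigmoid and then cropped
+    # to its box.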
+
+ @force_fp32(apply_to=('mask_pred', ))
+ def loss(self, mask_pred, gt_masks, gt_bboxes, img_meta, sampling_results):
+ """Compute loss of the head.
+
+ Args:
+            mask_pred (list[Tensor]): Predicted instance masks for each
+                image, each with shape (num_pos, H, W).
+            gt_masks (list[Tensor]): Ground truth masks for each image with
+                the same shape as the input image.
+ gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
+ shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
+ img_meta (list[dict]): Meta information of each image, e.g.,
+ image size, scaling factor, etc.
+ sampling_results (List[:obj:``SamplingResult``]): Sampler results
+ for each image.
+
+ Returns:
+ dict[str, Tensor]: A dictionary of loss components.
+ """
+ loss_mask = []
+ num_imgs = len(mask_pred)
+ total_pos = 0
+ for idx in range(num_imgs):
+ cur_mask_pred = mask_pred[idx]
+ cur_gt_masks = gt_masks[idx].float()
+ cur_gt_bboxes = gt_bboxes[idx]
+ cur_img_meta = img_meta[idx]
+ cur_sampling_results = sampling_results[idx]
+
+ pos_assigned_gt_inds = cur_sampling_results.pos_assigned_gt_inds
+ num_pos = pos_assigned_gt_inds.size(0)
+ # Since we're producing (near) full image masks,
+ # it'd take too much vram to backprop on every single mask.
+ # Thus we select only a subset.
+ if num_pos > self.max_masks_to_train:
+ perm = torch.randperm(num_pos)
+ select = perm[:self.max_masks_to_train]
+ cur_mask_pred = cur_mask_pred[select]
+ pos_assigned_gt_inds = pos_assigned_gt_inds[select]
+ num_pos = self.max_masks_to_train
+ total_pos += num_pos
+
+ gt_bboxes_for_reweight = cur_gt_bboxes[pos_assigned_gt_inds]
+
+ mask_targets = self.get_targets(cur_mask_pred, cur_gt_masks,
+ pos_assigned_gt_inds)
+ if num_pos == 0:
+ loss = cur_mask_pred.sum() * 0.
+ elif mask_targets is None:
+ loss = F.binary_cross_entropy(cur_mask_pred,
+ torch.zeros_like(cur_mask_pred),
+ torch.zeros_like(cur_mask_pred))
+ else:
+ cur_mask_pred = torch.clamp(cur_mask_pred, 0, 1)
+ loss = F.binary_cross_entropy(
+ cur_mask_pred, mask_targets,
+ reduction='none') * self.loss_mask_weight
+
+ h, w = cur_img_meta['img_shape'][:2]
+ gt_bboxes_width = (gt_bboxes_for_reweight[:, 2] -
+ gt_bboxes_for_reweight[:, 0]) / w
+ gt_bboxes_height = (gt_bboxes_for_reweight[:, 3] -
+ gt_bboxes_for_reweight[:, 1]) / h
+ loss = loss.mean(dim=(1,
+ 2)) / gt_bboxes_width / gt_bboxes_height
+ loss = torch.sum(loss)
+ loss_mask.append(loss)
+
+ if total_pos == 0:
+ total_pos += 1 # avoid nan
+ loss_mask = [x / total_pos for x in loss_mask]
+
+ return dict(loss_mask=loss_mask)
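+
+    # Note on the reweighting above (added comment): dividing each mask's mean
+    # BCE by its normalized GT box width and height up-weights small objects.
+    # With assumed values, a GT box covering 0.2 x 0.2 of the image scales the
+    # mean loss by 1 / (0.2 * 0.2) = 25, while a 0.8 x 0.8 box is scaled by
+    # only ~1.56, so small and large instances contribute more evenly.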
+
+ def get_targets(self, mask_pred, gt_masks, pos_assigned_gt_inds):
+ """Compute instance segmentation targets for each image.
+
+ Args:
+            mask_pred (Tensor): Predicted instance masks with shape
+                (num_pos, H, W); only its spatial size is used here.
+            gt_masks (Tensor): Ground truth masks for the image with
+                the same shape as the input image.
+ pos_assigned_gt_inds (Tensor): GT indices of the corresponding
+ positive samples.
+ Returns:
+ Tensor: Instance segmentation targets with shape
+ (num_instances, H, W).
+ """
+ if gt_masks.size(0) == 0:
+ return None
+ mask_h, mask_w = mask_pred.shape[-2:]
+ gt_masks = F.interpolate(
+ gt_masks.unsqueeze(0), (mask_h, mask_w),
+ mode='bilinear',
+ align_corners=False).squeeze(0)
+ gt_masks = gt_masks.gt(0.5).float()
+ mask_targets = gt_masks[pos_assigned_gt_inds]
+ return mask_targets
+
+ def get_seg_masks(self, mask_pred, label_pred, img_meta, rescale):
+ """Resize, binarize, and format the instance mask predictions.
+
+ Args:
+ mask_pred (Tensor): shape (N, H, W).
+ label_pred (Tensor): shape (N, ).
+ img_meta (dict): Meta information of each image, e.g.,
+ image size, scaling factor, etc.
+ rescale (bool): If rescale is False, then returned masks will
+ fit the scale of imgs[0].
+ Returns:
+ list[ndarray]: Mask predictions grouped by their predicted classes.
+ """
+ ori_shape = img_meta['ori_shape']
+ scale_factor = img_meta['scale_factor']
+ if rescale:
+ img_h, img_w = ori_shape[:2]
+ else:
+ img_h = np.round(ori_shape[0] * scale_factor[1]).astype(np.int32)
+ img_w = np.round(ori_shape[1] * scale_factor[0]).astype(np.int32)
+
+ cls_segms = [[] for _ in range(self.num_classes)]
+ if mask_pred.size(0) == 0:
+ return cls_segms
+
+ mask_pred = F.interpolate(
+ mask_pred.unsqueeze(0), (img_h, img_w),
+ mode='bilinear',
+ align_corners=False).squeeze(0) > 0.5
+ mask_pred = mask_pred.cpu().numpy().astype(np.uint8)
+
+ for m, l in zip(mask_pred, label_pred):
+ cls_segms[l].append(m)
+ return cls_segms
+
+ def crop(self, masks, boxes, padding=1):
+ """Crop predicted masks by zeroing out everything not in the predicted
+ bbox.
+
+ Args:
+ masks (Tensor): shape [H, W, N].
+ boxes (Tensor): bbox coords in relative point form with
+ shape [N, 4].
+
+ Return:
+ Tensor: The cropped masks.
+ """
+ h, w, n = masks.size()
+ x1, x2 = self.sanitize_coordinates(
+ boxes[:, 0], boxes[:, 2], w, padding, cast=False)
+ y1, y2 = self.sanitize_coordinates(
+ boxes[:, 1], boxes[:, 3], h, padding, cast=False)
+
+ rows = torch.arange(
+ w, device=masks.device, dtype=x1.dtype).view(1, -1,
+ 1).expand(h, w, n)
+ cols = torch.arange(
+ h, device=masks.device, dtype=x1.dtype).view(-1, 1,
+ 1).expand(h, w, n)
+
+ masks_left = rows >= x1.view(1, 1, -1)
+ masks_right = rows < x2.view(1, 1, -1)
+ masks_up = cols >= y1.view(1, 1, -1)
+ masks_down = cols < y2.view(1, 1, -1)
+
+ crop_mask = masks_left * masks_right * masks_up * masks_down
+
+ return masks * crop_mask.float()
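+
+    # Hypothetical usage of ``crop`` (added comment, values are assumptions):
+    # masks come out of ``forward`` as (H, W, N) and boxes are in relative
+    # [x1, y1, x2, y2] form, so
+    #   masks = torch.rand(138, 138, 2)
+    #   boxes = torch.tensor([[0.1, 0.2, 0.5, 0.6],
+    #                         [0.4, 0.1, 0.9, 0.8]])
+    #   cropped = self.crop(masks, boxes)
+    # zeroes every pixel of mask i that falls outside box i.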
+
+ def sanitize_coordinates(self, x1, x2, img_size, padding=0, cast=True):
+ """Sanitizes the input coordinates so that x1 < x2, x1 != x2, x1 >= 0,
+ and x2 <= image_size. Also converts from relative to absolute
+ coordinates and casts the results to long tensors.
+
+        Warning: this operates in place behind the scenes, so copy the
+        inputs first if necessary.
+
+        Args:
+            x1 (Tensor): shape (N, ).
+            x2 (Tensor): shape (N, ).
+            img_size (int): Size of the input image.
+            padding (int): x1 >= padding, x2 <= image_size - padding.
+            cast (bool): If cast is False, the results won't be cast to longs.
+
+        Returns:
+            tuple:
+                x1 (Tensor): Sanitized x1.
+                x2 (Tensor): Sanitized x2.
+ """
+ x1 = x1 * img_size
+ x2 = x2 * img_size
+ if cast:
+ x1 = x1.long()
+ x2 = x2.long()
+ x1 = torch.min(x1, x2)
+ x2 = torch.max(x1, x2)
+ x1 = torch.clamp(x1 - padding, min=0)
+ x2 = torch.clamp(x2 + padding, max=img_size)
+ return x1, x2
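+
+    # Worked example for ``sanitize_coordinates`` (added comment, numbers are
+    # illustrative): with x1=tensor([0.10]), x2=tensor([0.30]), img_size=100,
+    # padding=1 and cast=True, the inputs are scaled to 10 and 30, ordered so
+    # that x1 <= x2, and finally padded and clamped, returning x1=9 and x2=31.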
+
+
+class InterpolateModule(nn.Module):
+ """This is a module version of F.interpolate.
+
+ Any arguments you give it just get passed along for the ride.
+ """
+
+ def __init__(self, *args, **kwargs):
+ super().__init__()
+
+ self.args = args
+ self.kwargs = kwargs
+
+ def forward(self, x):
+ """Forward features from the upstream network."""
+ return F.interpolate(x, *self.args, **self.kwargs)
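+
+
+# Usage sketch (added comment, not part of the original module): because
+# InterpolateModule simply forwards its constructor arguments to F.interpolate,
+#   up = InterpolateModule(scale_factor=2, mode='bilinear', align_corners=False)
+#   up(torch.rand(1, 256, 69, 69))  # -> (1, 256, 138, 138)
+# which is what the protonet builds for a (None, -2) entry in its config.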
diff --git a/annotator/uniformer/mmdet/models/dense_heads/yolo_head.py b/annotator/uniformer/mmdet/models/dense_heads/yolo_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..25a005d36903333f37a6c6d31b4d613c071f4a07
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/dense_heads/yolo_head.py
@@ -0,0 +1,577 @@
+# Copyright (c) 2019 Western Digital Corporation or its affiliates.
+
+import warnings
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from mmcv.cnn import ConvModule, normal_init
+from mmcv.runner import force_fp32
+
+from mmdet.core import (build_anchor_generator, build_assigner,
+ build_bbox_coder, build_sampler, images_to_levels,
+ multi_apply, multiclass_nms)
+from ..builder import HEADS, build_loss
+from .base_dense_head import BaseDenseHead
+from .dense_test_mixins import BBoxTestMixin
+
+
+@HEADS.register_module()
+class YOLOV3Head(BaseDenseHead, BBoxTestMixin):
+ """YOLOV3Head Paper link: https://arxiv.org/abs/1804.02767.
+
+ Args:
+ num_classes (int): The number of object classes (w/o background)
+ in_channels (List[int]): Number of input channels per scale.
+ out_channels (List[int]): The number of output channels per scale
+ before the final 1x1 layer. Default: (1024, 512, 256).
+ anchor_generator (dict): Config dict for anchor generator
+ bbox_coder (dict): Config of bounding box coder.
+ featmap_strides (List[int]): The stride of each scale.
+ Should be in descending order. Default: (32, 16, 8).
+        one_hot_smoother (float): Set a non-zero value to enable label
+            smoothing. Default: 0.
+ conv_cfg (dict): Config dict for convolution layer. Default: None.
+ norm_cfg (dict): Dictionary to construct and config norm layer.
+ Default: dict(type='BN', requires_grad=True)
+ act_cfg (dict): Config dict for activation layer.
+ Default: dict(type='LeakyReLU', negative_slope=0.1).
+ loss_cls (dict): Config of classification loss.
+ loss_conf (dict): Config of confidence loss.
+ loss_xy (dict): Config of xy coordinate loss.
+ loss_wh (dict): Config of wh coordinate loss.
+ train_cfg (dict): Training config of YOLOV3 head. Default: None.
+ test_cfg (dict): Testing config of YOLOV3 head. Default: None.
+ """
+
+ def __init__(self,
+ num_classes,
+ in_channels,
+ out_channels=(1024, 512, 256),
+ anchor_generator=dict(
+ type='YOLOAnchorGenerator',
+ base_sizes=[[(116, 90), (156, 198), (373, 326)],
+ [(30, 61), (62, 45), (59, 119)],
+ [(10, 13), (16, 30), (33, 23)]],
+ strides=[32, 16, 8]),
+ bbox_coder=dict(type='YOLOBBoxCoder'),
+ featmap_strides=[32, 16, 8],
+ one_hot_smoother=0.,
+ conv_cfg=None,
+ norm_cfg=dict(type='BN', requires_grad=True),
+ act_cfg=dict(type='LeakyReLU', negative_slope=0.1),
+ loss_cls=dict(
+ type='CrossEntropyLoss',
+ use_sigmoid=True,
+ loss_weight=1.0),
+ loss_conf=dict(
+ type='CrossEntropyLoss',
+ use_sigmoid=True,
+ loss_weight=1.0),
+ loss_xy=dict(
+ type='CrossEntropyLoss',
+ use_sigmoid=True,
+ loss_weight=1.0),
+ loss_wh=dict(type='MSELoss', loss_weight=1.0),
+ train_cfg=None,
+ test_cfg=None):
+ super(YOLOV3Head, self).__init__()
+ # Check params
+ assert (len(in_channels) == len(out_channels) == len(featmap_strides))
+
+ self.num_classes = num_classes
+ self.in_channels = in_channels
+ self.out_channels = out_channels
+ self.featmap_strides = featmap_strides
+ self.train_cfg = train_cfg
+ self.test_cfg = test_cfg
+ if self.train_cfg:
+ self.assigner = build_assigner(self.train_cfg.assigner)
+ if hasattr(self.train_cfg, 'sampler'):
+ sampler_cfg = self.train_cfg.sampler
+ else:
+ sampler_cfg = dict(type='PseudoSampler')
+ self.sampler = build_sampler(sampler_cfg, context=self)
+
+ self.one_hot_smoother = one_hot_smoother
+
+ self.conv_cfg = conv_cfg
+ self.norm_cfg = norm_cfg
+ self.act_cfg = act_cfg
+
+ self.bbox_coder = build_bbox_coder(bbox_coder)
+ self.anchor_generator = build_anchor_generator(anchor_generator)
+
+ self.loss_cls = build_loss(loss_cls)
+ self.loss_conf = build_loss(loss_conf)
+ self.loss_xy = build_loss(loss_xy)
+ self.loss_wh = build_loss(loss_wh)
+ # usually the numbers of anchors for each level are the same
+ # except SSD detectors
+ self.num_anchors = self.anchor_generator.num_base_anchors[0]
+ assert len(
+ self.anchor_generator.num_base_anchors) == len(featmap_strides)
+ self._init_layers()
+
+ @property
+ def num_levels(self):
+ return len(self.featmap_strides)
+
+ @property
+ def num_attrib(self):
+ """int: number of attributes in pred_map, bboxes (4) +
+ objectness (1) + num_classes"""
+
+ return 5 + self.num_classes
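+
+    # Layout reminder (added comment): each anchor predicts ``num_attrib``
+    # values; e.g. with 80 classes that is 4 box offsets + 1 objectness +
+    # 80 class scores = 85, so each level's prediction map has
+    # ``num_anchors * 85`` output channels (see ``convs_pred`` below).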
+
+ def _init_layers(self):
+ self.convs_bridge = nn.ModuleList()
+ self.convs_pred = nn.ModuleList()
+ for i in range(self.num_levels):
+ conv_bridge = ConvModule(
+ self.in_channels[i],
+ self.out_channels[i],
+ 3,
+ padding=1,
+ conv_cfg=self.conv_cfg,
+ norm_cfg=self.norm_cfg,
+ act_cfg=self.act_cfg)
+ conv_pred = nn.Conv2d(self.out_channels[i],
+ self.num_anchors * self.num_attrib, 1)
+
+ self.convs_bridge.append(conv_bridge)
+ self.convs_pred.append(conv_pred)
+
+ def init_weights(self):
+ """Initialize weights of the head."""
+ for m in self.convs_pred:
+ normal_init(m, std=0.01)
+
+ def forward(self, feats):
+ """Forward features from the upstream network.
+
+ Args:
+ feats (tuple[Tensor]): Features from the upstream network, each is
+ a 4D-tensor.
+
+ Returns:
+            tuple[Tensor]: A tuple of multi-level prediction maps, each a
+                4D-tensor of shape
+                (batch_size, num_anchors * (5 + num_classes), height, width).
+ """
+
+ assert len(feats) == self.num_levels
+ pred_maps = []
+ for i in range(self.num_levels):
+ x = feats[i]
+ x = self.convs_bridge[i](x)
+ pred_map = self.convs_pred[i](x)
+ pred_maps.append(pred_map)
+
+ return tuple(pred_maps),
+
+ @force_fp32(apply_to=('pred_maps', ))
+ def get_bboxes(self,
+ pred_maps,
+ img_metas,
+ cfg=None,
+ rescale=False,
+ with_nms=True):
+ """Transform network output for a batch into bbox predictions.
+
+ Args:
+ pred_maps (list[Tensor]): Raw predictions for a batch of images.
+ img_metas (list[dict]): Meta information of each image, e.g.,
+ image size, scaling factor, etc.
+ cfg (mmcv.Config | None): Test / postprocessing configuration,
+ if None, test_cfg would be used. Default: None.
+ rescale (bool): If True, return boxes in original image space.
+ Default: False.
+ with_nms (bool): If True, do nms before return boxes.
+ Default: True.
+
+ Returns:
+ list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple.
+ The first item is an (n, 5) tensor, where 5 represent
+ (tl_x, tl_y, br_x, br_y, score) and the score between 0 and 1.
+ The shape of the second tensor in the tuple is (n,), and
+ each element represents the class label of the corresponding
+ box.
+ """
+ num_levels = len(pred_maps)
+ pred_maps_list = [pred_maps[i].detach() for i in range(num_levels)]
+ scale_factors = [
+ img_metas[i]['scale_factor']
+ for i in range(pred_maps_list[0].shape[0])
+ ]
+ result_list = self._get_bboxes(pred_maps_list, scale_factors, cfg,
+ rescale, with_nms)
+ return result_list
+
+ def _get_bboxes(self,
+ pred_maps_list,
+ scale_factors,
+ cfg,
+ rescale=False,
+                    with_nms=True):
+        """Transform the network outputs of a batch into bbox predictions.
+
+        Args:
+            pred_maps_list (list[Tensor]): Prediction maps at different scales
+                for each image in the batch.
+            scale_factors (list(ndarray)): Scale factor of each image, arranged
+                as (w_scale, h_scale, w_scale, h_scale).
+ cfg (mmcv.Config | None): Test / postprocessing configuration,
+ if None, test_cfg would be used.
+ rescale (bool): If True, return boxes in original image space.
+ Default: False.
+ with_nms (bool): If True, do nms before return boxes.
+ Default: True.
+
+ Returns:
+ list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple.
+ The first item is an (n, 5) tensor, where 5 represent
+ (tl_x, tl_y, br_x, br_y, score) and the score between 0 and 1.
+ The shape of the second tensor in the tuple is (n,), and
+ each element represents the class label of the corresponding
+ box.
+ """
+ cfg = self.test_cfg if cfg is None else cfg
+ assert len(pred_maps_list) == self.num_levels
+
+ device = pred_maps_list[0].device
+ batch_size = pred_maps_list[0].shape[0]
+
+ featmap_sizes = [
+ pred_maps_list[i].shape[-2:] for i in range(self.num_levels)
+ ]
+ multi_lvl_anchors = self.anchor_generator.grid_anchors(
+ featmap_sizes, device)
+ # convert to tensor to keep tracing
+ nms_pre_tensor = torch.tensor(
+ cfg.get('nms_pre', -1), device=device, dtype=torch.long)
+
+ multi_lvl_bboxes = []
+ multi_lvl_cls_scores = []
+ multi_lvl_conf_scores = []
+ for i in range(self.num_levels):
+ # get some key info for current scale
+ pred_map = pred_maps_list[i]
+ stride = self.featmap_strides[i]
+ # (b,h, w, num_anchors*num_attrib) ->
+ # (b,h*w*num_anchors, num_attrib)
+ pred_map = pred_map.permute(0, 2, 3,
+ 1).reshape(batch_size, -1,
+ self.num_attrib)
+            # An in-place operation like
+            # ``pred_map[..., :2] = torch.sigmoid(pred_map[..., :2])``
+            # would create a constant tensor when exporting to ONNX
+ pred_map_conf = torch.sigmoid(pred_map[..., :2])
+ pred_map_rest = pred_map[..., 2:]
+ pred_map = torch.cat([pred_map_conf, pred_map_rest], dim=-1)
+ pred_map_boxes = pred_map[..., :4]
+ multi_lvl_anchor = multi_lvl_anchors[i]
+ multi_lvl_anchor = multi_lvl_anchor.expand_as(pred_map_boxes)
+ bbox_pred = self.bbox_coder.decode(multi_lvl_anchor,
+ pred_map_boxes, stride)
+ # conf and cls
+ conf_pred = torch.sigmoid(pred_map[..., 4])
+ cls_pred = torch.sigmoid(pred_map[..., 5:]).view(
+ batch_size, -1, self.num_classes) # Cls pred one-hot.
+
+ # Get top-k prediction
+ # Always keep topk op for dynamic input in onnx
+ if nms_pre_tensor > 0 and (torch.onnx.is_in_onnx_export()
+ or conf_pred.shape[1] > nms_pre_tensor):
+ from torch import _shape_as_tensor
+ # keep shape as tensor and get k
+ num_anchor = _shape_as_tensor(conf_pred)[1].to(device)
+ nms_pre = torch.where(nms_pre_tensor < num_anchor,
+ nms_pre_tensor, num_anchor)
+ _, topk_inds = conf_pred.topk(nms_pre)
+ batch_inds = torch.arange(batch_size).view(
+ -1, 1).expand_as(topk_inds).long()
+ bbox_pred = bbox_pred[batch_inds, topk_inds, :]
+ cls_pred = cls_pred[batch_inds, topk_inds, :]
+ conf_pred = conf_pred[batch_inds, topk_inds]
+
+ # Save the result of current scale
+ multi_lvl_bboxes.append(bbox_pred)
+ multi_lvl_cls_scores.append(cls_pred)
+ multi_lvl_conf_scores.append(conf_pred)
+
+ # Merge the results of different scales together
+ batch_mlvl_bboxes = torch.cat(multi_lvl_bboxes, dim=1)
+ batch_mlvl_scores = torch.cat(multi_lvl_cls_scores, dim=1)
+ batch_mlvl_conf_scores = torch.cat(multi_lvl_conf_scores, dim=1)
+
+        # Set the max number of boxes to be fed into nms in deployment
+ deploy_nms_pre = cfg.get('deploy_nms_pre', -1)
+ if deploy_nms_pre > 0 and torch.onnx.is_in_onnx_export():
+ _, topk_inds = batch_mlvl_conf_scores.topk(deploy_nms_pre)
+ batch_inds = torch.arange(batch_size).view(
+ -1, 1).expand_as(topk_inds).long()
+ batch_mlvl_bboxes = batch_mlvl_bboxes[batch_inds, topk_inds, :]
+ batch_mlvl_scores = batch_mlvl_scores[batch_inds, topk_inds, :]
+ batch_mlvl_conf_scores = batch_mlvl_conf_scores[batch_inds,
+ topk_inds]
+
+ if with_nms and (batch_mlvl_conf_scores.size(0) == 0):
+ return torch.zeros((0, 5)), torch.zeros((0, ))
+
+ if rescale:
+ batch_mlvl_bboxes /= batch_mlvl_bboxes.new_tensor(
+ scale_factors).unsqueeze(1)
+
+ # In mmdet 2.x, the class_id for background is num_classes.
+ # i.e., the last column.
+ padding = batch_mlvl_scores.new_zeros(batch_size,
+ batch_mlvl_scores.shape[1], 1)
+ batch_mlvl_scores = torch.cat([batch_mlvl_scores, padding], dim=-1)
+
+ # Support exporting to onnx without nms
+ if with_nms and cfg.get('nms', None) is not None:
+ det_results = []
+ for (mlvl_bboxes, mlvl_scores,
+ mlvl_conf_scores) in zip(batch_mlvl_bboxes, batch_mlvl_scores,
+ batch_mlvl_conf_scores):
+ # Filtering out all predictions with conf < conf_thr
+ conf_thr = cfg.get('conf_thr', -1)
+ if conf_thr > 0 and (not torch.onnx.is_in_onnx_export()):
+                    # TensorRT does not support NonZero
+ # add as_tuple=False for compatibility in Pytorch 1.6
+ # flatten would create a Reshape op with constant values,
+ # and raise RuntimeError when doing inference in ONNX
+ # Runtime with a different input image (#4221).
+ conf_inds = mlvl_conf_scores.ge(conf_thr).nonzero(
+ as_tuple=False).squeeze(1)
+ mlvl_bboxes = mlvl_bboxes[conf_inds, :]
+ mlvl_scores = mlvl_scores[conf_inds, :]
+ mlvl_conf_scores = mlvl_conf_scores[conf_inds]
+
+ det_bboxes, det_labels = multiclass_nms(
+ mlvl_bboxes,
+ mlvl_scores,
+ cfg.score_thr,
+ cfg.nms,
+ cfg.max_per_img,
+ score_factors=mlvl_conf_scores)
+ det_results.append(tuple([det_bboxes, det_labels]))
+
+ else:
+ det_results = [
+ tuple(mlvl_bs)
+ for mlvl_bs in zip(batch_mlvl_bboxes, batch_mlvl_scores,
+ batch_mlvl_conf_scores)
+ ]
+ return det_results
+
+ @force_fp32(apply_to=('pred_maps', ))
+ def loss(self,
+ pred_maps,
+ gt_bboxes,
+ gt_labels,
+ img_metas,
+ gt_bboxes_ignore=None):
+ """Compute loss of the head.
+
+ Args:
+ pred_maps (list[Tensor]): Prediction map for each scale level,
+ shape (N, num_anchors * num_attrib, H, W)
+ gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
+ shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
+ gt_labels (list[Tensor]): class indices corresponding to each box
+ img_metas (list[dict]): Meta information of each image, e.g.,
+ image size, scaling factor, etc.
+ gt_bboxes_ignore (None | list[Tensor]): specify which bounding
+ boxes can be ignored when computing the loss.
+
+ Returns:
+ dict[str, Tensor]: A dictionary of loss components.
+ """
+ num_imgs = len(img_metas)
+ device = pred_maps[0][0].device
+
+ featmap_sizes = [
+ pred_maps[i].shape[-2:] for i in range(self.num_levels)
+ ]
+ multi_level_anchors = self.anchor_generator.grid_anchors(
+ featmap_sizes, device)
+ anchor_list = [multi_level_anchors for _ in range(num_imgs)]
+
+ responsible_flag_list = []
+ for img_id in range(len(img_metas)):
+ responsible_flag_list.append(
+ self.anchor_generator.responsible_flags(
+ featmap_sizes, gt_bboxes[img_id], device))
+
+ target_maps_list, neg_maps_list = self.get_targets(
+ anchor_list, responsible_flag_list, gt_bboxes, gt_labels)
+
+ losses_cls, losses_conf, losses_xy, losses_wh = multi_apply(
+ self.loss_single, pred_maps, target_maps_list, neg_maps_list)
+
+ return dict(
+ loss_cls=losses_cls,
+ loss_conf=losses_conf,
+ loss_xy=losses_xy,
+ loss_wh=losses_wh)
+
+    def loss_single(self, pred_map, target_map, neg_map):
+        """Compute the loss of a single scale level over the whole batch.
+
+ Args:
+ pred_map (Tensor): Raw predictions for a single level.
+ target_map (Tensor): The Ground-Truth target for a single level.
+ neg_map (Tensor): The negative masks for a single level.
+
+ Returns:
+ tuple:
+ loss_cls (Tensor): Classification loss.
+ loss_conf (Tensor): Confidence loss.
+ loss_xy (Tensor): Regression loss of x, y coordinate.
+ loss_wh (Tensor): Regression loss of w, h coordinate.
+ """
+
+ num_imgs = len(pred_map)
+ pred_map = pred_map.permute(0, 2, 3,
+ 1).reshape(num_imgs, -1, self.num_attrib)
+ neg_mask = neg_map.float()
+ pos_mask = target_map[..., 4]
+ pos_and_neg_mask = neg_mask + pos_mask
+ pos_mask = pos_mask.unsqueeze(dim=-1)
+ if torch.max(pos_and_neg_mask) > 1.:
+ warnings.warn('There is overlap between pos and neg sample.')
+ pos_and_neg_mask = pos_and_neg_mask.clamp(min=0., max=1.)
+
+ pred_xy = pred_map[..., :2]
+ pred_wh = pred_map[..., 2:4]
+ pred_conf = pred_map[..., 4]
+ pred_label = pred_map[..., 5:]
+
+ target_xy = target_map[..., :2]
+ target_wh = target_map[..., 2:4]
+ target_conf = target_map[..., 4]
+ target_label = target_map[..., 5:]
+
+ loss_cls = self.loss_cls(pred_label, target_label, weight=pos_mask)
+ loss_conf = self.loss_conf(
+ pred_conf, target_conf, weight=pos_and_neg_mask)
+ loss_xy = self.loss_xy(pred_xy, target_xy, weight=pos_mask)
+ loss_wh = self.loss_wh(pred_wh, target_wh, weight=pos_mask)
+
+ return loss_cls, loss_conf, loss_xy, loss_wh
+
+ def get_targets(self, anchor_list, responsible_flag_list, gt_bboxes_list,
+ gt_labels_list):
+ """Compute target maps for anchors in multiple images.
+
+ Args:
+ anchor_list (list[list[Tensor]]): Multi level anchors of each
+ image. The outer list indicates images, and the inner list
+ corresponds to feature levels of the image. Each element of
+ the inner list is a tensor of shape (num_total_anchors, 4).
+ responsible_flag_list (list[list[Tensor]]): Multi level responsible
+ flags of each image. Each element is a tensor of shape
+ (num_total_anchors, )
+ gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image.
+ gt_labels_list (list[Tensor]): Ground truth labels of each box.
+
+ Returns:
+ tuple: Usually returns a tuple containing learning targets.
+ - target_map_list (list[Tensor]): Target map of each level.
+ - neg_map_list (list[Tensor]): Negative map of each level.
+ """
+ num_imgs = len(anchor_list)
+
+ # anchor number of multi levels
+ num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]
+
+ results = multi_apply(self._get_targets_single, anchor_list,
+ responsible_flag_list, gt_bboxes_list,
+ gt_labels_list)
+
+ all_target_maps, all_neg_maps = results
+ assert num_imgs == len(all_target_maps) == len(all_neg_maps)
+ target_maps_list = images_to_levels(all_target_maps, num_level_anchors)
+ neg_maps_list = images_to_levels(all_neg_maps, num_level_anchors)
+
+ return target_maps_list, neg_maps_list
+
+ def _get_targets_single(self, anchors, responsible_flags, gt_bboxes,
+ gt_labels):
+ """Generate matching bounding box prior and converted GT.
+
+ Args:
+ anchors (list[Tensor]): Multi-level anchors of the image.
+ responsible_flags (list[Tensor]): Multi-level responsible flags of
+ anchors
+ gt_bboxes (Tensor): Ground truth bboxes of single image.
+ gt_labels (Tensor): Ground truth labels of single image.
+
+ Returns:
+ tuple:
+                target_map (Tensor): Prediction target map of each
+ scale level, shape (num_total_anchors,
+ 5+num_classes)
+ neg_map (Tensor): Negative map of each scale level,
+ shape (num_total_anchors,)
+ """
+
+ anchor_strides = []
+ for i in range(len(anchors)):
+ anchor_strides.append(
+ torch.tensor(self.featmap_strides[i],
+ device=gt_bboxes.device).repeat(len(anchors[i])))
+ concat_anchors = torch.cat(anchors)
+ concat_responsible_flags = torch.cat(responsible_flags)
+
+ anchor_strides = torch.cat(anchor_strides)
+ assert len(anchor_strides) == len(concat_anchors) == \
+ len(concat_responsible_flags)
+ assign_result = self.assigner.assign(concat_anchors,
+ concat_responsible_flags,
+ gt_bboxes)
+ sampling_result = self.sampler.sample(assign_result, concat_anchors,
+ gt_bboxes)
+
+ target_map = concat_anchors.new_zeros(
+ concat_anchors.size(0), self.num_attrib)
+
+ target_map[sampling_result.pos_inds, :4] = self.bbox_coder.encode(
+ sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes,
+ anchor_strides[sampling_result.pos_inds])
+
+ target_map[sampling_result.pos_inds, 4] = 1
+
+ gt_labels_one_hot = F.one_hot(
+ gt_labels, num_classes=self.num_classes).float()
+ if self.one_hot_smoother != 0: # label smooth
+ gt_labels_one_hot = gt_labels_one_hot * (
+ 1 - self.one_hot_smoother
+ ) + self.one_hot_smoother / self.num_classes
+ target_map[sampling_result.pos_inds, 5:] = gt_labels_one_hot[
+ sampling_result.pos_assigned_gt_inds]
+
+ neg_map = concat_anchors.new_zeros(
+ concat_anchors.size(0), dtype=torch.uint8)
+ neg_map[sampling_result.neg_inds] = 1
+
+ return target_map, neg_map
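+
+    # Label-smoothing example for the block above (added comment, numbers are
+    # illustrative): with num_classes=3, one_hot_smoother=0.1 and a GT label
+    # of 1, the one-hot target [0, 1, 0] becomes
+    #   [0, 1, 0] * (1 - 0.1) + 0.1 / 3 = [0.033, 0.933, 0.033]
+    # which is what gets written into target_map[..., 5:] for the positives.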
+
+ def aug_test(self, feats, img_metas, rescale=False):
+ """Test function with test time augmentation.
+
+ Args:
+ feats (list[Tensor]): the outer list indicates test-time
+ augmentations and inner Tensor should have a shape NxCxHxW,
+ which contains features for all images in the batch.
+ img_metas (list[list[dict]]): the outer list indicates test-time
+ augs (multiscale, flip, etc.) and the inner list indicates
+ images in a batch. each dict has image information.
+ rescale (bool, optional): Whether to rescale the results.
+ Defaults to False.
+
+ Returns:
+ list[ndarray]: bbox results of each class
+ """
+ return self.aug_test_bboxes(feats, img_metas, rescale=rescale)
diff --git a/annotator/uniformer/mmdet/models/detectors/__init__.py b/annotator/uniformer/mmdet/models/detectors/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..04011130435cf9fdfadeb821919046b1bddab7d4
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/detectors/__init__.py
@@ -0,0 +1,40 @@
+from .atss import ATSS
+from .base import BaseDetector
+from .cascade_rcnn import CascadeRCNN
+from .cornernet import CornerNet
+from .detr import DETR
+from .fast_rcnn import FastRCNN
+from .faster_rcnn import FasterRCNN
+from .fcos import FCOS
+from .fovea import FOVEA
+from .fsaf import FSAF
+from .gfl import GFL
+from .grid_rcnn import GridRCNN
+from .htc import HybridTaskCascade
+from .kd_one_stage import KnowledgeDistillationSingleStageDetector
+from .mask_rcnn import MaskRCNN
+from .mask_scoring_rcnn import MaskScoringRCNN
+from .nasfcos import NASFCOS
+from .paa import PAA
+from .point_rend import PointRend
+from .reppoints_detector import RepPointsDetector
+from .retinanet import RetinaNet
+from .rpn import RPN
+from .scnet import SCNet
+from .single_stage import SingleStageDetector
+from .sparse_rcnn import SparseRCNN
+from .trident_faster_rcnn import TridentFasterRCNN
+from .two_stage import TwoStageDetector
+from .vfnet import VFNet
+from .yolact import YOLACT
+from .yolo import YOLOV3
+
+__all__ = [
+ 'ATSS', 'BaseDetector', 'SingleStageDetector',
+ 'KnowledgeDistillationSingleStageDetector', 'TwoStageDetector', 'RPN',
+ 'FastRCNN', 'FasterRCNN', 'MaskRCNN', 'CascadeRCNN', 'HybridTaskCascade',
+ 'RetinaNet', 'FCOS', 'GridRCNN', 'MaskScoringRCNN', 'RepPointsDetector',
+ 'FOVEA', 'FSAF', 'NASFCOS', 'PointRend', 'GFL', 'CornerNet', 'PAA',
+ 'YOLOV3', 'YOLACT', 'VFNet', 'DETR', 'TridentFasterRCNN', 'SparseRCNN',
+ 'SCNet'
+]
diff --git a/annotator/uniformer/mmdet/models/detectors/atss.py b/annotator/uniformer/mmdet/models/detectors/atss.py
new file mode 100644
index 0000000000000000000000000000000000000000..db7139c6b4fcd7e83007cdb785520743ddae7066
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/detectors/atss.py
@@ -0,0 +1,17 @@
+from ..builder import DETECTORS
+from .single_stage import SingleStageDetector
+
+
+@DETECTORS.register_module()
+class ATSS(SingleStageDetector):
+    """Implementation of `ATSS <https://arxiv.org/abs/1912.02424>`_."""
+
+ def __init__(self,
+ backbone,
+ neck,
+ bbox_head,
+ train_cfg=None,
+ test_cfg=None,
+ pretrained=None):
+ super(ATSS, self).__init__(backbone, neck, bbox_head, train_cfg,
+ test_cfg, pretrained)
diff --git a/annotator/uniformer/mmdet/models/detectors/base.py b/annotator/uniformer/mmdet/models/detectors/base.py
new file mode 100644
index 0000000000000000000000000000000000000000..89134f3696ead442a5ff57184e9d256fdf7d0ba4
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/detectors/base.py
@@ -0,0 +1,355 @@
+from abc import ABCMeta, abstractmethod
+from collections import OrderedDict
+
+import mmcv
+import numpy as np
+import torch
+import torch.distributed as dist
+import torch.nn as nn
+from mmcv.runner import auto_fp16
+from mmcv.utils import print_log
+
+from mmdet.core.visualization import imshow_det_bboxes
+from mmdet.utils import get_root_logger
+
+
+class BaseDetector(nn.Module, metaclass=ABCMeta):
+ """Base class for detectors."""
+
+ def __init__(self):
+ super(BaseDetector, self).__init__()
+ self.fp16_enabled = False
+
+ @property
+ def with_neck(self):
+ """bool: whether the detector has a neck"""
+ return hasattr(self, 'neck') and self.neck is not None
+
+ # TODO: these properties need to be carefully handled
+ # for both single stage & two stage detectors
+ @property
+ def with_shared_head(self):
+ """bool: whether the detector has a shared head in the RoI Head"""
+ return hasattr(self, 'roi_head') and self.roi_head.with_shared_head
+
+ @property
+ def with_bbox(self):
+ """bool: whether the detector has a bbox head"""
+ return ((hasattr(self, 'roi_head') and self.roi_head.with_bbox)
+ or (hasattr(self, 'bbox_head') and self.bbox_head is not None))
+
+ @property
+ def with_mask(self):
+ """bool: whether the detector has a mask head"""
+ return ((hasattr(self, 'roi_head') and self.roi_head.with_mask)
+ or (hasattr(self, 'mask_head') and self.mask_head is not None))
+
+ @abstractmethod
+ def extract_feat(self, imgs):
+ """Extract features from images."""
+ pass
+
+ def extract_feats(self, imgs):
+ """Extract features from multiple images.
+
+ Args:
+ imgs (list[torch.Tensor]): A list of images. The images are
+ augmented from the same image but in different ways.
+
+ Returns:
+ list[torch.Tensor]: Features of different images
+ """
+ assert isinstance(imgs, list)
+ return [self.extract_feat(img) for img in imgs]
+
+ def forward_train(self, imgs, img_metas, **kwargs):
+ """
+ Args:
+            imgs (list[Tensor]): List of tensors of shape (1, C, H, W).
+ Typically these should be mean centered and std scaled.
+ img_metas (list[dict]): List of image info dict where each dict
+ has: 'img_shape', 'scale_factor', 'flip', and may also contain
+ 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
+ For details on the values of these keys, see
+ :class:`mmdet.datasets.pipelines.Collect`.
+ kwargs (keyword arguments): Specific to concrete implementation.
+ """
+ # NOTE the batched image size information may be useful, e.g.
+ # in DETR, this is needed for the construction of masks, which is
+ # then used for the transformer_head.
+ batch_input_shape = tuple(imgs[0].size()[-2:])
+ for img_meta in img_metas:
+ img_meta['batch_input_shape'] = batch_input_shape
+
+ async def async_simple_test(self, img, img_metas, **kwargs):
+ raise NotImplementedError
+
+ @abstractmethod
+ def simple_test(self, img, img_metas, **kwargs):
+ pass
+
+ @abstractmethod
+ def aug_test(self, imgs, img_metas, **kwargs):
+ """Test function with test time augmentation."""
+ pass
+
+ def init_weights(self, pretrained=None):
+ """Initialize the weights in detector.
+
+ Args:
+ pretrained (str, optional): Path to pre-trained weights.
+ Defaults to None.
+ """
+ if pretrained is not None:
+ logger = get_root_logger()
+ print_log(f'load model from: {pretrained}', logger=logger)
+
+ async def aforward_test(self, *, img, img_metas, **kwargs):
+ for var, name in [(img, 'img'), (img_metas, 'img_metas')]:
+ if not isinstance(var, list):
+ raise TypeError(f'{name} must be a list, but got {type(var)}')
+
+ num_augs = len(img)
+ if num_augs != len(img_metas):
+ raise ValueError(f'num of augmentations ({len(img)}) '
+ f'!= num of image metas ({len(img_metas)})')
+ # TODO: remove the restriction of samples_per_gpu == 1 when prepared
+ samples_per_gpu = img[0].size(0)
+ assert samples_per_gpu == 1
+
+ if num_augs == 1:
+ return await self.async_simple_test(img[0], img_metas[0], **kwargs)
+ else:
+ raise NotImplementedError
+
+ def forward_test(self, imgs, img_metas, **kwargs):
+ """
+ Args:
+ imgs (List[Tensor]): the outer list indicates test-time
+ augmentations and inner Tensor should have a shape NxCxHxW,
+ which contains all images in the batch.
+ img_metas (List[List[dict]]): the outer list indicates test-time
+ augs (multiscale, flip, etc.) and the inner list indicates
+ images in a batch.
+ """
+ for var, name in [(imgs, 'imgs'), (img_metas, 'img_metas')]:
+ if not isinstance(var, list):
+ raise TypeError(f'{name} must be a list, but got {type(var)}')
+
+ num_augs = len(imgs)
+ if num_augs != len(img_metas):
+ raise ValueError(f'num of augmentations ({len(imgs)}) '
+ f'!= num of image meta ({len(img_metas)})')
+
+ # NOTE the batched image size information may be useful, e.g.
+ # in DETR, this is needed for the construction of masks, which is
+ # then used for the transformer_head.
+ for img, img_meta in zip(imgs, img_metas):
+ batch_size = len(img_meta)
+ for img_id in range(batch_size):
+ img_meta[img_id]['batch_input_shape'] = tuple(img.size()[-2:])
+
+ if num_augs == 1:
+ # proposals (List[List[Tensor]]): the outer list indicates
+ # test-time augs (multiscale, flip, etc.) and the inner list
+ # indicates images in a batch.
+ # The Tensor should have a shape Px4, where P is the number of
+ # proposals.
+ if 'proposals' in kwargs:
+ kwargs['proposals'] = kwargs['proposals'][0]
+ return self.simple_test(imgs[0], img_metas[0], **kwargs)
+ else:
+ assert imgs[0].size(0) == 1, 'aug test does not support ' \
+ 'inference with batch size ' \
+ f'{imgs[0].size(0)}'
+ # TODO: support test augmentation for predefined proposals
+ assert 'proposals' not in kwargs
+ return self.aug_test(imgs, img_metas, **kwargs)
+
+ @auto_fp16(apply_to=('img', ))
+ def forward(self, img, img_metas, return_loss=True, **kwargs):
+ """Calls either :func:`forward_train` or :func:`forward_test` depending
+ on whether ``return_loss`` is ``True``.
+
+ Note this setting will change the expected inputs. When
+ ``return_loss=True``, img and img_meta are single-nested (i.e. Tensor
+        and List[dict]), and when ``return_loss=False``, img and img_meta
+ should be double nested (i.e. List[Tensor], List[List[dict]]), with
+ the outer list indicating test time augmentations.
+ """
+ if return_loss:
+ return self.forward_train(img, img_metas, **kwargs)
+ else:
+ return self.forward_test(img, img_metas, **kwargs)
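+
+    # Expected input nesting for ``forward`` (added note, shapes assumed):
+    #   return_loss=True  -> img is a single batched Tensor (N, C, H, W) and
+    #                        img_metas is a list[dict] of length N.
+    #   return_loss=False -> img is a list[Tensor] and img_metas a
+    #                        list[list[dict]], one entry per test-time
+    #                        augmentation.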
+
+ def _parse_losses(self, losses):
+ """Parse the raw outputs (losses) of the network.
+
+ Args:
+            losses (dict): Raw output of the network, which usually contains
+                losses and other necessary information.
+
+ Returns:
+ tuple[Tensor, dict]: (loss, log_vars), loss is the loss tensor \
+ which may be a weighted sum of all losses, log_vars contains \
+ all the variables to be sent to the logger.
+ """
+ log_vars = OrderedDict()
+ for loss_name, loss_value in losses.items():
+ if isinstance(loss_value, torch.Tensor):
+ log_vars[loss_name] = loss_value.mean()
+ elif isinstance(loss_value, list):
+ log_vars[loss_name] = sum(_loss.mean() for _loss in loss_value)
+ else:
+ raise TypeError(
+ f'{loss_name} is not a tensor or list of tensors')
+
+ loss = sum(_value for _key, _value in log_vars.items()
+ if 'loss' in _key)
+
+ log_vars['loss'] = loss
+ for loss_name, loss_value in log_vars.items():
+ # reduce loss when distributed training
+ if dist.is_available() and dist.is_initialized():
+ loss_value = loss_value.data.clone()
+ dist.all_reduce(loss_value.div_(dist.get_world_size()))
+ log_vars[loss_name] = loss_value.item()
+
+ return loss, log_vars
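+
+    # Example of what ``_parse_losses`` returns (added comment, values are
+    # assumptions):
+    #   losses = dict(loss_cls=torch.tensor(0.5),
+    #                 loss_bbox=[torch.tensor(0.2), torch.tensor(0.3)],
+    #                 acc=torch.tensor(0.9))
+    # gives loss = 0.5 + (0.2 + 0.3) = 1.0, since only keys containing
+    # 'loss' are summed, and log_vars holds every entry reduced to a float
+    # (averaged across GPUs in distributed training).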
+
+ def train_step(self, data, optimizer):
+ """The iteration step during training.
+
+ This method defines an iteration step during training, except for the
+ back propagation and optimizer updating, which are done in an optimizer
+ hook. Note that in some complicated cases or models, the whole process
+ including back propagation and optimizer updating is also defined in
+ this method, such as GAN.
+
+ Args:
+ data (dict): The output of dataloader.
+ optimizer (:obj:`torch.optim.Optimizer` | dict): The optimizer of
+ runner is passed to ``train_step()``. This argument is unused
+ and reserved.
+
+ Returns:
+ dict: It should contain at least 3 keys: ``loss``, ``log_vars``, \
+ ``num_samples``.
+
+ - ``loss`` is a tensor for back propagation, which can be a \
+ weighted sum of multiple losses.
+ - ``log_vars`` contains all the variables to be sent to the
+ logger.
+ - ``num_samples`` indicates the batch size (when the model is \
+ DDP, it means the batch size on each GPU), which is used for \
+ averaging the logs.
+ """
+ losses = self(**data)
+ loss, log_vars = self._parse_losses(losses)
+
+ outputs = dict(
+ loss=loss, log_vars=log_vars, num_samples=len(data['img_metas']))
+
+ return outputs
+
+ def val_step(self, data, optimizer):
+ """The iteration step during validation.
+
+        This method shares the same signature as :func:`train_step`, but is
+        used during val epochs. Note that the evaluation after training epochs
+        is not implemented with this method, but with an evaluation hook.
+ """
+ losses = self(**data)
+ loss, log_vars = self._parse_losses(losses)
+
+ outputs = dict(
+ loss=loss, log_vars=log_vars, num_samples=len(data['img_metas']))
+
+ return outputs
+
+ def show_result(self,
+ img,
+ result,
+ score_thr=0.3,
+ bbox_color=(72, 101, 241),
+ text_color=(72, 101, 241),
+ mask_color=None,
+ thickness=2,
+ font_size=13,
+ win_name='',
+ show=False,
+ wait_time=0,
+ out_file=None):
+ """Draw `result` over `img`.
+
+ Args:
+ img (str or Tensor): The image to be displayed.
+            result (Tensor or tuple): The results to draw over `img`,
+                either bbox_result or (bbox_result, segm_result).
+ score_thr (float, optional): Minimum score of bboxes to be shown.
+ Default: 0.3.
+            bbox_color (str or tuple(int) or :obj:`Color`): Color of bbox
+                lines. The tuple of color should be in BGR order.
+                Default: (72, 101, 241).
+            text_color (str or tuple(int) or :obj:`Color`): Color of texts.
+                The tuple of color should be in BGR order.
+                Default: (72, 101, 241).
+ mask_color (None or str or tuple(int) or :obj:`Color`):
+ Color of masks. The tuple of color should be in BGR order.
+ Default: None
+ thickness (int): Thickness of lines. Default: 2
+ font_size (int): Font size of texts. Default: 13
+ win_name (str): The window name. Default: ''
+ wait_time (float): Value of waitKey param.
+ Default: 0.
+ show (bool): Whether to show the image.
+ Default: False.
+ out_file (str or None): The filename to write the image.
+ Default: None.
+
+ Returns:
+            img (ndarray): The image with results drawn on it, returned only
+                when neither `show` nor `out_file` is set.
+ """
+ img = mmcv.imread(img)
+ img = img.copy()
+ if isinstance(result, tuple):
+ bbox_result, segm_result = result
+ if isinstance(segm_result, tuple):
+ segm_result = segm_result[0] # ms rcnn
+ else:
+ bbox_result, segm_result = result, None
+ bboxes = np.vstack(bbox_result)
+ labels = [
+ np.full(bbox.shape[0], i, dtype=np.int32)
+ for i, bbox in enumerate(bbox_result)
+ ]
+ labels = np.concatenate(labels)
+ # draw segmentation masks
+ segms = None
+ if segm_result is not None and len(labels) > 0: # non empty
+ segms = mmcv.concat_list(segm_result)
+ if isinstance(segms[0], torch.Tensor):
+ segms = torch.stack(segms, dim=0).detach().cpu().numpy()
+ else:
+ segms = np.stack(segms, axis=0)
+ # if out_file specified, do not show image in window
+ if out_file is not None:
+ show = False
+ # draw bounding boxes
+ img = imshow_det_bboxes(
+ img,
+ bboxes,
+ labels,
+ segms,
+ class_names=self.CLASSES,
+ score_thr=score_thr,
+ bbox_color=bbox_color,
+ text_color=text_color,
+ mask_color=mask_color,
+ thickness=thickness,
+ font_size=font_size,
+ win_name=win_name,
+ show=show,
+ wait_time=wait_time,
+ out_file=out_file)
+
+ if not (show or out_file):
+ return img
diff --git a/annotator/uniformer/mmdet/models/detectors/cascade_rcnn.py b/annotator/uniformer/mmdet/models/detectors/cascade_rcnn.py
new file mode 100644
index 0000000000000000000000000000000000000000..d873dceb7e4efdf8d1e7d282badfe9b7118426b9
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/detectors/cascade_rcnn.py
@@ -0,0 +1,46 @@
+from ..builder import DETECTORS
+from .two_stage import TwoStageDetector
+
+
+@DETECTORS.register_module()
+class CascadeRCNN(TwoStageDetector):
+    r"""Implementation of `Cascade R-CNN: Delving into High Quality Object
+    Detection <https://arxiv.org/abs/1712.00726>`_"""
+
+ def __init__(self,
+ backbone,
+ neck=None,
+ rpn_head=None,
+ roi_head=None,
+ train_cfg=None,
+ test_cfg=None,
+ pretrained=None):
+ super(CascadeRCNN, self).__init__(
+ backbone=backbone,
+ neck=neck,
+ rpn_head=rpn_head,
+ roi_head=roi_head,
+ train_cfg=train_cfg,
+ test_cfg=test_cfg,
+ pretrained=pretrained)
+
+ def show_result(self, data, result, **kwargs):
+ """Show prediction results of the detector.
+
+ Args:
+ data (str or np.ndarray): Image filename or loaded image.
+            result (Tensor or tuple): The results to draw over `img`,
+                either bbox_result or (bbox_result, segm_result).
+
+ Returns:
+ np.ndarray: The image with bboxes drawn on it.
+ """
+ if self.with_mask:
+ ms_bbox_result, ms_segm_result = result
+ if isinstance(ms_bbox_result, dict):
+ result = (ms_bbox_result['ensemble'],
+ ms_segm_result['ensemble'])
+ else:
+ if isinstance(result, dict):
+ result = result['ensemble']
+ return super(CascadeRCNN, self).show_result(data, result, **kwargs)
diff --git a/annotator/uniformer/mmdet/models/detectors/cornernet.py b/annotator/uniformer/mmdet/models/detectors/cornernet.py
new file mode 100644
index 0000000000000000000000000000000000000000..bb8ccc1465ab66d1615ca16701a533a22b156295
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/detectors/cornernet.py
@@ -0,0 +1,95 @@
+import torch
+
+from mmdet.core import bbox2result, bbox_mapping_back
+from ..builder import DETECTORS
+from .single_stage import SingleStageDetector
+
+
+@DETECTORS.register_module()
+class CornerNet(SingleStageDetector):
+ """CornerNet.
+
+ This detector is the implementation of the paper `CornerNet: Detecting
+    Objects as Paired Keypoints <https://arxiv.org/abs/1808.01244>`_ .
+ """
+
+ def __init__(self,
+ backbone,
+ neck,
+ bbox_head,
+ train_cfg=None,
+ test_cfg=None,
+ pretrained=None):
+ super(CornerNet, self).__init__(backbone, neck, bbox_head, train_cfg,
+ test_cfg, pretrained)
+
+ def merge_aug_results(self, aug_results, img_metas):
+ """Merge augmented detection bboxes and score.
+
+ Args:
+ aug_results (list[list[Tensor]]): Det_bboxes and det_labels of each
+ image.
+ img_metas (list[list[dict]]): Meta information of each image, e.g.,
+ image size, scaling factor, etc.
+
+ Returns:
+ tuple: (bboxes, labels)
+ """
+ recovered_bboxes, aug_labels = [], []
+ for bboxes_labels, img_info in zip(aug_results, img_metas):
+ img_shape = img_info[0]['img_shape'] # using shape before padding
+ scale_factor = img_info[0]['scale_factor']
+ flip = img_info[0]['flip']
+ bboxes, labels = bboxes_labels
+ bboxes, scores = bboxes[:, :4], bboxes[:, -1:]
+ bboxes = bbox_mapping_back(bboxes, img_shape, scale_factor, flip)
+ recovered_bboxes.append(torch.cat([bboxes, scores], dim=-1))
+ aug_labels.append(labels)
+
+ bboxes = torch.cat(recovered_bboxes, dim=0)
+ labels = torch.cat(aug_labels)
+
+ if bboxes.shape[0] > 0:
+ out_bboxes, out_labels = self.bbox_head._bboxes_nms(
+ bboxes, labels, self.bbox_head.test_cfg)
+ else:
+ out_bboxes, out_labels = bboxes, labels
+
+ return out_bboxes, out_labels
+
+ def aug_test(self, imgs, img_metas, rescale=False):
+ """Augment testing of CornerNet.
+
+ Args:
+ imgs (list[Tensor]): Augmented images.
+ img_metas (list[list[dict]]): Meta information of each image, e.g.,
+ image size, scaling factor, etc.
+ rescale (bool): If True, return boxes in original image space.
+ Default: False.
+
+ Note:
+            ``imgs`` must include flipped image pairs.
+
+ Returns:
+ list[list[np.ndarray]]: BBox results of each image and classes.
+ The outer list corresponds to each image. The inner list
+ corresponds to each class.
+ """
+ img_inds = list(range(len(imgs)))
+
+ assert img_metas[0][0]['flip'] + img_metas[1][0]['flip'], (
+ 'aug test must have flipped image pair')
+ aug_results = []
+ for ind, flip_ind in zip(img_inds[0::2], img_inds[1::2]):
+ img_pair = torch.cat([imgs[ind], imgs[flip_ind]])
+ x = self.extract_feat(img_pair)
+ outs = self.bbox_head(x)
+ bbox_list = self.bbox_head.get_bboxes(
+ *outs, [img_metas[ind], img_metas[flip_ind]], False, False)
+ aug_results.append(bbox_list[0])
+ aug_results.append(bbox_list[1])
+
+ bboxes, labels = self.merge_aug_results(aug_results, img_metas)
+ bbox_results = bbox2result(bboxes, labels, self.bbox_head.num_classes)
+
+ return [bbox_results]
diff --git a/annotator/uniformer/mmdet/models/detectors/detr.py b/annotator/uniformer/mmdet/models/detectors/detr.py
new file mode 100644
index 0000000000000000000000000000000000000000..5ff82a280daa0a015f662bdf2509fa11542d46d4
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/detectors/detr.py
@@ -0,0 +1,46 @@
+from mmdet.core import bbox2result
+from ..builder import DETECTORS
+from .single_stage import SingleStageDetector
+
+
+@DETECTORS.register_module()
+class DETR(SingleStageDetector):
+ r"""Implementation of `DETR: End-to-End Object Detection with
+    Transformers <https://arxiv.org/abs/2005.12872>`_"""
+
+ def __init__(self,
+ backbone,
+ bbox_head,
+ train_cfg=None,
+ test_cfg=None,
+ pretrained=None):
+ super(DETR, self).__init__(backbone, None, bbox_head, train_cfg,
+ test_cfg, pretrained)
+
+ def simple_test(self, img, img_metas, rescale=False):
+ """Test function without test time augmentation.
+
+ Args:
+            img (list[torch.Tensor]): List of multiple images.
+ img_metas (list[dict]): List of image information.
+ rescale (bool, optional): Whether to rescale the results.
+ Defaults to False.
+
+ Returns:
+ list[list[np.ndarray]]: BBox results of each image and classes.
+ The outer list corresponds to each image. The inner list
+ corresponds to each class.
+ """
+ batch_size = len(img_metas)
+ assert batch_size == 1, 'Currently only batch_size 1 for inference ' \
+ f'mode is supported. Found batch_size {batch_size}.'
+ x = self.extract_feat(img)
+ outs = self.bbox_head(x, img_metas)
+ bbox_list = self.bbox_head.get_bboxes(
+ *outs, img_metas, rescale=rescale)
+
+ bbox_results = [
+ bbox2result(det_bboxes, det_labels, self.bbox_head.num_classes)
+ for det_bboxes, det_labels in bbox_list
+ ]
+ return bbox_results
diff --git a/annotator/uniformer/mmdet/models/detectors/fast_rcnn.py b/annotator/uniformer/mmdet/models/detectors/fast_rcnn.py
new file mode 100644
index 0000000000000000000000000000000000000000..3d6e242767b927ed37198b6bc7862abecef99a33
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/detectors/fast_rcnn.py
@@ -0,0 +1,52 @@
+from ..builder import DETECTORS
+from .two_stage import TwoStageDetector
+
+
+@DETECTORS.register_module()
+class FastRCNN(TwoStageDetector):
+    """Implementation of `Fast R-CNN <https://arxiv.org/abs/1504.08083>`_"""
+
+ def __init__(self,
+ backbone,
+ roi_head,
+ train_cfg,
+ test_cfg,
+ neck=None,
+ pretrained=None):
+ super(FastRCNN, self).__init__(
+ backbone=backbone,
+ neck=neck,
+ roi_head=roi_head,
+ train_cfg=train_cfg,
+ test_cfg=test_cfg,
+ pretrained=pretrained)
+
+ def forward_test(self, imgs, img_metas, proposals, **kwargs):
+ """
+ Args:
+ imgs (List[Tensor]): the outer list indicates test-time
+ augmentations and inner Tensor should have a shape NxCxHxW,
+ which contains all images in the batch.
+ img_metas (List[List[dict]]): the outer list indicates test-time
+ augs (multiscale, flip, etc.) and the inner list indicates
+ images in a batch.
+ proposals (List[List[Tensor]]): the outer list indicates test-time
+ augs (multiscale, flip, etc.) and the inner list indicates
+ images in a batch. The Tensor should have a shape Px4, where
+ P is the number of proposals.
+ """
+ for var, name in [(imgs, 'imgs'), (img_metas, 'img_metas')]:
+ if not isinstance(var, list):
+ raise TypeError(f'{name} must be a list, but got {type(var)}')
+
+ num_augs = len(imgs)
+ if num_augs != len(img_metas):
+ raise ValueError(f'num of augmentations ({len(imgs)}) '
+ f'!= num of image meta ({len(img_metas)})')
+
+ if num_augs == 1:
+ return self.simple_test(imgs[0], img_metas[0], proposals[0],
+ **kwargs)
+ else:
+ # TODO: support test-time augmentation
+            raise NotImplementedError
diff --git a/annotator/uniformer/mmdet/models/detectors/faster_rcnn.py b/annotator/uniformer/mmdet/models/detectors/faster_rcnn.py
new file mode 100644
index 0000000000000000000000000000000000000000..81bad0f43a48b1022c4cd996e26d6c90be93d4d0
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/detectors/faster_rcnn.py
@@ -0,0 +1,24 @@
+from ..builder import DETECTORS
+from .two_stage import TwoStageDetector
+
+
+@DETECTORS.register_module()
+class FasterRCNN(TwoStageDetector):
+    """Implementation of `Faster R-CNN <https://arxiv.org/abs/1506.01497>`_"""
+
+ def __init__(self,
+ backbone,
+ rpn_head,
+ roi_head,
+ train_cfg,
+ test_cfg,
+ neck=None,
+ pretrained=None):
+ super(FasterRCNN, self).__init__(
+ backbone=backbone,
+ neck=neck,
+ rpn_head=rpn_head,
+ roi_head=roi_head,
+ train_cfg=train_cfg,
+ test_cfg=test_cfg,
+ pretrained=pretrained)
diff --git a/annotator/uniformer/mmdet/models/detectors/fcos.py b/annotator/uniformer/mmdet/models/detectors/fcos.py
new file mode 100644
index 0000000000000000000000000000000000000000..58485c1864a11a66168b7597f345ea759ce20551
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/detectors/fcos.py
@@ -0,0 +1,17 @@
+from ..builder import DETECTORS
+from .single_stage import SingleStageDetector
+
+
+@DETECTORS.register_module()
+class FCOS(SingleStageDetector):
+    """Implementation of `FCOS <https://arxiv.org/abs/1904.01355>`_"""
+
+ def __init__(self,
+ backbone,
+ neck,
+ bbox_head,
+ train_cfg=None,
+ test_cfg=None,
+ pretrained=None):
+ super(FCOS, self).__init__(backbone, neck, bbox_head, train_cfg,
+ test_cfg, pretrained)
diff --git a/annotator/uniformer/mmdet/models/detectors/fovea.py b/annotator/uniformer/mmdet/models/detectors/fovea.py
new file mode 100644
index 0000000000000000000000000000000000000000..22a578efffbd108db644d907bae95c7c8df31f2e
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/detectors/fovea.py
@@ -0,0 +1,17 @@
+from ..builder import DETECTORS
+from .single_stage import SingleStageDetector
+
+
+@DETECTORS.register_module()
+class FOVEA(SingleStageDetector):
+    """Implementation of `FoveaBox <https://arxiv.org/abs/1904.03797>`_"""
+
+ def __init__(self,
+ backbone,
+ neck,
+ bbox_head,
+ train_cfg=None,
+ test_cfg=None,
+ pretrained=None):
+ super(FOVEA, self).__init__(backbone, neck, bbox_head, train_cfg,
+ test_cfg, pretrained)
diff --git a/annotator/uniformer/mmdet/models/detectors/fsaf.py b/annotator/uniformer/mmdet/models/detectors/fsaf.py
new file mode 100644
index 0000000000000000000000000000000000000000..9f10fa1ae10f31e6cb5de65505b14a4fc97dd022
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/detectors/fsaf.py
@@ -0,0 +1,17 @@
+from ..builder import DETECTORS
+from .single_stage import SingleStageDetector
+
+
+@DETECTORS.register_module()
+class FSAF(SingleStageDetector):
+    """Implementation of `FSAF <https://arxiv.org/abs/1903.00621>`_"""
+
+ def __init__(self,
+ backbone,
+ neck,
+ bbox_head,
+ train_cfg=None,
+ test_cfg=None,
+ pretrained=None):
+ super(FSAF, self).__init__(backbone, neck, bbox_head, train_cfg,
+ test_cfg, pretrained)
diff --git a/annotator/uniformer/mmdet/models/detectors/gfl.py b/annotator/uniformer/mmdet/models/detectors/gfl.py
new file mode 100644
index 0000000000000000000000000000000000000000..64d65cb2dfb7a56f57e08c3fcad67e1539e1e841
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/detectors/gfl.py
@@ -0,0 +1,16 @@
+from ..builder import DETECTORS
+from .single_stage import SingleStageDetector
+
+
+@DETECTORS.register_module()
+class GFL(SingleStageDetector):
+
+ def __init__(self,
+ backbone,
+ neck,
+ bbox_head,
+ train_cfg=None,
+ test_cfg=None,
+ pretrained=None):
+ super(GFL, self).__init__(backbone, neck, bbox_head, train_cfg,
+ test_cfg, pretrained)
diff --git a/annotator/uniformer/mmdet/models/detectors/grid_rcnn.py b/annotator/uniformer/mmdet/models/detectors/grid_rcnn.py
new file mode 100644
index 0000000000000000000000000000000000000000..b6145a1464cd940bd4f98eaa15f6f9ecf6a10a20
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/detectors/grid_rcnn.py
@@ -0,0 +1,29 @@
+from ..builder import DETECTORS
+from .two_stage import TwoStageDetector
+
+
+@DETECTORS.register_module()
+class GridRCNN(TwoStageDetector):
+ """Grid R-CNN.
+
+ This detector is the implementation of:
+ - Grid R-CNN (https://arxiv.org/abs/1811.12030)
+ - Grid R-CNN Plus: Faster and Better (https://arxiv.org/abs/1906.05688)
+ """
+
+ def __init__(self,
+ backbone,
+ rpn_head,
+ roi_head,
+ train_cfg,
+ test_cfg,
+ neck=None,
+ pretrained=None):
+ super(GridRCNN, self).__init__(
+ backbone=backbone,
+ neck=neck,
+ rpn_head=rpn_head,
+ roi_head=roi_head,
+ train_cfg=train_cfg,
+ test_cfg=test_cfg,
+ pretrained=pretrained)
diff --git a/annotator/uniformer/mmdet/models/detectors/htc.py b/annotator/uniformer/mmdet/models/detectors/htc.py
new file mode 100644
index 0000000000000000000000000000000000000000..d9efdf420fa7373f7f1d116f8d97836d73b457bf
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/detectors/htc.py
@@ -0,0 +1,15 @@
+from ..builder import DETECTORS
+from .cascade_rcnn import CascadeRCNN
+
+
+@DETECTORS.register_module()
+class HybridTaskCascade(CascadeRCNN):
+    """Implementation of `HTC <https://arxiv.org/abs/1901.07518>`_"""
+
+ def __init__(self, **kwargs):
+ super(HybridTaskCascade, self).__init__(**kwargs)
+
+ @property
+ def with_semantic(self):
+ """bool: whether the detector has a semantic head"""
+ return self.roi_head.with_semantic
diff --git a/annotator/uniformer/mmdet/models/detectors/kd_one_stage.py b/annotator/uniformer/mmdet/models/detectors/kd_one_stage.py
new file mode 100644
index 0000000000000000000000000000000000000000..671ec19015c87fefd065b84ae887147f90cc892b
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/detectors/kd_one_stage.py
@@ -0,0 +1,100 @@
+import mmcv
+import torch
+from mmcv.runner import load_checkpoint
+
+from .. import build_detector
+from ..builder import DETECTORS
+from .single_stage import SingleStageDetector
+
+
+@DETECTORS.register_module()
+class KnowledgeDistillationSingleStageDetector(SingleStageDetector):
+    r"""Implementation of `Distilling the Knowledge in a Neural Network
+    <https://arxiv.org/abs/1503.02531>`_.
+
+ Args:
+ teacher_config (str | dict): Config file path
+ or the config object of teacher model.
+ teacher_ckpt (str, optional): Checkpoint path of teacher model.
+ If left as None, the model will not load any weights.
+ """
+
+ def __init__(self,
+ backbone,
+ neck,
+ bbox_head,
+ teacher_config,
+ teacher_ckpt=None,
+ eval_teacher=True,
+ train_cfg=None,
+ test_cfg=None,
+ pretrained=None):
+ super().__init__(backbone, neck, bbox_head, train_cfg, test_cfg,
+ pretrained)
+ self.eval_teacher = eval_teacher
+ # Build teacher model
+ if isinstance(teacher_config, str):
+ teacher_config = mmcv.Config.fromfile(teacher_config)
+ self.teacher_model = build_detector(teacher_config['model'])
+ if teacher_ckpt is not None:
+ load_checkpoint(
+ self.teacher_model, teacher_ckpt, map_location='cpu')
+
+ def forward_train(self,
+ img,
+ img_metas,
+ gt_bboxes,
+ gt_labels,
+ gt_bboxes_ignore=None):
+ """
+ Args:
+ img (Tensor): Input images of shape (N, C, H, W).
+ Typically these should be mean centered and std scaled.
+ img_metas (list[dict]): A List of image info dict where each dict
+ has: 'img_shape', 'scale_factor', 'flip', and may also contain
+ 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
+ For details on the values of these keys see
+ :class:`mmdet.datasets.pipelines.Collect`.
+ gt_bboxes (list[Tensor]): Each item is the set of ground-truth boxes
+ of one image, in [tl_x, tl_y, br_x, br_y] format.
+ gt_labels (list[Tensor]): Class indices corresponding to each box
+ gt_bboxes_ignore (None | list[Tensor]): Specify which bounding
+ boxes can be ignored when computing the loss.
+ Returns:
+ dict[str, Tensor]: A dictionary of loss components.
+ """
+ x = self.extract_feat(img)
+ with torch.no_grad():
+ teacher_x = self.teacher_model.extract_feat(img)
+ out_teacher = self.teacher_model.bbox_head(teacher_x)
+ losses = self.bbox_head.forward_train(x, out_teacher, img_metas,
+ gt_bboxes, gt_labels,
+ gt_bboxes_ignore)
+ return losses
+
+ def cuda(self, device=None):
+ """Since teacher_model is registered as a plain object, it is necessary
+ to put the teacher model to cuda when calling cuda function."""
+ self.teacher_model.cuda(device=device)
+ return super().cuda(device=device)
+
+ def train(self, mode=True):
+ """Set the same train mode for teacher and student model."""
+ if self.eval_teacher:
+ self.teacher_model.train(False)
+ else:
+ self.teacher_model.train(mode)
+ super().train(mode)
+
+ def __setattr__(self, name, value):
+ """Set attribute, i.e. self.name = value
+
+ This override prevents the teacher model from being registered as an
+ nn.Module. The teacher model is stored as a plain object, so the
+ teacher parameters will not show up when calling the
+ ``self.parameters``, ``self.modules`` or ``self.children`` methods.
+ """
+ if name == 'teacher_model':
+ object.__setattr__(self, name, value)
+ else:
+ super().__setattr__(name, value)
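The `__setattr__` override above is the key trick in this file: by storing `teacher_model` with `object.__setattr__`, the teacher bypasses `nn.Module`'s registration machinery and its weights never appear in `self.parameters()`. The toy example below reproduces just that mechanism with hypothetical classes, outside of mmdet.

```python
import torch.nn as nn


class Student(nn.Module):
    """Toy module that hides a 'teacher' from parameter registration,
    mirroring the __setattr__ trick above (illustrative only)."""

    def __init__(self):
        super().__init__()
        self.head = nn.Linear(4, 2)           # registered normally
        self.teacher_model = nn.Linear(4, 2)  # bypasses registration below

    def __setattr__(self, name, value):
        if name == 'teacher_model':
            object.__setattr__(self, name, value)
        else:
            super().__setattr__(name, value)


if __name__ == '__main__':
    s = Student()
    # Only the student's own head shows up; the teacher is invisible to
    # optimizers built from s.parameters().
    print([n for n, _ in s.named_parameters()])  # ['head.weight', 'head.bias']
```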
diff --git a/annotator/uniformer/mmdet/models/detectors/mask_rcnn.py b/annotator/uniformer/mmdet/models/detectors/mask_rcnn.py
new file mode 100644
index 0000000000000000000000000000000000000000..c15a7733170e059d2825138b3812319915b7cad6
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/detectors/mask_rcnn.py
@@ -0,0 +1,24 @@
+from ..builder import DETECTORS
+from .two_stage import TwoStageDetector
+
+
+@DETECTORS.register_module()
+class MaskRCNN(TwoStageDetector):
+ """Implementation of `Mask R-CNN `_"""
+
+ def __init__(self,
+ backbone,
+ rpn_head,
+ roi_head,
+ train_cfg,
+ test_cfg,
+ neck=None,
+ pretrained=None):
+ super(MaskRCNN, self).__init__(
+ backbone=backbone,
+ neck=neck,
+ rpn_head=rpn_head,
+ roi_head=roi_head,
+ train_cfg=train_cfg,
+ test_cfg=test_cfg,
+ pretrained=pretrained)
diff --git a/annotator/uniformer/mmdet/models/detectors/mask_scoring_rcnn.py b/annotator/uniformer/mmdet/models/detectors/mask_scoring_rcnn.py
new file mode 100644
index 0000000000000000000000000000000000000000..b6252b6e1d234a201725342a5780fade7e21957c
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/detectors/mask_scoring_rcnn.py
@@ -0,0 +1,27 @@
+from ..builder import DETECTORS
+from .two_stage import TwoStageDetector
+
+
+@DETECTORS.register_module()
+class MaskScoringRCNN(TwoStageDetector):
+ """Mask Scoring RCNN.
+
+ https://arxiv.org/abs/1903.00241
+ """
+
+ def __init__(self,
+ backbone,
+ rpn_head,
+ roi_head,
+ train_cfg,
+ test_cfg,
+ neck=None,
+ pretrained=None):
+ super(MaskScoringRCNN, self).__init__(
+ backbone=backbone,
+ neck=neck,
+ rpn_head=rpn_head,
+ roi_head=roi_head,
+ train_cfg=train_cfg,
+ test_cfg=test_cfg,
+ pretrained=pretrained)
diff --git a/annotator/uniformer/mmdet/models/detectors/nasfcos.py b/annotator/uniformer/mmdet/models/detectors/nasfcos.py
new file mode 100644
index 0000000000000000000000000000000000000000..fb0148351546f45a451ef5f7a2a9ef4024e85b7c
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/detectors/nasfcos.py
@@ -0,0 +1,20 @@
+from ..builder import DETECTORS
+from .single_stage import SingleStageDetector
+
+
+@DETECTORS.register_module()
+class NASFCOS(SingleStageDetector):
+ """NAS-FCOS: Fast Neural Architecture Search for Object Detection.
+
+ https://arxiv.org/abs/1906.04423
+ """
+
+ def __init__(self,
+ backbone,
+ neck,
+ bbox_head,
+ train_cfg=None,
+ test_cfg=None,
+ pretrained=None):
+ super(NASFCOS, self).__init__(backbone, neck, bbox_head, train_cfg,
+ test_cfg, pretrained)
diff --git a/annotator/uniformer/mmdet/models/detectors/paa.py b/annotator/uniformer/mmdet/models/detectors/paa.py
new file mode 100644
index 0000000000000000000000000000000000000000..9b4bb5e0939b824d9fef7fc3bd49a0164c29613a
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/detectors/paa.py
@@ -0,0 +1,17 @@
+from ..builder import DETECTORS
+from .single_stage import SingleStageDetector
+
+
+@DETECTORS.register_module()
+class PAA(SingleStageDetector):
+ """Implementation of `PAA `_."""
+
+ def __init__(self,
+ backbone,
+ neck,
+ bbox_head,
+ train_cfg=None,
+ test_cfg=None,
+ pretrained=None):
+ super(PAA, self).__init__(backbone, neck, bbox_head, train_cfg,
+ test_cfg, pretrained)
diff --git a/annotator/uniformer/mmdet/models/detectors/point_rend.py b/annotator/uniformer/mmdet/models/detectors/point_rend.py
new file mode 100644
index 0000000000000000000000000000000000000000..808ef2258ae88301d349db3aaa2711f223e5c971
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/detectors/point_rend.py
@@ -0,0 +1,29 @@
+from ..builder import DETECTORS
+from .two_stage import TwoStageDetector
+
+
+@DETECTORS.register_module()
+class PointRend(TwoStageDetector):
+ """PointRend: Image Segmentation as Rendering
+
+ This detector is the implementation of
+ `PointRend <https://arxiv.org/abs/1912.08193>`_.
+
+ """
+
+ def __init__(self,
+ backbone,
+ rpn_head,
+ roi_head,
+ train_cfg,
+ test_cfg,
+ neck=None,
+ pretrained=None):
+ super(PointRend, self).__init__(
+ backbone=backbone,
+ neck=neck,
+ rpn_head=rpn_head,
+ roi_head=roi_head,
+ train_cfg=train_cfg,
+ test_cfg=test_cfg,
+ pretrained=pretrained)
diff --git a/annotator/uniformer/mmdet/models/detectors/reppoints_detector.py b/annotator/uniformer/mmdet/models/detectors/reppoints_detector.py
new file mode 100644
index 0000000000000000000000000000000000000000..a5f6be31e14488e4b8a006b7142a82c872388d82
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/detectors/reppoints_detector.py
@@ -0,0 +1,22 @@
+from ..builder import DETECTORS
+from .single_stage import SingleStageDetector
+
+
+@DETECTORS.register_module()
+class RepPointsDetector(SingleStageDetector):
+ """RepPoints: Point Set Representation for Object Detection.
+
+ This detector is the implementation of:
+ - RepPoints detector (https://arxiv.org/pdf/1904.11490)
+ """
+
+ def __init__(self,
+ backbone,
+ neck,
+ bbox_head,
+ train_cfg=None,
+ test_cfg=None,
+ pretrained=None):
+ super(RepPointsDetector,
+ self).__init__(backbone, neck, bbox_head, train_cfg, test_cfg,
+ pretrained)
diff --git a/annotator/uniformer/mmdet/models/detectors/retinanet.py b/annotator/uniformer/mmdet/models/detectors/retinanet.py
new file mode 100644
index 0000000000000000000000000000000000000000..41378e8bc74bf9d5cbc7e3e6630bb1e6657049f9
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/detectors/retinanet.py
@@ -0,0 +1,17 @@
+from ..builder import DETECTORS
+from .single_stage import SingleStageDetector
+
+
+@DETECTORS.register_module()
+class RetinaNet(SingleStageDetector):
+ """Implementation of `RetinaNet `_"""
+
+ def __init__(self,
+ backbone,
+ neck,
+ bbox_head,
+ train_cfg=None,
+ test_cfg=None,
+ pretrained=None):
+ super(RetinaNet, self).__init__(backbone, neck, bbox_head, train_cfg,
+ test_cfg, pretrained)
diff --git a/annotator/uniformer/mmdet/models/detectors/rpn.py b/annotator/uniformer/mmdet/models/detectors/rpn.py
new file mode 100644
index 0000000000000000000000000000000000000000..1a77294549d1c3dc7821063c3f3d08bb331fbe59
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/detectors/rpn.py
@@ -0,0 +1,154 @@
+import mmcv
+from mmcv.image import tensor2imgs
+
+from mmdet.core import bbox_mapping
+from ..builder import DETECTORS, build_backbone, build_head, build_neck
+from .base import BaseDetector
+
+
+@DETECTORS.register_module()
+class RPN(BaseDetector):
+ """Implementation of Region Proposal Network."""
+
+ def __init__(self,
+ backbone,
+ neck,
+ rpn_head,
+ train_cfg,
+ test_cfg,
+ pretrained=None):
+ super(RPN, self).__init__()
+ self.backbone = build_backbone(backbone)
+ self.neck = build_neck(neck) if neck is not None else None
+ rpn_train_cfg = train_cfg.rpn if train_cfg is not None else None
+ rpn_head.update(train_cfg=rpn_train_cfg)
+ rpn_head.update(test_cfg=test_cfg.rpn)
+ self.rpn_head = build_head(rpn_head)
+ self.train_cfg = train_cfg
+ self.test_cfg = test_cfg
+ self.init_weights(pretrained=pretrained)
+
+ def init_weights(self, pretrained=None):
+ """Initialize the weights in detector.
+
+ Args:
+ pretrained (str, optional): Path to pre-trained weights.
+ Defaults to None.
+ """
+ super(RPN, self).init_weights(pretrained)
+ self.backbone.init_weights(pretrained=pretrained)
+ if self.with_neck:
+ self.neck.init_weights()
+ self.rpn_head.init_weights()
+
+ def extract_feat(self, img):
+ """Extract features.
+
+ Args:
+ img (torch.Tensor): Image tensor with shape (n, c, h ,w).
+
+ Returns:
+ list[torch.Tensor]: Multi-level features that may have
+ different resolutions.
+ """
+ x = self.backbone(img)
+ if self.with_neck:
+ x = self.neck(x)
+ return x
+
+ def forward_dummy(self, img):
+ """Dummy forward function."""
+ x = self.extract_feat(img)
+ rpn_outs = self.rpn_head(x)
+ return rpn_outs
+
+ def forward_train(self,
+ img,
+ img_metas,
+ gt_bboxes=None,
+ gt_bboxes_ignore=None):
+ """
+ Args:
+ img (Tensor): Input images of shape (N, C, H, W).
+ Typically these should be mean centered and std scaled.
+ img_metas (list[dict]): A List of image info dict where each dict
+ has: 'img_shape', 'scale_factor', 'flip', and may also contain
+ 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
+ For details on the values of these keys see
+ :class:`mmdet.datasets.pipelines.Collect`.
+ gt_bboxes (list[Tensor]): Each item is the set of ground-truth boxes
+ of one image, in [tl_x, tl_y, br_x, br_y] format.
+ gt_bboxes_ignore (None | list[Tensor]): Specify which bounding
+ boxes can be ignored when computing the loss.
+
+ Returns:
+ dict[str, Tensor]: A dictionary of loss components.
+ """
+ if (isinstance(self.train_cfg.rpn, dict)
+ and self.train_cfg.rpn.get('debug', False)):
+ self.rpn_head.debug_imgs = tensor2imgs(img)
+
+ x = self.extract_feat(img)
+ losses = self.rpn_head.forward_train(x, img_metas, gt_bboxes, None,
+ gt_bboxes_ignore)
+ return losses
+
+ def simple_test(self, img, img_metas, rescale=False):
+ """Test function without test time augmentation.
+
+ Args:
+ imgs (list[torch.Tensor]): List of multiple images
+ img_metas (list[dict]): List of image information.
+ rescale (bool, optional): Whether to rescale the results.
+ Defaults to False.
+
+ Returns:
+ list[np.ndarray]: proposals
+ """
+ x = self.extract_feat(img)
+ proposal_list = self.rpn_head.simple_test_rpn(x, img_metas)
+ if rescale:
+ for proposals, meta in zip(proposal_list, img_metas):
+ proposals[:, :4] /= proposals.new_tensor(meta['scale_factor'])
+
+ return [proposal.cpu().numpy() for proposal in proposal_list]
+
+ def aug_test(self, imgs, img_metas, rescale=False):
+ """Test function with test time augmentation.
+
+ Args:
+ imgs (list[torch.Tensor]): List of multiple images
+ img_metas (list[dict]): List of image information.
+ rescale (bool, optional): Whether to rescale the results.
+ Defaults to False.
+
+ Returns:
+ list[np.ndarray]: proposals
+ """
+ proposal_list = self.rpn_head.aug_test_rpn(
+ self.extract_feats(imgs), img_metas)
+ if not rescale:
+ for proposals, img_meta in zip(proposal_list, img_metas[0]):
+ img_shape = img_meta['img_shape']
+ scale_factor = img_meta['scale_factor']
+ flip = img_meta['flip']
+ flip_direction = img_meta['flip_direction']
+ proposals[:, :4] = bbox_mapping(proposals[:, :4], img_shape,
+ scale_factor, flip,
+ flip_direction)
+ return [proposal.cpu().numpy() for proposal in proposal_list]
+
+ def show_result(self, data, result, top_k=20, **kwargs):
+ """Show RPN proposals on the image.
+
+ Args:
+ data (str or np.ndarray): Image filename or loaded image.
+ result (Tensor or tuple): The results to draw over `img`
+ bbox_result or (bbox_result, segm_result).
+ top_k (int): Plot the first k bboxes only
+ if set positive. Default: 20
+
+ Returns:
+ np.ndarray: The image with bboxes drawn on it.
+ """
+ mmcv.imshow_bboxes(data, result, top_k=top_k)
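In `simple_test`, proposals come back in the resized-image coordinate frame; when `rescale=True` the four box coordinates are divided by the per-axis `scale_factor` so they match the original image. Below is a small standalone sketch of that step, with dummy proposal values and a hypothetical meta dict.

```python
import torch

# Hypothetical stand-in for one image's RPN output: proposals are
# (num_props, 5) with [x1, y1, x2, y2, score] in the resized image frame.
proposals = torch.tensor([[100., 50., 300., 200., 0.9],
                          [ 20., 10.,  80.,  60., 0.7]])
# scale_factor as mmdet stores it: (w_scale, h_scale, w_scale, h_scale)
meta = {'scale_factor': [2.0, 2.0, 2.0, 2.0]}

# Same operation as in RPN.simple_test when rescale=True: only the 4 box
# coordinates are divided, the objectness score is left untouched.
proposals[:, :4] /= proposals.new_tensor(meta['scale_factor'])
print(proposals)
```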
diff --git a/annotator/uniformer/mmdet/models/detectors/scnet.py b/annotator/uniformer/mmdet/models/detectors/scnet.py
new file mode 100644
index 0000000000000000000000000000000000000000..04a2347c4ec1efcbfda59a134cddd8bde620d983
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/detectors/scnet.py
@@ -0,0 +1,10 @@
+from ..builder import DETECTORS
+from .cascade_rcnn import CascadeRCNN
+
+
+@DETECTORS.register_module()
+class SCNet(CascadeRCNN):
+ """Implementation of `SCNet `_"""
+
+ def __init__(self, **kwargs):
+ super(SCNet, self).__init__(**kwargs)
diff --git a/annotator/uniformer/mmdet/models/detectors/single_stage.py b/annotator/uniformer/mmdet/models/detectors/single_stage.py
new file mode 100644
index 0000000000000000000000000000000000000000..5172bdbd945889445eeaa18398c9f0118bb845ad
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/detectors/single_stage.py
@@ -0,0 +1,154 @@
+import torch
+import torch.nn as nn
+
+from mmdet.core import bbox2result
+from ..builder import DETECTORS, build_backbone, build_head, build_neck
+from .base import BaseDetector
+
+
+@DETECTORS.register_module()
+class SingleStageDetector(BaseDetector):
+ """Base class for single-stage detectors.
+
+ Single-stage detectors directly and densely predict bounding boxes on the
+ output features of the backbone+neck.
+ """
+
+ def __init__(self,
+ backbone,
+ neck=None,
+ bbox_head=None,
+ train_cfg=None,
+ test_cfg=None,
+ pretrained=None):
+ super(SingleStageDetector, self).__init__()
+ self.backbone = build_backbone(backbone)
+ if neck is not None:
+ self.neck = build_neck(neck)
+ bbox_head.update(train_cfg=train_cfg)
+ bbox_head.update(test_cfg=test_cfg)
+ self.bbox_head = build_head(bbox_head)
+ self.train_cfg = train_cfg
+ self.test_cfg = test_cfg
+ self.init_weights(pretrained=pretrained)
+
+ def init_weights(self, pretrained=None):
+ """Initialize the weights in detector.
+
+ Args:
+ pretrained (str, optional): Path to pre-trained weights.
+ Defaults to None.
+ """
+ super(SingleStageDetector, self).init_weights(pretrained)
+ self.backbone.init_weights(pretrained=pretrained)
+ if self.with_neck:
+ if isinstance(self.neck, nn.Sequential):
+ for m in self.neck:
+ m.init_weights()
+ else:
+ self.neck.init_weights()
+ self.bbox_head.init_weights()
+
+ def extract_feat(self, img):
+ """Directly extract features from the backbone+neck."""
+ x = self.backbone(img)
+ if self.with_neck:
+ x = self.neck(x)
+ return x
+
+ def forward_dummy(self, img):
+ """Used for computing network flops.
+
+ See `mmdetection/tools/analysis_tools/get_flops.py`
+ """
+ x = self.extract_feat(img)
+ outs = self.bbox_head(x)
+ return outs
+
+ def forward_train(self,
+ img,
+ img_metas,
+ gt_bboxes,
+ gt_labels,
+ gt_bboxes_ignore=None):
+ """
+ Args:
+ img (Tensor): Input images of shape (N, C, H, W).
+ Typically these should be mean centered and std scaled.
+ img_metas (list[dict]): A List of image info dict where each dict
+ has: 'img_shape', 'scale_factor', 'flip', and may also contain
+ 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
+ For details on the values of these keys see
+ :class:`mmdet.datasets.pipelines.Collect`.
+ gt_bboxes (list[Tensor]): Each item is the set of ground-truth boxes
+ of one image, in [tl_x, tl_y, br_x, br_y] format.
+ gt_labels (list[Tensor]): Class indices corresponding to each box
+ gt_bboxes_ignore (None | list[Tensor]): Specify which bounding
+ boxes can be ignored when computing the loss.
+
+ Returns:
+ dict[str, Tensor]: A dictionary of loss components.
+ """
+ super(SingleStageDetector, self).forward_train(img, img_metas)
+ x = self.extract_feat(img)
+ losses = self.bbox_head.forward_train(x, img_metas, gt_bboxes,
+ gt_labels, gt_bboxes_ignore)
+ return losses
+
+ def simple_test(self, img, img_metas, rescale=False):
+ """Test function without test time augmentation.
+
+ Args:
+ imgs (list[torch.Tensor]): List of multiple images
+ img_metas (list[dict]): List of image information.
+ rescale (bool, optional): Whether to rescale the results.
+ Defaults to False.
+
+ Returns:
+ list[list[np.ndarray]]: BBox results of each image and classes.
+ The outer list corresponds to each image. The inner list
+ corresponds to each class.
+ """
+ x = self.extract_feat(img)
+ outs = self.bbox_head(x)
+ # get origin input shape to support onnx dynamic shape
+ if torch.onnx.is_in_onnx_export():
+ # get shape as tensor
+ img_shape = torch._shape_as_tensor(img)[2:]
+ img_metas[0]['img_shape_for_onnx'] = img_shape
+ bbox_list = self.bbox_head.get_bboxes(
+ *outs, img_metas, rescale=rescale)
+ # skip post-processing when exporting to ONNX
+ if torch.onnx.is_in_onnx_export():
+ return bbox_list
+
+ bbox_results = [
+ bbox2result(det_bboxes, det_labels, self.bbox_head.num_classes)
+ for det_bboxes, det_labels in bbox_list
+ ]
+ return bbox_results
+
+ def aug_test(self, imgs, img_metas, rescale=False):
+ """Test function with test time augmentation.
+
+ Args:
+ imgs (list[Tensor]): the outer list indicates test-time
+ augmentations and inner Tensor should have a shape NxCxHxW,
+ which contains all images in the batch.
+ img_metas (list[list[dict]]): the outer list indicates test-time
+ augs (multiscale, flip, etc.) and the inner list indicates
+ images in a batch. each dict has image information.
+ rescale (bool, optional): Whether to rescale the results.
+ Defaults to False.
+
+ Returns:
+ list[list[np.ndarray]]: BBox results of each image and classes.
+ The outer list corresponds to each image. The inner list
+ corresponds to each class.
+ """
+ assert hasattr(self.bbox_head, 'aug_test'), \
+ f'{self.bbox_head.__class__.__name__}' \
+ ' does not support test-time augmentation'
+
+ feats = self.extract_feats(imgs)
+ return [self.bbox_head.aug_test(feats, img_metas, rescale=rescale)]
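`simple_test` finishes by converting the `(det_bboxes, det_labels)` pairs into a per-class list via `mmdet.core.bbox2result`. The sketch below re-implements that conversion in a few lines, only to make the returned layout concrete; it assumes the usual `(N, 5)` `[x1, y1, x2, y2, score]` detection format and is not the library function itself.

```python
import numpy as np
import torch


def bbox2result_sketch(bboxes, labels, num_classes):
    """Rough re-implementation of mmdet.core.bbox2result for illustration:
    split (N, 5) detections into a per-class list of numpy arrays."""
    if bboxes.shape[0] == 0:
        return [np.zeros((0, 5), dtype=np.float32) for _ in range(num_classes)]
    bboxes, labels = bboxes.numpy(), labels.numpy()
    return [bboxes[labels == i, :] for i in range(num_classes)]


if __name__ == '__main__':
    det_bboxes = torch.tensor([[0., 0., 10., 10., 0.9],
                               [5., 5., 20., 20., 0.8]])
    det_labels = torch.tensor([0, 2])
    result = bbox2result_sketch(det_bboxes, det_labels, num_classes=3)
    print([r.shape for r in result])  # [(1, 5), (0, 5), (1, 5)]
```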
diff --git a/annotator/uniformer/mmdet/models/detectors/sparse_rcnn.py b/annotator/uniformer/mmdet/models/detectors/sparse_rcnn.py
new file mode 100644
index 0000000000000000000000000000000000000000..0dbd0250f189e610a0bbc72b0dab2559e26857ae
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/detectors/sparse_rcnn.py
@@ -0,0 +1,110 @@
+from ..builder import DETECTORS
+from .two_stage import TwoStageDetector
+
+
+@DETECTORS.register_module()
+class SparseRCNN(TwoStageDetector):
+ r"""Implementation of `Sparse R-CNN: End-to-End Object Detection with
+ Learnable Proposals `_"""
+
+ def __init__(self, *args, **kwargs):
+ super(SparseRCNN, self).__init__(*args, **kwargs)
+ assert self.with_rpn, 'Sparse R-CNN does not support external proposals'
+
+ def forward_train(self,
+ img,
+ img_metas,
+ gt_bboxes,
+ gt_labels,
+ gt_bboxes_ignore=None,
+ gt_masks=None,
+ proposals=None,
+ **kwargs):
+ """Forward function of SparseR-CNN in train stage.
+
+ Args:
+ img (Tensor): of shape (N, C, H, W) encoding input images.
+ Typically these should be mean centered and std scaled.
+ img_metas (list[dict]): list of image info dict where each dict
+ has: 'img_shape', 'scale_factor', 'flip', and may also contain
+ 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
+ For details on the values of these keys see
+ :class:`mmdet.datasets.pipelines.Collect`.
+ gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
+ shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
+ gt_labels (list[Tensor]): class indices corresponding to each box
+ gt_bboxes_ignore (None | list[Tensor): specify which bounding
+ boxes can be ignored when computing the loss.
+ gt_masks (list[Tensor], optional): Segmentation masks for
+ each box; not supported by this architecture.
+ proposals (List[Tensor], optional): override rpn proposals with
+ custom proposals. Use when `with_rpn` is False.
+
+ Returns:
+ dict[str, Tensor]: a dictionary of loss components
+ """
+
+ assert proposals is None, 'Sparse R-CNN does not support' \
+ ' external proposals'
+ assert gt_masks is None, 'Sparse R-CNN does not support instance segmentation'
+
+ x = self.extract_feat(img)
+ proposal_boxes, proposal_features, imgs_whwh = \
+ self.rpn_head.forward_train(x, img_metas)
+ roi_losses = self.roi_head.forward_train(
+ x,
+ proposal_boxes,
+ proposal_features,
+ img_metas,
+ gt_bboxes,
+ gt_labels,
+ gt_bboxes_ignore=gt_bboxes_ignore,
+ gt_masks=gt_masks,
+ imgs_whwh=imgs_whwh)
+ return roi_losses
+
+ def simple_test(self, img, img_metas, rescale=False):
+ """Test function without test time augmentation.
+
+ Args:
+ imgs (list[torch.Tensor]): List of multiple images
+ img_metas (list[dict]): List of image information.
+ rescale (bool): Whether to rescale the results.
+ Defaults to False.
+
+ Returns:
+ list[list[np.ndarray]]: BBox results of each image and classes.
+ The outer list corresponds to each image. The inner list
+ corresponds to each class.
+ """
+ x = self.extract_feat(img)
+ proposal_boxes, proposal_features, imgs_whwh = \
+ self.rpn_head.simple_test_rpn(x, img_metas)
+ bbox_results = self.roi_head.simple_test(
+ x,
+ proposal_boxes,
+ proposal_features,
+ img_metas,
+ imgs_whwh=imgs_whwh,
+ rescale=rescale)
+ return bbox_results
+
+ def forward_dummy(self, img):
+ """Used for computing network flops.
+
+ See `mmdetection/tools/analysis_tools/get_flops.py`
+ """
+ # backbone
+ x = self.extract_feat(img)
+ # rpn
+ num_imgs = len(img)
+ dummy_img_metas = [
+ dict(img_shape=(800, 1333, 3)) for _ in range(num_imgs)
+ ]
+ proposal_boxes, proposal_features, imgs_whwh = \
+ self.rpn_head.simple_test_rpn(x, dummy_img_metas)
+ # roi_head
+ roi_outs = self.roi_head.forward_dummy(x, proposal_boxes,
+ proposal_features,
+ dummy_img_metas)
+ return roi_outs
diff --git a/annotator/uniformer/mmdet/models/detectors/trident_faster_rcnn.py b/annotator/uniformer/mmdet/models/detectors/trident_faster_rcnn.py
new file mode 100644
index 0000000000000000000000000000000000000000..f0fd80d41407162df71ba5349fc659d4713cdb6e
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/detectors/trident_faster_rcnn.py
@@ -0,0 +1,66 @@
+from ..builder import DETECTORS
+from .faster_rcnn import FasterRCNN
+
+
+@DETECTORS.register_module()
+class TridentFasterRCNN(FasterRCNN):
+ """Implementation of `TridentNet `_"""
+
+ def __init__(self,
+ backbone,
+ rpn_head,
+ roi_head,
+ train_cfg,
+ test_cfg,
+ neck=None,
+ pretrained=None):
+
+ super(TridentFasterRCNN, self).__init__(
+ backbone=backbone,
+ neck=neck,
+ rpn_head=rpn_head,
+ roi_head=roi_head,
+ train_cfg=train_cfg,
+ test_cfg=test_cfg,
+ pretrained=pretrained)
+ assert self.backbone.num_branch == self.roi_head.num_branch
+ assert self.backbone.test_branch_idx == self.roi_head.test_branch_idx
+ self.num_branch = self.backbone.num_branch
+ self.test_branch_idx = self.backbone.test_branch_idx
+
+ def simple_test(self, img, img_metas, proposals=None, rescale=False):
+ """Test without augmentation."""
+ assert self.with_bbox, 'Bbox head must be implemented.'
+ x = self.extract_feat(img)
+ if proposals is None:
+ num_branch = (self.num_branch if self.test_branch_idx == -1 else 1)
+ trident_img_metas = img_metas * num_branch
+ proposal_list = self.rpn_head.simple_test_rpn(x, trident_img_metas)
+ else:
+ proposal_list, trident_img_metas = proposals, img_metas
+
+ return self.roi_head.simple_test(
+ x, proposal_list, trident_img_metas, rescale=rescale)
+
+ def aug_test(self, imgs, img_metas, rescale=False):
+ """Test with augmentations.
+
+ If rescale is False, then returned bboxes and masks will fit the scale
+ of imgs[0].
+ """
+ x = self.extract_feats(imgs)
+ num_branch = (self.num_branch if self.test_branch_idx == -1 else 1)
+ trident_img_metas = [img_meta * num_branch for img_meta in img_metas]
+ proposal_list = self.rpn_head.aug_test_rpn(x, trident_img_metas)
+ return self.roi_head.aug_test(
+ x, proposal_list, img_metas, rescale=rescale)
+
+ def forward_train(self, img, img_metas, gt_bboxes, gt_labels, **kwargs):
+ """make copies of img and gts to fit multi-branch."""
+ trident_gt_bboxes = tuple(gt_bboxes * self.num_branch)
+ trident_gt_labels = tuple(gt_labels * self.num_branch)
+ trident_img_metas = tuple(img_metas * self.num_branch)
+
+ return super(TridentFasterRCNN,
+ self).forward_train(img, trident_img_metas,
+ trident_gt_bboxes, trident_gt_labels)
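`forward_train` adapts a normal Faster R-CNN batch to TridentNet's multi-branch heads simply by repeating the per-image ground-truth lists `num_branch` times. The snippet below shows what that list replication does on dummy tensors.

```python
import torch

num_branch = 3
# One image in the batch, two ground-truth boxes (dummy values).
gt_bboxes = [torch.tensor([[0., 0., 10., 10.],
                           [5., 5., 20., 20.]])]
gt_labels = [torch.tensor([1, 3])]

# Same list repetition as above: the single per-image entry is duplicated
# once per branch, so the RPN/RoI heads see num_branch "images".
trident_gt_bboxes = tuple(gt_bboxes * num_branch)
trident_gt_labels = tuple(gt_labels * num_branch)
print(len(trident_gt_bboxes), len(trident_gt_labels))  # 3 3
```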
diff --git a/annotator/uniformer/mmdet/models/detectors/two_stage.py b/annotator/uniformer/mmdet/models/detectors/two_stage.py
new file mode 100644
index 0000000000000000000000000000000000000000..ba5bdde980dc0cd76375455c9c7ffaae4b25531e
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/detectors/two_stage.py
@@ -0,0 +1,215 @@
+import torch
+import torch.nn as nn
+
+# from mmdet.core import bbox2result, bbox2roi, build_assigner, build_sampler
+from ..builder import DETECTORS, build_backbone, build_head, build_neck
+from .base import BaseDetector
+
+
+@DETECTORS.register_module()
+class TwoStageDetector(BaseDetector):
+ """Base class for two-stage detectors.
+
+ Two-stage detectors typically consist of a region proposal network and a
+ task-specific regression head.
+ """
+
+ def __init__(self,
+ backbone,
+ neck=None,
+ rpn_head=None,
+ roi_head=None,
+ train_cfg=None,
+ test_cfg=None,
+ pretrained=None):
+ super(TwoStageDetector, self).__init__()
+ self.backbone = build_backbone(backbone)
+
+ if neck is not None:
+ self.neck = build_neck(neck)
+
+ if rpn_head is not None:
+ rpn_train_cfg = train_cfg.rpn if train_cfg is not None else None
+ rpn_head_ = rpn_head.copy()
+ rpn_head_.update(train_cfg=rpn_train_cfg, test_cfg=test_cfg.rpn)
+ self.rpn_head = build_head(rpn_head_)
+
+ if roi_head is not None:
+ # update train and test cfg here for now
+ # TODO: refactor assigner & sampler
+ rcnn_train_cfg = train_cfg.rcnn if train_cfg is not None else None
+ roi_head.update(train_cfg=rcnn_train_cfg)
+ roi_head.update(test_cfg=test_cfg.rcnn)
+ self.roi_head = build_head(roi_head)
+
+ self.train_cfg = train_cfg
+ self.test_cfg = test_cfg
+
+ self.init_weights(pretrained=pretrained)
+
+ @property
+ def with_rpn(self):
+ """bool: whether the detector has RPN"""
+ return hasattr(self, 'rpn_head') and self.rpn_head is not None
+
+ @property
+ def with_roi_head(self):
+ """bool: whether the detector has a RoI head"""
+ return hasattr(self, 'roi_head') and self.roi_head is not None
+
+ def init_weights(self, pretrained=None):
+ """Initialize the weights in detector.
+
+ Args:
+ pretrained (str, optional): Path to pre-trained weights.
+ Defaults to None.
+ """
+ super(TwoStageDetector, self).init_weights(pretrained)
+ self.backbone.init_weights(pretrained=pretrained)
+ if self.with_neck:
+ if isinstance(self.neck, nn.Sequential):
+ for m in self.neck:
+ m.init_weights()
+ else:
+ self.neck.init_weights()
+ if self.with_rpn:
+ self.rpn_head.init_weights()
+ if self.with_roi_head:
+ self.roi_head.init_weights(pretrained)
+
+ def extract_feat(self, img):
+ """Directly extract features from the backbone+neck."""
+ x = self.backbone(img)
+ if self.with_neck:
+ x = self.neck(x)
+ return x
+
+ def forward_dummy(self, img):
+ """Used for computing network flops.
+
+ See `mmdetection/tools/analysis_tools/get_flops.py`
+ """
+ outs = ()
+ # backbone
+ x = self.extract_feat(img)
+ # rpn
+ if self.with_rpn:
+ rpn_outs = self.rpn_head(x)
+ outs = outs + (rpn_outs, )
+ proposals = torch.randn(1000, 4).to(img.device)
+ # roi_head
+ roi_outs = self.roi_head.forward_dummy(x, proposals)
+ outs = outs + (roi_outs, )
+ return outs
+
+ def forward_train(self,
+ img,
+ img_metas,
+ gt_bboxes,
+ gt_labels,
+ gt_bboxes_ignore=None,
+ gt_masks=None,
+ proposals=None,
+ **kwargs):
+ """
+ Args:
+ img (Tensor): of shape (N, C, H, W) encoding input images.
+ Typically these should be mean centered and std scaled.
+
+ img_metas (list[dict]): list of image info dict where each dict
+ has: 'img_shape', 'scale_factor', 'flip', and may also contain
+ 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
+ For details on the values of these keys see
+ `mmdet/datasets/pipelines/formatting.py:Collect`.
+
+ gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
+ shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
+
+ gt_labels (list[Tensor]): class indices corresponding to each box
+
+ gt_bboxes_ignore (None | list[Tensor]): specify which bounding
+ boxes can be ignored when computing the loss.
+
+ gt_masks (None | Tensor) : true segmentation masks for each box
+ used if the architecture supports a segmentation task.
+
+ proposals : override rpn proposals with custom proposals. Use when
+ `with_rpn` is False.
+
+ Returns:
+ dict[str, Tensor]: a dictionary of loss components
+ """
+ x = self.extract_feat(img)
+
+ losses = dict()
+
+ # RPN forward and loss
+ if self.with_rpn:
+ proposal_cfg = self.train_cfg.get('rpn_proposal',
+ self.test_cfg.rpn)
+ rpn_losses, proposal_list = self.rpn_head.forward_train(
+ x,
+ img_metas,
+ gt_bboxes,
+ gt_labels=None,
+ gt_bboxes_ignore=gt_bboxes_ignore,
+ proposal_cfg=proposal_cfg)
+ losses.update(rpn_losses)
+ else:
+ proposal_list = proposals
+
+ roi_losses = self.roi_head.forward_train(x, img_metas, proposal_list,
+ gt_bboxes, gt_labels,
+ gt_bboxes_ignore, gt_masks,
+ **kwargs)
+ losses.update(roi_losses)
+
+ return losses
+
+ async def async_simple_test(self,
+ img,
+ img_meta,
+ proposals=None,
+ rescale=False):
+ """Async test without augmentation."""
+ assert self.with_bbox, 'Bbox head must be implemented.'
+ x = self.extract_feat(img)
+
+ if proposals is None:
+ proposal_list = await self.rpn_head.async_simple_test_rpn(
+ x, img_meta)
+ else:
+ proposal_list = proposals
+
+ return await self.roi_head.async_simple_test(
+ x, proposal_list, img_meta, rescale=rescale)
+
+ def simple_test(self, img, img_metas, proposals=None, rescale=False):
+ """Test without augmentation."""
+ assert self.with_bbox, 'Bbox head must be implemented.'
+
+ x = self.extract_feat(img)
+
+ # get origin input shape to onnx dynamic input shape
+ if torch.onnx.is_in_onnx_export():
+ img_shape = torch._shape_as_tensor(img)[2:]
+ img_metas[0]['img_shape_for_onnx'] = img_shape
+
+ if proposals is None:
+ proposal_list = self.rpn_head.simple_test_rpn(x, img_metas)
+ else:
+ proposal_list = proposals
+
+ return self.roi_head.simple_test(
+ x, proposal_list, img_metas, rescale=rescale)
+
+ def aug_test(self, imgs, img_metas, rescale=False):
+ """Test with augmentations.
+
+ If rescale is False, then returned bboxes and masks will fit the scale
+ of imgs[0].
+ """
+ x = self.extract_feats(imgs)
+ proposal_list = self.rpn_head.aug_test_rpn(x, img_metas)
+ return self.roi_head.aug_test(
+ x, proposal_list, img_metas, rescale=rescale)
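The training path of `TwoStageDetector` is: extract features, run the RPN to get proposals plus RPN losses, then pass the proposals to the RoI head and merge both loss dicts. The framework-free sketch below mirrors that control flow with stub components; every class and loss value in it is a placeholder, not mmdet code.

```python
# Framework-free sketch of the two-stage training flow above; every class
# here is a stub standing in for the real backbone/RPN/RoI head.

class StubBackbone:
    def __call__(self, img):
        return ['feat_lvl0', 'feat_lvl1']          # multi-level features


class StubRPNHead:
    def forward_train(self, feats, img_metas, gt_bboxes):
        rpn_losses = {'loss_rpn_cls': 0.3, 'loss_rpn_bbox': 0.1}
        proposal_list = [['box'] * 1000 for _ in img_metas]
        return rpn_losses, proposal_list


class StubRoIHead:
    def forward_train(self, feats, img_metas, proposals, gt_bboxes, gt_labels):
        return {'loss_cls': 0.5, 'loss_bbox': 0.2}


def forward_train_sketch(img, img_metas, gt_bboxes, gt_labels):
    backbone, rpn_head, roi_head = StubBackbone(), StubRPNHead(), StubRoIHead()
    x = backbone(img)                                   # 1. features
    losses = {}
    rpn_losses, proposal_list = rpn_head.forward_train(  # 2. RPN
        x, img_metas, gt_bboxes)
    losses.update(rpn_losses)
    losses.update(roi_head.forward_train(                # 3. RoI head
        x, img_metas, proposal_list, gt_bboxes, gt_labels))
    return losses


print(forward_train_sketch('img', [{}, {}], None, None))
```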
diff --git a/annotator/uniformer/mmdet/models/detectors/vfnet.py b/annotator/uniformer/mmdet/models/detectors/vfnet.py
new file mode 100644
index 0000000000000000000000000000000000000000..e23f89674c919921219ffd3486587a2d3c318fbd
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/detectors/vfnet.py
@@ -0,0 +1,18 @@
+from ..builder import DETECTORS
+from .single_stage import SingleStageDetector
+
+
+@DETECTORS.register_module()
+class VFNet(SingleStageDetector):
+ """Implementation of `VarifocalNet
+ (VFNet).`_"""
+
+ def __init__(self,
+ backbone,
+ neck,
+ bbox_head,
+ train_cfg=None,
+ test_cfg=None,
+ pretrained=None):
+ super(VFNet, self).__init__(backbone, neck, bbox_head, train_cfg,
+ test_cfg, pretrained)
diff --git a/annotator/uniformer/mmdet/models/detectors/yolact.py b/annotator/uniformer/mmdet/models/detectors/yolact.py
new file mode 100644
index 0000000000000000000000000000000000000000..f32fde0d3dcbb55a405e05df433c4353938a148b
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/detectors/yolact.py
@@ -0,0 +1,146 @@
+import torch
+
+from mmdet.core import bbox2result
+from ..builder import DETECTORS, build_head
+from .single_stage import SingleStageDetector
+
+
+@DETECTORS.register_module()
+class YOLACT(SingleStageDetector):
+ """Implementation of `YOLACT `_"""
+
+ def __init__(self,
+ backbone,
+ neck,
+ bbox_head,
+ segm_head,
+ mask_head,
+ train_cfg=None,
+ test_cfg=None,
+ pretrained=None):
+ super(YOLACT, self).__init__(backbone, neck, bbox_head, train_cfg,
+ test_cfg, pretrained)
+ self.segm_head = build_head(segm_head)
+ self.mask_head = build_head(mask_head)
+ self.init_segm_mask_weights()
+
+ def init_segm_mask_weights(self):
+ """Initialize weights of the YOLACT segm head and YOLACT mask head."""
+ self.segm_head.init_weights()
+ self.mask_head.init_weights()
+
+ def forward_dummy(self, img):
+ """Used for computing network flops.
+
+ See `mmdetection/tools/analysis_tools/get_flops.py`
+ """
+ raise NotImplementedError
+
+ def forward_train(self,
+ img,
+ img_metas,
+ gt_bboxes,
+ gt_labels,
+ gt_bboxes_ignore=None,
+ gt_masks=None):
+ """
+ Args:
+ img (Tensor): of shape (N, C, H, W) encoding input images.
+ Typically these should be mean centered and std scaled.
+ img_metas (list[dict]): list of image info dict where each dict
+ has: 'img_shape', 'scale_factor', 'flip', and may also contain
+ 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
+ For details on the values of these keys see
+ `mmdet/datasets/pipelines/formatting.py:Collect`.
+ gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
+ shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
+ gt_labels (list[Tensor]): class indices corresponding to each box
+ gt_bboxes_ignore (None | list[Tensor]): specify which bounding
+ boxes can be ignored when computing the loss.
+ gt_masks (None | Tensor) : true segmentation masks for each box
+ used if the architecture supports a segmentation task.
+
+ Returns:
+ dict[str, Tensor]: a dictionary of loss components
+ """
+ # convert Bitmap mask or Polygon Mask to Tensor here
+ gt_masks = [
+ gt_mask.to_tensor(dtype=torch.uint8, device=img.device)
+ for gt_mask in gt_masks
+ ]
+
+ x = self.extract_feat(img)
+
+ cls_score, bbox_pred, coeff_pred = self.bbox_head(x)
+ bbox_head_loss_inputs = (cls_score, bbox_pred) + (gt_bboxes, gt_labels,
+ img_metas)
+ losses, sampling_results = self.bbox_head.loss(
+ *bbox_head_loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore)
+
+ segm_head_outs = self.segm_head(x[0])
+ loss_segm = self.segm_head.loss(segm_head_outs, gt_masks, gt_labels)
+ losses.update(loss_segm)
+
+ mask_pred = self.mask_head(x[0], coeff_pred, gt_bboxes, img_metas,
+ sampling_results)
+ loss_mask = self.mask_head.loss(mask_pred, gt_masks, gt_bboxes,
+ img_metas, sampling_results)
+ losses.update(loss_mask)
+
+ # check NaN and Inf
+ for loss_name in losses.keys():
+ assert torch.isfinite(torch.stack(losses[loss_name]))\
+ .all().item(), '{} becomes infinite or NaN!'\
+ .format(loss_name)
+
+ return losses
+
+ def simple_test(self, img, img_metas, rescale=False):
+ """Test function without test time augmentation."""
+ x = self.extract_feat(img)
+
+ cls_score, bbox_pred, coeff_pred = self.bbox_head(x)
+
+ bbox_inputs = (cls_score, bbox_pred,
+ coeff_pred) + (img_metas, self.test_cfg, rescale)
+ det_bboxes, det_labels, det_coeffs = self.bbox_head.get_bboxes(
+ *bbox_inputs)
+ bbox_results = [
+ bbox2result(det_bbox, det_label, self.bbox_head.num_classes)
+ for det_bbox, det_label in zip(det_bboxes, det_labels)
+ ]
+
+ num_imgs = len(img_metas)
+ scale_factors = tuple(meta['scale_factor'] for meta in img_metas)
+ if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes):
+ segm_results = [[[] for _ in range(self.mask_head.num_classes)]
+ for _ in range(num_imgs)]
+ else:
+ # if det_bboxes is rescaled to the original image size, we need to
+ # rescale it back to the testing scale to obtain RoIs.
+ if rescale and not isinstance(scale_factors[0], float):
+ scale_factors = [
+ torch.from_numpy(scale_factor).to(det_bboxes[0].device)
+ for scale_factor in scale_factors
+ ]
+ _bboxes = [
+ det_bboxes[i][:, :4] *
+ scale_factors[i] if rescale else det_bboxes[i][:, :4]
+ for i in range(len(det_bboxes))
+ ]
+ mask_preds = self.mask_head(x[0], det_coeffs, _bboxes, img_metas)
+ # apply mask post-processing to each image individually
+ segm_results = []
+ for i in range(num_imgs):
+ if det_bboxes[i].shape[0] == 0:
+ segm_results.append(
+ [[] for _ in range(self.mask_head.num_classes)])
+ else:
+ segm_result = self.mask_head.get_seg_masks(
+ mask_preds[i], det_labels[i], img_metas[i], rescale)
+ segm_results.append(segm_result)
+ return list(zip(bbox_results, segm_results))
+
+ def aug_test(self, imgs, img_metas, rescale=False):
+ """Test with augmentations."""
+ raise NotImplementedError
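YOLACT's mask head (invoked above via `self.mask_head(...)`) builds instance masks as a linear combination of image-level prototype masks weighted by per-box coefficients, followed by a sigmoid. The toy snippet below shows only that core combination with assumed shapes; the real head also crops each mask to its box and thresholds it.

```python
import torch

# Assumed shapes for illustration: k prototype masks of size HxW and one
# coefficient vector per detected box.
num_protos, H, W, num_dets = 8, 34, 34, 5
prototypes = torch.randn(H, W, num_protos)
coefficients = torch.randn(num_dets, num_protos)

# Core YOLACT assembly step: per-pixel dot product of prototypes and
# per-detection coefficients, squashed to (0, 1).
masks = torch.sigmoid(prototypes @ coefficients.t())  # (H, W, num_dets)
print(masks.shape)
```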
diff --git a/annotator/uniformer/mmdet/models/detectors/yolo.py b/annotator/uniformer/mmdet/models/detectors/yolo.py
new file mode 100644
index 0000000000000000000000000000000000000000..240aab20f857befe25e64114300ebb15a66c6a70
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/detectors/yolo.py
@@ -0,0 +1,18 @@
+# Copyright (c) 2019 Western Digital Corporation or its affiliates.
+
+from ..builder import DETECTORS
+from .single_stage import SingleStageDetector
+
+
+@DETECTORS.register_module()
+class YOLOV3(SingleStageDetector):
+
+ def __init__(self,
+ backbone,
+ neck,
+ bbox_head,
+ train_cfg=None,
+ test_cfg=None,
+ pretrained=None):
+ super(YOLOV3, self).__init__(backbone, neck, bbox_head, train_cfg,
+ test_cfg, pretrained)
diff --git a/annotator/uniformer/mmdet/models/losses/__init__.py b/annotator/uniformer/mmdet/models/losses/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..297aa228277768eb0ba0e8a377f19704d1feeca8
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/losses/__init__.py
@@ -0,0 +1,29 @@
+from .accuracy import Accuracy, accuracy
+from .ae_loss import AssociativeEmbeddingLoss
+from .balanced_l1_loss import BalancedL1Loss, balanced_l1_loss
+from .cross_entropy_loss import (CrossEntropyLoss, binary_cross_entropy,
+ cross_entropy, mask_cross_entropy)
+from .focal_loss import FocalLoss, sigmoid_focal_loss
+from .gaussian_focal_loss import GaussianFocalLoss
+from .gfocal_loss import DistributionFocalLoss, QualityFocalLoss
+from .ghm_loss import GHMC, GHMR
+from .iou_loss import (BoundedIoULoss, CIoULoss, DIoULoss, GIoULoss, IoULoss,
+ bounded_iou_loss, iou_loss)
+from .kd_loss import KnowledgeDistillationKLDivLoss
+from .mse_loss import MSELoss, mse_loss
+from .pisa_loss import carl_loss, isr_p
+from .smooth_l1_loss import L1Loss, SmoothL1Loss, l1_loss, smooth_l1_loss
+from .utils import reduce_loss, weight_reduce_loss, weighted_loss
+from .varifocal_loss import VarifocalLoss
+
+__all__ = [
+ 'accuracy', 'Accuracy', 'cross_entropy', 'binary_cross_entropy',
+ 'mask_cross_entropy', 'CrossEntropyLoss', 'sigmoid_focal_loss',
+ 'FocalLoss', 'smooth_l1_loss', 'SmoothL1Loss', 'balanced_l1_loss',
+ 'BalancedL1Loss', 'mse_loss', 'MSELoss', 'iou_loss', 'bounded_iou_loss',
+ 'IoULoss', 'BoundedIoULoss', 'GIoULoss', 'DIoULoss', 'CIoULoss', 'GHMC',
+ 'GHMR', 'reduce_loss', 'weight_reduce_loss', 'weighted_loss', 'L1Loss',
+ 'l1_loss', 'isr_p', 'carl_loss', 'AssociativeEmbeddingLoss',
+ 'GaussianFocalLoss', 'QualityFocalLoss', 'DistributionFocalLoss',
+ 'VarifocalLoss', 'KnowledgeDistillationKLDivLoss'
+]
diff --git a/annotator/uniformer/mmdet/models/losses/accuracy.py b/annotator/uniformer/mmdet/models/losses/accuracy.py
new file mode 100644
index 0000000000000000000000000000000000000000..789a2240a491289c5801b6690116e8ca657d004f
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/losses/accuracy.py
@@ -0,0 +1,78 @@
+import mmcv
+import torch.nn as nn
+
+
+@mmcv.jit(coderize=True)
+def accuracy(pred, target, topk=1, thresh=None):
+ """Calculate accuracy according to the prediction and target.
+
+ Args:
+ pred (torch.Tensor): The model prediction, shape (N, num_class)
+ target (torch.Tensor): The target of each prediction, shape (N, )
+ topk (int | tuple[int], optional): If any of the top ``topk``
+ predictions matches the target, the prediction is regarded as
+ correct. Defaults to 1.
+ thresh (float, optional): If not None, predictions with scores under
+ this threshold are considered incorrect. Default to None.
+
+ Returns:
+ float | tuple[float]: If the input ``topk`` is a single integer,
+ the function will return a single float as accuracy. If
+ ``topk`` is a tuple containing multiple integers, the
+ function will return a tuple containing accuracies of
+ each ``topk`` number.
+ """
+ assert isinstance(topk, (int, tuple))
+ if isinstance(topk, int):
+ topk = (topk, )
+ return_single = True
+ else:
+ return_single = False
+
+ maxk = max(topk)
+ if pred.size(0) == 0:
+ accu = [pred.new_tensor(0.) for i in range(len(topk))]
+ return accu[0] if return_single else accu
+ assert pred.ndim == 2 and target.ndim == 1
+ assert pred.size(0) == target.size(0)
+ assert maxk <= pred.size(1), \
+ f'maxk {maxk} exceeds pred dimension {pred.size(1)}'
+ pred_value, pred_label = pred.topk(maxk, dim=1)
+ pred_label = pred_label.t() # transpose to shape (maxk, N)
+ correct = pred_label.eq(target.view(1, -1).expand_as(pred_label))
+ if thresh is not None:
+ # Only prediction values larger than thresh are counted as correct
+ correct = correct & (pred_value > thresh).t()
+ res = []
+ for k in topk:
+ correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
+ res.append(correct_k.mul_(100.0 / pred.size(0)))
+ return res[0] if return_single else res
+
+
+class Accuracy(nn.Module):
+
+ def __init__(self, topk=(1, ), thresh=None):
+ """Module to calculate the accuracy.
+
+ Args:
+ topk (tuple, optional): The criterion used to calculate the
+ accuracy. Defaults to (1,).
+ thresh (float, optional): If not None, predictions with scores
+ under this threshold are considered incorrect. Default to None.
+ """
+ super().__init__()
+ self.topk = topk
+ self.thresh = thresh
+
+ def forward(self, pred, target):
+ """Forward function to calculate accuracy.
+
+ Args:
+ pred (torch.Tensor): Prediction of models.
+ target (torch.Tensor): Target for each prediction.
+
+ Returns:
+ tuple[float]: The accuracies under different topk criterions.
+ """
+ return accuracy(pred, target, self.topk, self.thresh)
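A quick way to sanity-check the top-k semantics of `accuracy` is to rerun its core computation on a tiny batch. The snippet below strips the `mmcv.jit` decorator and reproduces the top-k matching by hand, so it runs with plain PyTorch.

```python
import torch

# Toy batch: 3 samples, 4 classes. Samples 0 and 2 are correct at top-1,
# sample 1 only inside the top-2.
pred = torch.tensor([[0.10, 0.70, 0.10, 0.10],
                     [0.60, 0.30, 0.05, 0.05],
                     [0.05, 0.05, 0.10, 0.80]])
target = torch.tensor([1, 1, 3])

maxk = 2
_, pred_label = pred.topk(maxk, dim=1)      # (N, maxk) class indices
pred_label = pred_label.t()                 # (maxk, N), as in accuracy()
correct = pred_label.eq(target.view(1, -1).expand_as(pred_label))
for k in (1, 2):
    correct_k = correct[:k].reshape(-1).float().sum().item()
    print(f'top-{k} accuracy: {100.0 * correct_k / pred.size(0):.1f}%')
# top-1 accuracy: 66.7%, top-2 accuracy: 100.0%
```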
diff --git a/annotator/uniformer/mmdet/models/losses/ae_loss.py b/annotator/uniformer/mmdet/models/losses/ae_loss.py
new file mode 100644
index 0000000000000000000000000000000000000000..cff472aa03080fb49dbb3adba6fec68647a575e6
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/losses/ae_loss.py
@@ -0,0 +1,102 @@
+import mmcv
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+from ..builder import LOSSES
+
+
+@mmcv.jit(derivate=True, coderize=True)
+def ae_loss_per_image(tl_preds, br_preds, match):
+ """Associative Embedding Loss in one image.
+
+ Associative Embedding Loss consists of two parts: a pull loss and a push loss.
+ The pull loss draws embedding vectors from the same object closer to each other.
+ The push loss separates embedding vectors from different objects and makes
+ the gap between them large enough.
+
+ During computation there are usually 3 cases:
+ - no object in image: both pull loss and push loss will be 0.
+ - one object in image: push loss will be 0 and pull loss is computed
+ from the two corners of the only object.
+ - more than one object in image: pull loss is computed from the corner
+ pair of each object, push loss is computed between each object and
+ all other objects. We use a confusion matrix with zeros on the
+ diagonal to compute the push loss.
+
+ Args:
+ tl_preds (tensor): Embedding feature map of the top-left corner.
+ br_preds (tensor): Embedding feature map of the bottom-right corner.
+ match (list): Downsampled coordinate pairs of each ground truth box.
+ """
+
+ tl_list, br_list, me_list = [], [], []
+ if len(match) == 0: # no object in image
+ pull_loss = tl_preds.sum() * 0.
+ push_loss = tl_preds.sum() * 0.
+ else:
+ for m in match:
+ [tl_y, tl_x], [br_y, br_x] = m
+ tl_e = tl_preds[:, tl_y, tl_x].view(-1, 1)
+ br_e = br_preds[:, br_y, br_x].view(-1, 1)
+ tl_list.append(tl_e)
+ br_list.append(br_e)
+ me_list.append((tl_e + br_e) / 2.0)
+
+ tl_list = torch.cat(tl_list)
+ br_list = torch.cat(br_list)
+ me_list = torch.cat(me_list)
+
+ assert tl_list.size() == br_list.size()
+
+ # N is object number in image, M is dimension of embedding vector
+ N, M = tl_list.size()
+
+ pull_loss = (tl_list - me_list).pow(2) + (br_list - me_list).pow(2)
+ pull_loss = pull_loss.sum() / N
+
+ margin = 1 # exp setting of CornerNet, details in section 3.3 of paper
+
+ # confusion matrix of push loss
+ conf_mat = me_list.expand((N, N, M)).permute(1, 0, 2) - me_list
+ conf_weight = 1 - torch.eye(N).type_as(me_list)
+ conf_mat = conf_weight * (margin - conf_mat.sum(-1).abs())
+
+ if N > 1: # more than one object in current image
+ push_loss = F.relu(conf_mat).sum() / (N * (N - 1))
+ else:
+ push_loss = tl_preds.sum() * 0.
+
+ return pull_loss, push_loss
+
+
+@LOSSES.register_module()
+class AssociativeEmbeddingLoss(nn.Module):
+ """Associative Embedding Loss.
+
+ More details can be found in
+ `Associative Embedding <https://arxiv.org/abs/1611.05424>`_ and
+ `CornerNet <https://arxiv.org/abs/1808.01244>`_.
+ Code is modified from `kp_utils.py `_ # noqa: E501
+
+ Args:
+ pull_weight (float): Loss weight for corners from same object.
+ push_weight (float): Loss weight for corners from different object.
+ """
+
+ def __init__(self, pull_weight=0.25, push_weight=0.25):
+ super(AssociativeEmbeddingLoss, self).__init__()
+ self.pull_weight = pull_weight
+ self.push_weight = push_weight
+
+ def forward(self, pred, target, match):
+ """Forward function."""
+ batch = pred.size(0)
+ pull_all, push_all = 0.0, 0.0
+ for i in range(batch):
+ pull, push = ae_loss_per_image(pred[i], target[i], match[i])
+
+ pull_all += self.pull_weight * pull
+ push_all += self.push_weight * push
+
+ return pull_all, push_all
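A small numeric check of the pull/push behaviour: corners of the same object are pulled toward their mean embedding, while object means closer than `margin` are pushed apart. The snippet below redoes that arithmetic for two toy objects with 1-D embeddings; it mirrors the math above rather than calling `ae_loss_per_image`, which needs mmcv.

```python
import torch
import torch.nn.functional as F

# Two toy objects with 1-D corner embeddings.
tl = torch.tensor([1.0, 4.0])   # top-left embedding of each object
br = torch.tensor([1.2, 3.8])   # bottom-right embedding of each object
me = (tl + br) / 2.0            # per-object mean embedding
N, margin = tl.numel(), 1.0

# Pull: corners of the same object are drawn toward their mean.
pull = ((tl - me) ** 2 + (br - me) ** 2).sum() / N

# Push: pairs of object means closer than `margin` are penalised
# (the diagonal, i.e. an object compared with itself, is masked out).
dist = (me.view(N, 1) - me.view(1, N)).abs()
conf_weight = 1 - torch.eye(N)
push = F.relu(conf_weight * (margin - dist)).sum() / (N * (N - 1))

print(f'pull = {pull.item():.3f}, push = {push.item():.3f}')
# The two means (1.1 and 3.9) are farther apart than margin, so push is 0.
```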
diff --git a/annotator/uniformer/mmdet/models/losses/balanced_l1_loss.py b/annotator/uniformer/mmdet/models/losses/balanced_l1_loss.py
new file mode 100644
index 0000000000000000000000000000000000000000..7bcd13ff26dbdc9f6eff8d7c7b5bde742a8d7d1d
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/losses/balanced_l1_loss.py
@@ -0,0 +1,120 @@
+import mmcv
+import numpy as np
+import torch
+import torch.nn as nn
+
+from ..builder import LOSSES
+from .utils import weighted_loss
+
+
+@mmcv.jit(derivate=True, coderize=True)
+@weighted_loss
+def balanced_l1_loss(pred,
+ target,
+ beta=1.0,
+ alpha=0.5,
+ gamma=1.5,
+ reduction='mean'):
+ """Calculate balanced L1 loss.
+
+ Please see the `Libra R-CNN <https://arxiv.org/abs/1904.02701>`_ paper for details.
+
+ Args:
+ pred (torch.Tensor): The prediction with shape (N, 4).
+ target (torch.Tensor): The learning target of the prediction with
+ shape (N, 4).
+ beta (float): The loss is a piecewise function of prediction and target
+ and ``beta`` serves as a threshold for the difference between the
+ prediction and target. Defaults to 1.0.
+ alpha (float): The denominator ``alpha`` in the balanced L1 loss.
+ Defaults to 0.5.
+ gamma (float): The ``gamma`` in the balanced L1 loss.
+ Defaults to 1.5.
+ reduction (str, optional): The method that reduces the loss to a
+ scalar. Options are "none", "mean" and "sum".
+
+ Returns:
+ torch.Tensor: The calculated loss
+ """
+ assert beta > 0
+ assert pred.size() == target.size() and target.numel() > 0
+
+ diff = torch.abs(pred - target)
+ b = np.e**(gamma / alpha) - 1
+ loss = torch.where(
+ diff < beta, alpha / b *
+ (b * diff + 1) * torch.log(b * diff / beta + 1) - alpha * diff,
+ gamma * diff + gamma / b - alpha * beta)
+
+ return loss
+
+
+@LOSSES.register_module()
+class BalancedL1Loss(nn.Module):
+ """Balanced L1 Loss.
+
+ arXiv: https://arxiv.org/pdf/1904.02701.pdf (CVPR 2019)
+
+ Args:
+ alpha (float): The denominator ``alpha`` in the balanced L1 loss.
+ Defaults to 0.5.
+ gamma (float): The ``gamma`` in the balanced L1 loss. Defaults to 1.5.
+ beta (float, optional): The loss is a piecewise function of prediction
+ and target. ``beta`` serves as a threshold for the difference
+ between the prediction and target. Defaults to 1.0.
+ reduction (str, optional): The method that reduces the loss to a
+ scalar. Options are "none", "mean" and "sum".
+ loss_weight (float, optional): The weight of the loss. Defaults to 1.0
+ """
+
+ def __init__(self,
+ alpha=0.5,
+ gamma=1.5,
+ beta=1.0,
+ reduction='mean',
+ loss_weight=1.0):
+ super(BalancedL1Loss, self).__init__()
+ self.alpha = alpha
+ self.gamma = gamma
+ self.beta = beta
+ self.reduction = reduction
+ self.loss_weight = loss_weight
+
+ def forward(self,
+ pred,
+ target,
+ weight=None,
+ avg_factor=None,
+ reduction_override=None,
+ **kwargs):
+ """Forward function of loss.
+
+ Args:
+ pred (torch.Tensor): The prediction with shape (N, 4).
+ target (torch.Tensor): The learning target of the prediction with
+ shape (N, 4).
+ weight (torch.Tensor, optional): Sample-wise loss weight with
+ shape (N, ).
+ avg_factor (int, optional): Average factor that is used to average
+ the loss. Defaults to None.
+ reduction_override (str, optional): The reduction method used to
+ override the original reduction method of the loss.
+ Options are "none", "mean" and "sum".
+
+ Returns:
+ torch.Tensor: The calculated loss
+ """
+ assert reduction_override in (None, 'none', 'mean', 'sum')
+ reduction = (
+ reduction_override if reduction_override else self.reduction)
+ loss_bbox = self.loss_weight * balanced_l1_loss(
+ pred,
+ target,
+ weight,
+ alpha=self.alpha,
+ gamma=self.gamma,
+ beta=self.beta,
+ reduction=reduction,
+ avg_factor=avg_factor,
+ **kwargs)
+ return loss_bbox
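The balanced L1 loss is piecewise: a damped log term for differences below `beta` and a linear term with slope `gamma` above it, with `b` chosen so the two branches meet at `beta`. The short script below evaluates that expression at a few points with the default hyper-parameters, outside of the mmcv decorators.

```python
import numpy as np

alpha, gamma, beta = 0.5, 1.5, 1.0
b = np.e ** (gamma / alpha) - 1   # makes the two branches meet at |diff| = beta


def balanced_l1(diff):
    """The same piecewise expression as balanced_l1_loss, for a scalar diff >= 0."""
    if diff < beta:
        return alpha / b * (b * diff + 1) * np.log(b * diff / beta + 1) - alpha * diff
    return gamma * diff + gamma / b - alpha * beta


for d in (0.1, 0.5, 1.0, 2.0):
    print(f'|pred - target| = {d:.1f} -> loss = {balanced_l1(d):.4f}')
# Small differences are damped by the log branch; large ones grow linearly
# with slope gamma, which is the "balancing" described in Libra R-CNN.
```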
diff --git a/annotator/uniformer/mmdet/models/losses/cross_entropy_loss.py b/annotator/uniformer/mmdet/models/losses/cross_entropy_loss.py
new file mode 100644
index 0000000000000000000000000000000000000000..57994157960eeae5530bd983b8b86263de31d0ff
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/losses/cross_entropy_loss.py
@@ -0,0 +1,214 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+from ..builder import LOSSES
+from .utils import weight_reduce_loss
+
+
+def cross_entropy(pred,
+ label,
+ weight=None,
+ reduction='mean',
+ avg_factor=None,
+ class_weight=None):
+ """Calculate the CrossEntropy loss.
+
+ Args:
+ pred (torch.Tensor): The prediction with shape (N, C), C is the number
+ of classes.
+ label (torch.Tensor): The learning label of the prediction.
+ weight (torch.Tensor, optional): Sample-wise loss weight.
+ reduction (str, optional): The method used to reduce the loss.
+ avg_factor (int, optional): Average factor that is used to average
+ the loss. Defaults to None.
+ class_weight (list[float], optional): The weight for each class.
+
+ Returns:
+ torch.Tensor: The calculated loss
+ """
+ # element-wise losses
+ loss = F.cross_entropy(pred, label, weight=class_weight, reduction='none')
+
+ # apply weights and do the reduction
+ if weight is not None:
+ weight = weight.float()
+ loss = weight_reduce_loss(
+ loss, weight=weight, reduction=reduction, avg_factor=avg_factor)
+
+ return loss
+
+
+def _expand_onehot_labels(labels, label_weights, label_channels):
+ bin_labels = labels.new_full((labels.size(0), label_channels), 0)
+ inds = torch.nonzero(
+ (labels >= 0) & (labels < label_channels), as_tuple=False).squeeze()
+ if inds.numel() > 0:
+ bin_labels[inds, labels[inds]] = 1
+
+ if label_weights is None:
+ bin_label_weights = None
+ else:
+ bin_label_weights = label_weights.view(-1, 1).expand(
+ label_weights.size(0), label_channels)
+
+ return bin_labels, bin_label_weights
+
+
+def binary_cross_entropy(pred,
+ label,
+ weight=None,
+ reduction='mean',
+ avg_factor=None,
+ class_weight=None):
+ """Calculate the binary CrossEntropy loss.
+
+ Args:
+ pred (torch.Tensor): The prediction with shape (N, 1).
+ label (torch.Tensor): The learning label of the prediction.
+ weight (torch.Tensor, optional): Sample-wise loss weight.
+ reduction (str, optional): The method used to reduce the loss.
+ Options are "none", "mean" and "sum".
+ avg_factor (int, optional): Average factor that is used to average
+ the loss. Defaults to None.
+ class_weight (list[float], optional): The weight for each class.
+
+ Returns:
+ torch.Tensor: The calculated loss
+ """
+ if pred.dim() != label.dim():
+ label, weight = _expand_onehot_labels(label, weight, pred.size(-1))
+
+ # weighted element-wise losses
+ if weight is not None:
+ weight = weight.float()
+ loss = F.binary_cross_entropy_with_logits(
+ pred, label.float(), pos_weight=class_weight, reduction='none')
+ # do the reduction for the weighted loss
+ loss = weight_reduce_loss(
+ loss, weight, reduction=reduction, avg_factor=avg_factor)
+
+ return loss
+
+
+def mask_cross_entropy(pred,
+ target,
+ label,
+ reduction='mean',
+ avg_factor=None,
+ class_weight=None):
+ """Calculate the CrossEntropy loss for masks.
+
+ Args:
+ pred (torch.Tensor): The prediction with shape (N, C, *), C is the
+ number of classes. The trailing * indicates arbitrary shape.
+ target (torch.Tensor): The learning label of the prediction.
+ label (torch.Tensor): ``label`` indicates the class label of the mask's
+ corresponding object. It is used to select the mask of the class
+ that the object belongs to when the mask prediction is not
+ class-agnostic.
+ reduction (str, optional): The method used to reduce the loss.
+ Options are "none", "mean" and "sum".
+ avg_factor (int, optional): Average factor that is used to average
+ the loss. Defaults to None.
+ class_weight (list[float], optional): The weight for each class.
+
+ Returns:
+ torch.Tensor: The calculated loss
+
+ Example:
+ >>> N, C = 3, 11
+ >>> H, W = 2, 2
+ >>> pred = torch.randn(N, C, H, W) * 1000
+ >>> target = torch.rand(N, H, W)
+ >>> label = torch.randint(0, C, size=(N,))
+ >>> reduction = 'mean'
+ >>> avg_factor = None
+ >>> class_weights = None
+ >>> loss = mask_cross_entropy(pred, target, label, reduction,
+ >>> avg_factor, class_weights)
+ >>> assert loss.shape == (1,)
+ """
+ # TODO: handle these two reserved arguments
+ assert reduction == 'mean' and avg_factor is None
+ num_rois = pred.size()[0]
+ inds = torch.arange(0, num_rois, dtype=torch.long, device=pred.device)
+ pred_slice = pred[inds, label].squeeze(1)
+ return F.binary_cross_entropy_with_logits(
+ pred_slice, target, weight=class_weight, reduction='mean')[None]
+
+
+@LOSSES.register_module()
+class CrossEntropyLoss(nn.Module):
+
+ def __init__(self,
+ use_sigmoid=False,
+ use_mask=False,
+ reduction='mean',
+ class_weight=None,
+ loss_weight=1.0):
+ """CrossEntropyLoss.
+
+ Args:
+ use_sigmoid (bool, optional): Whether the prediction uses sigmoid
+ instead of softmax. Defaults to False.
+ use_mask (bool, optional): Whether to use mask cross entropy loss.
+ Defaults to False.
+ reduction (str, optional): The method used to reduce the loss.
+ Options are "none", "mean" and "sum". Defaults to 'mean'.
+ class_weight (list[float], optional): Weight of each class.
+ Defaults to None.
+ loss_weight (float, optional): Weight of the loss. Defaults to 1.0.
+ """
+ super(CrossEntropyLoss, self).__init__()
+ assert (use_sigmoid is False) or (use_mask is False)
+ self.use_sigmoid = use_sigmoid
+ self.use_mask = use_mask
+ self.reduction = reduction
+ self.loss_weight = loss_weight
+ self.class_weight = class_weight
+
+ if self.use_sigmoid:
+ self.cls_criterion = binary_cross_entropy
+ elif self.use_mask:
+ self.cls_criterion = mask_cross_entropy
+ else:
+ self.cls_criterion = cross_entropy
+
+ def forward(self,
+ cls_score,
+ label,
+ weight=None,
+ avg_factor=None,
+ reduction_override=None,
+ **kwargs):
+ """Forward function.
+
+ Args:
+ cls_score (torch.Tensor): The prediction.
+ label (torch.Tensor): The learning label of the prediction.
+ weight (torch.Tensor, optional): Sample-wise loss weight.
+ avg_factor (int, optional): Average factor that is used to average
+ the loss. Defaults to None.
+ reduction_override (str, optional): The reduction method used to
+ override self.reduction. Options are "none", "mean" and "sum".
+ Returns:
+ torch.Tensor: The calculated loss
+ """
+ assert reduction_override in (None, 'none', 'mean', 'sum')
+ reduction = (
+ reduction_override if reduction_override else self.reduction)
+ if self.class_weight is not None:
+ class_weight = cls_score.new_tensor(
+ self.class_weight, device=cls_score.device)
+ else:
+ class_weight = None
+ loss_cls = self.loss_weight * self.cls_criterion(
+ cls_score,
+ label,
+ weight,
+ class_weight=class_weight,
+ reduction=reduction,
+ avg_factor=avg_factor,
+ **kwargs)
+ return loss_cls
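When `use_sigmoid=True`, `binary_cross_entropy` first expands integer class labels into one-hot targets whenever the label tensor has fewer dimensions than the prediction. The snippet below reproduces that expansion step (the logic of `_expand_onehot_labels`, with the weight handling omitted) so the resulting target layout is easy to inspect.

```python
import torch


def expand_onehot_labels_sketch(labels, label_channels):
    """Same expansion as _expand_onehot_labels (weights omitted): class
    indices in [0, label_channels) become one-hot rows, others stay zero."""
    bin_labels = labels.new_full((labels.size(0), label_channels), 0)
    inds = torch.nonzero(
        (labels >= 0) & (labels < label_channels), as_tuple=False).squeeze()
    if inds.numel() > 0:
        bin_labels[inds, labels[inds]] = 1
    return bin_labels


labels = torch.tensor([0, 2, 3])   # 3 acts as "background" if label_channels == 3
print(expand_onehot_labels_sketch(labels, label_channels=3))
# tensor([[1, 0, 0],
#         [0, 0, 1],
#         [0, 0, 0]])
```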
diff --git a/annotator/uniformer/mmdet/models/losses/focal_loss.py b/annotator/uniformer/mmdet/models/losses/focal_loss.py
new file mode 100644
index 0000000000000000000000000000000000000000..493907c6984d532175e0351daf2eafe4b9ff0256
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/losses/focal_loss.py
@@ -0,0 +1,181 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from mmcv.ops import sigmoid_focal_loss as _sigmoid_focal_loss
+
+from ..builder import LOSSES
+from .utils import weight_reduce_loss
+
+
+# This method is only for debugging
+def py_sigmoid_focal_loss(pred,
+ target,
+ weight=None,
+ gamma=2.0,
+ alpha=0.25,
+ reduction='mean',
+ avg_factor=None):
+    """PyTorch version of `Focal Loss <https://arxiv.org/abs/1708.02002>`_.
+
+ Args:
+ pred (torch.Tensor): The prediction with shape (N, C), C is the
+ number of classes
+ target (torch.Tensor): The learning label of the prediction.
+ weight (torch.Tensor, optional): Sample-wise loss weight.
+ gamma (float, optional): The gamma for calculating the modulating
+ factor. Defaults to 2.0.
+ alpha (float, optional): A balanced form for Focal Loss.
+ Defaults to 0.25.
+ reduction (str, optional): The method used to reduce the loss into
+ a scalar. Defaults to 'mean'.
+ avg_factor (int, optional): Average factor that is used to average
+ the loss. Defaults to None.
+ """
+ pred_sigmoid = pred.sigmoid()
+ target = target.type_as(pred)
+ pt = (1 - pred_sigmoid) * target + pred_sigmoid * (1 - target)
+ focal_weight = (alpha * target + (1 - alpha) *
+ (1 - target)) * pt.pow(gamma)
+ loss = F.binary_cross_entropy_with_logits(
+ pred, target, reduction='none') * focal_weight
+ if weight is not None:
+ if weight.shape != loss.shape:
+ if weight.size(0) == loss.size(0):
+ # For most cases, weight is of shape (num_priors, ),
+ # which means it does not have the second axis num_class
+ weight = weight.view(-1, 1)
+ else:
+ # Sometimes, weight per anchor per class is also needed. e.g.
+ # in FSAF. But it may be flattened of shape
+ # (num_priors x num_class, ), while loss is still of shape
+ # (num_priors, num_class).
+ assert weight.numel() == loss.numel()
+ weight = weight.view(loss.size(0), -1)
+ assert weight.ndim == loss.ndim
+ loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
+ return loss
+
+
+def sigmoid_focal_loss(pred,
+ target,
+ weight=None,
+ gamma=2.0,
+ alpha=0.25,
+ reduction='mean',
+ avg_factor=None):
+    r"""A wrapper of the CUDA version of `Focal Loss
+    <https://arxiv.org/abs/1708.02002>`_.
+
+ Args:
+ pred (torch.Tensor): The prediction with shape (N, C), C is the number
+ of classes.
+ target (torch.Tensor): The learning label of the prediction.
+ weight (torch.Tensor, optional): Sample-wise loss weight.
+ gamma (float, optional): The gamma for calculating the modulating
+ factor. Defaults to 2.0.
+ alpha (float, optional): A balanced form for Focal Loss.
+ Defaults to 0.25.
+ reduction (str, optional): The method used to reduce the loss into
+ a scalar. Defaults to 'mean'. Options are "none", "mean" and "sum".
+ avg_factor (int, optional): Average factor that is used to average
+ the loss. Defaults to None.
+ """
+ # Function.apply does not accept keyword arguments, so the decorator
+ # "weighted_loss" is not applicable
+ loss = _sigmoid_focal_loss(pred.contiguous(), target, gamma, alpha, None,
+ 'none')
+ if weight is not None:
+ if weight.shape != loss.shape:
+ if weight.size(0) == loss.size(0):
+ # For most cases, weight is of shape (num_priors, ),
+ # which means it does not have the second axis num_class
+ weight = weight.view(-1, 1)
+ else:
+ # Sometimes, weight per anchor per class is also needed. e.g.
+ # in FSAF. But it may be flattened of shape
+ # (num_priors x num_class, ), while loss is still of shape
+ # (num_priors, num_class).
+ assert weight.numel() == loss.numel()
+ weight = weight.view(loss.size(0), -1)
+ assert weight.ndim == loss.ndim
+ loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
+ return loss
+
+
+@LOSSES.register_module()
+class FocalLoss(nn.Module):
+
+ def __init__(self,
+ use_sigmoid=True,
+ gamma=2.0,
+ alpha=0.25,
+ reduction='mean',
+ loss_weight=1.0):
+        """`Focal Loss <https://arxiv.org/abs/1708.02002>`_
+
+ Args:
+            use_sigmoid (bool, optional): Whether the prediction is
+ used for sigmoid or softmax. Defaults to True.
+ gamma (float, optional): The gamma for calculating the modulating
+ factor. Defaults to 2.0.
+ alpha (float, optional): A balanced form for Focal Loss.
+ Defaults to 0.25.
+ reduction (str, optional): The method used to reduce the loss into
+ a scalar. Defaults to 'mean'. Options are "none", "mean" and
+ "sum".
+ loss_weight (float, optional): Weight of loss. Defaults to 1.0.
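+
+        Example:
+            A minimal sketch; the shapes and class count are arbitrary. On
+            CPU tensors the pure-PyTorch ``py_sigmoid_focal_loss`` path is
+            taken.
+
+            >>> import torch
+            >>> loss_fn = FocalLoss(gamma=2.0, alpha=0.25)
+            >>> pred = torch.randn(8, 20)            # classification logits
+            >>> target = torch.randint(0, 20, (8,))  # category indices
+            >>> loss = loss_fn(pred, target)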
+ """
+ super(FocalLoss, self).__init__()
+ assert use_sigmoid is True, 'Only sigmoid focal loss supported now.'
+ self.use_sigmoid = use_sigmoid
+ self.gamma = gamma
+ self.alpha = alpha
+ self.reduction = reduction
+ self.loss_weight = loss_weight
+
+ def forward(self,
+ pred,
+ target,
+ weight=None,
+ avg_factor=None,
+ reduction_override=None):
+ """Forward function.
+
+ Args:
+ pred (torch.Tensor): The prediction.
+ target (torch.Tensor): The learning label of the prediction.
+ weight (torch.Tensor, optional): The weight of loss for each
+ prediction. Defaults to None.
+ avg_factor (int, optional): Average factor that is used to average
+ the loss. Defaults to None.
+ reduction_override (str, optional): The reduction method used to
+ override the original reduction method of the loss.
+ Options are "none", "mean" and "sum".
+
+ Returns:
+ torch.Tensor: The calculated loss
+ """
+ assert reduction_override in (None, 'none', 'mean', 'sum')
+ reduction = (
+ reduction_override if reduction_override else self.reduction)
+ if self.use_sigmoid:
+ if torch.cuda.is_available() and pred.is_cuda:
+ calculate_loss_func = sigmoid_focal_loss
+ else:
+ num_classes = pred.size(1)
+ target = F.one_hot(target, num_classes=num_classes + 1)
+ target = target[:, :num_classes]
+ calculate_loss_func = py_sigmoid_focal_loss
+
+ loss_cls = self.loss_weight * calculate_loss_func(
+ pred,
+ target,
+ weight,
+ gamma=self.gamma,
+ alpha=self.alpha,
+ reduction=reduction,
+ avg_factor=avg_factor)
+
+ else:
+ raise NotImplementedError
+ return loss_cls
diff --git a/annotator/uniformer/mmdet/models/losses/gaussian_focal_loss.py b/annotator/uniformer/mmdet/models/losses/gaussian_focal_loss.py
new file mode 100644
index 0000000000000000000000000000000000000000..e45506a38e8e3c187be8288d0b714cc1ee29cf27
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/losses/gaussian_focal_loss.py
@@ -0,0 +1,91 @@
+import mmcv
+import torch.nn as nn
+
+from ..builder import LOSSES
+from .utils import weighted_loss
+
+
+@mmcv.jit(derivate=True, coderize=True)
+@weighted_loss
+def gaussian_focal_loss(pred, gaussian_target, alpha=2.0, gamma=4.0):
+    """`Focal Loss <https://arxiv.org/abs/1708.02002>`_ for targets in gaussian
+ distribution.
+
+ Args:
+ pred (torch.Tensor): The prediction.
+ gaussian_target (torch.Tensor): The learning target of the prediction
+ in gaussian distribution.
+ alpha (float, optional): A balanced form for Focal Loss.
+ Defaults to 2.0.
+ gamma (float, optional): The gamma for calculating the modulating
+ factor. Defaults to 4.0.
+ """
+ eps = 1e-12
+ pos_weights = gaussian_target.eq(1)
+ neg_weights = (1 - gaussian_target).pow(gamma)
+ pos_loss = -(pred + eps).log() * (1 - pred).pow(alpha) * pos_weights
+ neg_loss = -(1 - pred + eps).log() * pred.pow(alpha) * neg_weights
+ return pos_loss + neg_loss
+
+
+@LOSSES.register_module()
+class GaussianFocalLoss(nn.Module):
+ """GaussianFocalLoss is a variant of focal loss.
+
+    More details can be found in the `paper
+    <https://arxiv.org/abs/1808.01244>`_
+    Code is modified from `kp_utils.py
+    <https://github.com/princeton-vl/CornerNet/blob/master/models/py_utils/kp_utils.py>`_  # noqa: E501
+    Please note that the target in GaussianFocalLoss is a gaussian heatmap,
+ not 0/1 binary target.
+
+ Args:
+ alpha (float): Power of prediction.
+ gamma (float): Power of target for negative samples.
+ reduction (str): Options are "none", "mean" and "sum".
+ loss_weight (float): Loss weight of current loss.
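+
+    Example:
+        A minimal sketch; the heatmap size is arbitrary and ``pred`` is
+        assumed to already be a probability map (e.g. after a sigmoid).
+
+        >>> import torch
+        >>> loss_fn = GaussianFocalLoss()
+        >>> pred = torch.rand(2, 1, 8, 8).clamp(1e-4, 1 - 1e-4)
+        >>> target = torch.zeros(2, 1, 8, 8)
+        >>> target[0, 0, 4, 4] = 1.0  # gaussian peak at an object center
+        >>> loss = loss_fn(pred, target)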
+ """
+
+ def __init__(self,
+ alpha=2.0,
+ gamma=4.0,
+ reduction='mean',
+ loss_weight=1.0):
+ super(GaussianFocalLoss, self).__init__()
+ self.alpha = alpha
+ self.gamma = gamma
+ self.reduction = reduction
+ self.loss_weight = loss_weight
+
+ def forward(self,
+ pred,
+ target,
+ weight=None,
+ avg_factor=None,
+ reduction_override=None):
+ """Forward function.
+
+ Args:
+ pred (torch.Tensor): The prediction.
+ target (torch.Tensor): The learning target of the prediction
+ in gaussian distribution.
+ weight (torch.Tensor, optional): The weight of loss for each
+ prediction. Defaults to None.
+ avg_factor (int, optional): Average factor that is used to average
+ the loss. Defaults to None.
+ reduction_override (str, optional): The reduction method used to
+ override the original reduction method of the loss.
+ Defaults to None.
+ """
+ assert reduction_override in (None, 'none', 'mean', 'sum')
+ reduction = (
+ reduction_override if reduction_override else self.reduction)
+ loss_reg = self.loss_weight * gaussian_focal_loss(
+ pred,
+ target,
+ weight,
+ alpha=self.alpha,
+ gamma=self.gamma,
+ reduction=reduction,
+ avg_factor=avg_factor)
+ return loss_reg
diff --git a/annotator/uniformer/mmdet/models/losses/gfocal_loss.py b/annotator/uniformer/mmdet/models/losses/gfocal_loss.py
new file mode 100644
index 0000000000000000000000000000000000000000..9d3b8833dc50c76f6741db5341dbf8da3402d07b
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/losses/gfocal_loss.py
@@ -0,0 +1,188 @@
+import mmcv
+import torch.nn as nn
+import torch.nn.functional as F
+
+from ..builder import LOSSES
+from .utils import weighted_loss
+
+
+@mmcv.jit(derivate=True, coderize=True)
+@weighted_loss
+def quality_focal_loss(pred, target, beta=2.0):
+ r"""Quality Focal Loss (QFL) is from `Generalized Focal Loss: Learning
+ Qualified and Distributed Bounding Boxes for Dense Object Detection
+    <https://arxiv.org/abs/2006.04388>`_.
+
+ Args:
+ pred (torch.Tensor): Predicted joint representation of classification
+ and quality (IoU) estimation with shape (N, C), C is the number of
+ classes.
+ target (tuple([torch.Tensor])): Target category label with shape (N,)
+ and target quality label with shape (N,).
+ beta (float): The beta parameter for calculating the modulating factor.
+ Defaults to 2.0.
+
+ Returns:
+ torch.Tensor: Loss tensor with shape (N,).
+ """
+ assert len(target) == 2, """target for QFL must be a tuple of two elements,
+ including category label and quality label, respectively"""
+ # label denotes the category id, score denotes the quality score
+ label, score = target
+
+ # negatives are supervised by 0 quality score
+ pred_sigmoid = pred.sigmoid()
+ scale_factor = pred_sigmoid
+ zerolabel = scale_factor.new_zeros(pred.shape)
+ loss = F.binary_cross_entropy_with_logits(
+ pred, zerolabel, reduction='none') * scale_factor.pow(beta)
+
+ # FG cat_id: [0, num_classes -1], BG cat_id: num_classes
+ bg_class_ind = pred.size(1)
+ pos = ((label >= 0) & (label < bg_class_ind)).nonzero().squeeze(1)
+ pos_label = label[pos].long()
+ # positives are supervised by bbox quality (IoU) score
+ scale_factor = score[pos] - pred_sigmoid[pos, pos_label]
+ loss[pos, pos_label] = F.binary_cross_entropy_with_logits(
+ pred[pos, pos_label], score[pos],
+ reduction='none') * scale_factor.abs().pow(beta)
+
+ loss = loss.sum(dim=1, keepdim=False)
+ return loss
+
+
+@mmcv.jit(derivate=True, coderize=True)
+@weighted_loss
+def distribution_focal_loss(pred, label):
+ r"""Distribution Focal Loss (DFL) is from `Generalized Focal Loss: Learning
+ Qualified and Distributed Bounding Boxes for Dense Object Detection
+    <https://arxiv.org/abs/2006.04388>`_.
+
+ Args:
+ pred (torch.Tensor): Predicted general distribution of bounding boxes
+ (before softmax) with shape (N, n+1), n is the max value of the
+ integral set `{0, ..., n}` in paper.
+ label (torch.Tensor): Target distance label for bounding boxes with
+ shape (N,).
+
+ Returns:
+ torch.Tensor: Loss tensor with shape (N,).
+ """
+ dis_left = label.long()
+ dis_right = dis_left + 1
+ weight_left = dis_right.float() - label
+ weight_right = label - dis_left.float()
+ loss = F.cross_entropy(pred, dis_left, reduction='none') * weight_left \
+ + F.cross_entropy(pred, dis_right, reduction='none') * weight_right
+ return loss
+
+
+@LOSSES.register_module()
+class QualityFocalLoss(nn.Module):
+ r"""Quality Focal Loss (QFL) is a variant of `Generalized Focal Loss:
+ Learning Qualified and Distributed Bounding Boxes for Dense Object
+    Detection <https://arxiv.org/abs/2006.04388>`_.
+
+ Args:
+ use_sigmoid (bool): Whether sigmoid operation is conducted in QFL.
+ Defaults to True.
+ beta (float): The beta parameter for calculating the modulating factor.
+ Defaults to 2.0.
+ reduction (str): Options are "none", "mean" and "sum".
+ loss_weight (float): Loss weight of current loss.
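+
+    Example:
+        An illustrative sketch only; shapes are arbitrary. The target is a
+        tuple of (category label, quality score), where a label equal to the
+        number of classes denotes background.
+
+        >>> import torch
+        >>> loss_fn = QualityFocalLoss(beta=2.0)
+        >>> pred = torch.randn(8, 20)           # joint cls-quality logits
+        >>> label = torch.randint(0, 21, (8,))  # 20 means background here
+        >>> score = torch.rand(8)               # e.g. IoU of the matched box
+        >>> loss = loss_fn(pred, (label, score))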
+ """
+
+ def __init__(self,
+ use_sigmoid=True,
+ beta=2.0,
+ reduction='mean',
+ loss_weight=1.0):
+ super(QualityFocalLoss, self).__init__()
+ assert use_sigmoid is True, 'Only sigmoid in QFL supported now.'
+ self.use_sigmoid = use_sigmoid
+ self.beta = beta
+ self.reduction = reduction
+ self.loss_weight = loss_weight
+
+ def forward(self,
+ pred,
+ target,
+ weight=None,
+ avg_factor=None,
+ reduction_override=None):
+ """Forward function.
+
+ Args:
+ pred (torch.Tensor): Predicted joint representation of
+ classification and quality (IoU) estimation with shape (N, C),
+ C is the number of classes.
+ target (tuple([torch.Tensor])): Target category label with shape
+ (N,) and target quality label with shape (N,).
+ weight (torch.Tensor, optional): The weight of loss for each
+ prediction. Defaults to None.
+ avg_factor (int, optional): Average factor that is used to average
+ the loss. Defaults to None.
+ reduction_override (str, optional): The reduction method used to
+ override the original reduction method of the loss.
+ Defaults to None.
+ """
+ assert reduction_override in (None, 'none', 'mean', 'sum')
+ reduction = (
+ reduction_override if reduction_override else self.reduction)
+ if self.use_sigmoid:
+ loss_cls = self.loss_weight * quality_focal_loss(
+ pred,
+ target,
+ weight,
+ beta=self.beta,
+ reduction=reduction,
+ avg_factor=avg_factor)
+ else:
+ raise NotImplementedError
+ return loss_cls
+
+
+@LOSSES.register_module()
+class DistributionFocalLoss(nn.Module):
+ r"""Distribution Focal Loss (DFL) is a variant of `Generalized Focal Loss:
+ Learning Qualified and Distributed Bounding Boxes for Dense Object
+    Detection <https://arxiv.org/abs/2006.04388>`_.
+
+ Args:
+ reduction (str): Options are `'none'`, `'mean'` and `'sum'`.
+ loss_weight (float): Loss weight of current loss.
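+
+    Example:
+        A minimal sketch; the integral set size and targets are arbitrary.
+        The target is a continuous value inside the integral set, so DFL
+        supervises the two nearest discrete bins.
+
+        >>> import torch
+        >>> loss_fn = DistributionFocalLoss()
+        >>> pred = torch.randn(6, 17)   # logits over {0, ..., 16}
+        >>> label = torch.rand(6) * 15  # continuous targets in [0, 15)
+        >>> loss = loss_fn(pred, label)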
+ """
+
+ def __init__(self, reduction='mean', loss_weight=1.0):
+ super(DistributionFocalLoss, self).__init__()
+ self.reduction = reduction
+ self.loss_weight = loss_weight
+
+ def forward(self,
+ pred,
+ target,
+ weight=None,
+ avg_factor=None,
+ reduction_override=None):
+ """Forward function.
+
+ Args:
+ pred (torch.Tensor): Predicted general distribution of bounding
+ boxes (before softmax) with shape (N, n+1), n is the max value
+ of the integral set `{0, ..., n}` in paper.
+ target (torch.Tensor): Target distance label for bounding boxes
+ with shape (N,).
+ weight (torch.Tensor, optional): The weight of loss for each
+ prediction. Defaults to None.
+ avg_factor (int, optional): Average factor that is used to average
+ the loss. Defaults to None.
+ reduction_override (str, optional): The reduction method used to
+ override the original reduction method of the loss.
+ Defaults to None.
+ """
+ assert reduction_override in (None, 'none', 'mean', 'sum')
+ reduction = (
+ reduction_override if reduction_override else self.reduction)
+ loss_cls = self.loss_weight * distribution_focal_loss(
+ pred, target, weight, reduction=reduction, avg_factor=avg_factor)
+ return loss_cls
diff --git a/annotator/uniformer/mmdet/models/losses/ghm_loss.py b/annotator/uniformer/mmdet/models/losses/ghm_loss.py
new file mode 100644
index 0000000000000000000000000000000000000000..8969a23fd98bb746415f96ac5e4ad9e37ba3af52
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/losses/ghm_loss.py
@@ -0,0 +1,172 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+from ..builder import LOSSES
+
+
+def _expand_onehot_labels(labels, label_weights, label_channels):
+ bin_labels = labels.new_full((labels.size(0), label_channels), 0)
+ inds = torch.nonzero(
+ (labels >= 0) & (labels < label_channels), as_tuple=False).squeeze()
+ if inds.numel() > 0:
+ bin_labels[inds, labels[inds]] = 1
+ bin_label_weights = label_weights.view(-1, 1).expand(
+ label_weights.size(0), label_channels)
+ return bin_labels, bin_label_weights
+
+
+# TODO: code refactoring to make it consistent with other losses
+@LOSSES.register_module()
+class GHMC(nn.Module):
+ """GHM Classification Loss.
+
+ Details of the theorem can be viewed in the paper
+ `Gradient Harmonized Single-stage Detector
+    <https://arxiv.org/abs/1811.05181>`_.
+
+ Args:
+ bins (int): Number of the unit regions for distribution calculation.
+ momentum (float): The parameter for moving average.
+ use_sigmoid (bool): Can only be true for BCE based loss now.
+ loss_weight (float): The weight of the total GHM-C loss.
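+
+    Example:
+        An illustrative sketch only; the batch size and class count are
+        arbitrary. The target is a binary (multi-label) matrix and
+        ``label_weight`` marks valid entries.
+
+        >>> import torch
+        >>> loss_fn = GHMC(bins=10, momentum=0.75)
+        >>> pred = torch.randn(4, 20)
+        >>> target = torch.randint(0, 2, (4, 20)).float()
+        >>> label_weight = torch.ones(4, 20)
+        >>> loss = loss_fn(pred, target, label_weight)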
+ """
+
+ def __init__(self, bins=10, momentum=0, use_sigmoid=True, loss_weight=1.0):
+ super(GHMC, self).__init__()
+ self.bins = bins
+ self.momentum = momentum
+ edges = torch.arange(bins + 1).float() / bins
+ self.register_buffer('edges', edges)
+ self.edges[-1] += 1e-6
+ if momentum > 0:
+ acc_sum = torch.zeros(bins)
+ self.register_buffer('acc_sum', acc_sum)
+ self.use_sigmoid = use_sigmoid
+ if not self.use_sigmoid:
+ raise NotImplementedError
+ self.loss_weight = loss_weight
+
+ def forward(self, pred, target, label_weight, *args, **kwargs):
+ """Calculate the GHM-C loss.
+
+ Args:
+ pred (float tensor of size [batch_num, class_num]):
+ The direct prediction of classification fc layer.
+ target (float tensor of size [batch_num, class_num]):
+ Binary class target for each sample.
+ label_weight (float tensor of size [batch_num, class_num]):
+ the value is 1 if the sample is valid and 0 if ignored.
+ Returns:
+ The gradient harmonized loss.
+ """
+ # the target should be binary class label
+ if pred.dim() != target.dim():
+ target, label_weight = _expand_onehot_labels(
+ target, label_weight, pred.size(-1))
+ target, label_weight = target.float(), label_weight.float()
+ edges = self.edges
+ mmt = self.momentum
+ weights = torch.zeros_like(pred)
+
+ # gradient length
+ g = torch.abs(pred.sigmoid().detach() - target)
+
+ valid = label_weight > 0
+ tot = max(valid.float().sum().item(), 1.0)
+ n = 0 # n valid bins
+ for i in range(self.bins):
+ inds = (g >= edges[i]) & (g < edges[i + 1]) & valid
+ num_in_bin = inds.sum().item()
+ if num_in_bin > 0:
+ if mmt > 0:
+ self.acc_sum[i] = mmt * self.acc_sum[i] \
+ + (1 - mmt) * num_in_bin
+ weights[inds] = tot / self.acc_sum[i]
+ else:
+ weights[inds] = tot / num_in_bin
+ n += 1
+ if n > 0:
+ weights = weights / n
+
+ loss = F.binary_cross_entropy_with_logits(
+ pred, target, weights, reduction='sum') / tot
+ return loss * self.loss_weight
+
+
+# TODO: code refactoring to make it consistent with other losses
+@LOSSES.register_module()
+class GHMR(nn.Module):
+ """GHM Regression Loss.
+
+ Details of the theorem can be viewed in the paper
+ `Gradient Harmonized Single-stage Detector
+    <https://arxiv.org/abs/1811.05181>`_.
+
+ Args:
+ mu (float): The parameter for the Authentic Smooth L1 loss.
+ bins (int): Number of the unit regions for distribution calculation.
+ momentum (float): The parameter for moving average.
+ loss_weight (float): The weight of the total GHM-R loss.
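+
+    Example:
+        A minimal sketch with arbitrary shapes; predictions and targets are
+        box regression deltas of shape (batch_num, 4).
+
+        >>> import torch
+        >>> loss_fn = GHMR(mu=0.02, bins=10)
+        >>> pred = torch.randn(16, 4)
+        >>> target = torch.randn(16, 4)
+        >>> label_weight = torch.ones(16, 4)
+        >>> loss = loss_fn(pred, target, label_weight)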
+ """
+
+ def __init__(self, mu=0.02, bins=10, momentum=0, loss_weight=1.0):
+ super(GHMR, self).__init__()
+ self.mu = mu
+ self.bins = bins
+ edges = torch.arange(bins + 1).float() / bins
+ self.register_buffer('edges', edges)
+ self.edges[-1] = 1e3
+ self.momentum = momentum
+ if momentum > 0:
+ acc_sum = torch.zeros(bins)
+ self.register_buffer('acc_sum', acc_sum)
+ self.loss_weight = loss_weight
+
+ # TODO: support reduction parameter
+ def forward(self, pred, target, label_weight, avg_factor=None):
+ """Calculate the GHM-R loss.
+
+ Args:
+ pred (float tensor of size [batch_num, 4 (* class_num)]):
+ The prediction of box regression layer. Channel number can be 4
+ or 4 * class_num depending on whether it is class-agnostic.
+ target (float tensor of size [batch_num, 4 (* class_num)]):
+ The target regression values with the same size of pred.
+ label_weight (float tensor of size [batch_num, 4 (* class_num)]):
+ The weight of each sample, 0 if ignored.
+ Returns:
+ The gradient harmonized loss.
+ """
+ mu = self.mu
+ edges = self.edges
+ mmt = self.momentum
+
+ # ASL1 loss
+ diff = pred - target
+ loss = torch.sqrt(diff * diff + mu * mu) - mu
+
+ # gradient length
+ g = torch.abs(diff / torch.sqrt(mu * mu + diff * diff)).detach()
+ weights = torch.zeros_like(g)
+
+ valid = label_weight > 0
+ tot = max(label_weight.float().sum().item(), 1.0)
+ n = 0 # n: valid bins
+ for i in range(self.bins):
+ inds = (g >= edges[i]) & (g < edges[i + 1]) & valid
+ num_in_bin = inds.sum().item()
+ if num_in_bin > 0:
+ n += 1
+ if mmt > 0:
+ self.acc_sum[i] = mmt * self.acc_sum[i] \
+ + (1 - mmt) * num_in_bin
+ weights[inds] = tot / self.acc_sum[i]
+ else:
+ weights[inds] = tot / num_in_bin
+ if n > 0:
+ weights /= n
+
+ loss = loss * weights
+ loss = loss.sum() / tot
+ return loss * self.loss_weight
diff --git a/annotator/uniformer/mmdet/models/losses/iou_loss.py b/annotator/uniformer/mmdet/models/losses/iou_loss.py
new file mode 100644
index 0000000000000000000000000000000000000000..eba6f18b80981ca891c1add37007e6bf478c651f
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/losses/iou_loss.py
@@ -0,0 +1,436 @@
+import math
+
+import mmcv
+import torch
+import torch.nn as nn
+
+from mmdet.core import bbox_overlaps
+from ..builder import LOSSES
+from .utils import weighted_loss
+
+
+@mmcv.jit(derivate=True, coderize=True)
+@weighted_loss
+def iou_loss(pred, target, linear=False, eps=1e-6):
+ """IoU loss.
+
+ Computing the IoU loss between a set of predicted bboxes and target bboxes.
+ The loss is calculated as negative log of IoU.
+
+ Args:
+ pred (torch.Tensor): Predicted bboxes of format (x1, y1, x2, y2),
+ shape (n, 4).
+ target (torch.Tensor): Corresponding gt bboxes, shape (n, 4).
+ linear (bool, optional): If True, use linear scale of loss instead of
+ log scale. Default: False.
+ eps (float): Eps to avoid log(0).
+
+ Return:
+ torch.Tensor: Loss tensor.
+ """
+ ious = bbox_overlaps(pred, target, is_aligned=True).clamp(min=eps)
+ if linear:
+ loss = 1 - ious
+ else:
+ loss = -ious.log()
+ return loss
+
+
+@mmcv.jit(derivate=True, coderize=True)
+@weighted_loss
+def bounded_iou_loss(pred, target, beta=0.2, eps=1e-3):
+ """BIoULoss.
+
+    This is an implementation of the paper
+    `Improving Object Localization with Fitness NMS and Bounded IoU Loss
+    <https://arxiv.org/abs/1711.00164>`_.
+
+ Args:
+ pred (torch.Tensor): Predicted bboxes.
+ target (torch.Tensor): Target bboxes.
+ beta (float): beta parameter in smoothl1.
+ eps (float): eps to avoid NaN.
+ """
+ pred_ctrx = (pred[:, 0] + pred[:, 2]) * 0.5
+ pred_ctry = (pred[:, 1] + pred[:, 3]) * 0.5
+ pred_w = pred[:, 2] - pred[:, 0]
+ pred_h = pred[:, 3] - pred[:, 1]
+ with torch.no_grad():
+ target_ctrx = (target[:, 0] + target[:, 2]) * 0.5
+ target_ctry = (target[:, 1] + target[:, 3]) * 0.5
+ target_w = target[:, 2] - target[:, 0]
+ target_h = target[:, 3] - target[:, 1]
+
+ dx = target_ctrx - pred_ctrx
+ dy = target_ctry - pred_ctry
+
+ loss_dx = 1 - torch.max(
+ (target_w - 2 * dx.abs()) /
+ (target_w + 2 * dx.abs() + eps), torch.zeros_like(dx))
+ loss_dy = 1 - torch.max(
+ (target_h - 2 * dy.abs()) /
+ (target_h + 2 * dy.abs() + eps), torch.zeros_like(dy))
+ loss_dw = 1 - torch.min(target_w / (pred_w + eps), pred_w /
+ (target_w + eps))
+ loss_dh = 1 - torch.min(target_h / (pred_h + eps), pred_h /
+ (target_h + eps))
+ loss_comb = torch.stack([loss_dx, loss_dy, loss_dw, loss_dh],
+ dim=-1).view(loss_dx.size(0), -1)
+
+ loss = torch.where(loss_comb < beta, 0.5 * loss_comb * loss_comb / beta,
+ loss_comb - 0.5 * beta)
+ return loss
+
+
+@mmcv.jit(derivate=True, coderize=True)
+@weighted_loss
+def giou_loss(pred, target, eps=1e-7):
+ r"""`Generalized Intersection over Union: A Metric and A Loss for Bounding
+    Box Regression <https://arxiv.org/abs/1902.09630>`_.
+
+ Args:
+ pred (torch.Tensor): Predicted bboxes of format (x1, y1, x2, y2),
+ shape (n, 4).
+ target (torch.Tensor): Corresponding gt bboxes, shape (n, 4).
+ eps (float): Eps to avoid log(0).
+
+ Return:
+ Tensor: Loss tensor.
+ """
+ gious = bbox_overlaps(pred, target, mode='giou', is_aligned=True, eps=eps)
+ loss = 1 - gious
+ return loss
+
+
+@mmcv.jit(derivate=True, coderize=True)
+@weighted_loss
+def diou_loss(pred, target, eps=1e-7):
+ r"""`Implementation of Distance-IoU Loss: Faster and Better
+ Learning for Bounding Box Regression, https://arxiv.org/abs/1911.08287`_.
+
+ Code is modified from https://github.com/Zzh-tju/DIoU.
+
+ Args:
+ pred (Tensor): Predicted bboxes of format (x1, y1, x2, y2),
+ shape (n, 4).
+ target (Tensor): Corresponding gt bboxes, shape (n, 4).
+ eps (float): Eps to avoid log(0).
+ Return:
+ Tensor: Loss tensor.
+ """
+ # overlap
+ lt = torch.max(pred[:, :2], target[:, :2])
+ rb = torch.min(pred[:, 2:], target[:, 2:])
+ wh = (rb - lt).clamp(min=0)
+ overlap = wh[:, 0] * wh[:, 1]
+
+ # union
+ ap = (pred[:, 2] - pred[:, 0]) * (pred[:, 3] - pred[:, 1])
+ ag = (target[:, 2] - target[:, 0]) * (target[:, 3] - target[:, 1])
+ union = ap + ag - overlap + eps
+
+ # IoU
+ ious = overlap / union
+
+ # enclose area
+ enclose_x1y1 = torch.min(pred[:, :2], target[:, :2])
+ enclose_x2y2 = torch.max(pred[:, 2:], target[:, 2:])
+ enclose_wh = (enclose_x2y2 - enclose_x1y1).clamp(min=0)
+
+ cw = enclose_wh[:, 0]
+ ch = enclose_wh[:, 1]
+
+ c2 = cw**2 + ch**2 + eps
+
+ b1_x1, b1_y1 = pred[:, 0], pred[:, 1]
+ b1_x2, b1_y2 = pred[:, 2], pred[:, 3]
+ b2_x1, b2_y1 = target[:, 0], target[:, 1]
+ b2_x2, b2_y2 = target[:, 2], target[:, 3]
+
+ left = ((b2_x1 + b2_x2) - (b1_x1 + b1_x2))**2 / 4
+ right = ((b2_y1 + b2_y2) - (b1_y1 + b1_y2))**2 / 4
+ rho2 = left + right
+
+ # DIoU
+ dious = ious - rho2 / c2
+ loss = 1 - dious
+ return loss
+
+
+@mmcv.jit(derivate=True, coderize=True)
+@weighted_loss
+def ciou_loss(pred, target, eps=1e-7):
+    r"""Implementation of the paper `Enhancing Geometric Factors into
+    Model Learning and Inference for Object Detection and Instance
+    Segmentation <https://arxiv.org/abs/2005.03572>`_.
+
+ Code is modified from https://github.com/Zzh-tju/CIoU.
+
+ Args:
+ pred (Tensor): Predicted bboxes of format (x1, y1, x2, y2),
+ shape (n, 4).
+ target (Tensor): Corresponding gt bboxes, shape (n, 4).
+ eps (float): Eps to avoid log(0).
+ Return:
+ Tensor: Loss tensor.
+ """
+ # overlap
+ lt = torch.max(pred[:, :2], target[:, :2])
+ rb = torch.min(pred[:, 2:], target[:, 2:])
+ wh = (rb - lt).clamp(min=0)
+ overlap = wh[:, 0] * wh[:, 1]
+
+ # union
+ ap = (pred[:, 2] - pred[:, 0]) * (pred[:, 3] - pred[:, 1])
+ ag = (target[:, 2] - target[:, 0]) * (target[:, 3] - target[:, 1])
+ union = ap + ag - overlap + eps
+
+ # IoU
+ ious = overlap / union
+
+ # enclose area
+ enclose_x1y1 = torch.min(pred[:, :2], target[:, :2])
+ enclose_x2y2 = torch.max(pred[:, 2:], target[:, 2:])
+ enclose_wh = (enclose_x2y2 - enclose_x1y1).clamp(min=0)
+
+ cw = enclose_wh[:, 0]
+ ch = enclose_wh[:, 1]
+
+ c2 = cw**2 + ch**2 + eps
+
+ b1_x1, b1_y1 = pred[:, 0], pred[:, 1]
+ b1_x2, b1_y2 = pred[:, 2], pred[:, 3]
+ b2_x1, b2_y1 = target[:, 0], target[:, 1]
+ b2_x2, b2_y2 = target[:, 2], target[:, 3]
+
+ w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps
+ w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps
+
+ left = ((b2_x1 + b2_x2) - (b1_x1 + b1_x2))**2 / 4
+ right = ((b2_y1 + b2_y2) - (b1_y1 + b1_y2))**2 / 4
+ rho2 = left + right
+
+ factor = 4 / math.pi**2
+ v = factor * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2)
+
+ # CIoU
+ cious = ious - (rho2 / c2 + v**2 / (1 - ious + v))
+ loss = 1 - cious
+ return loss
+
+
+@LOSSES.register_module()
+class IoULoss(nn.Module):
+ """IoULoss.
+
+ Computing the IoU loss between a set of predicted bboxes and target bboxes.
+
+ Args:
+ linear (bool): If True, use linear scale of loss instead of log scale.
+ Default: False.
+ eps (float): Eps to avoid log(0).
+ reduction (str): Options are "none", "mean" and "sum".
+ loss_weight (float): Weight of loss.
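+
+    Example:
+        A minimal sketch; the boxes below are arbitrary and the call relies
+        on ``mmdet.core.bbox_overlaps`` imported at the top of this module.
+
+        >>> import torch
+        >>> loss_fn = IoULoss(linear=True)
+        >>> pred = torch.tensor([[0., 0., 10., 10.], [5., 5., 15., 15.]])
+        >>> target = torch.tensor([[1., 1., 11., 11.], [5., 5., 15., 15.]])
+        >>> loss = loss_fn(pred, target)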
+ """
+
+ def __init__(self,
+ linear=False,
+ eps=1e-6,
+ reduction='mean',
+ loss_weight=1.0):
+ super(IoULoss, self).__init__()
+ self.linear = linear
+ self.eps = eps
+ self.reduction = reduction
+ self.loss_weight = loss_weight
+
+ def forward(self,
+ pred,
+ target,
+ weight=None,
+ avg_factor=None,
+ reduction_override=None,
+ **kwargs):
+ """Forward function.
+
+ Args:
+ pred (torch.Tensor): The prediction.
+ target (torch.Tensor): The learning target of the prediction.
+ weight (torch.Tensor, optional): The weight of loss for each
+ prediction. Defaults to None.
+ avg_factor (int, optional): Average factor that is used to average
+ the loss. Defaults to None.
+ reduction_override (str, optional): The reduction method used to
+ override the original reduction method of the loss.
+ Defaults to None. Options are "none", "mean" and "sum".
+ """
+ assert reduction_override in (None, 'none', 'mean', 'sum')
+ reduction = (
+ reduction_override if reduction_override else self.reduction)
+ if (weight is not None) and (not torch.any(weight > 0)) and (
+ reduction != 'none'):
+ return (pred * weight).sum() # 0
+ if weight is not None and weight.dim() > 1:
+ # TODO: remove this in the future
+ # reduce the weight of shape (n, 4) to (n,) to match the
+ # iou_loss of shape (n,)
+ assert weight.shape == pred.shape
+ weight = weight.mean(-1)
+ loss = self.loss_weight * iou_loss(
+ pred,
+ target,
+ weight,
+ linear=self.linear,
+ eps=self.eps,
+ reduction=reduction,
+ avg_factor=avg_factor,
+ **kwargs)
+ return loss
+
+
+@LOSSES.register_module()
+class BoundedIoULoss(nn.Module):
+
+ def __init__(self, beta=0.2, eps=1e-3, reduction='mean', loss_weight=1.0):
+ super(BoundedIoULoss, self).__init__()
+ self.beta = beta
+ self.eps = eps
+ self.reduction = reduction
+ self.loss_weight = loss_weight
+
+ def forward(self,
+ pred,
+ target,
+ weight=None,
+ avg_factor=None,
+ reduction_override=None,
+ **kwargs):
+ if weight is not None and not torch.any(weight > 0):
+ return (pred * weight).sum() # 0
+ assert reduction_override in (None, 'none', 'mean', 'sum')
+ reduction = (
+ reduction_override if reduction_override else self.reduction)
+ loss = self.loss_weight * bounded_iou_loss(
+ pred,
+ target,
+ weight,
+ beta=self.beta,
+ eps=self.eps,
+ reduction=reduction,
+ avg_factor=avg_factor,
+ **kwargs)
+ return loss
+
+
+@LOSSES.register_module()
+class GIoULoss(nn.Module):
+
+ def __init__(self, eps=1e-6, reduction='mean', loss_weight=1.0):
+ super(GIoULoss, self).__init__()
+ self.eps = eps
+ self.reduction = reduction
+ self.loss_weight = loss_weight
+
+ def forward(self,
+ pred,
+ target,
+ weight=None,
+ avg_factor=None,
+ reduction_override=None,
+ **kwargs):
+ if weight is not None and not torch.any(weight > 0):
+ return (pred * weight).sum() # 0
+ assert reduction_override in (None, 'none', 'mean', 'sum')
+ reduction = (
+ reduction_override if reduction_override else self.reduction)
+ if weight is not None and weight.dim() > 1:
+ # TODO: remove this in the future
+ # reduce the weight of shape (n, 4) to (n,) to match the
+ # giou_loss of shape (n,)
+ assert weight.shape == pred.shape
+ weight = weight.mean(-1)
+ loss = self.loss_weight * giou_loss(
+ pred,
+ target,
+ weight,
+ eps=self.eps,
+ reduction=reduction,
+ avg_factor=avg_factor,
+ **kwargs)
+ return loss
+
+
+@LOSSES.register_module()
+class DIoULoss(nn.Module):
+
+ def __init__(self, eps=1e-6, reduction='mean', loss_weight=1.0):
+ super(DIoULoss, self).__init__()
+ self.eps = eps
+ self.reduction = reduction
+ self.loss_weight = loss_weight
+
+ def forward(self,
+ pred,
+ target,
+ weight=None,
+ avg_factor=None,
+ reduction_override=None,
+ **kwargs):
+ if weight is not None and not torch.any(weight > 0):
+ return (pred * weight).sum() # 0
+ assert reduction_override in (None, 'none', 'mean', 'sum')
+ reduction = (
+ reduction_override if reduction_override else self.reduction)
+ if weight is not None and weight.dim() > 1:
+ # TODO: remove this in the future
+ # reduce the weight of shape (n, 4) to (n,) to match the
+ # giou_loss of shape (n,)
+ assert weight.shape == pred.shape
+ weight = weight.mean(-1)
+ loss = self.loss_weight * diou_loss(
+ pred,
+ target,
+ weight,
+ eps=self.eps,
+ reduction=reduction,
+ avg_factor=avg_factor,
+ **kwargs)
+ return loss
+
+
+@LOSSES.register_module()
+class CIoULoss(nn.Module):
+
+ def __init__(self, eps=1e-6, reduction='mean', loss_weight=1.0):
+ super(CIoULoss, self).__init__()
+ self.eps = eps
+ self.reduction = reduction
+ self.loss_weight = loss_weight
+
+ def forward(self,
+ pred,
+ target,
+ weight=None,
+ avg_factor=None,
+ reduction_override=None,
+ **kwargs):
+ if weight is not None and not torch.any(weight > 0):
+ return (pred * weight).sum() # 0
+ assert reduction_override in (None, 'none', 'mean', 'sum')
+ reduction = (
+ reduction_override if reduction_override else self.reduction)
+ if weight is not None and weight.dim() > 1:
+ # TODO: remove this in the future
+ # reduce the weight of shape (n, 4) to (n,) to match the
+ # giou_loss of shape (n,)
+ assert weight.shape == pred.shape
+ weight = weight.mean(-1)
+ loss = self.loss_weight * ciou_loss(
+ pred,
+ target,
+ weight,
+ eps=self.eps,
+ reduction=reduction,
+ avg_factor=avg_factor,
+ **kwargs)
+ return loss
diff --git a/annotator/uniformer/mmdet/models/losses/kd_loss.py b/annotator/uniformer/mmdet/models/losses/kd_loss.py
new file mode 100644
index 0000000000000000000000000000000000000000..f3abb68d4f7b3eec98b873f69c1105a22eb33913
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/losses/kd_loss.py
@@ -0,0 +1,87 @@
+import mmcv
+import torch.nn as nn
+import torch.nn.functional as F
+
+from ..builder import LOSSES
+from .utils import weighted_loss
+
+
+@mmcv.jit(derivate=True, coderize=True)
+@weighted_loss
+def knowledge_distillation_kl_div_loss(pred,
+ soft_label,
+ T,
+ detach_target=True):
+ r"""Loss function for knowledge distilling using KL divergence.
+
+ Args:
+ pred (Tensor): Predicted logits with shape (N, n + 1).
+        soft_label (Tensor): Target logits with shape (N, n + 1).
+        T (int): Temperature for distillation.
+        detach_target (bool): Remove soft_label from automatic
+            differentiation. Defaults to True.
+
+ Returns:
+ torch.Tensor: Loss tensor with shape (N,).
+ """
+ assert pred.size() == soft_label.size()
+ target = F.softmax(soft_label / T, dim=1)
+ if detach_target:
+ target = target.detach()
+
+ kd_loss = F.kl_div(
+ F.log_softmax(pred / T, dim=1), target, reduction='none').mean(1) * (
+ T * T)
+
+ return kd_loss
+
+
+@LOSSES.register_module()
+class KnowledgeDistillationKLDivLoss(nn.Module):
+ """Loss function for knowledge distilling using KL divergence.
+
+ Args:
+ reduction (str): Options are `'none'`, `'mean'` and `'sum'`.
+ loss_weight (float): Loss weight of current loss.
+ T (int): Temperature for distillation.
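+
+    Example:
+        An illustrative sketch only; the logit shapes are arbitrary, as long
+        as student and teacher logits have the same shape.
+
+        >>> import torch
+        >>> loss_fn = KnowledgeDistillationKLDivLoss(T=10)
+        >>> student_logits = torch.randn(4, 81)
+        >>> teacher_logits = torch.randn(4, 81)
+        >>> loss = loss_fn(student_logits, teacher_logits)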
+ """
+
+ def __init__(self, reduction='mean', loss_weight=1.0, T=10):
+ super(KnowledgeDistillationKLDivLoss, self).__init__()
+ assert T >= 1
+ self.reduction = reduction
+ self.loss_weight = loss_weight
+ self.T = T
+
+ def forward(self,
+ pred,
+ soft_label,
+ weight=None,
+ avg_factor=None,
+ reduction_override=None):
+ """Forward function.
+
+ Args:
+ pred (Tensor): Predicted logits with shape (N, n + 1).
+            soft_label (Tensor): Target logits with shape (N, n + 1).
+ weight (torch.Tensor, optional): The weight of loss for each
+ prediction. Defaults to None.
+ avg_factor (int, optional): Average factor that is used to average
+ the loss. Defaults to None.
+ reduction_override (str, optional): The reduction method used to
+ override the original reduction method of the loss.
+ Defaults to None.
+ """
+ assert reduction_override in (None, 'none', 'mean', 'sum')
+
+ reduction = (
+ reduction_override if reduction_override else self.reduction)
+
+ loss_kd = self.loss_weight * knowledge_distillation_kl_div_loss(
+ pred,
+ soft_label,
+ weight,
+ reduction=reduction,
+ avg_factor=avg_factor,
+ T=self.T)
+
+ return loss_kd
diff --git a/annotator/uniformer/mmdet/models/losses/mse_loss.py b/annotator/uniformer/mmdet/models/losses/mse_loss.py
new file mode 100644
index 0000000000000000000000000000000000000000..68d05752a245548862f4c9919448d4fb8dc1b8ca
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/losses/mse_loss.py
@@ -0,0 +1,49 @@
+import torch.nn as nn
+import torch.nn.functional as F
+
+from ..builder import LOSSES
+from .utils import weighted_loss
+
+
+@weighted_loss
+def mse_loss(pred, target):
+    """Wrapper of MSE loss."""
+ return F.mse_loss(pred, target, reduction='none')
+
+
+@LOSSES.register_module()
+class MSELoss(nn.Module):
+ """MSELoss.
+
+ Args:
+ reduction (str, optional): The method that reduces the loss to a
+ scalar. Options are "none", "mean" and "sum".
+ loss_weight (float, optional): The weight of the loss. Defaults to 1.0
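+
+    Example:
+        A small worked example (values chosen so the result is easy to
+        verify by hand).
+
+        >>> import torch
+        >>> loss_fn = MSELoss()
+        >>> pred = torch.tensor([0., 2.])
+        >>> target = torch.tensor([1., 1.])
+        >>> loss_fn(pred, target)
+        tensor(1.)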
+ """
+
+ def __init__(self, reduction='mean', loss_weight=1.0):
+ super().__init__()
+ self.reduction = reduction
+ self.loss_weight = loss_weight
+
+ def forward(self, pred, target, weight=None, avg_factor=None):
+ """Forward function of loss.
+
+ Args:
+ pred (torch.Tensor): The prediction.
+ target (torch.Tensor): The learning target of the prediction.
+ weight (torch.Tensor, optional): Weight of the loss for each
+ prediction. Defaults to None.
+ avg_factor (int, optional): Average factor that is used to average
+ the loss. Defaults to None.
+
+ Returns:
+ torch.Tensor: The calculated loss
+ """
+ loss = self.loss_weight * mse_loss(
+ pred,
+ target,
+ weight,
+ reduction=self.reduction,
+ avg_factor=avg_factor)
+ return loss
diff --git a/annotator/uniformer/mmdet/models/losses/pisa_loss.py b/annotator/uniformer/mmdet/models/losses/pisa_loss.py
new file mode 100644
index 0000000000000000000000000000000000000000..4a48adfcd400bb07b719a6fbd5a8af0508820629
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/losses/pisa_loss.py
@@ -0,0 +1,183 @@
+import mmcv
+import torch
+
+from mmdet.core import bbox_overlaps
+
+
+@mmcv.jit(derivate=True, coderize=True)
+def isr_p(cls_score,
+ bbox_pred,
+ bbox_targets,
+ rois,
+ sampling_results,
+ loss_cls,
+ bbox_coder,
+ k=2,
+ bias=0,
+ num_class=80):
+ """Importance-based Sample Reweighting (ISR_P), positive part.
+
+ Args:
+ cls_score (Tensor): Predicted classification scores.
+ bbox_pred (Tensor): Predicted bbox deltas.
+        bbox_targets (tuple[Tensor]): A tuple of bbox targets, which are
+ labels, label_weights, bbox_targets, bbox_weights, respectively.
+ rois (Tensor): Anchors (single_stage) in shape (n, 4) or RoIs
+ (two_stage) in shape (n, 5).
+ sampling_results (obj): Sampling results.
+ loss_cls (func): Classification loss func of the head.
+ bbox_coder (obj): BBox coder of the head.
+ k (float): Power of the non-linear mapping.
+ bias (float): Shift of the non-linear mapping.
+ num_class (int): Number of classes, default: 80.
+
+ Return:
+ tuple([Tensor]): labels, imp_based_label_weights, bbox_targets,
+ bbox_target_weights
+ """
+
+ labels, label_weights, bbox_targets, bbox_weights = bbox_targets
+ pos_label_inds = ((labels >= 0) &
+ (labels < num_class)).nonzero().reshape(-1)
+ pos_labels = labels[pos_label_inds]
+
+ # if no positive samples, return the original targets
+ num_pos = float(pos_label_inds.size(0))
+ if num_pos == 0:
+ return labels, label_weights, bbox_targets, bbox_weights
+
+ # merge pos_assigned_gt_inds of per image to a single tensor
+ gts = list()
+ last_max_gt = 0
+ for i in range(len(sampling_results)):
+ gt_i = sampling_results[i].pos_assigned_gt_inds
+ gts.append(gt_i + last_max_gt)
+ if len(gt_i) != 0:
+ last_max_gt = gt_i.max() + 1
+ gts = torch.cat(gts)
+ assert len(gts) == num_pos
+
+ cls_score = cls_score.detach()
+ bbox_pred = bbox_pred.detach()
+
+ # For single stage detectors, rois here indicate anchors, in shape (N, 4)
+ # For two stage detectors, rois are in shape (N, 5)
+ if rois.size(-1) == 5:
+ pos_rois = rois[pos_label_inds][:, 1:]
+ else:
+ pos_rois = rois[pos_label_inds]
+
+ if bbox_pred.size(-1) > 4:
+ bbox_pred = bbox_pred.view(bbox_pred.size(0), -1, 4)
+ pos_delta_pred = bbox_pred[pos_label_inds, pos_labels].view(-1, 4)
+ else:
+ pos_delta_pred = bbox_pred[pos_label_inds].view(-1, 4)
+
+ # compute iou of the predicted bbox and the corresponding GT
+ pos_delta_target = bbox_targets[pos_label_inds].view(-1, 4)
+ pos_bbox_pred = bbox_coder.decode(pos_rois, pos_delta_pred)
+ target_bbox_pred = bbox_coder.decode(pos_rois, pos_delta_target)
+ ious = bbox_overlaps(pos_bbox_pred, target_bbox_pred, is_aligned=True)
+
+ pos_imp_weights = label_weights[pos_label_inds]
+ # Two steps to compute IoU-HLR. Samples are first sorted by IoU locally,
+ # then sorted again within the same-rank group
+ max_l_num = pos_labels.bincount().max()
+ for label in pos_labels.unique():
+ l_inds = (pos_labels == label).nonzero().view(-1)
+ l_gts = gts[l_inds]
+ for t in l_gts.unique():
+ t_inds = l_inds[l_gts == t]
+ t_ious = ious[t_inds]
+ _, t_iou_rank_idx = t_ious.sort(descending=True)
+ _, t_iou_rank = t_iou_rank_idx.sort()
+ ious[t_inds] += max_l_num - t_iou_rank.float()
+ l_ious = ious[l_inds]
+ _, l_iou_rank_idx = l_ious.sort(descending=True)
+ _, l_iou_rank = l_iou_rank_idx.sort() # IoU-HLR
+ # linearly map HLR to label weights
+ pos_imp_weights[l_inds] *= (max_l_num - l_iou_rank.float()) / max_l_num
+
+ pos_imp_weights = (bias + pos_imp_weights * (1 - bias)).pow(k)
+
+ # normalize to make the new weighted loss value equal to the original loss
+ pos_loss_cls = loss_cls(
+ cls_score[pos_label_inds], pos_labels, reduction_override='none')
+ if pos_loss_cls.dim() > 1:
+ ori_pos_loss_cls = pos_loss_cls * label_weights[pos_label_inds][:,
+ None]
+ new_pos_loss_cls = pos_loss_cls * pos_imp_weights[:, None]
+ else:
+ ori_pos_loss_cls = pos_loss_cls * label_weights[pos_label_inds]
+ new_pos_loss_cls = pos_loss_cls * pos_imp_weights
+ pos_loss_cls_ratio = ori_pos_loss_cls.sum() / new_pos_loss_cls.sum()
+ pos_imp_weights = pos_imp_weights * pos_loss_cls_ratio
+ label_weights[pos_label_inds] = pos_imp_weights
+
+ bbox_targets = labels, label_weights, bbox_targets, bbox_weights
+ return bbox_targets
+
+
+@mmcv.jit(derivate=True, coderize=True)
+def carl_loss(cls_score,
+ labels,
+ bbox_pred,
+ bbox_targets,
+ loss_bbox,
+ k=1,
+ bias=0.2,
+ avg_factor=None,
+ sigmoid=False,
+ num_class=80):
+ """Classification-Aware Regression Loss (CARL).
+
+ Args:
+ cls_score (Tensor): Predicted classification scores.
+ labels (Tensor): Targets of classification.
+ bbox_pred (Tensor): Predicted bbox deltas.
+ bbox_targets (Tensor): Target of bbox regression.
+ loss_bbox (func): Regression loss func of the head.
+ k (float): Power of the non-linear mapping.
+ bias (float): Shift of the non-linear mapping.
+ avg_factor (int): Average factor used in regression loss.
+ sigmoid (bool): Activation of the classification score.
+ num_class (int): Number of classes, default: 80.
+
+ Return:
+ dict: CARL loss dict.
+ """
+ pos_label_inds = ((labels >= 0) &
+ (labels < num_class)).nonzero().reshape(-1)
+ if pos_label_inds.numel() == 0:
+ return dict(loss_carl=cls_score.sum()[None] * 0.)
+ pos_labels = labels[pos_label_inds]
+
+ # multiply pos_cls_score with the corresponding bbox weight
+ # and remain gradient
+ if sigmoid:
+ pos_cls_score = cls_score.sigmoid()[pos_label_inds, pos_labels]
+ else:
+ pos_cls_score = cls_score.softmax(-1)[pos_label_inds, pos_labels]
+ carl_loss_weights = (bias + (1 - bias) * pos_cls_score).pow(k)
+
+ # normalize carl_loss_weight to make its sum equal to num positive
+ num_pos = float(pos_cls_score.size(0))
+ weight_ratio = num_pos / carl_loss_weights.sum()
+ carl_loss_weights *= weight_ratio
+
+ if avg_factor is None:
+ avg_factor = bbox_targets.size(0)
+ # if is class agnostic, bbox pred is in shape (N, 4)
+ # otherwise, bbox pred is in shape (N, #classes, 4)
+ if bbox_pred.size(-1) > 4:
+ bbox_pred = bbox_pred.view(bbox_pred.size(0), -1, 4)
+ pos_bbox_preds = bbox_pred[pos_label_inds, pos_labels]
+ else:
+ pos_bbox_preds = bbox_pred[pos_label_inds]
+ ori_loss_reg = loss_bbox(
+ pos_bbox_preds,
+ bbox_targets[pos_label_inds],
+ reduction_override='none') / avg_factor
+ loss_carl = (ori_loss_reg * carl_loss_weights[:, None]).sum()
+ return dict(loss_carl=loss_carl[None])
diff --git a/annotator/uniformer/mmdet/models/losses/smooth_l1_loss.py b/annotator/uniformer/mmdet/models/losses/smooth_l1_loss.py
new file mode 100644
index 0000000000000000000000000000000000000000..ec9c98a52d1932d6ccff18938c17c36755bf1baf
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/losses/smooth_l1_loss.py
@@ -0,0 +1,139 @@
+import mmcv
+import torch
+import torch.nn as nn
+
+from ..builder import LOSSES
+from .utils import weighted_loss
+
+
+@mmcv.jit(derivate=True, coderize=True)
+@weighted_loss
+def smooth_l1_loss(pred, target, beta=1.0):
+ """Smooth L1 loss.
+
+ Args:
+ pred (torch.Tensor): The prediction.
+ target (torch.Tensor): The learning target of the prediction.
+ beta (float, optional): The threshold in the piecewise function.
+ Defaults to 1.0.
+
+ Returns:
+ torch.Tensor: Calculated loss
+ """
+ assert beta > 0
+ assert pred.size() == target.size() and target.numel() > 0
+ diff = torch.abs(pred - target)
+ loss = torch.where(diff < beta, 0.5 * diff * diff / beta,
+ diff - 0.5 * beta)
+ return loss
+
+
+@mmcv.jit(derivate=True, coderize=True)
+@weighted_loss
+def l1_loss(pred, target):
+ """L1 loss.
+
+ Args:
+ pred (torch.Tensor): The prediction.
+ target (torch.Tensor): The learning target of the prediction.
+
+ Returns:
+ torch.Tensor: Calculated loss
+ """
+ assert pred.size() == target.size() and target.numel() > 0
+ loss = torch.abs(pred - target)
+ return loss
+
+
+@LOSSES.register_module()
+class SmoothL1Loss(nn.Module):
+ """Smooth L1 loss.
+
+ Args:
+ beta (float, optional): The threshold in the piecewise function.
+ Defaults to 1.0.
+ reduction (str, optional): The method to reduce the loss.
+ Options are "none", "mean" and "sum". Defaults to "mean".
+ loss_weight (float, optional): The weight of loss.
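+
+    Example:
+        A small worked example showing the piecewise behaviour: quadratic
+        below ``beta`` and linear above it.
+
+        >>> import torch
+        >>> loss_fn = SmoothL1Loss(beta=1.0, reduction='none')
+        >>> pred = torch.tensor([0.0, 0.5, 2.0])
+        >>> target = torch.tensor([0.0, 0.0, 0.0])
+        >>> loss_fn(pred, target)
+        tensor([0.0000, 0.1250, 1.5000])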
+ """
+
+ def __init__(self, beta=1.0, reduction='mean', loss_weight=1.0):
+ super(SmoothL1Loss, self).__init__()
+ self.beta = beta
+ self.reduction = reduction
+ self.loss_weight = loss_weight
+
+ def forward(self,
+ pred,
+ target,
+ weight=None,
+ avg_factor=None,
+ reduction_override=None,
+ **kwargs):
+ """Forward function.
+
+ Args:
+ pred (torch.Tensor): The prediction.
+ target (torch.Tensor): The learning target of the prediction.
+ weight (torch.Tensor, optional): The weight of loss for each
+ prediction. Defaults to None.
+ avg_factor (int, optional): Average factor that is used to average
+ the loss. Defaults to None.
+ reduction_override (str, optional): The reduction method used to
+ override the original reduction method of the loss.
+ Defaults to None.
+ """
+ assert reduction_override in (None, 'none', 'mean', 'sum')
+ reduction = (
+ reduction_override if reduction_override else self.reduction)
+ loss_bbox = self.loss_weight * smooth_l1_loss(
+ pred,
+ target,
+ weight,
+ beta=self.beta,
+ reduction=reduction,
+ avg_factor=avg_factor,
+ **kwargs)
+ return loss_bbox
+
+
+@LOSSES.register_module()
+class L1Loss(nn.Module):
+ """L1 loss.
+
+ Args:
+ reduction (str, optional): The method to reduce the loss.
+ Options are "none", "mean" and "sum".
+ loss_weight (float, optional): The weight of loss.
+ """
+
+ def __init__(self, reduction='mean', loss_weight=1.0):
+ super(L1Loss, self).__init__()
+ self.reduction = reduction
+ self.loss_weight = loss_weight
+
+ def forward(self,
+ pred,
+ target,
+ weight=None,
+ avg_factor=None,
+ reduction_override=None):
+ """Forward function.
+
+ Args:
+ pred (torch.Tensor): The prediction.
+ target (torch.Tensor): The learning target of the prediction.
+ weight (torch.Tensor, optional): The weight of loss for each
+ prediction. Defaults to None.
+ avg_factor (int, optional): Average factor that is used to average
+ the loss. Defaults to None.
+ reduction_override (str, optional): The reduction method used to
+ override the original reduction method of the loss.
+ Defaults to None.
+ """
+ assert reduction_override in (None, 'none', 'mean', 'sum')
+ reduction = (
+ reduction_override if reduction_override else self.reduction)
+ loss_bbox = self.loss_weight * l1_loss(
+ pred, target, weight, reduction=reduction, avg_factor=avg_factor)
+ return loss_bbox
diff --git a/annotator/uniformer/mmdet/models/losses/utils.py b/annotator/uniformer/mmdet/models/losses/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..4756d7fcefd7cda1294c2662b4ca3e90c0a8e124
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/losses/utils.py
@@ -0,0 +1,100 @@
+import functools
+
+import mmcv
+import torch.nn.functional as F
+
+
+def reduce_loss(loss, reduction):
+ """Reduce loss as specified.
+
+ Args:
+ loss (Tensor): Elementwise loss tensor.
+ reduction (str): Options are "none", "mean" and "sum".
+
+ Return:
+ Tensor: Reduced loss tensor.
+ """
+ reduction_enum = F._Reduction.get_enum(reduction)
+ # none: 0, elementwise_mean:1, sum: 2
+ if reduction_enum == 0:
+ return loss
+ elif reduction_enum == 1:
+ return loss.mean()
+ elif reduction_enum == 2:
+ return loss.sum()
+
+
+@mmcv.jit(derivate=True, coderize=True)
+def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
+ """Apply element-wise weight and reduce loss.
+
+ Args:
+ loss (Tensor): Element-wise loss.
+ weight (Tensor): Element-wise weights.
+ reduction (str): Same as built-in losses of PyTorch.
+        avg_factor (float): Average factor when computing the mean of losses.
+
+ Returns:
+ Tensor: Processed loss values.
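+
+    Example:
+        A small worked example; with ``avg_factor`` the weighted loss is
+        summed and divided by the given factor instead of the element count.
+
+        >>> import torch
+        >>> loss = torch.tensor([1., 2., 3., 4.])
+        >>> weight = torch.tensor([1., 1., 0., 0.])
+        >>> weight_reduce_loss(loss, weight, reduction='mean', avg_factor=2)
+        tensor(1.5000)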
+ """
+ # if weight is specified, apply element-wise weight
+ if weight is not None:
+ loss = loss * weight
+
+ # if avg_factor is not specified, just reduce the loss
+ if avg_factor is None:
+ loss = reduce_loss(loss, reduction)
+ else:
+ # if reduction is mean, then average the loss by avg_factor
+ if reduction == 'mean':
+ loss = loss.sum() / avg_factor
+ # if reduction is 'none', then do nothing, otherwise raise an error
+ elif reduction != 'none':
+ raise ValueError('avg_factor can not be used with reduction="sum"')
+ return loss
+
+
+def weighted_loss(loss_func):
+ """Create a weighted version of a given loss function.
+
+ To use this decorator, the loss function must have the signature like
+ `loss_func(pred, target, **kwargs)`. The function only needs to compute
+ element-wise loss without any reduction. This decorator will add weight
+ and reduction arguments to the function. The decorated function will have
+ the signature like `loss_func(pred, target, weight=None, reduction='mean',
+ avg_factor=None, **kwargs)`.
+
+ :Example:
+
+ >>> import torch
+ >>> @weighted_loss
+ >>> def l1_loss(pred, target):
+ >>> return (pred - target).abs()
+
+ >>> pred = torch.Tensor([0, 2, 3])
+ >>> target = torch.Tensor([1, 1, 1])
+ >>> weight = torch.Tensor([1, 0, 1])
+
+ >>> l1_loss(pred, target)
+ tensor(1.3333)
+ >>> l1_loss(pred, target, weight)
+ tensor(1.)
+ >>> l1_loss(pred, target, reduction='none')
+ tensor([1., 1., 2.])
+ >>> l1_loss(pred, target, weight, avg_factor=2)
+ tensor(1.5000)
+ """
+
+ @functools.wraps(loss_func)
+ def wrapper(pred,
+ target,
+ weight=None,
+ reduction='mean',
+ avg_factor=None,
+ **kwargs):
+ # get element-wise loss
+ loss = loss_func(pred, target, **kwargs)
+ loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
+ return loss
+
+ return wrapper
diff --git a/annotator/uniformer/mmdet/models/losses/varifocal_loss.py b/annotator/uniformer/mmdet/models/losses/varifocal_loss.py
new file mode 100644
index 0000000000000000000000000000000000000000..7f00bd6916c04fef45a9aeecb50888266420daf9
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/losses/varifocal_loss.py
@@ -0,0 +1,133 @@
+import mmcv
+import torch.nn as nn
+import torch.nn.functional as F
+
+from ..builder import LOSSES
+from .utils import weight_reduce_loss
+
+
+@mmcv.jit(derivate=True, coderize=True)
+def varifocal_loss(pred,
+ target,
+ weight=None,
+ alpha=0.75,
+ gamma=2.0,
+ iou_weighted=True,
+ reduction='mean',
+ avg_factor=None):
+    """`Varifocal Loss <https://arxiv.org/abs/2008.13367>`_
+
+ Args:
+ pred (torch.Tensor): The prediction with shape (N, C), C is the
+ number of classes
+ target (torch.Tensor): The learning target of the iou-aware
+ classification score with shape (N, C), C is the number of classes.
+ weight (torch.Tensor, optional): The weight of loss for each
+ prediction. Defaults to None.
+ alpha (float, optional): A balance factor for the negative part of
+ Varifocal Loss, which is different from the alpha of Focal Loss.
+ Defaults to 0.75.
+ gamma (float, optional): The gamma for calculating the modulating
+ factor. Defaults to 2.0.
+ iou_weighted (bool, optional): Whether to weight the loss of the
+ positive example with the iou target. Defaults to True.
+ reduction (str, optional): The method used to reduce the loss into
+ a scalar. Defaults to 'mean'. Options are "none", "mean" and
+ "sum".
+ avg_factor (int, optional): Average factor that is used to average
+ the loss. Defaults to None.
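+
+    Example:
+        A minimal sketch; shapes are arbitrary. The target holds the
+        IoU-aware classification score: positives carry their IoU value and
+        negatives are zero.
+
+        >>> import torch
+        >>> pred = torch.randn(8, 20)
+        >>> target = torch.zeros(8, 20)
+        >>> target[:, 0] = torch.rand(8)  # IoU targets for one positive class
+        >>> loss = varifocal_loss(pred, target)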
+ """
+ # pred and target should be of the same size
+ assert pred.size() == target.size()
+ pred_sigmoid = pred.sigmoid()
+ target = target.type_as(pred)
+ if iou_weighted:
+ focal_weight = target * (target > 0.0).float() + \
+ alpha * (pred_sigmoid - target).abs().pow(gamma) * \
+ (target <= 0.0).float()
+ else:
+ focal_weight = (target > 0.0).float() + \
+ alpha * (pred_sigmoid - target).abs().pow(gamma) * \
+ (target <= 0.0).float()
+ loss = F.binary_cross_entropy_with_logits(
+ pred, target, reduction='none') * focal_weight
+ loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
+ return loss
+
+
+@LOSSES.register_module()
+class VarifocalLoss(nn.Module):
+
+ def __init__(self,
+ use_sigmoid=True,
+ alpha=0.75,
+ gamma=2.0,
+ iou_weighted=True,
+ reduction='mean',
+ loss_weight=1.0):
+        """`Varifocal Loss <https://arxiv.org/abs/2008.13367>`_
+
+ Args:
+ use_sigmoid (bool, optional): Whether the prediction is
+ used for sigmoid or softmax. Defaults to True.
+ alpha (float, optional): A balance factor for the negative part of
+ Varifocal Loss, which is different from the alpha of Focal
+ Loss. Defaults to 0.75.
+ gamma (float, optional): The gamma for calculating the modulating
+ factor. Defaults to 2.0.
+ iou_weighted (bool, optional): Whether to weight the loss of the
+ positive examples with the iou target. Defaults to True.
+ reduction (str, optional): The method used to reduce the loss into
+ a scalar. Defaults to 'mean'. Options are "none", "mean" and
+ "sum".
+ loss_weight (float, optional): Weight of loss. Defaults to 1.0.
+ """
+ super(VarifocalLoss, self).__init__()
+ assert use_sigmoid is True, \
+ 'Only sigmoid varifocal loss supported now.'
+ assert alpha >= 0.0
+ self.use_sigmoid = use_sigmoid
+ self.alpha = alpha
+ self.gamma = gamma
+ self.iou_weighted = iou_weighted
+ self.reduction = reduction
+ self.loss_weight = loss_weight
+
+ def forward(self,
+ pred,
+ target,
+ weight=None,
+ avg_factor=None,
+ reduction_override=None):
+ """Forward function.
+
+ Args:
+ pred (torch.Tensor): The prediction.
+ target (torch.Tensor): The learning target of the prediction.
+ weight (torch.Tensor, optional): The weight of loss for each
+ prediction. Defaults to None.
+ avg_factor (int, optional): Average factor that is used to average
+ the loss. Defaults to None.
+ reduction_override (str, optional): The reduction method used to
+ override the original reduction method of the loss.
+ Options are "none", "mean" and "sum".
+
+ Returns:
+ torch.Tensor: The calculated loss
+ """
+ assert reduction_override in (None, 'none', 'mean', 'sum')
+ reduction = (
+ reduction_override if reduction_override else self.reduction)
+ if self.use_sigmoid:
+ loss_cls = self.loss_weight * varifocal_loss(
+ pred,
+ target,
+ weight,
+ alpha=self.alpha,
+ gamma=self.gamma,
+ iou_weighted=self.iou_weighted,
+ reduction=reduction,
+ avg_factor=avg_factor)
+ else:
+ raise NotImplementedError
+ return loss_cls
diff --git a/annotator/uniformer/mmdet/models/necks/__init__.py b/annotator/uniformer/mmdet/models/necks/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..02f833a8a0f538a8c06fef622d1cadc1a1b66ea2
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/necks/__init__.py
@@ -0,0 +1,16 @@
+from .bfp import BFP
+from .channel_mapper import ChannelMapper
+from .fpg import FPG
+from .fpn import FPN
+from .fpn_carafe import FPN_CARAFE
+from .hrfpn import HRFPN
+from .nas_fpn import NASFPN
+from .nasfcos_fpn import NASFCOS_FPN
+from .pafpn import PAFPN
+from .rfp import RFP
+from .yolo_neck import YOLOV3Neck
+
+__all__ = [
+ 'FPN', 'BFP', 'ChannelMapper', 'HRFPN', 'NASFPN', 'FPN_CARAFE', 'PAFPN',
+ 'NASFCOS_FPN', 'RFP', 'YOLOV3Neck', 'FPG'
+]
diff --git a/annotator/uniformer/mmdet/models/necks/bfp.py b/annotator/uniformer/mmdet/models/necks/bfp.py
new file mode 100644
index 0000000000000000000000000000000000000000..123f5515ab6b51867d5781aa1572a0810670235f
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/necks/bfp.py
@@ -0,0 +1,104 @@
+import torch.nn as nn
+import torch.nn.functional as F
+from mmcv.cnn import ConvModule, xavier_init
+from mmcv.cnn.bricks import NonLocal2d
+
+from ..builder import NECKS
+
+
+@NECKS.register_module()
+class BFP(nn.Module):
+ """BFP (Balanced Feature Pyramids)
+
+    BFP takes multi-level features as inputs, gathers them into a single one,
+    then refines the gathered feature and scatters the refined results back to
+    the multi-level features. This module is used in Libra R-CNN (CVPR 2019);
+    see the paper `Libra R-CNN: Towards Balanced Learning for Object Detection
+    <https://arxiv.org/abs/1904.02701>`_ for details.
+
+ Args:
+ in_channels (int): Number of input channels (feature maps of all levels
+ should have the same channels).
+ num_levels (int): Number of input feature levels.
+ conv_cfg (dict): The config dict for convolution layers.
+ norm_cfg (dict): The config dict for normalization layers.
+ refine_level (int): Index of the level (counted from bottom to top) at
+ which the multi-level features are integrated and refined by the BSF.
+ refine_type (str): Type of the refine op; currently supported values are
+ [None, 'conv', 'non_local'].
+ """
+
+ def __init__(self,
+ in_channels,
+ num_levels,
+ refine_level=2,
+ refine_type=None,
+ conv_cfg=None,
+ norm_cfg=None):
+ super(BFP, self).__init__()
+ assert refine_type in [None, 'conv', 'non_local']
+
+ self.in_channels = in_channels
+ self.num_levels = num_levels
+ self.conv_cfg = conv_cfg
+ self.norm_cfg = norm_cfg
+
+ self.refine_level = refine_level
+ self.refine_type = refine_type
+ assert 0 <= self.refine_level < self.num_levels
+
+ if self.refine_type == 'conv':
+ self.refine = ConvModule(
+ self.in_channels,
+ self.in_channels,
+ 3,
+ padding=1,
+ conv_cfg=self.conv_cfg,
+ norm_cfg=self.norm_cfg)
+ elif self.refine_type == 'non_local':
+ self.refine = NonLocal2d(
+ self.in_channels,
+ reduction=1,
+ use_scale=False,
+ conv_cfg=self.conv_cfg,
+ norm_cfg=self.norm_cfg)
+
+ def init_weights(self):
+ """Initialize the weights of FPN module."""
+ for m in self.modules():
+ if isinstance(m, nn.Conv2d):
+ xavier_init(m, distribution='uniform')
+
+ def forward(self, inputs):
+ """Forward function."""
+ assert len(inputs) == self.num_levels
+
+ # step 1: gather multi-level features by resize and average
+ feats = []
+ gather_size = inputs[self.refine_level].size()[2:]
+ for i in range(self.num_levels):
+ if i < self.refine_level:
+ gathered = F.adaptive_max_pool2d(
+ inputs[i], output_size=gather_size)
+ else:
+ gathered = F.interpolate(
+ inputs[i], size=gather_size, mode='nearest')
+ feats.append(gathered)
+
+ bsf = sum(feats) / len(feats)
+
+ # step 2: refine gathered features
+ if self.refine_type is not None:
+ bsf = self.refine(bsf)
+
+ # step 3: scatter refined features to multi-levels by a residual path
+ outs = []
+ for i in range(self.num_levels):
+ out_size = inputs[i].size()[2:]
+ if i < self.refine_level:
+ residual = F.interpolate(bsf, size=out_size, mode='nearest')
+ else:
+ residual = F.adaptive_max_pool2d(bsf, output_size=out_size)
+ outs.append(residual + inputs[i])
+
+ return tuple(outs)
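A quick smoke test of the gather-refine-scatter flow above; the shapes and channel count are arbitrary, and mmcv must be installed for ConvModule:

import torch
from annotator.uniformer.mmdet.models.necks import BFP

neck = BFP(in_channels=256, num_levels=5, refine_level=2, refine_type='conv')
neck.init_weights()
# five pyramid levels, all 256-channel, halving in resolution
feats = [torch.rand(1, 256, s, s) for s in (64, 32, 16, 8, 4)]
outs = neck(feats)
assert [o.shape for o in outs] == [f.shape for f in feats]  # BFP preserves shapes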
diff --git a/annotator/uniformer/mmdet/models/necks/channel_mapper.py b/annotator/uniformer/mmdet/models/necks/channel_mapper.py
new file mode 100644
index 0000000000000000000000000000000000000000..a4f5ed44caefb1612df67785b1f4f0d9ec46ee93
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/necks/channel_mapper.py
@@ -0,0 +1,74 @@
+import torch.nn as nn
+from mmcv.cnn import ConvModule, xavier_init
+
+from ..builder import NECKS
+
+
+@NECKS.register_module()
+class ChannelMapper(nn.Module):
+ r"""Channel Mapper to reduce/increase channels of backbone features.
+
+ This is used to reduce/increase channels of backbone features.
+
+ Args:
+ in_channels (List[int]): Number of input channels per scale.
+ out_channels (int): Number of output channels (used at each scale).
+ kernel_size (int, optional): kernel_size for reducing channels (used
+ at each scale). Default: 3.
+ conv_cfg (dict, optional): Config dict for convolution layer.
+ Default: None.
+ norm_cfg (dict, optional): Config dict for normalization layer.
+ Default: None.
+ act_cfg (dict, optional): Config dict for activation layer in
+ ConvModule. Default: dict(type='ReLU').
+
+ Example:
+ >>> import torch
+ >>> in_channels = [2, 3, 5, 7]
+ >>> scales = [340, 170, 84, 43]
+ >>> inputs = [torch.rand(1, c, s, s)
+ ... for c, s in zip(in_channels, scales)]
+ >>> self = ChannelMapper(in_channels, 11, 3).eval()
+ >>> outputs = self.forward(inputs)
+ >>> for i in range(len(outputs)):
+ ... print(f'outputs[{i}].shape = {outputs[i].shape}')
+ outputs[0].shape = torch.Size([1, 11, 340, 340])
+ outputs[1].shape = torch.Size([1, 11, 170, 170])
+ outputs[2].shape = torch.Size([1, 11, 84, 84])
+ outputs[3].shape = torch.Size([1, 11, 43, 43])
+ """
+
+ def __init__(self,
+ in_channels,
+ out_channels,
+ kernel_size=3,
+ conv_cfg=None,
+ norm_cfg=None,
+ act_cfg=dict(type='ReLU')):
+ super(ChannelMapper, self).__init__()
+ assert isinstance(in_channels, list)
+
+ self.convs = nn.ModuleList()
+ for in_channel in in_channels:
+ self.convs.append(
+ ConvModule(
+ in_channel,
+ out_channels,
+ kernel_size,
+ padding=(kernel_size - 1) // 2,
+ conv_cfg=conv_cfg,
+ norm_cfg=norm_cfg,
+ act_cfg=act_cfg))
+
+ # default init_weights for conv(msra) and norm in ConvModule
+ def init_weights(self):
+ """Initialize the weights of ChannelMapper module."""
+ for m in self.modules():
+ if isinstance(m, nn.Conv2d):
+ xavier_init(m, distribution='uniform')
+
+ def forward(self, inputs):
+ """Forward function."""
+ assert len(inputs) == len(self.convs)
+ outs = [self.convs[i](inputs[i]) for i in range(len(inputs))]
+ return tuple(outs)
diff --git a/annotator/uniformer/mmdet/models/necks/fpg.py b/annotator/uniformer/mmdet/models/necks/fpg.py
new file mode 100644
index 0000000000000000000000000000000000000000..c8e0d163ccf8cef6211530ba6c1b4d558ff6403f
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/necks/fpg.py
@@ -0,0 +1,398 @@
+import torch.nn as nn
+import torch.nn.functional as F
+from mmcv.cnn import ConvModule, caffe2_xavier_init, constant_init, is_norm
+
+from ..builder import NECKS
+
+
+class Transition(nn.Module):
+ """Base class for transition.
+
+ Args:
+ in_channels (int): Number of input channels.
+ out_channels (int): Number of output channels.
+ """
+
+ def __init__(self, in_channels, out_channels):
+ super().__init__()
+ self.in_channels = in_channels
+ self.out_channels = out_channels
+
+ def forward(self, x):
+ raise NotImplementedError
+
+
+class UpInterpolationConv(Transition):
+ """A transition used for up-sampling.
+
+ Up-sample the input by interpolation then refines the feature by
+ a convolution layer.
+
+ Args:
+ in_channels (int): Number of input channels.
+ out_channels (int): Number of output channels.
+ scale_factor (int): Up-sampling factor. Default: 2.
+ mode (str): Interpolation mode. Default: 'nearest'.
+ align_corners (bool): Whether align corners when interpolation.
+ Default: None.
+ kernel_size (int): Kernel size for the conv. Default: 3.
+ """
+
+ def __init__(self,
+ in_channels,
+ out_channels,
+ scale_factor=2,
+ mode='nearest',
+ align_corners=None,
+ kernel_size=3,
+ **kwargs):
+ super().__init__(in_channels, out_channels)
+ self.mode = mode
+ self.scale_factor = scale_factor
+ self.align_corners = align_corners
+ self.conv = ConvModule(
+ in_channels,
+ out_channels,
+ kernel_size,
+ padding=(kernel_size - 1) // 2,
+ **kwargs)
+
+ def forward(self, x):
+ x = F.interpolate(
+ x,
+ scale_factor=self.scale_factor,
+ mode=self.mode,
+ align_corners=self.align_corners)
+ x = self.conv(x)
+ return x
+
+
+class LastConv(Transition):
+ """A transition used for refining the output of the last stage.
+
+ Args:
+ in_channels (int): Number of input channels.
+ out_channels (int): Number of output channels.
+ num_inputs (int): Number of inputs of the FPN features.
+ kernel_size (int): Kernel size for the conv. Default: 3.
+ """
+
+ def __init__(self,
+ in_channels,
+ out_channels,
+ num_inputs,
+ kernel_size=3,
+ **kwargs):
+ super().__init__(in_channels, out_channels)
+ self.num_inputs = num_inputs
+ self.conv_out = ConvModule(
+ in_channels,
+ out_channels,
+ kernel_size,
+ padding=(kernel_size - 1) // 2,
+ **kwargs)
+
+ def forward(self, inputs):
+ assert len(inputs) == self.num_inputs
+ return self.conv_out(inputs[-1])
+
+
+@NECKS.register_module()
+class FPG(nn.Module):
+ """FPG.
+
+ Implementation of `Feature Pyramid Grids (FPG)
+ <https://arxiv.org/abs/2004.03580>`_.
+ This implementation only gives the basic structure stated in the paper.
+ Users can implement different types of transitions to fully explore the
+ potential power of the structure of FPG.
+
+ Args:
+ in_channels (int): Number of input channels (feature maps of all levels
+ should have the same channels).
+ out_channels (int): Number of output channels (used at each scale)
+ num_outs (int): Number of output scales.
+ stack_times (int): The number of times the pyramid architecture will
+ be stacked.
+ paths (list[str]): Specify the path order of each stack level.
+ Each element in the list should be either 'bu' (bottom-up) or
+ 'td' (top-down).
+ inter_channels (int): Number of inter channels.
+ same_up_trans (dict): Transition that goes up at the same stage.
+ same_down_trans (dict): Transition that goes down at the same stage.
+ across_lateral_trans (dict): Across-pathway same-stage connection.
+ across_down_trans (dict): Across-pathway bottom-up connection.
+ across_up_trans (dict): Across-pathway top-down connection.
+ across_skip_trans (dict): Across-pathway skip connection.
+ output_trans (dict): Transition that trans the output of the
+ last stage.
+ start_level (int): Index of the start input backbone level used to
+ build the feature pyramid. Default: 0.
+ end_level (int): Index of the end input backbone level (exclusive) to
+ build the feature pyramid. Default: -1, which means the last level.
+ add_extra_convs (bool): It decides whether to add conv
+ layers on top of the original feature maps. Defaults to False.
+ norm_cfg (dict): Config dict for normalization layer. Default: None.
+ """
+
+ transition_types = {
+ 'conv': ConvModule,
+ 'interpolation_conv': UpInterpolationConv,
+ 'last_conv': LastConv,
+ }
+
+ def __init__(self,
+ in_channels,
+ out_channels,
+ num_outs,
+ stack_times,
+ paths,
+ inter_channels=None,
+ same_down_trans=None,
+ same_up_trans=dict(
+ type='conv', kernel_size=3, stride=2, padding=1),
+ across_lateral_trans=dict(type='conv', kernel_size=1),
+ across_down_trans=dict(type='conv', kernel_size=3),
+ across_up_trans=None,
+ across_skip_trans=dict(type='identity'),
+ output_trans=dict(type='last_conv', kernel_size=3),
+ start_level=0,
+ end_level=-1,
+ add_extra_convs=False,
+ norm_cfg=None,
+ skip_inds=None):
+ super(FPG, self).__init__()
+ assert isinstance(in_channels, list)
+ self.in_channels = in_channels
+ self.out_channels = out_channels
+ self.num_ins = len(in_channels)
+ self.num_outs = num_outs
+ if inter_channels is None:
+ self.inter_channels = [out_channels for _ in range(num_outs)]
+ elif isinstance(inter_channels, int):
+ self.inter_channels = [inter_channels for _ in range(num_outs)]
+ else:
+ assert isinstance(inter_channels, list)
+ assert len(inter_channels) == num_outs
+ self.inter_channels = inter_channels
+ self.stack_times = stack_times
+ self.paths = paths
+ assert isinstance(paths, list) and len(paths) == stack_times
+ for d in paths:
+ assert d in ('bu', 'td')
+
+ self.same_down_trans = same_down_trans
+ self.same_up_trans = same_up_trans
+ self.across_lateral_trans = across_lateral_trans
+ self.across_down_trans = across_down_trans
+ self.across_up_trans = across_up_trans
+ self.output_trans = output_trans
+ self.across_skip_trans = across_skip_trans
+
+ self.with_bias = norm_cfg is None
+ # skip_inds must be specified if across_skip_trans is not None
+ if self.across_skip_trans is not None:
+ assert skip_inds is not None
+ self.skip_inds = skip_inds
+ assert len(self.skip_inds[0]) <= self.stack_times
+
+ if end_level == -1:
+ self.backbone_end_level = self.num_ins
+ assert num_outs >= self.num_ins - start_level
+ else:
+ # if end_level < inputs, no extra level is allowed
+ self.backbone_end_level = end_level
+ assert end_level <= len(in_channels)
+ assert num_outs == end_level - start_level
+ self.start_level = start_level
+ self.end_level = end_level
+ self.add_extra_convs = add_extra_convs
+
+ # build lateral 1x1 convs to reduce channels
+ self.lateral_convs = nn.ModuleList()
+ for i in range(self.start_level, self.backbone_end_level):
+ l_conv = nn.Conv2d(self.in_channels[i],
+ self.inter_channels[i - self.start_level], 1)
+ self.lateral_convs.append(l_conv)
+
+ extra_levels = num_outs - self.backbone_end_level + self.start_level
+ self.extra_downsamples = nn.ModuleList()
+ for i in range(extra_levels):
+ if self.add_extra_convs:
+ fpn_idx = self.backbone_end_level - self.start_level + i
+ extra_conv = nn.Conv2d(
+ self.inter_channels[fpn_idx - 1],
+ self.inter_channels[fpn_idx],
+ 3,
+ stride=2,
+ padding=1)
+ self.extra_downsamples.append(extra_conv)
+ else:
+ self.extra_downsamples.append(nn.MaxPool2d(1, stride=2))
+
+ self.fpn_transitions = nn.ModuleList() # stack times
+ for s in range(self.stack_times):
+ stage_trans = nn.ModuleList() # num of feature levels
+ for i in range(self.num_outs):
+ # same, across_lateral, across_down, across_up
+ trans = nn.ModuleDict()
+ if s in self.skip_inds[i]:
+ stage_trans.append(trans)
+ continue
+ # build same-stage up trans (used in bottom-up paths)
+ if i == 0 or self.same_up_trans is None:
+ same_up_trans = None
+ else:
+ same_up_trans = self.build_trans(
+ self.same_up_trans, self.inter_channels[i - 1],
+ self.inter_channels[i])
+ trans['same_up'] = same_up_trans
+ # build same-stage down trans (used in top-down paths)
+ if i == self.num_outs - 1 or self.same_down_trans is None:
+ same_down_trans = None
+ else:
+ same_down_trans = self.build_trans(
+ self.same_down_trans, self.inter_channels[i + 1],
+ self.inter_channels[i])
+ trans['same_down'] = same_down_trans
+ # build across lateral trans
+ across_lateral_trans = self.build_trans(
+ self.across_lateral_trans, self.inter_channels[i],
+ self.inter_channels[i])
+ trans['across_lateral'] = across_lateral_trans
+ # build across down trans
+ if i == self.num_outs - 1 or self.across_down_trans is None:
+ across_down_trans = None
+ else:
+ across_down_trans = self.build_trans(
+ self.across_down_trans, self.inter_channels[i + 1],
+ self.inter_channels[i])
+ trans['across_down'] = across_down_trans
+ # build across up trans
+ if i == 0 or self.across_up_trans is None:
+ across_up_trans = None
+ else:
+ across_up_trans = self.build_trans(
+ self.across_up_trans, self.inter_channels[i - 1],
+ self.inter_channels[i])
+ trans['across_up'] = across_up_trans
+ # build across skip trans
+ if self.across_skip_trans is None:
+ across_skip_trans = None
+ else:
+ across_skip_trans = self.build_trans(
+ self.across_skip_trans, self.inter_channels[i - 1],
+ self.inter_channels[i])
+ trans['across_skip'] = across_skip_trans
+ stage_trans.append(trans)
+ self.fpn_transitions.append(stage_trans)
+
+ self.output_transition = nn.ModuleList() # output levels
+ for i in range(self.num_outs):
+ trans = self.build_trans(
+ self.output_trans,
+ self.inter_channels[i],
+ self.out_channels,
+ num_inputs=self.stack_times + 1)
+ self.output_transition.append(trans)
+
+ self.relu = nn.ReLU(inplace=True)
+
+ def build_trans(self, cfg, in_channels, out_channels, **extra_args):
+ cfg_ = cfg.copy()
+ trans_type = cfg_.pop('type')
+ trans_cls = self.transition_types[trans_type]
+ return trans_cls(in_channels, out_channels, **cfg_, **extra_args)
+
+ def init_weights(self):
+ for m in self.modules():
+ if isinstance(m, nn.Conv2d):
+ caffe2_xavier_init(m)
+ elif is_norm(m):
+ constant_init(m, 1.0)
+
+ def fuse(self, fuse_dict):
+ out = None
+ for item in fuse_dict.values():
+ if item is not None:
+ if out is None:
+ out = item
+ else:
+ out = out + item
+ return out
+
+ def forward(self, inputs):
+ assert len(inputs) == len(self.in_channels)
+
+ # build all levels from original feature maps
+ feats = [
+ lateral_conv(inputs[i + self.start_level])
+ for i, lateral_conv in enumerate(self.lateral_convs)
+ ]
+ for downsample in self.extra_downsamples:
+ feats.append(downsample(feats[-1]))
+
+ outs = [feats]
+
+ for i in range(self.stack_times):
+ current_outs = outs[-1]
+ next_outs = []
+ direction = self.paths[i]
+ for j in range(self.num_outs):
+ if i in self.skip_inds[j]:
+ next_outs.append(outs[-1][j])
+ continue
+ # feature level
+ if direction == 'td':
+ lvl = self.num_outs - j - 1
+ else:
+ lvl = j
+ # get transitions
+ if direction == 'td':
+ same_trans = self.fpn_transitions[i][lvl]['same_down']
+ else:
+ same_trans = self.fpn_transitions[i][lvl]['same_up']
+ across_lateral_trans = self.fpn_transitions[i][lvl][
+ 'across_lateral']
+ across_down_trans = self.fpn_transitions[i][lvl]['across_down']
+ across_up_trans = self.fpn_transitions[i][lvl]['across_up']
+ across_skip_trans = self.fpn_transitions[i][lvl]['across_skip']
+ # init output
+ to_fuse = dict(
+ same=None, lateral=None, across_up=None, across_down=None)
+ # same downsample/upsample
+ if same_trans is not None:
+ to_fuse['same'] = same_trans(next_outs[-1])
+ # across lateral
+ if across_lateral_trans is not None:
+ to_fuse['lateral'] = across_lateral_trans(
+ current_outs[lvl])
+ # across downsample
+ if lvl > 0 and across_up_trans is not None:
+ to_fuse['across_up'] = across_up_trans(current_outs[lvl -
+ 1])
+ # across upsample
+ if (lvl < self.num_outs - 1 and across_down_trans is not None):
+ to_fuse['across_down'] = across_down_trans(
+ current_outs[lvl + 1])
+ if across_skip_trans is not None:
+ to_fuse['across_skip'] = across_skip_trans(outs[0][lvl])
+ x = self.fuse(to_fuse)
+ next_outs.append(x)
+
+ if direction == 'td':
+ outs.append(next_outs[::-1])
+ else:
+ outs.append(next_outs)
+
+ # output trans
+ final_outs = []
+ for i in range(self.num_outs):
+ lvl_out_list = []
+ for s in range(len(outs)):
+ lvl_out_list.append(outs[s][i])
+ lvl_out = self.output_transition[i](lvl_out_list)
+ final_outs.append(lvl_out)
+
+ return final_outs
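FPG is normally driven entirely from a config dict. A hedged sketch of what such a config might look like; the values below are illustrative (loosely modeled on the public FPG configs), and note that the default across_skip_trans type 'identity' is not registered in transition_types above, so configs override it with a registered type:

# illustrative values only, not a verified training config
norm_cfg = dict(type='BN', requires_grad=True)
fpg_neck_cfg = dict(
    type='FPG',
    in_channels=[256, 512, 1024, 2048],
    out_channels=256,
    inter_channels=256,
    num_outs=5,
    stack_times=9,
    paths=['bu'] * 9,    # len(paths) must equal stack_times
    same_down_trans=None,
    same_up_trans=dict(type='conv', kernel_size=3, stride=2, padding=1, norm_cfg=norm_cfg),
    across_lateral_trans=dict(type='conv', kernel_size=1, norm_cfg=norm_cfg),
    across_down_trans=dict(type='interpolation_conv', mode='nearest', kernel_size=3, norm_cfg=norm_cfg),
    across_up_trans=None,
    across_skip_trans=dict(type='conv', kernel_size=1, norm_cfg=norm_cfg),
    output_trans=dict(type='last_conv', kernel_size=3),
    norm_cfg=norm_cfg,
    # one tuple of stage indices to skip per output level
    skip_inds=[(0, 1, 2, 3), (0, 1, 2), (0, 1), (0,), ()])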
diff --git a/annotator/uniformer/mmdet/models/necks/fpn.py b/annotator/uniformer/mmdet/models/necks/fpn.py
new file mode 100644
index 0000000000000000000000000000000000000000..5e5dfe685964f06e7a66b63a13e66162e63fcafd
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/necks/fpn.py
@@ -0,0 +1,221 @@
+import warnings
+
+import torch.nn as nn
+import torch.nn.functional as F
+from mmcv.cnn import ConvModule, xavier_init
+from mmcv.runner import auto_fp16
+
+from ..builder import NECKS
+
+
+@NECKS.register_module()
+class FPN(nn.Module):
+ r"""Feature Pyramid Network.
+
+ This is an implementation of paper `Feature Pyramid Networks for Object
+ Detection <https://arxiv.org/abs/1612.03144>`_.
+
+ Args:
+ in_channels (List[int]): Number of input channels per scale.
+ out_channels (int): Number of output channels (used at each scale)
+ num_outs (int): Number of output scales.
+ start_level (int): Index of the start input backbone level used to
+ build the feature pyramid. Default: 0.
+ end_level (int): Index of the end input backbone level (exclusive) to
+ build the feature pyramid. Default: -1, which means the last level.
+ add_extra_convs (bool | str): If bool, it decides whether to add conv
+ layers on top of the original feature maps. Default to False.
+ If True, its actual mode is specified by `extra_convs_on_inputs`.
+ If str, it specifies the source feature map of the extra convs.
+ Only the following options are allowed
+
+ - 'on_input': Last feat map of neck inputs (i.e. backbone feature).
+ - 'on_lateral': Last feature map after lateral convs.
+ - 'on_output': The last output feature map after fpn convs.
+ extra_convs_on_inputs (bool, deprecated): Whether to apply extra convs
+ on the original feature from the backbone. If True,
+ it is equivalent to `add_extra_convs='on_input'`. If False, it is
+ equivalent to set `add_extra_convs='on_output'`. Default to True.
+ relu_before_extra_convs (bool): Whether to apply relu before the extra
+ conv. Default: False.
+ no_norm_on_lateral (bool): Whether to apply norm on lateral.
+ Default: False.
+ conv_cfg (dict): Config dict for convolution layer. Default: None.
+ norm_cfg (dict): Config dict for normalization layer. Default: None.
+ act_cfg (dict): Config dict for activation layer in ConvModule.
+ Default: None.
+ upsample_cfg (dict): Config dict for interpolate layer.
+ Default: `dict(mode='nearest')`
+
+ Example:
+ >>> import torch
+ >>> in_channels = [2, 3, 5, 7]
+ >>> scales = [340, 170, 84, 43]
+ >>> inputs = [torch.rand(1, c, s, s)
+ ... for c, s in zip(in_channels, scales)]
+ >>> self = FPN(in_channels, 11, len(in_channels)).eval()
+ >>> outputs = self.forward(inputs)
+ >>> for i in range(len(outputs)):
+ ... print(f'outputs[{i}].shape = {outputs[i].shape}')
+ outputs[0].shape = torch.Size([1, 11, 340, 340])
+ outputs[1].shape = torch.Size([1, 11, 170, 170])
+ outputs[2].shape = torch.Size([1, 11, 84, 84])
+ outputs[3].shape = torch.Size([1, 11, 43, 43])
+ """
+
+ def __init__(self,
+ in_channels,
+ out_channels,
+ num_outs,
+ start_level=0,
+ end_level=-1,
+ add_extra_convs=False,
+ extra_convs_on_inputs=True,
+ relu_before_extra_convs=False,
+ no_norm_on_lateral=False,
+ conv_cfg=None,
+ norm_cfg=None,
+ act_cfg=None,
+ upsample_cfg=dict(mode='nearest')):
+ super(FPN, self).__init__()
+ assert isinstance(in_channels, list)
+ self.in_channels = in_channels
+ self.out_channels = out_channels
+ self.num_ins = len(in_channels)
+ self.num_outs = num_outs
+ self.relu_before_extra_convs = relu_before_extra_convs
+ self.no_norm_on_lateral = no_norm_on_lateral
+ self.fp16_enabled = False
+ self.upsample_cfg = upsample_cfg.copy()
+
+ if end_level == -1:
+ self.backbone_end_level = self.num_ins
+ assert num_outs >= self.num_ins - start_level
+ else:
+ # if end_level < inputs, no extra level is allowed
+ self.backbone_end_level = end_level
+ assert end_level <= len(in_channels)
+ assert num_outs == end_level - start_level
+ self.start_level = start_level
+ self.end_level = end_level
+ self.add_extra_convs = add_extra_convs
+ assert isinstance(add_extra_convs, (str, bool))
+ if isinstance(add_extra_convs, str):
+ # Extra_convs_source choices: 'on_input', 'on_lateral', 'on_output'
+ assert add_extra_convs in ('on_input', 'on_lateral', 'on_output')
+ elif add_extra_convs: # True
+ if extra_convs_on_inputs:
+ # TODO: deprecate `extra_convs_on_inputs`
+ warnings.simplefilter('once')
+ warnings.warn(
+ '"extra_convs_on_inputs" will be deprecated in v2.9.0,'
+ 'Please use "add_extra_convs"', DeprecationWarning)
+ self.add_extra_convs = 'on_input'
+ else:
+ self.add_extra_convs = 'on_output'
+
+ self.lateral_convs = nn.ModuleList()
+ self.fpn_convs = nn.ModuleList()
+
+ for i in range(self.start_level, self.backbone_end_level):
+ l_conv = ConvModule(
+ in_channels[i],
+ out_channels,
+ 1,
+ conv_cfg=conv_cfg,
+ norm_cfg=norm_cfg if not self.no_norm_on_lateral else None,
+ act_cfg=act_cfg,
+ inplace=False)
+ fpn_conv = ConvModule(
+ out_channels,
+ out_channels,
+ 3,
+ padding=1,
+ conv_cfg=conv_cfg,
+ norm_cfg=norm_cfg,
+ act_cfg=act_cfg,
+ inplace=False)
+
+ self.lateral_convs.append(l_conv)
+ self.fpn_convs.append(fpn_conv)
+
+ # add extra conv layers (e.g., RetinaNet)
+ extra_levels = num_outs - self.backbone_end_level + self.start_level
+ if self.add_extra_convs and extra_levels >= 1:
+ for i in range(extra_levels):
+ if i == 0 and self.add_extra_convs == 'on_input':
+ in_channels = self.in_channels[self.backbone_end_level - 1]
+ else:
+ in_channels = out_channels
+ extra_fpn_conv = ConvModule(
+ in_channels,
+ out_channels,
+ 3,
+ stride=2,
+ padding=1,
+ conv_cfg=conv_cfg,
+ norm_cfg=norm_cfg,
+ act_cfg=act_cfg,
+ inplace=False)
+ self.fpn_convs.append(extra_fpn_conv)
+
+ # default init_weights for conv(msra) and norm in ConvModule
+ def init_weights(self):
+ """Initialize the weights of FPN module."""
+ for m in self.modules():
+ if isinstance(m, nn.Conv2d):
+ xavier_init(m, distribution='uniform')
+
+ @auto_fp16()
+ def forward(self, inputs):
+ """Forward function."""
+ assert len(inputs) == len(self.in_channels)
+
+ # build laterals
+ laterals = [
+ lateral_conv(inputs[i + self.start_level])
+ for i, lateral_conv in enumerate(self.lateral_convs)
+ ]
+
+ # build top-down path
+ used_backbone_levels = len(laterals)
+ for i in range(used_backbone_levels - 1, 0, -1):
+ # In some cases, fixing `scale factor` (e.g. 2) is preferred, but
+ # it cannot co-exist with `size` in `F.interpolate`.
+ if 'scale_factor' in self.upsample_cfg:
+ laterals[i - 1] += F.interpolate(laterals[i],
+ **self.upsample_cfg)
+ else:
+ prev_shape = laterals[i - 1].shape[2:]
+ laterals[i - 1] += F.interpolate(
+ laterals[i], size=prev_shape, **self.upsample_cfg)
+
+ # build outputs
+ # part 1: from original levels
+ outs = [
+ self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels)
+ ]
+ # part 2: add extra levels
+ if self.num_outs > len(outs):
+ # use max pool to get more levels on top of outputs
+ # (e.g., Faster R-CNN, Mask R-CNN)
+ if not self.add_extra_convs:
+ for i in range(self.num_outs - used_backbone_levels):
+ outs.append(F.max_pool2d(outs[-1], 1, stride=2))
+ # add conv layers on top of original feature maps (RetinaNet)
+ else:
+ if self.add_extra_convs == 'on_input':
+ extra_source = inputs[self.backbone_end_level - 1]
+ elif self.add_extra_convs == 'on_lateral':
+ extra_source = laterals[-1]
+ elif self.add_extra_convs == 'on_output':
+ extra_source = outs[-1]
+ else:
+ raise NotImplementedError
+ outs.append(self.fpn_convs[used_backbone_levels](extra_source))
+ for i in range(used_backbone_levels + 1, self.num_outs):
+ if self.relu_before_extra_convs:
+ outs.append(self.fpn_convs[i](F.relu(outs[-1])))
+ else:
+ outs.append(self.fpn_convs[i](outs[-1]))
+ return tuple(outs)
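Beyond the doctest in the docstring, a common setup is the RetinaNet-style pyramid where the extra P6/P7 levels are convolved from the backbone's last feature map. A small sketch (mmcv required; channel and size values are arbitrary):

import torch
from annotator.uniformer.mmdet.models.necks import FPN

neck = FPN(
    in_channels=[256, 512, 1024, 2048],
    out_channels=256,
    num_outs=5,
    start_level=1,               # skip C2
    add_extra_convs='on_input')  # P6/P7 are convolved from C5
neck.init_weights()
inputs = [torch.rand(1, c, s, s)
          for c, s in zip([256, 512, 1024, 2048], [64, 32, 16, 8])]
outs = neck(inputs)
print([tuple(o.shape) for o in outs])
# five 256-channel levels whose spatial sizes go 32, 16, 8, 4, 2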
diff --git a/annotator/uniformer/mmdet/models/necks/fpn_carafe.py b/annotator/uniformer/mmdet/models/necks/fpn_carafe.py
new file mode 100644
index 0000000000000000000000000000000000000000..302e6576df9914e49166539108d6048b78c1fe71
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/necks/fpn_carafe.py
@@ -0,0 +1,267 @@
+import torch.nn as nn
+from mmcv.cnn import ConvModule, build_upsample_layer, xavier_init
+from mmcv.ops.carafe import CARAFEPack
+
+from ..builder import NECKS
+
+
+@NECKS.register_module()
+class FPN_CARAFE(nn.Module):
+ """FPN_CARAFE is a more flexible implementation of FPN. It allows more
+ choice for upsample methods during the top-down pathway.
+
+ It can reproduce the performance of ICCV 2019 paper
+ CARAFE: Content-Aware ReAssembly of FEatures
+ Please refer to https://arxiv.org/abs/1905.02188 for more details.
+
+ Args:
+ in_channels (list[int]): Number of channels for each input feature map.
+ out_channels (int): Output channels of feature pyramids.
+ num_outs (int): Number of output stages.
+ start_level (int): Start level of feature pyramids.
+ (Default: 0)
+ end_level (int): End level of feature pyramids.
+ (Default: -1 indicates the last level).
+ norm_cfg (dict): Dictionary to construct and config norm layer.
+ act_cfg (dict): Config dict for activation layer in ConvModule
+ (Default: None indicates w/o activation).
+ order (tuple[str]): Order of components in ConvModule.
+ upsample_cfg (dict): Dictionary to construct and config the upsample
+ layer; its ``type`` field selects the upsample method.
+ """
+
+ def __init__(self,
+ in_channels,
+ out_channels,
+ num_outs,
+ start_level=0,
+ end_level=-1,
+ norm_cfg=None,
+ act_cfg=None,
+ order=('conv', 'norm', 'act'),
+ upsample_cfg=dict(
+ type='carafe',
+ up_kernel=5,
+ up_group=1,
+ encoder_kernel=3,
+ encoder_dilation=1)):
+ super(FPN_CARAFE, self).__init__()
+ assert isinstance(in_channels, list)
+ self.in_channels = in_channels
+ self.out_channels = out_channels
+ self.num_ins = len(in_channels)
+ self.num_outs = num_outs
+ self.norm_cfg = norm_cfg
+ self.act_cfg = act_cfg
+ self.with_bias = norm_cfg is None
+ self.upsample_cfg = upsample_cfg.copy()
+ self.upsample = self.upsample_cfg.get('type')
+ self.relu = nn.ReLU(inplace=False)
+
+ self.order = order
+ assert order in [('conv', 'norm', 'act'), ('act', 'conv', 'norm')]
+
+ assert self.upsample in [
+ 'nearest', 'bilinear', 'deconv', 'pixel_shuffle', 'carafe', None
+ ]
+ if self.upsample in ['deconv', 'pixel_shuffle']:
+ assert hasattr(
+ self.upsample_cfg,
+ 'upsample_kernel') and self.upsample_cfg.upsample_kernel > 0
+ self.upsample_kernel = self.upsample_cfg.pop('upsample_kernel')
+
+ if end_level == -1:
+ self.backbone_end_level = self.num_ins
+ assert num_outs >= self.num_ins - start_level
+ else:
+ # if end_level < inputs, no extra level is allowed
+ self.backbone_end_level = end_level
+ assert end_level <= len(in_channels)
+ assert num_outs == end_level - start_level
+ self.start_level = start_level
+ self.end_level = end_level
+
+ self.lateral_convs = nn.ModuleList()
+ self.fpn_convs = nn.ModuleList()
+ self.upsample_modules = nn.ModuleList()
+
+ for i in range(self.start_level, self.backbone_end_level):
+ l_conv = ConvModule(
+ in_channels[i],
+ out_channels,
+ 1,
+ norm_cfg=norm_cfg,
+ bias=self.with_bias,
+ act_cfg=act_cfg,
+ inplace=False,
+ order=self.order)
+ fpn_conv = ConvModule(
+ out_channels,
+ out_channels,
+ 3,
+ padding=1,
+ norm_cfg=self.norm_cfg,
+ bias=self.with_bias,
+ act_cfg=act_cfg,
+ inplace=False,
+ order=self.order)
+ if i != self.backbone_end_level - 1:
+ upsample_cfg_ = self.upsample_cfg.copy()
+ if self.upsample == 'deconv':
+ upsample_cfg_.update(
+ in_channels=out_channels,
+ out_channels=out_channels,
+ kernel_size=self.upsample_kernel,
+ stride=2,
+ padding=(self.upsample_kernel - 1) // 2,
+ output_padding=(self.upsample_kernel - 1) // 2)
+ elif self.upsample == 'pixel_shuffle':
+ upsample_cfg_.update(
+ in_channels=out_channels,
+ out_channels=out_channels,
+ scale_factor=2,
+ upsample_kernel=self.upsample_kernel)
+ elif self.upsample == 'carafe':
+ upsample_cfg_.update(channels=out_channels, scale_factor=2)
+ else:
+ # suppress warnings
+ align_corners = (None
+ if self.upsample == 'nearest' else False)
+ upsample_cfg_.update(
+ scale_factor=2,
+ mode=self.upsample,
+ align_corners=align_corners)
+ upsample_module = build_upsample_layer(upsample_cfg_)
+ self.upsample_modules.append(upsample_module)
+ self.lateral_convs.append(l_conv)
+ self.fpn_convs.append(fpn_conv)
+
+ # add extra conv layers (e.g., RetinaNet)
+ extra_out_levels = (
+ num_outs - self.backbone_end_level + self.start_level)
+ if extra_out_levels >= 1:
+ for i in range(extra_out_levels):
+ in_channels = (
+ self.in_channels[self.backbone_end_level -
+ 1] if i == 0 else out_channels)
+ extra_l_conv = ConvModule(
+ in_channels,
+ out_channels,
+ 3,
+ stride=2,
+ padding=1,
+ norm_cfg=norm_cfg,
+ bias=self.with_bias,
+ act_cfg=act_cfg,
+ inplace=False,
+ order=self.order)
+ if self.upsample == 'deconv':
+ upsampler_cfg_ = dict(
+ in_channels=out_channels,
+ out_channels=out_channels,
+ kernel_size=self.upsample_kernel,
+ stride=2,
+ padding=(self.upsample_kernel - 1) // 2,
+ output_padding=(self.upsample_kernel - 1) // 2)
+ elif self.upsample == 'pixel_shuffle':
+ upsampler_cfg_ = dict(
+ in_channels=out_channels,
+ out_channels=out_channels,
+ scale_factor=2,
+ upsample_kernel=self.upsample_kernel)
+ elif self.upsample == 'carafe':
+ upsampler_cfg_ = dict(
+ channels=out_channels,
+ scale_factor=2,
+ **self.upsample_cfg)
+ else:
+ # suppress warnings
+ align_corners = (None
+ if self.upsample == 'nearest' else False)
+ upsampler_cfg_ = dict(
+ scale_factor=2,
+ mode=self.upsample,
+ align_corners=align_corners)
+ upsampler_cfg_['type'] = self.upsample
+ upsample_module = build_upsample_layer(upsampler_cfg_)
+ extra_fpn_conv = ConvModule(
+ out_channels,
+ out_channels,
+ 3,
+ padding=1,
+ norm_cfg=self.norm_cfg,
+ bias=self.with_bias,
+ act_cfg=act_cfg,
+ inplace=False,
+ order=self.order)
+ self.upsample_modules.append(upsample_module)
+ self.fpn_convs.append(extra_fpn_conv)
+ self.lateral_convs.append(extra_l_conv)
+
+ # default init_weights for conv(msra) and norm in ConvModule
+ def init_weights(self):
+ """Initialize the weights of module."""
+ for m in self.modules():
+ if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
+ xavier_init(m, distribution='uniform')
+ for m in self.modules():
+ if isinstance(m, CARAFEPack):
+ m.init_weights()
+
+ def slice_as(self, src, dst):
+ """Slice ``src`` as ``dst``
+
+ Note:
+ ``src`` should have the same or larger size than ``dst``.
+
+ Args:
+ src (torch.Tensor): Tensors to be sliced.
+ dst (torch.Tensor): ``src`` will be sliced to have the same
+ size as ``dst``.
+
+ Returns:
+ torch.Tensor: Sliced tensor.
+ """
+ assert (src.size(2) >= dst.size(2)) and (src.size(3) >= dst.size(3))
+ if src.size(2) == dst.size(2) and src.size(3) == dst.size(3):
+ return src
+ else:
+ return src[:, :, :dst.size(2), :dst.size(3)]
+
+ def tensor_add(self, a, b):
+ """Add tensors ``a`` and ``b`` that might have different sizes."""
+ if a.size() == b.size():
+ c = a + b
+ else:
+ c = a + self.slice_as(b, a)
+ return c
+
+ def forward(self, inputs):
+ """Forward function."""
+ assert len(inputs) == len(self.in_channels)
+
+ # build laterals
+ laterals = []
+ for i, lateral_conv in enumerate(self.lateral_convs):
+ if i <= self.backbone_end_level - self.start_level:
+ input = inputs[min(i + self.start_level, len(inputs) - 1)]
+ else:
+ input = laterals[-1]
+ lateral = lateral_conv(input)
+ laterals.append(lateral)
+
+ # build top-down path
+ for i in range(len(laterals) - 1, 0, -1):
+ if self.upsample is not None:
+ upsample_feat = self.upsample_modules[i - 1](laterals[i])
+ else:
+ upsample_feat = laterals[i]
+ laterals[i - 1] = self.tensor_add(laterals[i - 1], upsample_feat)
+
+ # build outputs
+ num_conv_outs = len(self.fpn_convs)
+ outs = []
+ for i in range(num_conv_outs):
+ out = self.fpn_convs[i](laterals[i])
+ outs.append(out)
+ return tuple(outs)
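A minimal interface sketch; the whole point of this neck is the CARAFE operator, which needs the compiled ops shipped with mmcv-full (the module-level import above already assumes them), so the snippet below swaps in plain nearest upsampling purely to show the call pattern:

import torch
from annotator.uniformer.mmdet.models.necks import FPN_CARAFE

neck = FPN_CARAFE(
    in_channels=[256, 512, 1024, 2048],
    out_channels=256,
    num_outs=5,
    upsample_cfg=dict(type='nearest'))  # 'carafe' is the default and intended setting
neck.init_weights()
inputs = [torch.rand(1, c, s, s)
          for c, s in zip([256, 512, 1024, 2048], [64, 32, 16, 8])]
outs = neck(inputs)
print([tuple(o.shape) for o in outs])   # five 256-channel levels, sizes 64 down to 4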
diff --git a/annotator/uniformer/mmdet/models/necks/hrfpn.py b/annotator/uniformer/mmdet/models/necks/hrfpn.py
new file mode 100644
index 0000000000000000000000000000000000000000..ed4f194832fc4b6ea77ce54262fb8ffa8675fc4e
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/necks/hrfpn.py
@@ -0,0 +1,102 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from mmcv.cnn import ConvModule, caffe2_xavier_init
+from torch.utils.checkpoint import checkpoint
+
+from ..builder import NECKS
+
+
+@NECKS.register_module()
+class HRFPN(nn.Module):
+ """HRFPN (High Resolution Feature Pyramids)
+
+ paper: `High-Resolution Representations for Labeling Pixels and Regions
+ <https://arxiv.org/abs/1904.04514>`_.
+
+ Args:
+ in_channels (list): number of channels for each branch.
+ out_channels (int): output channels of feature pyramids.
+ num_outs (int): number of output stages.
+ pooling_type (str): pooling for generating feature pyramids
+ from {MAX, AVG}.
+ conv_cfg (dict): dictionary to construct and config conv layer.
+ norm_cfg (dict): dictionary to construct and config norm layer.
+ with_cp (bool): Use checkpoint or not. Using checkpoint will save some
+ memory while slowing down the training speed.
+ stride (int): stride of 3x3 convolutional layers
+ """
+
+ def __init__(self,
+ in_channels,
+ out_channels,
+ num_outs=5,
+ pooling_type='AVG',
+ conv_cfg=None,
+ norm_cfg=None,
+ with_cp=False,
+ stride=1):
+ super(HRFPN, self).__init__()
+ assert isinstance(in_channels, list)
+ self.in_channels = in_channels
+ self.out_channels = out_channels
+ self.num_ins = len(in_channels)
+ self.num_outs = num_outs
+ self.with_cp = with_cp
+ self.conv_cfg = conv_cfg
+ self.norm_cfg = norm_cfg
+
+ self.reduction_conv = ConvModule(
+ sum(in_channels),
+ out_channels,
+ kernel_size=1,
+ conv_cfg=self.conv_cfg,
+ act_cfg=None)
+
+ self.fpn_convs = nn.ModuleList()
+ for i in range(self.num_outs):
+ self.fpn_convs.append(
+ ConvModule(
+ out_channels,
+ out_channels,
+ kernel_size=3,
+ padding=1,
+ stride=stride,
+ conv_cfg=self.conv_cfg,
+ act_cfg=None))
+
+ if pooling_type == 'MAX':
+ self.pooling = F.max_pool2d
+ else:
+ self.pooling = F.avg_pool2d
+
+ def init_weights(self):
+ """Initialize the weights of module."""
+ for m in self.modules():
+ if isinstance(m, nn.Conv2d):
+ caffe2_xavier_init(m)
+
+ def forward(self, inputs):
+ """Forward function."""
+ assert len(inputs) == self.num_ins
+ outs = [inputs[0]]
+ for i in range(1, self.num_ins):
+ outs.append(
+ F.interpolate(inputs[i], scale_factor=2**i, mode='bilinear'))
+ out = torch.cat(outs, dim=1)
+ if out.requires_grad and self.with_cp:
+ out = checkpoint(self.reduction_conv, out)
+ else:
+ out = self.reduction_conv(out)
+ outs = [out]
+ for i in range(1, self.num_outs):
+ outs.append(self.pooling(out, kernel_size=2**i, stride=2**i))
+ outputs = []
+
+ for i in range(self.num_outs):
+ if outs[i].requires_grad and self.with_cp:
+ tmp_out = checkpoint(self.fpn_convs[i], outs[i])
+ else:
+ tmp_out = self.fpn_convs[i](outs[i])
+ outputs.append(tmp_out)
+ return tuple(outputs)
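A quick smoke test with HRNet-like branch widths (the widths and sizes are assumed for illustration; mmcv required):

import torch
from annotator.uniformer.mmdet.models.necks import HRFPN

neck = HRFPN(in_channels=[18, 36, 72, 144], out_channels=256, num_outs=5)
neck.init_weights()
# HRNet-style branches: channels double while resolution halves
inputs = [torch.rand(1, c, s, s)
          for c, s in zip([18, 36, 72, 144], [64, 32, 16, 8])]
outs = neck(inputs)
print([tuple(o.shape) for o in outs])
# every branch is upsampled to 64x64, concatenated, reduced to 256 channels,
# then average-pooled into five pyramid levels: 64, 32, 16, 8, 4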
diff --git a/annotator/uniformer/mmdet/models/necks/nas_fpn.py b/annotator/uniformer/mmdet/models/necks/nas_fpn.py
new file mode 100644
index 0000000000000000000000000000000000000000..8e333ce65d4d06c47c29af489526ba3142736ad7
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/necks/nas_fpn.py
@@ -0,0 +1,160 @@
+import torch.nn as nn
+from mmcv.cnn import ConvModule, caffe2_xavier_init
+from mmcv.ops.merge_cells import GlobalPoolingCell, SumCell
+
+from ..builder import NECKS
+
+
+@NECKS.register_module()
+class NASFPN(nn.Module):
+ """NAS-FPN.
+
+ Implementation of `NAS-FPN: Learning Scalable Feature Pyramid Architecture
+ for Object Detection <https://arxiv.org/abs/1904.07392>`_
+
+ Args:
+ in_channels (List[int]): Number of input channels per scale.
+ out_channels (int): Number of output channels (used at each scale)
+ num_outs (int): Number of output scales.
+ stack_times (int): The number of times the pyramid architecture will
+ be stacked.
+ start_level (int): Index of the start input backbone level used to
+ build the feature pyramid. Default: 0.
+ end_level (int): Index of the end input backbone level (exclusive) to
+ build the feature pyramid. Default: -1, which means the last level.
+ add_extra_convs (bool): It decides whether to add conv
+ layers on top of the original feature maps. Defaults to False.
+ """
+
+ def __init__(self,
+ in_channels,
+ out_channels,
+ num_outs,
+ stack_times,
+ start_level=0,
+ end_level=-1,
+ add_extra_convs=False,
+ norm_cfg=None):
+ super(NASFPN, self).__init__()
+ assert isinstance(in_channels, list)
+ self.in_channels = in_channels
+ self.out_channels = out_channels
+ self.num_ins = len(in_channels) # num of input feature levels
+ self.num_outs = num_outs # num of output feature levels
+ self.stack_times = stack_times
+ self.norm_cfg = norm_cfg
+
+ if end_level == -1:
+ self.backbone_end_level = self.num_ins
+ assert num_outs >= self.num_ins - start_level
+ else:
+ # if end_level < inputs, no extra level is allowed
+ self.backbone_end_level = end_level
+ assert end_level <= len(in_channels)
+ assert num_outs == end_level - start_level
+ self.start_level = start_level
+ self.end_level = end_level
+ self.add_extra_convs = add_extra_convs
+
+ # add lateral connections
+ self.lateral_convs = nn.ModuleList()
+ for i in range(self.start_level, self.backbone_end_level):
+ l_conv = ConvModule(
+ in_channels[i],
+ out_channels,
+ 1,
+ norm_cfg=norm_cfg,
+ act_cfg=None)
+ self.lateral_convs.append(l_conv)
+
+ # add extra downsample layers (stride-2 pooling or conv)
+ extra_levels = num_outs - self.backbone_end_level + self.start_level
+ self.extra_downsamples = nn.ModuleList()
+ for i in range(extra_levels):
+ extra_conv = ConvModule(
+ out_channels, out_channels, 1, norm_cfg=norm_cfg, act_cfg=None)
+ self.extra_downsamples.append(
+ nn.Sequential(extra_conv, nn.MaxPool2d(2, 2)))
+
+ # add NAS FPN connections
+ self.fpn_stages = nn.ModuleList()
+ for _ in range(self.stack_times):
+ stage = nn.ModuleDict()
+ # gp(p6, p4) -> p4_1
+ stage['gp_64_4'] = GlobalPoolingCell(
+ in_channels=out_channels,
+ out_channels=out_channels,
+ out_norm_cfg=norm_cfg)
+ # sum(p4_1, p4) -> p4_2
+ stage['sum_44_4'] = SumCell(
+ in_channels=out_channels,
+ out_channels=out_channels,
+ out_norm_cfg=norm_cfg)
+ # sum(p4_2, p3) -> p3_out
+ stage['sum_43_3'] = SumCell(
+ in_channels=out_channels,
+ out_channels=out_channels,
+ out_norm_cfg=norm_cfg)
+ # sum(p3_out, p4_2) -> p4_out
+ stage['sum_34_4'] = SumCell(
+ in_channels=out_channels,
+ out_channels=out_channels,
+ out_norm_cfg=norm_cfg)
+ # sum(p5, gp(p4_out, p3_out)) -> p5_out
+ stage['gp_43_5'] = GlobalPoolingCell(with_out_conv=False)
+ stage['sum_55_5'] = SumCell(
+ in_channels=out_channels,
+ out_channels=out_channels,
+ out_norm_cfg=norm_cfg)
+ # sum(p7, gp(p5_out, p4_2)) -> p7_out
+ stage['gp_54_7'] = GlobalPoolingCell(with_out_conv=False)
+ stage['sum_77_7'] = SumCell(
+ in_channels=out_channels,
+ out_channels=out_channels,
+ out_norm_cfg=norm_cfg)
+ # gp(p7_out, p5_out) -> p6_out
+ stage['gp_75_6'] = GlobalPoolingCell(
+ in_channels=out_channels,
+ out_channels=out_channels,
+ out_norm_cfg=norm_cfg)
+ self.fpn_stages.append(stage)
+
+ def init_weights(self):
+ """Initialize the weights of module."""
+ for m in self.modules():
+ if isinstance(m, nn.Conv2d):
+ caffe2_xavier_init(m)
+
+ def forward(self, inputs):
+ """Forward function."""
+ # build P3-P5
+ feats = [
+ lateral_conv(inputs[i + self.start_level])
+ for i, lateral_conv in enumerate(self.lateral_convs)
+ ]
+ # build P6-P7 on top of P5
+ for downsample in self.extra_downsamples:
+ feats.append(downsample(feats[-1]))
+
+ p3, p4, p5, p6, p7 = feats
+
+ for stage in self.fpn_stages:
+ # gp(p6, p4) -> p4_1
+ p4_1 = stage['gp_64_4'](p6, p4, out_size=p4.shape[-2:])
+ # sum(p4_1, p4) -> p4_2
+ p4_2 = stage['sum_44_4'](p4_1, p4, out_size=p4.shape[-2:])
+ # sum(p4_2, p3) -> p3_out
+ p3 = stage['sum_43_3'](p4_2, p3, out_size=p3.shape[-2:])
+ # sum(p3_out, p4_2) -> p4_out
+ p4 = stage['sum_34_4'](p3, p4_2, out_size=p4.shape[-2:])
+ # sum(p5, gp(p4_out, p3_out)) -> p5_out
+ p5_tmp = stage['gp_43_5'](p4, p3, out_size=p5.shape[-2:])
+ p5 = stage['sum_55_5'](p5, p5_tmp, out_size=p5.shape[-2:])
+ # sum(p7, gp(p5_out, p4_2)) -> p7_out
+ p7_tmp = stage['gp_54_7'](p5, p4_2, out_size=p7.shape[-2:])
+ p7 = stage['sum_77_7'](p7, p7_tmp, out_size=p7.shape[-2:])
+ # gp(p7_out, p5_out) -> p6_out
+ p6 = stage['gp_75_6'](p7, p5, out_size=p6.shape[-2:])
+
+ return p3, p4, p5, p6, p7
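A construction sketch; the merge cells come from mmcv.ops, so mmcv-full is assumed, and the stack depth here is deliberately small (reference configs stack the cell more times):

import torch
from annotator.uniformer.mmdet.models.necks import NASFPN

neck = NASFPN(
    in_channels=[512, 1024, 2048],   # C3, C4, C5
    out_channels=256,
    num_outs=5,                      # P3-P7
    stack_times=3)
neck.init_weights()
inputs = [torch.rand(1, c, s, s)
          for c, s in zip([512, 1024, 2048], [32, 16, 8])]
p3, p4, p5, p6, p7 = neck(inputs)
print(tuple(p3.shape), tuple(p7.shape))  # (1, 256, 32, 32) and (1, 256, 2, 2)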
diff --git a/annotator/uniformer/mmdet/models/necks/nasfcos_fpn.py b/annotator/uniformer/mmdet/models/necks/nasfcos_fpn.py
new file mode 100644
index 0000000000000000000000000000000000000000..2daf79ef591373499184c624ccd27fb7456dec06
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/necks/nasfcos_fpn.py
@@ -0,0 +1,161 @@
+import torch.nn as nn
+import torch.nn.functional as F
+from mmcv.cnn import ConvModule, caffe2_xavier_init
+from mmcv.ops.merge_cells import ConcatCell
+
+from ..builder import NECKS
+
+
+@NECKS.register_module()
+class NASFCOS_FPN(nn.Module):
+ """FPN structure in NASFPN.
+
+ Implementation of paper `NAS-FCOS: Fast Neural Architecture Search for
+ Object Detection <https://arxiv.org/abs/1906.04423>`_
+
+ Args:
+ in_channels (List[int]): Number of input channels per scale.
+ out_channels (int): Number of output channels (used at each scale)
+ num_outs (int): Number of output scales.
+ start_level (int): Index of the start input backbone level used to
+ build the feature pyramid. Default: 0.
+ end_level (int): Index of the end input backbone level (exclusive) to
+ build the feature pyramid. Default: -1, which means the last level.
+ add_extra_convs (bool): It decides whether to add conv
+ layers on top of the original feature maps. Defaults to False.
+ conv_cfg (dict): dictionary to construct and config conv layer.
+ norm_cfg (dict): dictionary to construct and config norm layer.
+ """
+
+ def __init__(self,
+ in_channels,
+ out_channels,
+ num_outs,
+ start_level=1,
+ end_level=-1,
+ add_extra_convs=False,
+ conv_cfg=None,
+ norm_cfg=None):
+ super(NASFCOS_FPN, self).__init__()
+ assert isinstance(in_channels, list)
+ self.in_channels = in_channels
+ self.out_channels = out_channels
+ self.num_ins = len(in_channels)
+ self.num_outs = num_outs
+ self.norm_cfg = norm_cfg
+ self.conv_cfg = conv_cfg
+
+ if end_level == -1:
+ self.backbone_end_level = self.num_ins
+ assert num_outs >= self.num_ins - start_level
+ else:
+ self.backbone_end_level = end_level
+ assert end_level <= len(in_channels)
+ assert num_outs == end_level - start_level
+ self.start_level = start_level
+ self.end_level = end_level
+ self.add_extra_convs = add_extra_convs
+
+ self.adapt_convs = nn.ModuleList()
+ for i in range(self.start_level, self.backbone_end_level):
+ adapt_conv = ConvModule(
+ in_channels[i],
+ out_channels,
+ 1,
+ stride=1,
+ padding=0,
+ bias=False,
+ norm_cfg=dict(type='BN'),
+ act_cfg=dict(type='ReLU', inplace=False))
+ self.adapt_convs.append(adapt_conv)
+
+ # C2 is omitted according to the paper
+ extra_levels = num_outs - self.backbone_end_level + self.start_level
+
+ def build_concat_cell(with_input1_conv, with_input2_conv):
+ cell_conv_cfg = dict(
+ kernel_size=1, padding=0, bias=False, groups=out_channels)
+ return ConcatCell(
+ in_channels=out_channels,
+ out_channels=out_channels,
+ with_out_conv=True,
+ out_conv_cfg=cell_conv_cfg,
+ out_norm_cfg=dict(type='BN'),
+ out_conv_order=('norm', 'act', 'conv'),
+ with_input1_conv=with_input1_conv,
+ with_input2_conv=with_input2_conv,
+ input_conv_cfg=conv_cfg,
+ input_norm_cfg=norm_cfg,
+ upsample_mode='nearest')
+
+ # Denote c3=f0, c4=f1, c5=f2 for convenience
+ self.fpn = nn.ModuleDict()
+ self.fpn['c22_1'] = build_concat_cell(True, True)
+ self.fpn['c22_2'] = build_concat_cell(True, True)
+ self.fpn['c32'] = build_concat_cell(True, False)
+ self.fpn['c02'] = build_concat_cell(True, False)
+ self.fpn['c42'] = build_concat_cell(True, True)
+ self.fpn['c36'] = build_concat_cell(True, True)
+ self.fpn['c61'] = build_concat_cell(True, True) # f9
+ self.extra_downsamples = nn.ModuleList()
+ for i in range(extra_levels):
+ extra_act_cfg = None if i == 0 \
+ else dict(type='ReLU', inplace=False)
+ self.extra_downsamples.append(
+ ConvModule(
+ out_channels,
+ out_channels,
+ 3,
+ stride=2,
+ padding=1,
+ act_cfg=extra_act_cfg,
+ order=('act', 'norm', 'conv')))
+
+ def forward(self, inputs):
+ """Forward function."""
+ feats = [
+ adapt_conv(inputs[i + self.start_level])
+ for i, adapt_conv in enumerate(self.adapt_convs)
+ ]
+
+ for (i, module_name) in enumerate(self.fpn):
+ idx_1, idx_2 = int(module_name[1]), int(module_name[2])
+ res = self.fpn[module_name](feats[idx_1], feats[idx_2])
+ feats.append(res)
+
+ ret = []
+ for (idx, input_idx) in zip([9, 8, 7], [1, 2, 3]): # add P3, P4, P5
+ feats1, feats2 = feats[idx], feats[5]
+ feats2_resize = F.interpolate(
+ feats2,
+ size=feats1.size()[2:],
+ mode='bilinear',
+ align_corners=False)
+
+ feats_sum = feats1 + feats2_resize
+ ret.append(
+ F.interpolate(
+ feats_sum,
+ size=inputs[input_idx].size()[2:],
+ mode='bilinear',
+ align_corners=False))
+
+ for submodule in self.extra_downsamples:
+ ret.append(submodule(ret[-1]))
+
+ return tuple(ret)
+
+ def init_weights(self):
+ """Initialize the weights of module."""
+ for module in self.fpn.values():
+ if hasattr(module, 'conv_out'):
+ caffe2_xavier_init(module.out_conv.conv)
+
+ for modules in [
+ self.adapt_convs.modules(),
+ self.extra_downsamples.modules()
+ ]:
+ for module in modules:
+ if isinstance(module, nn.Conv2d):
+ caffe2_xavier_init(module)
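A brief construction sketch with FCOS-style inputs (values assumed; mmcv-full is needed for ConcatCell):

import torch
from annotator.uniformer.mmdet.models.necks import NASFCOS_FPN

neck = NASFCOS_FPN(
    in_channels=[256, 512, 1024, 2048],
    out_channels=256,
    num_outs=5,
    start_level=1)   # C2 is omitted, as noted above
neck.init_weights()
inputs = [torch.rand(1, c, s, s)
          for c, s in zip([256, 512, 1024, 2048], [64, 32, 16, 8])]
outs = neck(inputs)
print(len(outs))     # 5: P3-P5 from the searched cells, plus P6/P7 from stride-2 convs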
diff --git a/annotator/uniformer/mmdet/models/necks/pafpn.py b/annotator/uniformer/mmdet/models/necks/pafpn.py
new file mode 100644
index 0000000000000000000000000000000000000000..d7c0b50f29e882aacb5158b33ead3d4566d0ce0b
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/necks/pafpn.py
@@ -0,0 +1,142 @@
+import torch.nn as nn
+import torch.nn.functional as F
+from mmcv.cnn import ConvModule
+from mmcv.runner import auto_fp16
+
+from ..builder import NECKS
+from .fpn import FPN
+
+
+@NECKS.register_module()
+class PAFPN(FPN):
+ """Path Aggregation Network for Instance Segmentation.
+
+ This is an implementation of the `PAFPN in Path Aggregation Network
+ <https://arxiv.org/abs/1803.01534>`_.
+
+ Args:
+ in_channels (List[int]): Number of input channels per scale.
+ out_channels (int): Number of output channels (used at each scale)
+ num_outs (int): Number of output scales.
+ start_level (int): Index of the start input backbone level used to
+ build the feature pyramid. Default: 0.
+ end_level (int): Index of the end input backbone level (exclusive) to
+ build the feature pyramid. Default: -1, which means the last level.
+ add_extra_convs (bool): Whether to add conv layers on top of the
+ original feature maps. Default: False.
+ extra_convs_on_inputs (bool): Whether to apply extra conv on
+ the original feature from the backbone. Default: True.
+ relu_before_extra_convs (bool): Whether to apply relu before the extra
+ conv. Default: False.
+ no_norm_on_lateral (bool): Whether to apply norm on lateral.
+ Default: False.
+ conv_cfg (dict): Config dict for convolution layer. Default: None.
+ norm_cfg (dict): Config dict for normalization layer. Default: None.
+ act_cfg (dict): Config dict for activation layer in ConvModule.
+ Default: None.
+ """
+
+ def __init__(self,
+ in_channels,
+ out_channels,
+ num_outs,
+ start_level=0,
+ end_level=-1,
+ add_extra_convs=False,
+ extra_convs_on_inputs=True,
+ relu_before_extra_convs=False,
+ no_norm_on_lateral=False,
+ conv_cfg=None,
+ norm_cfg=None,
+ act_cfg=None):
+ super(PAFPN,
+ self).__init__(in_channels, out_channels, num_outs, start_level,
+ end_level, add_extra_convs, extra_convs_on_inputs,
+ relu_before_extra_convs, no_norm_on_lateral,
+ conv_cfg, norm_cfg, act_cfg)
+ # add extra bottom up pathway
+ self.downsample_convs = nn.ModuleList()
+ self.pafpn_convs = nn.ModuleList()
+ for i in range(self.start_level + 1, self.backbone_end_level):
+ d_conv = ConvModule(
+ out_channels,
+ out_channels,
+ 3,
+ stride=2,
+ padding=1,
+ conv_cfg=conv_cfg,
+ norm_cfg=norm_cfg,
+ act_cfg=act_cfg,
+ inplace=False)
+ pafpn_conv = ConvModule(
+ out_channels,
+ out_channels,
+ 3,
+ padding=1,
+ conv_cfg=conv_cfg,
+ norm_cfg=norm_cfg,
+ act_cfg=act_cfg,
+ inplace=False)
+ self.downsample_convs.append(d_conv)
+ self.pafpn_convs.append(pafpn_conv)
+
+ @auto_fp16()
+ def forward(self, inputs):
+ """Forward function."""
+ assert len(inputs) == len(self.in_channels)
+
+ # build laterals
+ laterals = [
+ lateral_conv(inputs[i + self.start_level])
+ for i, lateral_conv in enumerate(self.lateral_convs)
+ ]
+
+ # build top-down path
+ used_backbone_levels = len(laterals)
+ for i in range(used_backbone_levels - 1, 0, -1):
+ prev_shape = laterals[i - 1].shape[2:]
+ laterals[i - 1] += F.interpolate(
+ laterals[i], size=prev_shape, mode='nearest')
+
+ # build outputs
+ # part 1: from original levels
+ inter_outs = [
+ self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels)
+ ]
+
+ # part 2: add bottom-up path
+ for i in range(0, used_backbone_levels - 1):
+ inter_outs[i + 1] += self.downsample_convs[i](inter_outs[i])
+
+ outs = []
+ outs.append(inter_outs[0])
+ outs.extend([
+ self.pafpn_convs[i - 1](inter_outs[i])
+ for i in range(1, used_backbone_levels)
+ ])
+
+ # part 3: add extra levels
+ if self.num_outs > len(outs):
+ # use max pool to get more levels on top of outputs
+ # (e.g., Faster R-CNN, Mask R-CNN)
+ if not self.add_extra_convs:
+ for i in range(self.num_outs - used_backbone_levels):
+ outs.append(F.max_pool2d(outs[-1], 1, stride=2))
+ # add conv layers on top of original feature maps (RetinaNet)
+ else:
+ if self.add_extra_convs == 'on_input':
+ orig = inputs[self.backbone_end_level - 1]
+ outs.append(self.fpn_convs[used_backbone_levels](orig))
+ elif self.add_extra_convs == 'on_lateral':
+ outs.append(self.fpn_convs[used_backbone_levels](
+ laterals[-1]))
+ elif self.add_extra_convs == 'on_output':
+ outs.append(self.fpn_convs[used_backbone_levels](outs[-1]))
+ else:
+ raise NotImplementedError
+ for i in range(used_backbone_levels + 1, self.num_outs):
+ if self.relu_before_extra_convs:
+ outs.append(self.fpn_convs[i](F.relu(outs[-1])))
+ else:
+ outs.append(self.fpn_convs[i](outs[-1]))
+ return tuple(outs)
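Usage matches FPN; a quick sketch that exercises the extra bottom-up pass (mmcv required; shapes arbitrary):

import torch
from annotator.uniformer.mmdet.models.necks import PAFPN

neck = PAFPN(in_channels=[256, 512, 1024, 2048], out_channels=256, num_outs=5)
neck.init_weights()
inputs = [torch.rand(1, c, s, s)
          for c, s in zip([256, 512, 1024, 2048], [64, 32, 16, 8])]
outs = neck(inputs)
print([tuple(o.shape) for o in outs])
# same shapes as FPN would give; the difference is the extra bottom-up
# refinement (downsample_convs / pafpn_convs) applied before the outputs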
diff --git a/annotator/uniformer/mmdet/models/necks/rfp.py b/annotator/uniformer/mmdet/models/necks/rfp.py
new file mode 100644
index 0000000000000000000000000000000000000000..8a63e63bdef0094c26c17526d5ddde75bd309cea
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/necks/rfp.py
@@ -0,0 +1,128 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from mmcv.cnn import constant_init, kaiming_init, xavier_init
+
+from ..builder import NECKS, build_backbone
+from .fpn import FPN
+
+
+class ASPP(nn.Module):
+ """ASPP (Atrous Spatial Pyramid Pooling)
+
+ This is an implementation of the ASPP module used in DetectoRS
+ (https://arxiv.org/pdf/2006.02334.pdf)
+
+ Args:
+ in_channels (int): Number of input channels.
+ out_channels (int): Number of channels produced by this module
+ dilations (tuple[int]): Dilations of the four branches.
+ Default: (1, 3, 6, 1)
+ """
+
+ def __init__(self, in_channels, out_channels, dilations=(1, 3, 6, 1)):
+ super().__init__()
+ assert dilations[-1] == 1
+ self.aspp = nn.ModuleList()
+ for dilation in dilations:
+ kernel_size = 3 if dilation > 1 else 1
+ padding = dilation if dilation > 1 else 0
+ conv = nn.Conv2d(
+ in_channels,
+ out_channels,
+ kernel_size=kernel_size,
+ stride=1,
+ dilation=dilation,
+ padding=padding,
+ bias=True)
+ self.aspp.append(conv)
+ self.gap = nn.AdaptiveAvgPool2d(1)
+ self.init_weights()
+
+ def init_weights(self):
+ for m in self.modules():
+ if isinstance(m, nn.Conv2d):
+ kaiming_init(m)
+
+ def forward(self, x):
+ avg_x = self.gap(x)
+ out = []
+ for aspp_idx in range(len(self.aspp)):
+ inp = avg_x if (aspp_idx == len(self.aspp) - 1) else x
+ out.append(F.relu_(self.aspp[aspp_idx](inp)))
+ out[-1] = out[-1].expand_as(out[-2])
+ out = torch.cat(out, dim=1)
+ return out
+
+
+@NECKS.register_module()
+class RFP(FPN):
+ """RFP (Recursive Feature Pyramid)
+
+ This is an implementation of RFP in `DetectoRS
+ <https://arxiv.org/abs/2006.02334>`_. Different from standard FPN, the
+ input of RFP should be multi-level features along with the original input
+ image of the backbone.
+
+ Args:
+ rfp_steps (int): Number of unrolled steps of RFP.
+ rfp_backbone (dict): Configuration of the backbone for RFP.
+ aspp_out_channels (int): Number of output channels of ASPP module.
+ aspp_dilations (tuple[int]): Dilation rates of four branches.
+ Default: (1, 3, 6, 1)
+ """
+
+ def __init__(self,
+ rfp_steps,
+ rfp_backbone,
+ aspp_out_channels,
+ aspp_dilations=(1, 3, 6, 1),
+ **kwargs):
+ super().__init__(**kwargs)
+ self.rfp_steps = rfp_steps
+ self.rfp_modules = nn.ModuleList()
+ for rfp_idx in range(1, rfp_steps):
+ rfp_module = build_backbone(rfp_backbone)
+ self.rfp_modules.append(rfp_module)
+ self.rfp_aspp = ASPP(self.out_channels, aspp_out_channels,
+ aspp_dilations)
+ self.rfp_weight = nn.Conv2d(
+ self.out_channels,
+ 1,
+ kernel_size=1,
+ stride=1,
+ padding=0,
+ bias=True)
+
+ def init_weights(self):
+ # Avoid using super().init_weights(), which may alter the default
+ # initialization of the modules in self.rfp_modules that have missing
+ # keys in the pretrained checkpoint.
+ for convs in [self.lateral_convs, self.fpn_convs]:
+ for m in convs.modules():
+ if isinstance(m, nn.Conv2d):
+ xavier_init(m, distribution='uniform')
+ for rfp_idx in range(self.rfp_steps - 1):
+ self.rfp_modules[rfp_idx].init_weights(
+ self.rfp_modules[rfp_idx].pretrained)
+ constant_init(self.rfp_weight, 0)
+
+ def forward(self, inputs):
+ inputs = list(inputs)
+ assert len(inputs) == len(self.in_channels) + 1 # +1 for input image
+ img = inputs.pop(0)
+ # FPN forward
+ x = super().forward(tuple(inputs))
+ for rfp_idx in range(self.rfp_steps - 1):
+ rfp_feats = [x[0]] + list(
+ self.rfp_aspp(x[i]) for i in range(1, len(x)))
+ x_idx = self.rfp_modules[rfp_idx].rfp_forward(img, rfp_feats)
+ # FPN forward
+ x_idx = super().forward(x_idx)
+ x_new = []
+ for ft_idx in range(len(x_idx)):
+ add_weight = torch.sigmoid(self.rfp_weight(x_idx[ft_idx]))
+ x_new.append(add_weight * x_idx[ft_idx] +
+ (1 - add_weight) * x[ft_idx])
+ x = x_new
+ return x
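RFP needs a backbone config whose class implements rfp_forward (a DetectoRS-style backbone), so it cannot be exercised from this file alone. A hedged config sketch only; every value below is illustrative, not a verified training config:

rfp_neck_cfg = dict(
    type='RFP',
    rfp_steps=2,                   # one extra unrolled step
    aspp_out_channels=64,
    aspp_dilations=(1, 3, 6, 1),
    rfp_backbone=dict(
        type='DetectoRS_ResNet',   # must provide rfp_forward() and a .pretrained attribute
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        rfp_inplanes=256,          # matches out_channels below
        pretrained='torchvision://resnet50'),
    # remaining kwargs are forwarded to the parent FPN
    in_channels=[256, 512, 1024, 2048],
    out_channels=256,
    num_outs=5)

Note that at call time RFP expects the raw image to be prepended to the backbone features, as the assert in forward above checks.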
diff --git a/annotator/uniformer/mmdet/models/necks/yolo_neck.py b/annotator/uniformer/mmdet/models/necks/yolo_neck.py
new file mode 100644
index 0000000000000000000000000000000000000000..c2f9b9ef3859796c284c16ad1a92fe41ecbed613
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/necks/yolo_neck.py
@@ -0,0 +1,136 @@
+# Copyright (c) 2019 Western Digital Corporation or its affiliates.
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from mmcv.cnn import ConvModule
+
+from ..builder import NECKS
+
+
+class DetectionBlock(nn.Module):
+ """Detection block in YOLO neck.
+
+ Let out_channels = n; the DetectionBlock contains:
+ six ConvLayers, one Conv2D layer and one YoloLayer.
+ The first six ConvLayers are arranged as follows:
+ 1x1xn, 3x3x2n, 1x1xn, 3x3x2n, 1x1xn, 3x3x2n.
+ The Conv2D layer is 1x1x255.
+ Some blocks will have a branch after the fifth ConvLayer.
+ The number of input channels is arbitrary (in_channels).
+
+ Args:
+ in_channels (int): The number of input channels.
+ out_channels (int): The number of output channels.
+ conv_cfg (dict): Config dict for convolution layer. Default: None.
+ norm_cfg (dict): Dictionary to construct and config norm layer.
+ Default: dict(type='BN', requires_grad=True)
+ act_cfg (dict): Config dict for activation layer.
+ Default: dict(type='LeakyReLU', negative_slope=0.1).
+ """
+
+ def __init__(self,
+ in_channels,
+ out_channels,
+ conv_cfg=None,
+ norm_cfg=dict(type='BN', requires_grad=True),
+ act_cfg=dict(type='LeakyReLU', negative_slope=0.1)):
+ super(DetectionBlock, self).__init__()
+ double_out_channels = out_channels * 2
+
+ # shortcut
+ cfg = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)
+ self.conv1 = ConvModule(in_channels, out_channels, 1, **cfg)
+ self.conv2 = ConvModule(
+ out_channels, double_out_channels, 3, padding=1, **cfg)
+ self.conv3 = ConvModule(double_out_channels, out_channels, 1, **cfg)
+ self.conv4 = ConvModule(
+ out_channels, double_out_channels, 3, padding=1, **cfg)
+ self.conv5 = ConvModule(double_out_channels, out_channels, 1, **cfg)
+
+ def forward(self, x):
+ tmp = self.conv1(x)
+ tmp = self.conv2(tmp)
+ tmp = self.conv3(tmp)
+ tmp = self.conv4(tmp)
+ out = self.conv5(tmp)
+ return out
+
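+# Shape sketch (illustrative, hypothetical values): DetectionBlock keeps the
+# spatial size and maps `in_channels` to `out_channels`, e.g.
+#     block = DetectionBlock(in_channels=1024, out_channels=512)
+#     block(torch.rand(1, 1024, 13, 13)).shape   # -> torch.Size([1, 512, 13, 13])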
+
+@NECKS.register_module()
+class YOLOV3Neck(nn.Module):
+ """The neck of YOLOV3.
+
+    It can be treated as a simplified version of FPN. It takes the features
+    from the Darknet backbone, performs upsampling and concatenation, and
+    outputs the feature maps that are fed to the detection head.
+
+    Note:
+        The input feats are ordered from low-level to high-level, but
+        YOLOV3Neck processes them in reverse order, starting from the
+        highest-level (deepest) feature map; ``in_channels`` and
+        ``out_channels`` therefore follow that reversed (high-level
+        first) order.
+
+ Args:
+ num_scales (int): The number of scales / stages.
+ in_channels (int): The number of input channels.
+ out_channels (int): The number of output channels.
+ conv_cfg (dict): Config dict for convolution layer. Default: None.
+ norm_cfg (dict): Dictionary to construct and config norm layer.
+ Default: dict(type='BN', requires_grad=True)
+ act_cfg (dict): Config dict for activation layer.
+ Default: dict(type='LeakyReLU', negative_slope=0.1).
+ """
+
+ def __init__(self,
+ num_scales,
+ in_channels,
+ out_channels,
+ conv_cfg=None,
+ norm_cfg=dict(type='BN', requires_grad=True),
+ act_cfg=dict(type='LeakyReLU', negative_slope=0.1)):
+ super(YOLOV3Neck, self).__init__()
+ assert (num_scales == len(in_channels) == len(out_channels))
+ self.num_scales = num_scales
+ self.in_channels = in_channels
+ self.out_channels = out_channels
+
+ # shortcut
+ cfg = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)
+
+        # To support an arbitrary number of scales, the code looks awkward,
+        # but it works. A better solution is welcome.
+ self.detect1 = DetectionBlock(in_channels[0], out_channels[0], **cfg)
+ for i in range(1, self.num_scales):
+ in_c, out_c = self.in_channels[i], self.out_channels[i]
+ self.add_module(f'conv{i}', ConvModule(in_c, out_c, 1, **cfg))
+ # in_c + out_c : High-lvl feats will be cat with low-lvl feats
+ self.add_module(f'detect{i+1}',
+ DetectionBlock(in_c + out_c, out_c, **cfg))
+
+ def forward(self, feats):
+ assert len(feats) == self.num_scales
+
+ # processed from bottom (high-lvl) to top (low-lvl)
+ outs = []
+ out = self.detect1(feats[-1])
+ outs.append(out)
+
+ for i, x in enumerate(reversed(feats[:-1])):
+ conv = getattr(self, f'conv{i+1}')
+ tmp = conv(out)
+
+ # Cat with low-lvl feats
+ tmp = F.interpolate(tmp, scale_factor=2)
+ tmp = torch.cat((tmp, x), 1)
+
+ detect = getattr(self, f'detect{i+2}')
+ out = detect(tmp)
+ outs.append(out)
+
+ return tuple(outs)
+
+ def init_weights(self):
+ """Initialize the weights of module."""
+ # init is done in ConvModule
+ pass
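+# Usage sketch (illustrative; the channel and resolution numbers follow the
+# typical Darknet-53 + YOLOv3 setup and are assumptions, not requirements):
+#     neck = YOLOV3Neck(num_scales=3,
+#                       in_channels=[1024, 512, 256],
+#                       out_channels=[512, 256, 128])
+#     feats = (torch.rand(1, 256, 52, 52),    # low-level
+#              torch.rand(1, 512, 26, 26),
+#              torch.rand(1, 1024, 13, 13))   # high-level, processed first
+#     outs = neck(feats)
+#     [o.shape[1] for o in outs]              # -> [512, 256, 128]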
diff --git a/annotator/uniformer/mmdet/models/roi_heads/__init__.py b/annotator/uniformer/mmdet/models/roi_heads/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..ca0a38ec42cd41fbd97e07589a13d1af46f47f2f
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/roi_heads/__init__.py
@@ -0,0 +1,34 @@
+from .base_roi_head import BaseRoIHead
+from .bbox_heads import (BBoxHead, ConvFCBBoxHead, DoubleConvFCBBoxHead,
+ SCNetBBoxHead, Shared2FCBBoxHead,
+ Shared4Conv1FCBBoxHead)
+from .cascade_roi_head import CascadeRoIHead
+from .double_roi_head import DoubleHeadRoIHead
+from .dynamic_roi_head import DynamicRoIHead
+from .grid_roi_head import GridRoIHead
+from .htc_roi_head import HybridTaskCascadeRoIHead
+from .mask_heads import (CoarseMaskHead, FCNMaskHead, FeatureRelayHead,
+ FusedSemanticHead, GlobalContextHead, GridHead,
+ HTCMaskHead, MaskIoUHead, MaskPointHead,
+ SCNetMaskHead, SCNetSemanticHead)
+from .mask_scoring_roi_head import MaskScoringRoIHead
+from .pisa_roi_head import PISARoIHead
+from .point_rend_roi_head import PointRendRoIHead
+from .roi_extractors import SingleRoIExtractor
+from .scnet_roi_head import SCNetRoIHead
+from .shared_heads import ResLayer
+from .sparse_roi_head import SparseRoIHead
+from .standard_roi_head import StandardRoIHead
+from .trident_roi_head import TridentRoIHead
+
+__all__ = [
+ 'BaseRoIHead', 'CascadeRoIHead', 'DoubleHeadRoIHead', 'MaskScoringRoIHead',
+ 'HybridTaskCascadeRoIHead', 'GridRoIHead', 'ResLayer', 'BBoxHead',
+ 'ConvFCBBoxHead', 'Shared2FCBBoxHead', 'StandardRoIHead',
+ 'Shared4Conv1FCBBoxHead', 'DoubleConvFCBBoxHead', 'FCNMaskHead',
+ 'HTCMaskHead', 'FusedSemanticHead', 'GridHead', 'MaskIoUHead',
+ 'SingleRoIExtractor', 'PISARoIHead', 'PointRendRoIHead', 'MaskPointHead',
+ 'CoarseMaskHead', 'DynamicRoIHead', 'SparseRoIHead', 'TridentRoIHead',
+ 'SCNetRoIHead', 'SCNetMaskHead', 'SCNetSemanticHead', 'SCNetBBoxHead',
+ 'FeatureRelayHead', 'GlobalContextHead'
+]
diff --git a/annotator/uniformer/mmdet/models/roi_heads/base_roi_head.py b/annotator/uniformer/mmdet/models/roi_heads/base_roi_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..2d61cc08007924c61b4a53d7fbc6e6fedfd68f08
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/roi_heads/base_roi_head.py
@@ -0,0 +1,103 @@
+from abc import ABCMeta, abstractmethod
+
+import torch.nn as nn
+
+from ..builder import build_shared_head
+
+
+class BaseRoIHead(nn.Module, metaclass=ABCMeta):
+ """Base class for RoIHeads."""
+
+ def __init__(self,
+ bbox_roi_extractor=None,
+ bbox_head=None,
+ mask_roi_extractor=None,
+ mask_head=None,
+ shared_head=None,
+ train_cfg=None,
+ test_cfg=None):
+ super(BaseRoIHead, self).__init__()
+ self.train_cfg = train_cfg
+ self.test_cfg = test_cfg
+ if shared_head is not None:
+ self.shared_head = build_shared_head(shared_head)
+
+ if bbox_head is not None:
+ self.init_bbox_head(bbox_roi_extractor, bbox_head)
+
+ if mask_head is not None:
+ self.init_mask_head(mask_roi_extractor, mask_head)
+
+ self.init_assigner_sampler()
+
+ @property
+ def with_bbox(self):
+ """bool: whether the RoI head contains a `bbox_head`"""
+ return hasattr(self, 'bbox_head') and self.bbox_head is not None
+
+ @property
+ def with_mask(self):
+ """bool: whether the RoI head contains a `mask_head`"""
+ return hasattr(self, 'mask_head') and self.mask_head is not None
+
+ @property
+ def with_shared_head(self):
+ """bool: whether the RoI head contains a `shared_head`"""
+ return hasattr(self, 'shared_head') and self.shared_head is not None
+
+ @abstractmethod
+ def init_weights(self, pretrained):
+ """Initialize the weights in head.
+
+ Args:
+ pretrained (str, optional): Path to pre-trained weights.
+ Defaults to None.
+ """
+ pass
+
+ @abstractmethod
+ def init_bbox_head(self):
+ """Initialize ``bbox_head``"""
+ pass
+
+ @abstractmethod
+ def init_mask_head(self):
+ """Initialize ``mask_head``"""
+ pass
+
+ @abstractmethod
+ def init_assigner_sampler(self):
+ """Initialize assigner and sampler."""
+ pass
+
+ @abstractmethod
+ def forward_train(self,
+ x,
+ img_meta,
+ proposal_list,
+ gt_bboxes,
+ gt_labels,
+ gt_bboxes_ignore=None,
+ gt_masks=None,
+ **kwargs):
+ """Forward function during training."""
+
+ async def async_simple_test(self, x, img_meta, **kwargs):
+ """Asynchronized test function."""
+ raise NotImplementedError
+
+ def simple_test(self,
+ x,
+ proposal_list,
+ img_meta,
+ proposals=None,
+ rescale=False,
+ **kwargs):
+ """Test without augmentation."""
+
+ def aug_test(self, x, proposal_list, img_metas, rescale=False, **kwargs):
+ """Test with augmentations.
+
+ If rescale is False, then returned bboxes and masks will fit the scale
+ of imgs[0].
+ """
diff --git a/annotator/uniformer/mmdet/models/roi_heads/bbox_heads/__init__.py b/annotator/uniformer/mmdet/models/roi_heads/bbox_heads/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..bc5d29ece5bbf2f168f538f151f06d1b263a5153
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/roi_heads/bbox_heads/__init__.py
@@ -0,0 +1,13 @@
+from .bbox_head import BBoxHead
+from .convfc_bbox_head import (ConvFCBBoxHead, Shared2FCBBoxHead,
+ Shared4Conv1FCBBoxHead)
+from .dii_head import DIIHead
+from .double_bbox_head import DoubleConvFCBBoxHead
+from .sabl_head import SABLHead
+from .scnet_bbox_head import SCNetBBoxHead
+
+__all__ = [
+ 'BBoxHead', 'ConvFCBBoxHead', 'Shared2FCBBoxHead',
+ 'Shared4Conv1FCBBoxHead', 'DoubleConvFCBBoxHead', 'SABLHead', 'DIIHead',
+ 'SCNetBBoxHead'
+]
diff --git a/annotator/uniformer/mmdet/models/roi_heads/bbox_heads/bbox_head.py b/annotator/uniformer/mmdet/models/roi_heads/bbox_heads/bbox_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..408abef3a244115b4e73748049a228e37ad0665c
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/roi_heads/bbox_heads/bbox_head.py
@@ -0,0 +1,483 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from mmcv.runner import auto_fp16, force_fp32
+from torch.nn.modules.utils import _pair
+
+from mmdet.core import build_bbox_coder, multi_apply, multiclass_nms
+from mmdet.models.builder import HEADS, build_loss
+from mmdet.models.losses import accuracy
+
+
+@HEADS.register_module()
+class BBoxHead(nn.Module):
+ """Simplest RoI head, with only two fc layers for classification and
+ regression respectively."""
+
+ def __init__(self,
+ with_avg_pool=False,
+ with_cls=True,
+ with_reg=True,
+ roi_feat_size=7,
+ in_channels=256,
+ num_classes=80,
+ bbox_coder=dict(
+ type='DeltaXYWHBBoxCoder',
+ clip_border=True,
+ target_means=[0., 0., 0., 0.],
+ target_stds=[0.1, 0.1, 0.2, 0.2]),
+ reg_class_agnostic=False,
+ reg_decoded_bbox=False,
+ loss_cls=dict(
+ type='CrossEntropyLoss',
+ use_sigmoid=False,
+ loss_weight=1.0),
+ loss_bbox=dict(
+ type='SmoothL1Loss', beta=1.0, loss_weight=1.0)):
+ super(BBoxHead, self).__init__()
+ assert with_cls or with_reg
+ self.with_avg_pool = with_avg_pool
+ self.with_cls = with_cls
+ self.with_reg = with_reg
+ self.roi_feat_size = _pair(roi_feat_size)
+ self.roi_feat_area = self.roi_feat_size[0] * self.roi_feat_size[1]
+ self.in_channels = in_channels
+ self.num_classes = num_classes
+ self.reg_class_agnostic = reg_class_agnostic
+ self.reg_decoded_bbox = reg_decoded_bbox
+ self.fp16_enabled = False
+
+ self.bbox_coder = build_bbox_coder(bbox_coder)
+ self.loss_cls = build_loss(loss_cls)
+ self.loss_bbox = build_loss(loss_bbox)
+
+ in_channels = self.in_channels
+ if self.with_avg_pool:
+ self.avg_pool = nn.AvgPool2d(self.roi_feat_size)
+ else:
+ in_channels *= self.roi_feat_area
+ if self.with_cls:
+ # need to add background class
+ self.fc_cls = nn.Linear(in_channels, num_classes + 1)
+ if self.with_reg:
+ out_dim_reg = 4 if reg_class_agnostic else 4 * num_classes
+ self.fc_reg = nn.Linear(in_channels, out_dim_reg)
+ self.debug_imgs = None
+
+ def init_weights(self):
+ # conv layers are already initialized by ConvModule
+ if self.with_cls:
+ nn.init.normal_(self.fc_cls.weight, 0, 0.01)
+ nn.init.constant_(self.fc_cls.bias, 0)
+ if self.with_reg:
+ nn.init.normal_(self.fc_reg.weight, 0, 0.001)
+ nn.init.constant_(self.fc_reg.bias, 0)
+
+ @auto_fp16()
+ def forward(self, x):
+ if self.with_avg_pool:
+ x = self.avg_pool(x)
+ x = x.view(x.size(0), -1)
+ cls_score = self.fc_cls(x) if self.with_cls else None
+ bbox_pred = self.fc_reg(x) if self.with_reg else None
+ return cls_score, bbox_pred
+
+ def _get_target_single(self, pos_bboxes, neg_bboxes, pos_gt_bboxes,
+ pos_gt_labels, cfg):
+ """Calculate the ground truth for proposals in the single image
+ according to the sampling results.
+
+ Args:
+ pos_bboxes (Tensor): Contains all the positive boxes,
+ has shape (num_pos, 4), the last dimension 4
+ represents [tl_x, tl_y, br_x, br_y].
+ neg_bboxes (Tensor): Contains all the negative boxes,
+ has shape (num_neg, 4), the last dimension 4
+ represents [tl_x, tl_y, br_x, br_y].
+ pos_gt_bboxes (Tensor): Contains all the gt_boxes,
+ has shape (num_gt, 4), the last dimension 4
+ represents [tl_x, tl_y, br_x, br_y].
+ pos_gt_labels (Tensor): Contains all the gt_labels,
+ has shape (num_gt).
+ cfg (obj:`ConfigDict`): `train_cfg` of R-CNN.
+
+ Returns:
+ Tuple[Tensor]: Ground truth for proposals
+ in a single image. Containing the following Tensors:
+
+ - labels(Tensor): Gt_labels for all proposals, has
+ shape (num_proposals,).
+ - label_weights(Tensor): Labels_weights for all
+ proposals, has shape (num_proposals,).
+ - bbox_targets(Tensor):Regression target for all
+ proposals, has shape (num_proposals, 4), the
+ last dimension 4 represents [tl_x, tl_y, br_x, br_y].
+ - bbox_weights(Tensor):Regression weights for all
+ proposals, has shape (num_proposals, 4).
+ """
+ num_pos = pos_bboxes.size(0)
+ num_neg = neg_bboxes.size(0)
+ num_samples = num_pos + num_neg
+
+ # original implementation uses new_zeros since BG are set to be 0
+ # now use empty & fill because BG cat_id = num_classes,
+ # FG cat_id = [0, num_classes-1]
+ labels = pos_bboxes.new_full((num_samples, ),
+ self.num_classes,
+ dtype=torch.long)
+ label_weights = pos_bboxes.new_zeros(num_samples)
+ bbox_targets = pos_bboxes.new_zeros(num_samples, 4)
+ bbox_weights = pos_bboxes.new_zeros(num_samples, 4)
+ if num_pos > 0:
+ labels[:num_pos] = pos_gt_labels
+ pos_weight = 1.0 if cfg.pos_weight <= 0 else cfg.pos_weight
+ label_weights[:num_pos] = pos_weight
+ if not self.reg_decoded_bbox:
+ pos_bbox_targets = self.bbox_coder.encode(
+ pos_bboxes, pos_gt_bboxes)
+ else:
+ # When the regression loss (e.g. `IouLoss`, `GIouLoss`)
+ # is applied directly on the decoded bounding boxes, both
+ # the predicted boxes and regression targets should be with
+ # absolute coordinate format.
+ pos_bbox_targets = pos_gt_bboxes
+ bbox_targets[:num_pos, :] = pos_bbox_targets
+ bbox_weights[:num_pos, :] = 1
+ if num_neg > 0:
+ label_weights[-num_neg:] = 1.0
+
+ return labels, label_weights, bbox_targets, bbox_weights
+
+ def get_targets(self,
+ sampling_results,
+ gt_bboxes,
+ gt_labels,
+ rcnn_train_cfg,
+ concat=True):
+ """Calculate the ground truth for all samples in a batch according to
+ the sampling_results.
+
+        The targets are computed by applying `_get_target_single` to the
+        sampling results of each image and, if `concat=True`, concatenating
+        the per-image results.
+
+ Args:
+ sampling_results (List[obj:SamplingResults]): Assign results of
+ all images in a batch after sampling.
+ gt_bboxes (list[Tensor]): Gt_bboxes of all images in a batch,
+ each tensor has shape (num_gt, 4), the last dimension 4
+ represents [tl_x, tl_y, br_x, br_y].
+ gt_labels (list[Tensor]): Gt_labels of all images in a batch,
+ each tensor has shape (num_gt,).
+ rcnn_train_cfg (obj:ConfigDict): `train_cfg` of RCNN.
+ concat (bool): Whether to concatenate the results of all
+ the images in a single batch.
+
+ Returns:
+ Tuple[Tensor]: Ground truth for proposals in a single image.
+ Containing the following list of Tensors:
+
+ - labels (list[Tensor],Tensor): Gt_labels for all
+ proposals in a batch, each tensor in list has
+ shape (num_proposals,) when `concat=False`, otherwise
+ just a single tensor has shape (num_all_proposals,).
+ - label_weights (list[Tensor]): Labels_weights for
+ all proposals in a batch, each tensor in list has
+ shape (num_proposals,) when `concat=False`, otherwise
+ just a single tensor has shape (num_all_proposals,).
+ - bbox_targets (list[Tensor],Tensor): Regression target
+ for all proposals in a batch, each tensor in list
+ has shape (num_proposals, 4) when `concat=False`,
+ otherwise just a single tensor has shape
+ (num_all_proposals, 4), the last dimension 4 represents
+ [tl_x, tl_y, br_x, br_y].
+ - bbox_weights (list[tensor],Tensor): Regression weights for
+ all proposals in a batch, each tensor in list has shape
+ (num_proposals, 4) when `concat=False`, otherwise just a
+ single tensor has shape (num_all_proposals, 4).
+ """
+ pos_bboxes_list = [res.pos_bboxes for res in sampling_results]
+ neg_bboxes_list = [res.neg_bboxes for res in sampling_results]
+ pos_gt_bboxes_list = [res.pos_gt_bboxes for res in sampling_results]
+ pos_gt_labels_list = [res.pos_gt_labels for res in sampling_results]
+ labels, label_weights, bbox_targets, bbox_weights = multi_apply(
+ self._get_target_single,
+ pos_bboxes_list,
+ neg_bboxes_list,
+ pos_gt_bboxes_list,
+ pos_gt_labels_list,
+ cfg=rcnn_train_cfg)
+
+ if concat:
+ labels = torch.cat(labels, 0)
+ label_weights = torch.cat(label_weights, 0)
+ bbox_targets = torch.cat(bbox_targets, 0)
+ bbox_weights = torch.cat(bbox_weights, 0)
+ return labels, label_weights, bbox_targets, bbox_weights
+
+ @force_fp32(apply_to=('cls_score', 'bbox_pred'))
+ def loss(self,
+ cls_score,
+ bbox_pred,
+ rois,
+ labels,
+ label_weights,
+ bbox_targets,
+ bbox_weights,
+ reduction_override=None):
+ losses = dict()
+ if cls_score is not None:
+ avg_factor = max(torch.sum(label_weights > 0).float().item(), 1.)
+ if cls_score.numel() > 0:
+ losses['loss_cls'] = self.loss_cls(
+ cls_score,
+ labels,
+ label_weights,
+ avg_factor=avg_factor,
+ reduction_override=reduction_override)
+ losses['acc'] = accuracy(cls_score, labels)
+ if bbox_pred is not None:
+ bg_class_ind = self.num_classes
+ # 0~self.num_classes-1 are FG, self.num_classes is BG
+ pos_inds = (labels >= 0) & (labels < bg_class_ind)
+ # do not perform bounding box regression for BG anymore.
+ if pos_inds.any():
+ if self.reg_decoded_bbox:
+ # When the regression loss (e.g. `IouLoss`,
+ # `GIouLoss`, `DIouLoss`) is applied directly on
+ # the decoded bounding boxes, it decodes the
+ # already encoded coordinates to absolute format.
+ bbox_pred = self.bbox_coder.decode(rois[:, 1:], bbox_pred)
+ if self.reg_class_agnostic:
+ pos_bbox_pred = bbox_pred.view(
+ bbox_pred.size(0), 4)[pos_inds.type(torch.bool)]
+ else:
+ pos_bbox_pred = bbox_pred.view(
+ bbox_pred.size(0), -1,
+ 4)[pos_inds.type(torch.bool),
+ labels[pos_inds.type(torch.bool)]]
+ losses['loss_bbox'] = self.loss_bbox(
+ pos_bbox_pred,
+ bbox_targets[pos_inds.type(torch.bool)],
+ bbox_weights[pos_inds.type(torch.bool)],
+ avg_factor=bbox_targets.size(0),
+ reduction_override=reduction_override)
+ else:
+ losses['loss_bbox'] = bbox_pred[pos_inds].sum()
+ return losses
+
+ @force_fp32(apply_to=('cls_score', 'bbox_pred'))
+ def get_bboxes(self,
+ rois,
+ cls_score,
+ bbox_pred,
+ img_shape,
+ scale_factor,
+ rescale=False,
+ cfg=None):
+ """Transform network output for a batch into bbox predictions.
+
+        If the input rois have a batch dimension, the function runs in
+        `batch_mode` and returns a tuple[list[Tensor], list[Tensor]];
+        otherwise, it returns a tuple[Tensor, Tensor].
+
+ Args:
+ rois (Tensor): Boxes to be transformed. Has shape (num_boxes, 5)
+ or (B, num_boxes, 5)
+ cls_score (list[Tensor] or Tensor): Box scores for
+ each scale level, each is a 4D-tensor, the channel number is
+ num_points * num_classes.
+ bbox_pred (Tensor, optional): Box energies / deltas for each scale
+ level, each is a 4D-tensor, the channel number is
+ num_classes * 4.
+ img_shape (Sequence[int] or torch.Tensor or Sequence[
+ Sequence[int]], optional): Maximum bounds for boxes, specifies
+ (H, W, C) or (H, W). If rois shape is (B, num_boxes, 4), then
+ the max_shape should be a Sequence[Sequence[int]]
+ and the length of max_shape should also be B.
+            scale_factor (tuple[ndarray] or ndarray): Scale factor of the
+               image, arranged as (w_scale, h_scale, w_scale, h_scale). In
+               `batch_mode`, the scale_factor is a tuple[ndarray].
+ rescale (bool): If True, return boxes in original image space.
+ Default: False.
+ cfg (obj:`ConfigDict`): `test_cfg` of Bbox Head. Default: None
+
+ Returns:
+ tuple[list[Tensor], list[Tensor]] or tuple[Tensor, Tensor]:
+ If the input has a batch dimension, the return value is
+ a tuple of the list. The first list contains the boxes of
+ the corresponding image in a batch, each tensor has the
+ shape (num_boxes, 5) and last dimension 5 represent
+ (tl_x, tl_y, br_x, br_y, score). Each Tensor in the second
+ list is the labels with shape (num_boxes, ). The length of
+ both lists should be equal to batch_size. Otherwise return
+ value is a tuple of two tensors, the first tensor is the
+ boxes with scores, the second tensor is the labels, both
+ have the same shape as the first case.
+ """
+ if isinstance(cls_score, list):
+ cls_score = sum(cls_score) / float(len(cls_score))
+
+ scores = F.softmax(
+ cls_score, dim=-1) if cls_score is not None else None
+
+ batch_mode = True
+ if rois.ndim == 2:
+ # e.g. AugTest, Cascade R-CNN, HTC, SCNet...
+ batch_mode = False
+
+ # add batch dimension
+ if scores is not None:
+ scores = scores.unsqueeze(0)
+ if bbox_pred is not None:
+ bbox_pred = bbox_pred.unsqueeze(0)
+ rois = rois.unsqueeze(0)
+
+ if bbox_pred is not None:
+ bboxes = self.bbox_coder.decode(
+ rois[..., 1:], bbox_pred, max_shape=img_shape)
+ else:
+ bboxes = rois[..., 1:].clone()
+ if img_shape is not None:
+ max_shape = bboxes.new_tensor(img_shape)[..., :2]
+ min_xy = bboxes.new_tensor(0)
+ max_xy = torch.cat(
+ [max_shape] * 2, dim=-1).flip(-1).unsqueeze(-2)
+ bboxes = torch.where(bboxes < min_xy, min_xy, bboxes)
+ bboxes = torch.where(bboxes > max_xy, max_xy, bboxes)
+
+ if rescale and bboxes.size(-2) > 0:
+ if not isinstance(scale_factor, tuple):
+ scale_factor = tuple([scale_factor])
+ # B, 1, bboxes.size(-1)
+ scale_factor = bboxes.new_tensor(scale_factor).unsqueeze(1).repeat(
+ 1, 1,
+ bboxes.size(-1) // 4)
+ bboxes /= scale_factor
+
+ det_bboxes = []
+ det_labels = []
+ for (bbox, score) in zip(bboxes, scores):
+ if cfg is not None:
+ det_bbox, det_label = multiclass_nms(bbox, score,
+ cfg.score_thr, cfg.nms,
+ cfg.max_per_img)
+ else:
+ det_bbox, det_label = bbox, score
+ det_bboxes.append(det_bbox)
+ det_labels.append(det_label)
+
+ if not batch_mode:
+ det_bboxes = det_bboxes[0]
+ det_labels = det_labels[0]
+ return det_bboxes, det_labels
+
+ @force_fp32(apply_to=('bbox_preds', ))
+ def refine_bboxes(self, rois, labels, bbox_preds, pos_is_gts, img_metas):
+ """Refine bboxes during training.
+
+ Args:
+            rois (Tensor): Shape (n*bs, 5), where n is the number of images
+                per GPU and bs is the number of sampled RoIs per image. The
+                first column is the image id and the next 4 columns are
+                x1, y1, x2, y2.
+ labels (Tensor): Shape (n*bs, ).
+ bbox_preds (Tensor): Shape (n*bs, 4) or (n*bs, 4*#class).
+ pos_is_gts (list[Tensor]): Flags indicating if each positive bbox
+ is a gt bbox.
+ img_metas (list[dict]): Meta info of each image.
+
+ Returns:
+ list[Tensor]: Refined bboxes of each image in a mini-batch.
+
+ Example:
+ >>> # xdoctest: +REQUIRES(module:kwarray)
+ >>> import kwarray
+ >>> import numpy as np
+ >>> from mmdet.core.bbox.demodata import random_boxes
+ >>> self = BBoxHead(reg_class_agnostic=True)
+ >>> n_roi = 2
+ >>> n_img = 4
+ >>> scale = 512
+ >>> rng = np.random.RandomState(0)
+ >>> img_metas = [{'img_shape': (scale, scale)}
+ ... for _ in range(n_img)]
+ >>> # Create rois in the expected format
+ >>> roi_boxes = random_boxes(n_roi, scale=scale, rng=rng)
+ >>> img_ids = torch.randint(0, n_img, (n_roi,))
+ >>> img_ids = img_ids.float()
+ >>> rois = torch.cat([img_ids[:, None], roi_boxes], dim=1)
+ >>> # Create other args
+ >>> labels = torch.randint(0, 2, (n_roi,)).long()
+ >>> bbox_preds = random_boxes(n_roi, scale=scale, rng=rng)
+ >>> # For each image, pretend random positive boxes are gts
+            >>> is_label_pos = (labels.numpy() > 0).astype(int)
+ >>> lbl_per_img = kwarray.group_items(is_label_pos,
+ ... img_ids.numpy())
+ >>> pos_per_img = [sum(lbl_per_img.get(gid, []))
+ ... for gid in range(n_img)]
+ >>> pos_is_gts = [
+ >>> torch.randint(0, 2, (npos,)).byte().sort(
+ >>> descending=True)[0]
+ >>> for npos in pos_per_img
+ >>> ]
+ >>> bboxes_list = self.refine_bboxes(rois, labels, bbox_preds,
+ >>> pos_is_gts, img_metas)
+ >>> print(bboxes_list)
+ """
+ img_ids = rois[:, 0].long().unique(sorted=True)
+ assert img_ids.numel() <= len(img_metas)
+
+ bboxes_list = []
+ for i in range(len(img_metas)):
+ inds = torch.nonzero(
+ rois[:, 0] == i, as_tuple=False).squeeze(dim=1)
+ num_rois = inds.numel()
+
+ bboxes_ = rois[inds, 1:]
+ label_ = labels[inds]
+ bbox_pred_ = bbox_preds[inds]
+ img_meta_ = img_metas[i]
+ pos_is_gts_ = pos_is_gts[i]
+
+ bboxes = self.regress_by_class(bboxes_, label_, bbox_pred_,
+ img_meta_)
+
+ # filter gt bboxes
+ pos_keep = 1 - pos_is_gts_
+ keep_inds = pos_is_gts_.new_ones(num_rois)
+ keep_inds[:len(pos_is_gts_)] = pos_keep
+
+ bboxes_list.append(bboxes[keep_inds.type(torch.bool)])
+
+ return bboxes_list
+
+ @force_fp32(apply_to=('bbox_pred', ))
+ def regress_by_class(self, rois, label, bbox_pred, img_meta):
+ """Regress the bbox for the predicted class. Used in Cascade R-CNN.
+
+ Args:
+ rois (Tensor): shape (n, 4) or (n, 5)
+ label (Tensor): shape (n, )
+ bbox_pred (Tensor): shape (n, 4*(#class)) or (n, 4)
+ img_meta (dict): Image meta info.
+
+ Returns:
+ Tensor: Regressed bboxes, the same shape as input rois.
+ """
+ assert rois.size(1) == 4 or rois.size(1) == 5, repr(rois.shape)
+
+ if not self.reg_class_agnostic:
+ label = label * 4
+ inds = torch.stack((label, label + 1, label + 2, label + 3), 1)
+ bbox_pred = torch.gather(bbox_pred, 1, inds)
+ assert bbox_pred.size(1) == 4
+
+ if rois.size(1) == 4:
+ new_rois = self.bbox_coder.decode(
+ rois, bbox_pred, max_shape=img_meta['img_shape'])
+ else:
+ bboxes = self.bbox_coder.decode(
+ rois[:, 1:], bbox_pred, max_shape=img_meta['img_shape'])
+ new_rois = torch.cat((rois[:, [0]], bboxes), dim=1)
+
+ return new_rois
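+# Usage sketch (illustrative, default arguments): with roi_feat_size=7 and 80
+# classes the head maps pooled RoI features to 81 classification logits and
+# 4 * 80 regression deltas per RoI, e.g.
+#     head = BBoxHead(in_channels=256, roi_feat_size=7, num_classes=80)
+#     cls_score, bbox_pred = head(torch.rand(8, 256, 7, 7))
+#     cls_score.shape, bbox_pred.shape   # -> (8, 81), (8, 320)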
diff --git a/annotator/uniformer/mmdet/models/roi_heads/bbox_heads/convfc_bbox_head.py b/annotator/uniformer/mmdet/models/roi_heads/bbox_heads/convfc_bbox_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..0e86d2ea67e154fae18dbf9d2bfde6d0a70e582c
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/roi_heads/bbox_heads/convfc_bbox_head.py
@@ -0,0 +1,205 @@
+import torch.nn as nn
+from mmcv.cnn import ConvModule
+
+from mmdet.models.builder import HEADS
+from .bbox_head import BBoxHead
+
+
+@HEADS.register_module()
+class ConvFCBBoxHead(BBoxHead):
+ r"""More general bbox head, with shared conv and fc layers and two optional
+ separated branches.
+
+ .. code-block:: none
+
+ /-> cls convs -> cls fcs -> cls
+ shared convs -> shared fcs
+ \-> reg convs -> reg fcs -> reg
+ """ # noqa: W605
+
+ def __init__(self,
+ num_shared_convs=0,
+ num_shared_fcs=0,
+ num_cls_convs=0,
+ num_cls_fcs=0,
+ num_reg_convs=0,
+ num_reg_fcs=0,
+ conv_out_channels=256,
+ fc_out_channels=1024,
+ conv_cfg=None,
+ norm_cfg=None,
+ *args,
+ **kwargs):
+ super(ConvFCBBoxHead, self).__init__(*args, **kwargs)
+ assert (num_shared_convs + num_shared_fcs + num_cls_convs +
+ num_cls_fcs + num_reg_convs + num_reg_fcs > 0)
+ if num_cls_convs > 0 or num_reg_convs > 0:
+ assert num_shared_fcs == 0
+ if not self.with_cls:
+ assert num_cls_convs == 0 and num_cls_fcs == 0
+ if not self.with_reg:
+ assert num_reg_convs == 0 and num_reg_fcs == 0
+ self.num_shared_convs = num_shared_convs
+ self.num_shared_fcs = num_shared_fcs
+ self.num_cls_convs = num_cls_convs
+ self.num_cls_fcs = num_cls_fcs
+ self.num_reg_convs = num_reg_convs
+ self.num_reg_fcs = num_reg_fcs
+ self.conv_out_channels = conv_out_channels
+ self.fc_out_channels = fc_out_channels
+ self.conv_cfg = conv_cfg
+ self.norm_cfg = norm_cfg
+
+ # add shared convs and fcs
+ self.shared_convs, self.shared_fcs, last_layer_dim = \
+ self._add_conv_fc_branch(
+ self.num_shared_convs, self.num_shared_fcs, self.in_channels,
+ True)
+ self.shared_out_channels = last_layer_dim
+
+ # add cls specific branch
+ self.cls_convs, self.cls_fcs, self.cls_last_dim = \
+ self._add_conv_fc_branch(
+ self.num_cls_convs, self.num_cls_fcs, self.shared_out_channels)
+
+ # add reg specific branch
+ self.reg_convs, self.reg_fcs, self.reg_last_dim = \
+ self._add_conv_fc_branch(
+ self.num_reg_convs, self.num_reg_fcs, self.shared_out_channels)
+
+ if self.num_shared_fcs == 0 and not self.with_avg_pool:
+ if self.num_cls_fcs == 0:
+ self.cls_last_dim *= self.roi_feat_area
+ if self.num_reg_fcs == 0:
+ self.reg_last_dim *= self.roi_feat_area
+
+ self.relu = nn.ReLU(inplace=True)
+ # reconstruct fc_cls and fc_reg since input channels are changed
+ if self.with_cls:
+ self.fc_cls = nn.Linear(self.cls_last_dim, self.num_classes + 1)
+ if self.with_reg:
+ out_dim_reg = (4 if self.reg_class_agnostic else 4 *
+ self.num_classes)
+ self.fc_reg = nn.Linear(self.reg_last_dim, out_dim_reg)
+
+ def _add_conv_fc_branch(self,
+ num_branch_convs,
+ num_branch_fcs,
+ in_channels,
+ is_shared=False):
+ """Add shared or separable branch.
+
+ convs -> avg pool (optional) -> fcs
+ """
+ last_layer_dim = in_channels
+ # add branch specific conv layers
+ branch_convs = nn.ModuleList()
+ if num_branch_convs > 0:
+ for i in range(num_branch_convs):
+ conv_in_channels = (
+ last_layer_dim if i == 0 else self.conv_out_channels)
+ branch_convs.append(
+ ConvModule(
+ conv_in_channels,
+ self.conv_out_channels,
+ 3,
+ padding=1,
+ conv_cfg=self.conv_cfg,
+ norm_cfg=self.norm_cfg))
+ last_layer_dim = self.conv_out_channels
+ # add branch specific fc layers
+ branch_fcs = nn.ModuleList()
+ if num_branch_fcs > 0:
+ # for shared branch, only consider self.with_avg_pool
+ # for separated branches, also consider self.num_shared_fcs
+ if (is_shared
+ or self.num_shared_fcs == 0) and not self.with_avg_pool:
+ last_layer_dim *= self.roi_feat_area
+ for i in range(num_branch_fcs):
+ fc_in_channels = (
+ last_layer_dim if i == 0 else self.fc_out_channels)
+ branch_fcs.append(
+ nn.Linear(fc_in_channels, self.fc_out_channels))
+ last_layer_dim = self.fc_out_channels
+ return branch_convs, branch_fcs, last_layer_dim
+
+ def init_weights(self):
+ super(ConvFCBBoxHead, self).init_weights()
+ # conv layers are already initialized by ConvModule
+ for module_list in [self.shared_fcs, self.cls_fcs, self.reg_fcs]:
+ for m in module_list.modules():
+ if isinstance(m, nn.Linear):
+ nn.init.xavier_uniform_(m.weight)
+ nn.init.constant_(m.bias, 0)
+
+ def forward(self, x):
+ # shared part
+ if self.num_shared_convs > 0:
+ for conv in self.shared_convs:
+ x = conv(x)
+
+ if self.num_shared_fcs > 0:
+ if self.with_avg_pool:
+ x = self.avg_pool(x)
+
+ x = x.flatten(1)
+
+ for fc in self.shared_fcs:
+ x = self.relu(fc(x))
+ # separate branches
+ x_cls = x
+ x_reg = x
+
+ for conv in self.cls_convs:
+ x_cls = conv(x_cls)
+ if x_cls.dim() > 2:
+ if self.with_avg_pool:
+ x_cls = self.avg_pool(x_cls)
+ x_cls = x_cls.flatten(1)
+ for fc in self.cls_fcs:
+ x_cls = self.relu(fc(x_cls))
+
+ for conv in self.reg_convs:
+ x_reg = conv(x_reg)
+ if x_reg.dim() > 2:
+ if self.with_avg_pool:
+ x_reg = self.avg_pool(x_reg)
+ x_reg = x_reg.flatten(1)
+ for fc in self.reg_fcs:
+ x_reg = self.relu(fc(x_reg))
+
+ cls_score = self.fc_cls(x_cls) if self.with_cls else None
+ bbox_pred = self.fc_reg(x_reg) if self.with_reg else None
+ return cls_score, bbox_pred
+
+
+@HEADS.register_module()
+class Shared2FCBBoxHead(ConvFCBBoxHead):
+
+ def __init__(self, fc_out_channels=1024, *args, **kwargs):
+ super(Shared2FCBBoxHead, self).__init__(
+ num_shared_convs=0,
+ num_shared_fcs=2,
+ num_cls_convs=0,
+ num_cls_fcs=0,
+ num_reg_convs=0,
+ num_reg_fcs=0,
+ fc_out_channels=fc_out_channels,
+ *args,
+ **kwargs)
+
+
+@HEADS.register_module()
+class Shared4Conv1FCBBoxHead(ConvFCBBoxHead):
+
+ def __init__(self, fc_out_channels=1024, *args, **kwargs):
+ super(Shared4Conv1FCBBoxHead, self).__init__(
+ num_shared_convs=4,
+ num_shared_fcs=1,
+ num_cls_convs=0,
+ num_cls_fcs=0,
+ num_reg_convs=0,
+ num_reg_fcs=0,
+ fc_out_channels=fc_out_channels,
+ *args,
+ **kwargs)
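+# Config sketch (illustrative): Shared2FCBBoxHead is the common "two shared
+# fc" box head used in Faster R-CNN style configs, e.g.
+#     bbox_head=dict(
+#         type='Shared2FCBBoxHead',
+#         in_channels=256,
+#         fc_out_channels=1024,
+#         roi_feat_size=7,
+#         num_classes=80)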
diff --git a/annotator/uniformer/mmdet/models/roi_heads/bbox_heads/dii_head.py b/annotator/uniformer/mmdet/models/roi_heads/bbox_heads/dii_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..8c970a78184672aaaa95edcdaecec03a26604390
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/roi_heads/bbox_heads/dii_head.py
@@ -0,0 +1,415 @@
+import torch
+import torch.nn as nn
+from mmcv.cnn import (bias_init_with_prob, build_activation_layer,
+ build_norm_layer)
+from mmcv.runner import auto_fp16, force_fp32
+
+from mmdet.core import multi_apply
+from mmdet.models.builder import HEADS, build_loss
+from mmdet.models.dense_heads.atss_head import reduce_mean
+from mmdet.models.losses import accuracy
+from mmdet.models.utils import FFN, MultiheadAttention, build_transformer
+from .bbox_head import BBoxHead
+
+
+@HEADS.register_module()
+class DIIHead(BBoxHead):
+ r"""Dynamic Instance Interactive Head for `Sparse R-CNN: End-to-End Object
+    Detection with Learnable Proposals <https://arxiv.org/abs/2011.12450>`_
+
+ Args:
+        num_classes (int): Number of classes in the dataset.
+            Defaults to 80.
+        num_ffn_fcs (int): The number of fully-connected
+            layers in FFNs. Defaults to 2.
+        num_heads (int): The number of attention heads in
+            MultiheadAttention. Defaults to 8.
+        num_cls_fcs (int): The number of fully-connected
+            layers in classification subnet. Defaults to 1.
+        num_reg_fcs (int): The number of fully-connected
+            layers in regression subnet. Defaults to 3.
+        feedforward_channels (int): The hidden dimension
+            of FFNs. Defaults to 2048.
+        in_channels (int): Hidden channels of MultiheadAttention.
+            Defaults to 256.
+        dropout (float): Dropout probability.
+            Defaults to 0.0.
+ ffn_act_cfg (dict): The activation config for FFNs.
+ dynamic_conv_cfg (dict): The convolution config
+ for DynamicConv.
+ loss_iou (dict): The config for iou or giou loss.
+
+ """
+
+ def __init__(self,
+ num_classes=80,
+ num_ffn_fcs=2,
+ num_heads=8,
+ num_cls_fcs=1,
+ num_reg_fcs=3,
+ feedforward_channels=2048,
+ in_channels=256,
+ dropout=0.0,
+ ffn_act_cfg=dict(type='ReLU', inplace=True),
+ dynamic_conv_cfg=dict(
+ type='DynamicConv',
+ in_channels=256,
+ feat_channels=64,
+ out_channels=256,
+ input_feat_shape=7,
+ act_cfg=dict(type='ReLU', inplace=True),
+ norm_cfg=dict(type='LN')),
+ loss_iou=dict(type='GIoULoss', loss_weight=2.0),
+ **kwargs):
+ super(DIIHead, self).__init__(
+ num_classes=num_classes,
+ reg_decoded_bbox=True,
+ reg_class_agnostic=True,
+ **kwargs)
+ self.loss_iou = build_loss(loss_iou)
+ self.in_channels = in_channels
+ self.fp16_enabled = False
+ self.attention = MultiheadAttention(in_channels, num_heads, dropout)
+ self.attention_norm = build_norm_layer(dict(type='LN'), in_channels)[1]
+
+ self.instance_interactive_conv = build_transformer(dynamic_conv_cfg)
+ self.instance_interactive_conv_dropout = nn.Dropout(dropout)
+ self.instance_interactive_conv_norm = build_norm_layer(
+ dict(type='LN'), in_channels)[1]
+
+ self.ffn = FFN(
+ in_channels,
+ feedforward_channels,
+ num_ffn_fcs,
+ act_cfg=ffn_act_cfg,
+ dropout=dropout)
+ self.ffn_norm = build_norm_layer(dict(type='LN'), in_channels)[1]
+
+ self.cls_fcs = nn.ModuleList()
+ for _ in range(num_cls_fcs):
+ self.cls_fcs.append(
+ nn.Linear(in_channels, in_channels, bias=False))
+ self.cls_fcs.append(
+ build_norm_layer(dict(type='LN'), in_channels)[1])
+ self.cls_fcs.append(
+ build_activation_layer(dict(type='ReLU', inplace=True)))
+
+ # over load the self.fc_cls in BBoxHead
+ if self.loss_cls.use_sigmoid:
+ self.fc_cls = nn.Linear(in_channels, self.num_classes)
+ else:
+ self.fc_cls = nn.Linear(in_channels, self.num_classes + 1)
+
+ self.reg_fcs = nn.ModuleList()
+ for _ in range(num_reg_fcs):
+ self.reg_fcs.append(
+ nn.Linear(in_channels, in_channels, bias=False))
+ self.reg_fcs.append(
+ build_norm_layer(dict(type='LN'), in_channels)[1])
+ self.reg_fcs.append(
+ build_activation_layer(dict(type='ReLU', inplace=True)))
+ # over load the self.fc_cls in BBoxHead
+ self.fc_reg = nn.Linear(in_channels, 4)
+
+        assert self.reg_class_agnostic, 'DIIHead only ' \
+            'supports `reg_class_agnostic=True` '
+        assert self.reg_decoded_bbox, 'DIIHead only ' \
+            'supports `reg_decoded_bbox=True`'
+
+ def init_weights(self):
+ """Use xavier initialization for all weight parameter and set
+ classification head bias as a specific value when use focal loss."""
+ for p in self.parameters():
+ if p.dim() > 1:
+ nn.init.xavier_uniform_(p)
+ else:
+ # adopt the default initialization for
+ # the weight and bias of the layer norm
+ pass
+ if self.loss_cls.use_sigmoid:
+ bias_init = bias_init_with_prob(0.01)
+ nn.init.constant_(self.fc_cls.bias, bias_init)
+
+ @auto_fp16()
+ def forward(self, roi_feat, proposal_feat):
+ """Forward function of Dynamic Instance Interactive Head.
+
+ Args:
+ roi_feat (Tensor): Roi-pooling features with shape
+ (batch_size*num_proposals, feature_dimensions,
+ pooling_h , pooling_w).
+ proposal_feat (Tensor): Intermediate feature get from
+ diihead in last stage, has shape
+ (batch_size, num_proposals, feature_dimensions)
+
+ Returns:
+            tuple[Tensor]: Usually a tuple of classification scores,
+                bbox predictions and an intermediate feature.
+
+ - cls_scores (Tensor): Classification scores for
+ all proposals, has shape
+ (batch_size, num_proposals, num_classes).
+ - bbox_preds (Tensor): Box energies / deltas for
+ all proposals, has shape
+ (batch_size, num_proposals, 4).
+ - obj_feat (Tensor): Object feature before classification
+ and regression subnet, has shape
+ (batch_size, num_proposal, feature_dimensions).
+ """
+ N, num_proposals = proposal_feat.shape[:2]
+
+ # Self attention
+ proposal_feat = proposal_feat.permute(1, 0, 2)
+ proposal_feat = self.attention_norm(self.attention(proposal_feat))
+
+ # instance interactive
+ proposal_feat = proposal_feat.permute(1, 0,
+ 2).reshape(-1, self.in_channels)
+ proposal_feat_iic = self.instance_interactive_conv(
+ proposal_feat, roi_feat)
+ proposal_feat = proposal_feat + self.instance_interactive_conv_dropout(
+ proposal_feat_iic)
+ obj_feat = self.instance_interactive_conv_norm(proposal_feat)
+
+ # FFN
+ obj_feat = self.ffn_norm(self.ffn(obj_feat))
+
+ cls_feat = obj_feat
+ reg_feat = obj_feat
+
+ for cls_layer in self.cls_fcs:
+ cls_feat = cls_layer(cls_feat)
+ for reg_layer in self.reg_fcs:
+ reg_feat = reg_layer(reg_feat)
+
+ cls_score = self.fc_cls(cls_feat).view(N, num_proposals, -1)
+ bbox_delta = self.fc_reg(reg_feat).view(N, num_proposals, -1)
+
+ return cls_score, bbox_delta, obj_feat.view(N, num_proposals, -1)
+
+ @force_fp32(apply_to=('cls_score', 'bbox_pred'))
+ def loss(self,
+ cls_score,
+ bbox_pred,
+ labels,
+ label_weights,
+ bbox_targets,
+ bbox_weights,
+ imgs_whwh=None,
+ reduction_override=None,
+ **kwargs):
+ """"Loss function of DIIHead, get loss of all images.
+
+ Args:
+ cls_score (Tensor): Classification prediction
+ results of all class, has shape
+ (batch_size * num_proposals_single_image, num_classes)
+ bbox_pred (Tensor): Regression prediction results,
+ has shape
+ (batch_size * num_proposals_single_image, 4), the last
+ dimension 4 represents [tl_x, tl_y, br_x, br_y].
+            labels (Tensor): Label of each proposal, has shape
+                (batch_size * num_proposals_single_image,).
+            label_weights (Tensor): Classification loss
+                weight of each proposal, has shape
+                (batch_size * num_proposals_single_image,).
+            bbox_targets (Tensor): Regression targets of each
+                proposal, has shape
+                (batch_size * num_proposals_single_image, 4),
+                the last dimension 4 represents
+                [tl_x, tl_y, br_x, br_y].
+            bbox_weights (Tensor): Regression loss weight of each
+                proposal's coordinate, has shape
+                (batch_size * num_proposals_single_image, 4).
+            imgs_whwh (Tensor): Tensor with shape
+                (batch_size, num_proposals, 4), the last
+                dimension means
+                [img_width, img_height, img_width, img_height].
+ reduction_override (str, optional): The reduction
+ method used to override the original reduction
+ method of the loss. Options are "none",
+ "mean" and "sum". Defaults to None,
+
+ Returns:
+ dict[str, Tensor]: Dictionary of loss components
+ """
+ losses = dict()
+ bg_class_ind = self.num_classes
+        # note that in Sparse R-CNN, num_gt == num_pos
+ pos_inds = (labels >= 0) & (labels < bg_class_ind)
+ num_pos = pos_inds.sum().float()
+ avg_factor = reduce_mean(num_pos)
+ if cls_score is not None:
+ if cls_score.numel() > 0:
+ losses['loss_cls'] = self.loss_cls(
+ cls_score,
+ labels,
+ label_weights,
+ avg_factor=avg_factor,
+ reduction_override=reduction_override)
+ losses['pos_acc'] = accuracy(cls_score[pos_inds],
+ labels[pos_inds])
+ if bbox_pred is not None:
+ # 0~self.num_classes-1 are FG, self.num_classes is BG
+ # do not perform bounding box regression for BG anymore.
+ if pos_inds.any():
+ pos_bbox_pred = bbox_pred.reshape(bbox_pred.size(0),
+ 4)[pos_inds.type(torch.bool)]
+ imgs_whwh = imgs_whwh.reshape(bbox_pred.size(0),
+ 4)[pos_inds.type(torch.bool)]
+ losses['loss_bbox'] = self.loss_bbox(
+ pos_bbox_pred / imgs_whwh,
+ bbox_targets[pos_inds.type(torch.bool)] / imgs_whwh,
+ bbox_weights[pos_inds.type(torch.bool)],
+ avg_factor=avg_factor)
+ losses['loss_iou'] = self.loss_iou(
+ pos_bbox_pred,
+ bbox_targets[pos_inds.type(torch.bool)],
+ bbox_weights[pos_inds.type(torch.bool)],
+ avg_factor=avg_factor)
+ else:
+ losses['loss_bbox'] = bbox_pred.sum() * 0
+ losses['loss_iou'] = bbox_pred.sum() * 0
+ return losses
+
+ def _get_target_single(self, pos_inds, neg_inds, pos_bboxes, neg_bboxes,
+ pos_gt_bboxes, pos_gt_labels, cfg):
+ """Calculate the ground truth for proposals in the single image
+ according to the sampling results.
+
+        Almost the same as the implementation in `bbox_head`, except that
+        pos_inds and neg_inds are used to select the positive and negative
+        samples instead of taking the first num_pos entries as positive
+        samples.
+
+ Args:
+ pos_inds (Tensor): The length is equal to the
+ positive sample numbers contain all index
+ of the positive sample in the origin proposal set.
+ neg_inds (Tensor): The length is equal to the
+ negative sample numbers contain all index
+ of the negative sample in the origin proposal set.
+ pos_bboxes (Tensor): Contains all the positive boxes,
+ has shape (num_pos, 4), the last dimension 4
+ represents [tl_x, tl_y, br_x, br_y].
+ neg_bboxes (Tensor): Contains all the negative boxes,
+ has shape (num_neg, 4), the last dimension 4
+ represents [tl_x, tl_y, br_x, br_y].
+ pos_gt_bboxes (Tensor): Contains all the gt_boxes,
+ has shape (num_gt, 4), the last dimension 4
+ represents [tl_x, tl_y, br_x, br_y].
+ pos_gt_labels (Tensor): Contains all the gt_labels,
+ has shape (num_gt).
+ cfg (obj:`ConfigDict`): `train_cfg` of R-CNN.
+
+ Returns:
+ Tuple[Tensor]: Ground truth for proposals in a single image.
+ Containing the following Tensors:
+
+ - labels(Tensor): Gt_labels for all proposals, has
+ shape (num_proposals,).
+ - label_weights(Tensor): Labels_weights for all proposals, has
+ shape (num_proposals,).
+ - bbox_targets(Tensor):Regression target for all proposals, has
+ shape (num_proposals, 4), the last dimension 4
+ represents [tl_x, tl_y, br_x, br_y].
+ - bbox_weights(Tensor):Regression weights for all proposals,
+ has shape (num_proposals, 4).
+ """
+ num_pos = pos_bboxes.size(0)
+ num_neg = neg_bboxes.size(0)
+ num_samples = num_pos + num_neg
+
+ # original implementation uses new_zeros since BG are set to be 0
+ # now use empty & fill because BG cat_id = num_classes,
+ # FG cat_id = [0, num_classes-1]
+ labels = pos_bboxes.new_full((num_samples, ),
+ self.num_classes,
+ dtype=torch.long)
+ label_weights = pos_bboxes.new_zeros(num_samples)
+ bbox_targets = pos_bboxes.new_zeros(num_samples, 4)
+ bbox_weights = pos_bboxes.new_zeros(num_samples, 4)
+ if num_pos > 0:
+ labels[pos_inds] = pos_gt_labels
+ pos_weight = 1.0 if cfg.pos_weight <= 0 else cfg.pos_weight
+ label_weights[pos_inds] = pos_weight
+ if not self.reg_decoded_bbox:
+ pos_bbox_targets = self.bbox_coder.encode(
+ pos_bboxes, pos_gt_bboxes)
+ else:
+ pos_bbox_targets = pos_gt_bboxes
+ bbox_targets[pos_inds, :] = pos_bbox_targets
+ bbox_weights[pos_inds, :] = 1
+ if num_neg > 0:
+ label_weights[neg_inds] = 1.0
+
+ return labels, label_weights, bbox_targets, bbox_weights
+
+ def get_targets(self,
+ sampling_results,
+ gt_bboxes,
+ gt_labels,
+ rcnn_train_cfg,
+ concat=True):
+ """Calculate the ground truth for all samples in a batch according to
+ the sampling_results.
+
+ Almost the same as the implementation in bbox_head, we passed
+ additional parameters pos_inds_list and neg_inds_list to
+ `_get_target_single` function.
+
+ Args:
+ sampling_results (List[obj:SamplingResults]): Assign results of
+ all images in a batch after sampling.
+ gt_bboxes (list[Tensor]): Gt_bboxes of all images in a batch,
+ each tensor has shape (num_gt, 4), the last dimension 4
+ represents [tl_x, tl_y, br_x, br_y].
+ gt_labels (list[Tensor]): Gt_labels of all images in a batch,
+ each tensor has shape (num_gt,).
+ rcnn_train_cfg (obj:`ConfigDict`): `train_cfg` of RCNN.
+ concat (bool): Whether to concatenate the results of all
+ the images in a single batch.
+
+ Returns:
+ Tuple[Tensor]: Ground truth for proposals in a single image.
+ Containing the following list of Tensors:
+
+ - labels (list[Tensor],Tensor): Gt_labels for all
+ proposals in a batch, each tensor in list has
+ shape (num_proposals,) when `concat=False`, otherwise just
+ a single tensor has shape (num_all_proposals,).
+ - label_weights (list[Tensor]): Labels_weights for
+ all proposals in a batch, each tensor in list has shape
+ (num_proposals,) when `concat=False`, otherwise just a
+ single tensor has shape (num_all_proposals,).
+ - bbox_targets (list[Tensor],Tensor): Regression target
+ for all proposals in a batch, each tensor in list has
+ shape (num_proposals, 4) when `concat=False`, otherwise
+ just a single tensor has shape (num_all_proposals, 4),
+ the last dimension 4 represents [tl_x, tl_y, br_x, br_y].
+ - bbox_weights (list[tensor],Tensor): Regression weights for
+ all proposals in a batch, each tensor in list has shape
+ (num_proposals, 4) when `concat=False`, otherwise just a
+ single tensor has shape (num_all_proposals, 4).
+ """
+ pos_inds_list = [res.pos_inds for res in sampling_results]
+ neg_inds_list = [res.neg_inds for res in sampling_results]
+ pos_bboxes_list = [res.pos_bboxes for res in sampling_results]
+ neg_bboxes_list = [res.neg_bboxes for res in sampling_results]
+ pos_gt_bboxes_list = [res.pos_gt_bboxes for res in sampling_results]
+ pos_gt_labels_list = [res.pos_gt_labels for res in sampling_results]
+ labels, label_weights, bbox_targets, bbox_weights = multi_apply(
+ self._get_target_single,
+ pos_inds_list,
+ neg_inds_list,
+ pos_bboxes_list,
+ neg_bboxes_list,
+ pos_gt_bboxes_list,
+ pos_gt_labels_list,
+ cfg=rcnn_train_cfg)
+ if concat:
+ labels = torch.cat(labels, 0)
+ label_weights = torch.cat(label_weights, 0)
+ bbox_targets = torch.cat(bbox_targets, 0)
+ bbox_weights = torch.cat(bbox_weights, 0)
+ return labels, label_weights, bbox_targets, bbox_weights
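+# Shape sketch (illustrative, restating the forward contract above): for a
+# batch of B images with N proposals each, DIIHead.forward consumes
+#     roi_feat:      (B * N, in_channels, pooling_h, pooling_w)
+#     proposal_feat: (B, N, in_channels)
+# and returns cls_score (B, N, num_classes or num_classes + 1, depending on
+# whether the classification loss uses sigmoid), bbox_delta (B, N, 4) and the
+# updated object features (B, N, in_channels).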
diff --git a/annotator/uniformer/mmdet/models/roi_heads/bbox_heads/double_bbox_head.py b/annotator/uniformer/mmdet/models/roi_heads/bbox_heads/double_bbox_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..6c154cb3c0d9d7639c3d4a2a1272406d3fab8acd
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/roi_heads/bbox_heads/double_bbox_head.py
@@ -0,0 +1,172 @@
+import torch.nn as nn
+from mmcv.cnn import ConvModule, normal_init, xavier_init
+
+from mmdet.models.backbones.resnet import Bottleneck
+from mmdet.models.builder import HEADS
+from .bbox_head import BBoxHead
+
+
+class BasicResBlock(nn.Module):
+ """Basic residual block.
+
+    This block is a little different from the block in the ResNet backbone:
+    conv2 uses a 1x1 kernel here, while the second conv of the ResNet
+    BasicBlock uses a 3x3 kernel.
+
+ Args:
+ in_channels (int): Channels of the input feature map.
+ out_channels (int): Channels of the output feature map.
+ conv_cfg (dict): The config dict for convolution layers.
+ norm_cfg (dict): The config dict for normalization layers.
+ """
+
+ def __init__(self,
+ in_channels,
+ out_channels,
+ conv_cfg=None,
+ norm_cfg=dict(type='BN')):
+ super(BasicResBlock, self).__init__()
+
+ # main path
+ self.conv1 = ConvModule(
+ in_channels,
+ in_channels,
+ kernel_size=3,
+ padding=1,
+ bias=False,
+ conv_cfg=conv_cfg,
+ norm_cfg=norm_cfg)
+ self.conv2 = ConvModule(
+ in_channels,
+ out_channels,
+ kernel_size=1,
+ bias=False,
+ conv_cfg=conv_cfg,
+ norm_cfg=norm_cfg,
+ act_cfg=None)
+
+ # identity path
+ self.conv_identity = ConvModule(
+ in_channels,
+ out_channels,
+ kernel_size=1,
+ conv_cfg=conv_cfg,
+ norm_cfg=norm_cfg,
+ act_cfg=None)
+
+ self.relu = nn.ReLU(inplace=True)
+
+ def forward(self, x):
+ identity = x
+
+ x = self.conv1(x)
+ x = self.conv2(x)
+
+ identity = self.conv_identity(identity)
+ out = x + identity
+
+ out = self.relu(out)
+ return out
+
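+# Shape sketch (illustrative, assuming `import torch`): the block keeps the
+# spatial size and only changes the channel count, e.g.
+#     block = BasicResBlock(in_channels=256, out_channels=1024)
+#     block(torch.rand(2, 256, 7, 7)).shape   # -> torch.Size([2, 1024, 7, 7])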
+
+@HEADS.register_module()
+class DoubleConvFCBBoxHead(BBoxHead):
+ r"""Bbox head used in Double-Head R-CNN
+
+ .. code-block:: none
+
+ /-> cls
+ /-> shared convs ->
+ \-> reg
+ roi features
+ /-> cls
+ \-> shared fc ->
+ \-> reg
+ """ # noqa: W605
+
+ def __init__(self,
+ num_convs=0,
+ num_fcs=0,
+ conv_out_channels=1024,
+ fc_out_channels=1024,
+ conv_cfg=None,
+ norm_cfg=dict(type='BN'),
+ **kwargs):
+ kwargs.setdefault('with_avg_pool', True)
+ super(DoubleConvFCBBoxHead, self).__init__(**kwargs)
+ assert self.with_avg_pool
+ assert num_convs > 0
+ assert num_fcs > 0
+ self.num_convs = num_convs
+ self.num_fcs = num_fcs
+ self.conv_out_channels = conv_out_channels
+ self.fc_out_channels = fc_out_channels
+ self.conv_cfg = conv_cfg
+ self.norm_cfg = norm_cfg
+
+ # increase the channel of input features
+ self.res_block = BasicResBlock(self.in_channels,
+ self.conv_out_channels)
+
+ # add conv heads
+ self.conv_branch = self._add_conv_branch()
+ # add fc heads
+ self.fc_branch = self._add_fc_branch()
+
+ out_dim_reg = 4 if self.reg_class_agnostic else 4 * self.num_classes
+ self.fc_reg = nn.Linear(self.conv_out_channels, out_dim_reg)
+
+ self.fc_cls = nn.Linear(self.fc_out_channels, self.num_classes + 1)
+ self.relu = nn.ReLU(inplace=True)
+
+ def _add_conv_branch(self):
+ """Add the fc branch which consists of a sequential of conv layers."""
+ branch_convs = nn.ModuleList()
+ for i in range(self.num_convs):
+ branch_convs.append(
+ Bottleneck(
+ inplanes=self.conv_out_channels,
+ planes=self.conv_out_channels // 4,
+ conv_cfg=self.conv_cfg,
+ norm_cfg=self.norm_cfg))
+ return branch_convs
+
+ def _add_fc_branch(self):
+ """Add the fc branch which consists of a sequential of fc layers."""
+ branch_fcs = nn.ModuleList()
+ for i in range(self.num_fcs):
+ fc_in_channels = (
+ self.in_channels *
+ self.roi_feat_area if i == 0 else self.fc_out_channels)
+ branch_fcs.append(nn.Linear(fc_in_channels, self.fc_out_channels))
+ return branch_fcs
+
+ def init_weights(self):
+ # conv layers are already initialized by ConvModule
+ normal_init(self.fc_cls, std=0.01)
+ normal_init(self.fc_reg, std=0.001)
+
+ for m in self.fc_branch.modules():
+ if isinstance(m, nn.Linear):
+ xavier_init(m, distribution='uniform')
+
+ def forward(self, x_cls, x_reg):
+ # conv head
+ x_conv = self.res_block(x_reg)
+
+ for conv in self.conv_branch:
+ x_conv = conv(x_conv)
+
+ if self.with_avg_pool:
+ x_conv = self.avg_pool(x_conv)
+
+ x_conv = x_conv.view(x_conv.size(0), -1)
+ bbox_pred = self.fc_reg(x_conv)
+
+ # fc head
+ x_fc = x_cls.view(x_cls.size(0), -1)
+ for fc in self.fc_branch:
+ x_fc = self.relu(fc(x_fc))
+
+ cls_score = self.fc_cls(x_fc)
+
+ return cls_score, bbox_pred
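+# Usage sketch (illustrative, assuming `import torch`): x_cls feeds the fc
+# (classification) branch and x_reg feeds the conv (regression) branch, e.g.
+#     head = DoubleConvFCBBoxHead(num_convs=4, num_fcs=2, in_channels=256,
+#                                 conv_out_channels=1024,
+#                                 fc_out_channels=1024, num_classes=80)
+#     x = torch.rand(8, 256, 7, 7)
+#     cls_score, bbox_pred = head(x, x)
+#     cls_score.shape, bbox_pred.shape   # -> (8, 81), (8, 320)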
diff --git a/annotator/uniformer/mmdet/models/roi_heads/bbox_heads/sabl_head.py b/annotator/uniformer/mmdet/models/roi_heads/bbox_heads/sabl_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..5153996aeb706d103d1ad14b61734914eddb7693
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/roi_heads/bbox_heads/sabl_head.py
@@ -0,0 +1,572 @@
+import numpy as np
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from mmcv.cnn import ConvModule, kaiming_init, normal_init, xavier_init
+from mmcv.runner import force_fp32
+
+from mmdet.core import build_bbox_coder, multi_apply, multiclass_nms
+from mmdet.models.builder import HEADS, build_loss
+from mmdet.models.losses import accuracy
+
+
+@HEADS.register_module()
+class SABLHead(nn.Module):
+ """Side-Aware Boundary Localization (SABL) for RoI-Head.
+
+ Side-Aware features are extracted by conv layers
+ with an attention mechanism.
+ Boundary Localization with Bucketing and Bucketing Guided Rescoring
+ are implemented in BucketingBBoxCoder.
+
+ Please refer to https://arxiv.org/abs/1912.04260 for more details.
+
+ Args:
+ cls_in_channels (int): Input channels of cls RoI feature. \
+ Defaults to 256.
+ reg_in_channels (int): Input channels of reg RoI feature. \
+ Defaults to 256.
+ roi_feat_size (int): Size of RoI features. Defaults to 7.
+ reg_feat_up_ratio (int): Upsample ratio of reg features. \
+ Defaults to 2.
+ reg_pre_kernel (int): Kernel of 2D conv layers before \
+ attention pooling. Defaults to 3.
+ reg_post_kernel (int): Kernel of 1D conv layers after \
+ attention pooling. Defaults to 3.
+ reg_pre_num (int): Number of pre convs. Defaults to 2.
+ reg_post_num (int): Number of post convs. Defaults to 1.
+ num_classes (int): Number of classes in dataset. Defaults to 80.
+ cls_out_channels (int): Hidden channels in cls fcs. Defaults to 1024.
+ reg_offset_out_channels (int): Hidden and output channel \
+ of reg offset branch. Defaults to 256.
+ reg_cls_out_channels (int): Hidden and output channel \
+ of reg cls branch. Defaults to 256.
+ num_cls_fcs (int): Number of fcs for cls branch. Defaults to 1.
+        num_reg_fcs (int): Number of fcs for reg branch. Defaults to 0.
+        reg_class_agnostic (bool): Class agnostic regression or not. \
+            Defaults to True.
+        norm_cfg (dict): Config of norm layers. Defaults to None.
+        bbox_coder (dict): Config of bbox coder. Defaults to
+            'BucketingBBoxCoder'.
+ loss_cls (dict): Config of classification loss.
+ loss_bbox_cls (dict): Config of classification loss for bbox branch.
+ loss_bbox_reg (dict): Config of regression loss for bbox branch.
+ """
+
+ def __init__(self,
+ num_classes,
+ cls_in_channels=256,
+ reg_in_channels=256,
+ roi_feat_size=7,
+ reg_feat_up_ratio=2,
+ reg_pre_kernel=3,
+ reg_post_kernel=3,
+ reg_pre_num=2,
+ reg_post_num=1,
+ cls_out_channels=1024,
+ reg_offset_out_channels=256,
+ reg_cls_out_channels=256,
+ num_cls_fcs=1,
+ num_reg_fcs=0,
+ reg_class_agnostic=True,
+ norm_cfg=None,
+ bbox_coder=dict(
+ type='BucketingBBoxCoder',
+ num_buckets=14,
+ scale_factor=1.7),
+ loss_cls=dict(
+ type='CrossEntropyLoss',
+ use_sigmoid=False,
+ loss_weight=1.0),
+ loss_bbox_cls=dict(
+ type='CrossEntropyLoss',
+ use_sigmoid=True,
+ loss_weight=1.0),
+ loss_bbox_reg=dict(
+ type='SmoothL1Loss', beta=0.1, loss_weight=1.0)):
+ super(SABLHead, self).__init__()
+ self.cls_in_channels = cls_in_channels
+ self.reg_in_channels = reg_in_channels
+ self.roi_feat_size = roi_feat_size
+ self.reg_feat_up_ratio = int(reg_feat_up_ratio)
+ self.num_buckets = bbox_coder['num_buckets']
+ assert self.reg_feat_up_ratio // 2 >= 1
+ self.up_reg_feat_size = roi_feat_size * self.reg_feat_up_ratio
+ assert self.up_reg_feat_size == bbox_coder['num_buckets']
+ self.reg_pre_kernel = reg_pre_kernel
+ self.reg_post_kernel = reg_post_kernel
+ self.reg_pre_num = reg_pre_num
+ self.reg_post_num = reg_post_num
+ self.num_classes = num_classes
+ self.cls_out_channels = cls_out_channels
+ self.reg_offset_out_channels = reg_offset_out_channels
+ self.reg_cls_out_channels = reg_cls_out_channels
+ self.num_cls_fcs = num_cls_fcs
+ self.num_reg_fcs = num_reg_fcs
+ self.reg_class_agnostic = reg_class_agnostic
+ assert self.reg_class_agnostic
+ self.norm_cfg = norm_cfg
+
+ self.bbox_coder = build_bbox_coder(bbox_coder)
+ self.loss_cls = build_loss(loss_cls)
+ self.loss_bbox_cls = build_loss(loss_bbox_cls)
+ self.loss_bbox_reg = build_loss(loss_bbox_reg)
+
+ self.cls_fcs = self._add_fc_branch(self.num_cls_fcs,
+ self.cls_in_channels,
+ self.roi_feat_size,
+ self.cls_out_channels)
+
+ self.side_num = int(np.ceil(self.num_buckets / 2))
+
+ if self.reg_feat_up_ratio > 1:
+ self.upsample_x = nn.ConvTranspose1d(
+ reg_in_channels,
+ reg_in_channels,
+ self.reg_feat_up_ratio,
+ stride=self.reg_feat_up_ratio)
+ self.upsample_y = nn.ConvTranspose1d(
+ reg_in_channels,
+ reg_in_channels,
+ self.reg_feat_up_ratio,
+ stride=self.reg_feat_up_ratio)
+
+ self.reg_pre_convs = nn.ModuleList()
+ for i in range(self.reg_pre_num):
+ reg_pre_conv = ConvModule(
+ reg_in_channels,
+ reg_in_channels,
+ kernel_size=reg_pre_kernel,
+ padding=reg_pre_kernel // 2,
+ norm_cfg=norm_cfg,
+ act_cfg=dict(type='ReLU'))
+ self.reg_pre_convs.append(reg_pre_conv)
+
+ self.reg_post_conv_xs = nn.ModuleList()
+ for i in range(self.reg_post_num):
+ reg_post_conv_x = ConvModule(
+ reg_in_channels,
+ reg_in_channels,
+ kernel_size=(1, reg_post_kernel),
+ padding=(0, reg_post_kernel // 2),
+ norm_cfg=norm_cfg,
+ act_cfg=dict(type='ReLU'))
+ self.reg_post_conv_xs.append(reg_post_conv_x)
+ self.reg_post_conv_ys = nn.ModuleList()
+ for i in range(self.reg_post_num):
+ reg_post_conv_y = ConvModule(
+ reg_in_channels,
+ reg_in_channels,
+ kernel_size=(reg_post_kernel, 1),
+ padding=(reg_post_kernel // 2, 0),
+ norm_cfg=norm_cfg,
+ act_cfg=dict(type='ReLU'))
+ self.reg_post_conv_ys.append(reg_post_conv_y)
+
+ self.reg_conv_att_x = nn.Conv2d(reg_in_channels, 1, 1)
+ self.reg_conv_att_y = nn.Conv2d(reg_in_channels, 1, 1)
+
+ self.fc_cls = nn.Linear(self.cls_out_channels, self.num_classes + 1)
+ self.relu = nn.ReLU(inplace=True)
+
+ self.reg_cls_fcs = self._add_fc_branch(self.num_reg_fcs,
+ self.reg_in_channels, 1,
+ self.reg_cls_out_channels)
+ self.reg_offset_fcs = self._add_fc_branch(self.num_reg_fcs,
+ self.reg_in_channels, 1,
+ self.reg_offset_out_channels)
+ self.fc_reg_cls = nn.Linear(self.reg_cls_out_channels, 1)
+ self.fc_reg_offset = nn.Linear(self.reg_offset_out_channels, 1)
+
+ def _add_fc_branch(self, num_branch_fcs, in_channels, roi_feat_size,
+ fc_out_channels):
+ in_channels = in_channels * roi_feat_size * roi_feat_size
+ branch_fcs = nn.ModuleList()
+ for i in range(num_branch_fcs):
+ fc_in_channels = (in_channels if i == 0 else fc_out_channels)
+ branch_fcs.append(nn.Linear(fc_in_channels, fc_out_channels))
+ return branch_fcs
+
+ def init_weights(self):
+ for module_list in [
+ self.reg_cls_fcs, self.reg_offset_fcs, self.cls_fcs
+ ]:
+ for m in module_list.modules():
+ if isinstance(m, nn.Linear):
+ xavier_init(m, distribution='uniform')
+ if self.reg_feat_up_ratio > 1:
+ kaiming_init(self.upsample_x, distribution='normal')
+ kaiming_init(self.upsample_y, distribution='normal')
+
+ normal_init(self.reg_conv_att_x, 0, 0.01)
+ normal_init(self.reg_conv_att_y, 0, 0.01)
+ normal_init(self.fc_reg_offset, 0, 0.001)
+ normal_init(self.fc_reg_cls, 0, 0.01)
+ normal_init(self.fc_cls, 0, 0.01)
+
+ def cls_forward(self, cls_x):
+ cls_x = cls_x.view(cls_x.size(0), -1)
+ for fc in self.cls_fcs:
+ cls_x = self.relu(fc(cls_x))
+ cls_score = self.fc_cls(cls_x)
+ return cls_score
+
+ def attention_pool(self, reg_x):
+ """Extract direction-specific features fx and fy with attention
+ methanism."""
+ reg_fx = reg_x
+ reg_fy = reg_x
+ reg_fx_att = self.reg_conv_att_x(reg_fx).sigmoid()
+ reg_fy_att = self.reg_conv_att_y(reg_fy).sigmoid()
+ reg_fx_att = reg_fx_att / reg_fx_att.sum(dim=2).unsqueeze(2)
+ reg_fy_att = reg_fy_att / reg_fy_att.sum(dim=3).unsqueeze(3)
+ reg_fx = (reg_fx * reg_fx_att).sum(dim=2)
+ reg_fy = (reg_fy * reg_fy_att).sum(dim=3)
+ return reg_fx, reg_fy
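+
+    # Hedged shape sketch (not part of upstream mmdet): for pooled RoI
+    # features reg_x of shape (N, C, H, W), the attention maps have shape
+    # (N, 1, H, W); normalising over dim=2 / dim=3 and summing gives
+    #   reg_fx: (N, C, W)  # pooled over height, used for the x-direction sides
+    #   reg_fy: (N, C, H)  # pooled over width, used for the y-direction sides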
+
+ def side_aware_feature_extractor(self, reg_x):
+ """Refine and extract side-aware features without split them."""
+ for reg_pre_conv in self.reg_pre_convs:
+ reg_x = reg_pre_conv(reg_x)
+ reg_fx, reg_fy = self.attention_pool(reg_x)
+
+ if self.reg_post_num > 0:
+ reg_fx = reg_fx.unsqueeze(2)
+ reg_fy = reg_fy.unsqueeze(3)
+ for i in range(self.reg_post_num):
+ reg_fx = self.reg_post_conv_xs[i](reg_fx)
+ reg_fy = self.reg_post_conv_ys[i](reg_fy)
+ reg_fx = reg_fx.squeeze(2)
+ reg_fy = reg_fy.squeeze(3)
+ if self.reg_feat_up_ratio > 1:
+ reg_fx = self.relu(self.upsample_x(reg_fx))
+ reg_fy = self.relu(self.upsample_y(reg_fy))
+ reg_fx = torch.transpose(reg_fx, 1, 2)
+ reg_fy = torch.transpose(reg_fy, 1, 2)
+ return reg_fx.contiguous(), reg_fy.contiguous()
+
+ def reg_pred(self, x, offset_fcs, cls_fcs):
+ """Predict bucketing estimation (cls_pred) and fine regression (offset
+ pred) with side-aware features."""
+ x_offset = x.view(-1, self.reg_in_channels)
+ x_cls = x.view(-1, self.reg_in_channels)
+
+ for fc in offset_fcs:
+ x_offset = self.relu(fc(x_offset))
+ for fc in cls_fcs:
+ x_cls = self.relu(fc(x_cls))
+ offset_pred = self.fc_reg_offset(x_offset)
+ cls_pred = self.fc_reg_cls(x_cls)
+
+ offset_pred = offset_pred.view(x.size(0), -1)
+ cls_pred = cls_pred.view(x.size(0), -1)
+
+ return offset_pred, cls_pred
+
+ def side_aware_split(self, feat):
+ """Split side-aware features aligned with orders of bucketing
+ targets."""
+ l_end = int(np.ceil(self.up_reg_feat_size / 2))
+ r_start = int(np.floor(self.up_reg_feat_size / 2))
+ feat_fl = feat[:, :l_end]
+ feat_fr = feat[:, r_start:].flip(dims=(1, ))
+ feat_fl = feat_fl.contiguous()
+ feat_fr = feat_fr.contiguous()
+ feat = torch.cat([feat_fl, feat_fr], dim=-1)
+ return feat
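+
+    # Hedged example (illustrative; assumes the default bbox_coder so that
+    # up_reg_feat_size == num_buckets == 14): for feat of shape (n, 14),
+    #   feat_fl = feat[:, :7]           # left/top half, kept in order
+    #   feat_fr = feat[:, 7:].flip(1)   # right/bottom half, reversed
+    # and the result is their concatenation of shape (n, 14), so each half is
+    # ordered from the box boundary inwards, matching the bucketing targets.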
+
+ def bbox_pred_split(self, bbox_pred, num_proposals_per_img):
+ """Split batch bbox prediction back to each image."""
+ bucket_cls_preds, bucket_offset_preds = bbox_pred
+ bucket_cls_preds = bucket_cls_preds.split(num_proposals_per_img, 0)
+ bucket_offset_preds = bucket_offset_preds.split(
+ num_proposals_per_img, 0)
+ bbox_pred = tuple(zip(bucket_cls_preds, bucket_offset_preds))
+ return bbox_pred
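+
+    # Hedged usage sketch (hypothetical sizes): with two images contributing
+    # 100 and 80 proposals, bbox_pred holds two tensors of shape
+    # (180, 4 * side_num); bbox_pred_split(bbox_pred, (100, 80)) returns a
+    # tuple of per-image (bucket_cls_preds, bucket_offset_preds) pairs with
+    # leading dimensions 100 and 80 respectively.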
+
+ def reg_forward(self, reg_x):
+ outs = self.side_aware_feature_extractor(reg_x)
+ edge_offset_preds = []
+ edge_cls_preds = []
+ reg_fx = outs[0]
+ reg_fy = outs[1]
+ offset_pred_x, cls_pred_x = self.reg_pred(reg_fx, self.reg_offset_fcs,
+ self.reg_cls_fcs)
+ offset_pred_y, cls_pred_y = self.reg_pred(reg_fy, self.reg_offset_fcs,
+ self.reg_cls_fcs)
+ offset_pred_x = self.side_aware_split(offset_pred_x)
+ offset_pred_y = self.side_aware_split(offset_pred_y)
+ cls_pred_x = self.side_aware_split(cls_pred_x)
+ cls_pred_y = self.side_aware_split(cls_pred_y)
+ edge_offset_preds = torch.cat([offset_pred_x, offset_pred_y], dim=-1)
+ edge_cls_preds = torch.cat([cls_pred_x, cls_pred_y], dim=-1)
+
+ return (edge_cls_preds, edge_offset_preds)
+
+ def forward(self, x):
+
+ bbox_pred = self.reg_forward(x)
+ cls_score = self.cls_forward(x)
+
+ return cls_score, bbox_pred
+
+ def get_targets(self, sampling_results, gt_bboxes, gt_labels,
+ rcnn_train_cfg):
+ pos_proposals = [res.pos_bboxes for res in sampling_results]
+ neg_proposals = [res.neg_bboxes for res in sampling_results]
+ pos_gt_bboxes = [res.pos_gt_bboxes for res in sampling_results]
+ pos_gt_labels = [res.pos_gt_labels for res in sampling_results]
+ cls_reg_targets = self.bucket_target(pos_proposals, neg_proposals,
+ pos_gt_bboxes, pos_gt_labels,
+ rcnn_train_cfg)
+ (labels, label_weights, bucket_cls_targets, bucket_cls_weights,
+ bucket_offset_targets, bucket_offset_weights) = cls_reg_targets
+ return (labels, label_weights, (bucket_cls_targets,
+ bucket_offset_targets),
+ (bucket_cls_weights, bucket_offset_weights))
+
+ def bucket_target(self,
+ pos_proposals_list,
+ neg_proposals_list,
+ pos_gt_bboxes_list,
+ pos_gt_labels_list,
+ rcnn_train_cfg,
+ concat=True):
+ (labels, label_weights, bucket_cls_targets, bucket_cls_weights,
+ bucket_offset_targets, bucket_offset_weights) = multi_apply(
+ self._bucket_target_single,
+ pos_proposals_list,
+ neg_proposals_list,
+ pos_gt_bboxes_list,
+ pos_gt_labels_list,
+ cfg=rcnn_train_cfg)
+
+ if concat:
+ labels = torch.cat(labels, 0)
+ label_weights = torch.cat(label_weights, 0)
+ bucket_cls_targets = torch.cat(bucket_cls_targets, 0)
+ bucket_cls_weights = torch.cat(bucket_cls_weights, 0)
+ bucket_offset_targets = torch.cat(bucket_offset_targets, 0)
+ bucket_offset_weights = torch.cat(bucket_offset_weights, 0)
+ return (labels, label_weights, bucket_cls_targets, bucket_cls_weights,
+ bucket_offset_targets, bucket_offset_weights)
+
+ def _bucket_target_single(self, pos_proposals, neg_proposals,
+ pos_gt_bboxes, pos_gt_labels, cfg):
+ """Compute bucketing estimation targets and fine regression targets for
+ a single image.
+
+ Args:
+ pos_proposals (Tensor): positive proposals of a single image,
+ Shape (n_pos, 4)
+ neg_proposals (Tensor): negative proposals of a single image,
+ Shape (n_neg, 4).
+ pos_gt_bboxes (Tensor): gt bboxes assigned to positive proposals
+ of a single image, Shape (n_pos, 4).
+ pos_gt_labels (Tensor): gt labels assigned to positive proposals
+ of a single image, Shape (n_pos, ).
+ cfg (dict): Config of calculating targets
+
+ Returns:
+ tuple:
+
+ - labels (Tensor): Labels in a single image. \
+ Shape (n,).
+ - label_weights (Tensor): Label weights in a single image.\
+ Shape (n,)
+ - bucket_cls_targets (Tensor): Bucket cls targets in \
+ a single image. Shape (n, num_buckets*2).
+ - bucket_cls_weights (Tensor): Bucket cls weights in \
+ a single image. Shape (n, num_buckets*2).
+ - bucket_offset_targets (Tensor): Bucket offset targets \
+ in a single image. Shape (n, num_buckets*2).
+                - bucket_offset_weights (Tensor): Bucket offset weights \
+                    in a single image. Shape (n, num_buckets*2).
+ """
+ num_pos = pos_proposals.size(0)
+ num_neg = neg_proposals.size(0)
+ num_samples = num_pos + num_neg
+ labels = pos_gt_bboxes.new_full((num_samples, ),
+ self.num_classes,
+ dtype=torch.long)
+ label_weights = pos_proposals.new_zeros(num_samples)
+ bucket_cls_targets = pos_proposals.new_zeros(num_samples,
+ 4 * self.side_num)
+ bucket_cls_weights = pos_proposals.new_zeros(num_samples,
+ 4 * self.side_num)
+ bucket_offset_targets = pos_proposals.new_zeros(
+ num_samples, 4 * self.side_num)
+ bucket_offset_weights = pos_proposals.new_zeros(
+ num_samples, 4 * self.side_num)
+ if num_pos > 0:
+ labels[:num_pos] = pos_gt_labels
+ label_weights[:num_pos] = 1.0
+ (pos_bucket_offset_targets, pos_bucket_offset_weights,
+ pos_bucket_cls_targets,
+ pos_bucket_cls_weights) = self.bbox_coder.encode(
+ pos_proposals, pos_gt_bboxes)
+ bucket_cls_targets[:num_pos, :] = pos_bucket_cls_targets
+ bucket_cls_weights[:num_pos, :] = pos_bucket_cls_weights
+ bucket_offset_targets[:num_pos, :] = pos_bucket_offset_targets
+ bucket_offset_weights[:num_pos, :] = pos_bucket_offset_weights
+ if num_neg > 0:
+ label_weights[-num_neg:] = 1.0
+ return (labels, label_weights, bucket_cls_targets, bucket_cls_weights,
+ bucket_offset_targets, bucket_offset_weights)
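+
+    # Hedged layout note (illustrative): with num_pos=2 and num_neg=3 the
+    # returned labels are [gt_0, gt_1, num_classes, num_classes, num_classes]
+    # (background is encoded as num_classes), label_weights is all ones, and
+    # only the first num_pos rows of the bucket targets/weights are non-zero,
+    # filled in by self.bbox_coder.encode.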
+
+ def loss(self,
+ cls_score,
+ bbox_pred,
+ rois,
+ labels,
+ label_weights,
+ bbox_targets,
+ bbox_weights,
+ reduction_override=None):
+ losses = dict()
+ if cls_score is not None:
+ avg_factor = max(torch.sum(label_weights > 0).float().item(), 1.)
+ losses['loss_cls'] = self.loss_cls(
+ cls_score,
+ labels,
+ label_weights,
+ avg_factor=avg_factor,
+ reduction_override=reduction_override)
+ losses['acc'] = accuracy(cls_score, labels)
+
+ if bbox_pred is not None:
+ bucket_cls_preds, bucket_offset_preds = bbox_pred
+ bucket_cls_targets, bucket_offset_targets = bbox_targets
+ bucket_cls_weights, bucket_offset_weights = bbox_weights
+ # edge cls
+ bucket_cls_preds = bucket_cls_preds.view(-1, self.side_num)
+ bucket_cls_targets = bucket_cls_targets.view(-1, self.side_num)
+ bucket_cls_weights = bucket_cls_weights.view(-1, self.side_num)
+ losses['loss_bbox_cls'] = self.loss_bbox_cls(
+ bucket_cls_preds,
+ bucket_cls_targets,
+ bucket_cls_weights,
+ avg_factor=bucket_cls_targets.size(0),
+ reduction_override=reduction_override)
+
+ losses['loss_bbox_reg'] = self.loss_bbox_reg(
+ bucket_offset_preds,
+ bucket_offset_targets,
+ bucket_offset_weights,
+ avg_factor=bucket_offset_targets.size(0),
+ reduction_override=reduction_override)
+
+ return losses
+
+ @force_fp32(apply_to=('cls_score', 'bbox_pred'))
+ def get_bboxes(self,
+ rois,
+ cls_score,
+ bbox_pred,
+ img_shape,
+ scale_factor,
+ rescale=False,
+ cfg=None):
+ if isinstance(cls_score, list):
+ cls_score = sum(cls_score) / float(len(cls_score))
+ scores = F.softmax(cls_score, dim=1) if cls_score is not None else None
+
+ if bbox_pred is not None:
+ bboxes, confids = self.bbox_coder.decode(rois[:, 1:], bbox_pred,
+ img_shape)
+ else:
+ bboxes = rois[:, 1:].clone()
+ confids = None
+ if img_shape is not None:
+ bboxes[:, [0, 2]].clamp_(min=0, max=img_shape[1] - 1)
+ bboxes[:, [1, 3]].clamp_(min=0, max=img_shape[0] - 1)
+
+ if rescale and bboxes.size(0) > 0:
+ if isinstance(scale_factor, float):
+ bboxes /= scale_factor
+ else:
+ bboxes /= torch.from_numpy(scale_factor).to(bboxes.device)
+
+ if cfg is None:
+ return bboxes, scores
+ else:
+ det_bboxes, det_labels = multiclass_nms(
+ bboxes,
+ scores,
+ cfg.score_thr,
+ cfg.nms,
+ cfg.max_per_img,
+ score_factors=confids)
+
+ return det_bboxes, det_labels
+
+ @force_fp32(apply_to=('bbox_preds', ))
+ def refine_bboxes(self, rois, labels, bbox_preds, pos_is_gts, img_metas):
+ """Refine bboxes during training.
+
+ Args:
+ rois (Tensor): Shape (n*bs, 5), where n is image number per GPU,
+ and bs is the sampled RoIs per image.
+ labels (Tensor): Shape (n*bs, ).
+ bbox_preds (list[Tensor]): Shape [(n*bs, num_buckets*2), \
+ (n*bs, num_buckets*2)].
+ pos_is_gts (list[Tensor]): Flags indicating if each positive bbox
+ is a gt bbox.
+ img_metas (list[dict]): Meta info of each image.
+
+ Returns:
+ list[Tensor]: Refined bboxes of each image in a mini-batch.
+ """
+ img_ids = rois[:, 0].long().unique(sorted=True)
+ assert img_ids.numel() == len(img_metas)
+
+ bboxes_list = []
+ for i in range(len(img_metas)):
+ inds = torch.nonzero(
+ rois[:, 0] == i, as_tuple=False).squeeze(dim=1)
+ num_rois = inds.numel()
+
+ bboxes_ = rois[inds, 1:]
+ label_ = labels[inds]
+ edge_cls_preds, edge_offset_preds = bbox_preds
+ edge_cls_preds_ = edge_cls_preds[inds]
+ edge_offset_preds_ = edge_offset_preds[inds]
+ bbox_pred_ = [edge_cls_preds_, edge_offset_preds_]
+ img_meta_ = img_metas[i]
+ pos_is_gts_ = pos_is_gts[i]
+
+ bboxes = self.regress_by_class(bboxes_, label_, bbox_pred_,
+ img_meta_)
+ # filter gt bboxes
+ pos_keep = 1 - pos_is_gts_
+ keep_inds = pos_is_gts_.new_ones(num_rois)
+ keep_inds[:len(pos_is_gts_)] = pos_keep
+
+ bboxes_list.append(bboxes[keep_inds.type(torch.bool)])
+
+ return bboxes_list
+
+ @force_fp32(apply_to=('bbox_pred', ))
+ def regress_by_class(self, rois, label, bbox_pred, img_meta):
+ """Regress the bbox for the predicted class. Used in Cascade R-CNN.
+
+ Args:
+ rois (Tensor): shape (n, 4) or (n, 5)
+ label (Tensor): shape (n, )
+ bbox_pred (list[Tensor]): shape [(n, num_buckets *2), \
+ (n, num_buckets *2)]
+ img_meta (dict): Image meta info.
+
+ Returns:
+ Tensor: Regressed bboxes, the same shape as input rois.
+ """
+ assert rois.size(1) == 4 or rois.size(1) == 5
+
+ if rois.size(1) == 4:
+ new_rois, _ = self.bbox_coder.decode(rois, bbox_pred,
+ img_meta['img_shape'])
+ else:
+ bboxes, _ = self.bbox_coder.decode(rois[:, 1:], bbox_pred,
+ img_meta['img_shape'])
+ new_rois = torch.cat((rois[:, [0]], bboxes), dim=1)
+
+ return new_rois
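+
+
+# Hedged usage sketch (not part of upstream mmdet; shapes assume the defaults
+# above: roi_feat_size=7, reg_feat_up_ratio=2, num_buckets=14):
+#   head = SABLHead(num_classes=80)
+#   feats = torch.rand(8, 256, 7, 7)               # 8 RoIs of pooled features
+#   cls_score, (bucket_cls, bucket_offset) = head(feats)
+#   # cls_score:     (8, 81)  -> num_classes + 1 (background)
+#   # bucket_cls:    (8, 28)  -> (n, num_buckets * 2)
+#   # bucket_offset: (8, 28)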
diff --git a/annotator/uniformer/mmdet/models/roi_heads/bbox_heads/scnet_bbox_head.py b/annotator/uniformer/mmdet/models/roi_heads/bbox_heads/scnet_bbox_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..35758f4f4e3b2bddd460edb8a7f482b3a9da2919
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/roi_heads/bbox_heads/scnet_bbox_head.py
@@ -0,0 +1,76 @@
+from mmdet.models.builder import HEADS
+from .convfc_bbox_head import ConvFCBBoxHead
+
+
+@HEADS.register_module()
+class SCNetBBoxHead(ConvFCBBoxHead):
+ """BBox head for `SCNet `_.
+
+ This inherits ``ConvFCBBoxHead`` with modified forward() function, allow us
+ to get intermediate shared feature.
+ """
+
+ def _forward_shared(self, x):
+ """Forward function for shared part."""
+ if self.num_shared_convs > 0:
+ for conv in self.shared_convs:
+ x = conv(x)
+
+ if self.num_shared_fcs > 0:
+ if self.with_avg_pool:
+ x = self.avg_pool(x)
+
+ x = x.flatten(1)
+
+ for fc in self.shared_fcs:
+ x = self.relu(fc(x))
+
+ return x
+
+ def _forward_cls_reg(self, x):
+ """Forward function for classification and regression parts."""
+ x_cls = x
+ x_reg = x
+
+ for conv in self.cls_convs:
+ x_cls = conv(x_cls)
+ if x_cls.dim() > 2:
+ if self.with_avg_pool:
+ x_cls = self.avg_pool(x_cls)
+ x_cls = x_cls.flatten(1)
+ for fc in self.cls_fcs:
+ x_cls = self.relu(fc(x_cls))
+
+ for conv in self.reg_convs:
+ x_reg = conv(x_reg)
+ if x_reg.dim() > 2:
+ if self.with_avg_pool:
+ x_reg = self.avg_pool(x_reg)
+ x_reg = x_reg.flatten(1)
+ for fc in self.reg_fcs:
+ x_reg = self.relu(fc(x_reg))
+
+ cls_score = self.fc_cls(x_cls) if self.with_cls else None
+ bbox_pred = self.fc_reg(x_reg) if self.with_reg else None
+
+ return cls_score, bbox_pred
+
+ def forward(self, x, return_shared_feat=False):
+ """Forward function.
+
+ Args:
+ x (Tensor): input features
+ return_shared_feat (bool): If True, return cls-reg-shared feature.
+
+        Returns:
+            out (tuple[Tensor]): contains ``cls_score`` and ``bbox_pred``;
+                if ``return_shared_feat`` is True, ``x_shared`` is appended
+                to the returned tuple.
+ """
+ x_shared = self._forward_shared(x)
+ out = self._forward_cls_reg(x_shared)
+
+ if return_shared_feat:
+ out += (x_shared, )
+
+ return out
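+
+
+# Hedged usage sketch (not part of upstream mmdet): the extra flag simply
+# appends the shared feature to the usual (cls_score, bbox_pred) output, e.g.
+#   cls_score, bbox_pred, x_shared = head(roi_feats, return_shared_feat=True)
+# so other SCNet components can reuse the intermediate shared feature.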
diff --git a/annotator/uniformer/mmdet/models/roi_heads/cascade_roi_head.py b/annotator/uniformer/mmdet/models/roi_heads/cascade_roi_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..45b6f36a386cd37c50cc43666fcc516f2e14d868
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/roi_heads/cascade_roi_head.py
@@ -0,0 +1,507 @@
+import torch
+import torch.nn as nn
+
+from mmdet.core import (bbox2result, bbox2roi, bbox_mapping, build_assigner,
+ build_sampler, merge_aug_bboxes, merge_aug_masks,
+ multiclass_nms)
+from ..builder import HEADS, build_head, build_roi_extractor
+from .base_roi_head import BaseRoIHead
+from .test_mixins import BBoxTestMixin, MaskTestMixin
+
+
+@HEADS.register_module()
+class CascadeRoIHead(BaseRoIHead, BBoxTestMixin, MaskTestMixin):
+ """Cascade roi head including one bbox head and one mask head.
+
+ https://arxiv.org/abs/1712.00726
+ """
+
+ def __init__(self,
+ num_stages,
+ stage_loss_weights,
+ bbox_roi_extractor=None,
+ bbox_head=None,
+ mask_roi_extractor=None,
+ mask_head=None,
+ shared_head=None,
+ train_cfg=None,
+ test_cfg=None):
+ assert bbox_roi_extractor is not None
+ assert bbox_head is not None
+ assert shared_head is None, \
+ 'Shared head is not supported in Cascade RCNN anymore'
+ self.num_stages = num_stages
+ self.stage_loss_weights = stage_loss_weights
+ super(CascadeRoIHead, self).__init__(
+ bbox_roi_extractor=bbox_roi_extractor,
+ bbox_head=bbox_head,
+ mask_roi_extractor=mask_roi_extractor,
+ mask_head=mask_head,
+ shared_head=shared_head,
+ train_cfg=train_cfg,
+ test_cfg=test_cfg)
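+
+    # Hedged config sketch (illustrative values in the spirit of the common
+    # 3-stage Cascade R-CNN setup, not copied from any file in this repo):
+    #   roi_head = dict(
+    #       type='CascadeRoIHead',
+    #       num_stages=3,
+    #       stage_loss_weights=[1, 0.5, 0.25],
+    #       bbox_roi_extractor=dict(...),  # replicated per stage if not a list
+    #       bbox_head=[dict(...), dict(...), dict(...)])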
+
+ def init_bbox_head(self, bbox_roi_extractor, bbox_head):
+ """Initialize box head and box roi extractor.
+
+ Args:
+ bbox_roi_extractor (dict): Config of box roi extractor.
+ bbox_head (dict): Config of box in box head.
+ """
+ self.bbox_roi_extractor = nn.ModuleList()
+ self.bbox_head = nn.ModuleList()
+ if not isinstance(bbox_roi_extractor, list):
+ bbox_roi_extractor = [
+ bbox_roi_extractor for _ in range(self.num_stages)
+ ]
+ if not isinstance(bbox_head, list):
+ bbox_head = [bbox_head for _ in range(self.num_stages)]
+ assert len(bbox_roi_extractor) == len(bbox_head) == self.num_stages
+ for roi_extractor, head in zip(bbox_roi_extractor, bbox_head):
+ self.bbox_roi_extractor.append(build_roi_extractor(roi_extractor))
+ self.bbox_head.append(build_head(head))
+
+ def init_mask_head(self, mask_roi_extractor, mask_head):
+ """Initialize mask head and mask roi extractor.
+
+ Args:
+ mask_roi_extractor (dict): Config of mask roi extractor.
+ mask_head (dict): Config of mask in mask head.
+ """
+ self.mask_head = nn.ModuleList()
+ if not isinstance(mask_head, list):
+ mask_head = [mask_head for _ in range(self.num_stages)]
+ assert len(mask_head) == self.num_stages
+ for head in mask_head:
+ self.mask_head.append(build_head(head))
+ if mask_roi_extractor is not None:
+ self.share_roi_extractor = False
+ self.mask_roi_extractor = nn.ModuleList()
+ if not isinstance(mask_roi_extractor, list):
+ mask_roi_extractor = [
+ mask_roi_extractor for _ in range(self.num_stages)
+ ]
+ assert len(mask_roi_extractor) == self.num_stages
+ for roi_extractor in mask_roi_extractor:
+ self.mask_roi_extractor.append(
+ build_roi_extractor(roi_extractor))
+ else:
+ self.share_roi_extractor = True
+ self.mask_roi_extractor = self.bbox_roi_extractor
+
+ def init_assigner_sampler(self):
+ """Initialize assigner and sampler for each stage."""
+ self.bbox_assigner = []
+ self.bbox_sampler = []
+ if self.train_cfg is not None:
+ for idx, rcnn_train_cfg in enumerate(self.train_cfg):
+ self.bbox_assigner.append(
+ build_assigner(rcnn_train_cfg.assigner))
+ self.current_stage = idx
+ self.bbox_sampler.append(
+ build_sampler(rcnn_train_cfg.sampler, context=self))
+
+ def init_weights(self, pretrained):
+ """Initialize the weights in head.
+
+ Args:
+ pretrained (str, optional): Path to pre-trained weights.
+ Defaults to None.
+ """
+ if self.with_shared_head:
+ self.shared_head.init_weights(pretrained=pretrained)
+ for i in range(self.num_stages):
+ if self.with_bbox:
+ self.bbox_roi_extractor[i].init_weights()
+ self.bbox_head[i].init_weights()
+ if self.with_mask:
+ if not self.share_roi_extractor:
+ self.mask_roi_extractor[i].init_weights()
+ self.mask_head[i].init_weights()
+
+ def forward_dummy(self, x, proposals):
+ """Dummy forward function."""
+ # bbox head
+ outs = ()
+ rois = bbox2roi([proposals])
+ if self.with_bbox:
+ for i in range(self.num_stages):
+ bbox_results = self._bbox_forward(i, x, rois)
+ outs = outs + (bbox_results['cls_score'],
+ bbox_results['bbox_pred'])
+ # mask heads
+ if self.with_mask:
+ mask_rois = rois[:100]
+ for i in range(self.num_stages):
+ mask_results = self._mask_forward(i, x, mask_rois)
+ outs = outs + (mask_results['mask_pred'], )
+ return outs
+
+ def _bbox_forward(self, stage, x, rois):
+ """Box head forward function used in both training and testing."""
+ bbox_roi_extractor = self.bbox_roi_extractor[stage]
+ bbox_head = self.bbox_head[stage]
+ bbox_feats = bbox_roi_extractor(x[:bbox_roi_extractor.num_inputs],
+ rois)
+ # do not support caffe_c4 model anymore
+ cls_score, bbox_pred = bbox_head(bbox_feats)
+
+ bbox_results = dict(
+ cls_score=cls_score, bbox_pred=bbox_pred, bbox_feats=bbox_feats)
+ return bbox_results
+
+ def _bbox_forward_train(self, stage, x, sampling_results, gt_bboxes,
+ gt_labels, rcnn_train_cfg):
+ """Run forward function and calculate loss for box head in training."""
+ rois = bbox2roi([res.bboxes for res in sampling_results])
+ bbox_results = self._bbox_forward(stage, x, rois)
+ bbox_targets = self.bbox_head[stage].get_targets(
+ sampling_results, gt_bboxes, gt_labels, rcnn_train_cfg)
+ loss_bbox = self.bbox_head[stage].loss(bbox_results['cls_score'],
+ bbox_results['bbox_pred'], rois,
+ *bbox_targets)
+
+ bbox_results.update(
+ loss_bbox=loss_bbox, rois=rois, bbox_targets=bbox_targets)
+ return bbox_results
+
+ def _mask_forward(self, stage, x, rois):
+ """Mask head forward function used in both training and testing."""
+ mask_roi_extractor = self.mask_roi_extractor[stage]
+ mask_head = self.mask_head[stage]
+ mask_feats = mask_roi_extractor(x[:mask_roi_extractor.num_inputs],
+ rois)
+ # do not support caffe_c4 model anymore
+ mask_pred = mask_head(mask_feats)
+
+ mask_results = dict(mask_pred=mask_pred)
+ return mask_results
+
+ def _mask_forward_train(self,
+ stage,
+ x,
+ sampling_results,
+ gt_masks,
+ rcnn_train_cfg,
+ bbox_feats=None):
+ """Run forward function and calculate loss for mask head in
+ training."""
+ pos_rois = bbox2roi([res.pos_bboxes for res in sampling_results])
+ mask_results = self._mask_forward(stage, x, pos_rois)
+
+ mask_targets = self.mask_head[stage].get_targets(
+ sampling_results, gt_masks, rcnn_train_cfg)
+ pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results])
+ loss_mask = self.mask_head[stage].loss(mask_results['mask_pred'],
+ mask_targets, pos_labels)
+
+ mask_results.update(loss_mask=loss_mask)
+ return mask_results
+
+ def forward_train(self,
+ x,
+ img_metas,
+ proposal_list,
+ gt_bboxes,
+ gt_labels,
+ gt_bboxes_ignore=None,
+ gt_masks=None):
+ """
+ Args:
+ x (list[Tensor]): list of multi-level img features.
+ img_metas (list[dict]): list of image info dict where each dict
+ has: 'img_shape', 'scale_factor', 'flip', and may also contain
+ 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
+ For details on the values of these keys see
+ `mmdet/datasets/pipelines/formatting.py:Collect`.
+ proposals (list[Tensors]): list of region proposals.
+ gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
+ shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
+ gt_labels (list[Tensor]): class indices corresponding to each box
+ gt_bboxes_ignore (None | list[Tensor]): specify which bounding
+ boxes can be ignored when computing the loss.
+ gt_masks (None | Tensor) : true segmentation masks for each box
+ used if the architecture supports a segmentation task.
+
+ Returns:
+ dict[str, Tensor]: a dictionary of loss components
+ """
+ losses = dict()
+ for i in range(self.num_stages):
+ self.current_stage = i
+ rcnn_train_cfg = self.train_cfg[i]
+ lw = self.stage_loss_weights[i]
+
+ # assign gts and sample proposals
+ sampling_results = []
+ if self.with_bbox or self.with_mask:
+ bbox_assigner = self.bbox_assigner[i]
+ bbox_sampler = self.bbox_sampler[i]
+ num_imgs = len(img_metas)
+ if gt_bboxes_ignore is None:
+ gt_bboxes_ignore = [None for _ in range(num_imgs)]
+
+ for j in range(num_imgs):
+ assign_result = bbox_assigner.assign(
+ proposal_list[j], gt_bboxes[j], gt_bboxes_ignore[j],
+ gt_labels[j])
+ sampling_result = bbox_sampler.sample(
+ assign_result,
+ proposal_list[j],
+ gt_bboxes[j],
+ gt_labels[j],
+ feats=[lvl_feat[j][None] for lvl_feat in x])
+ sampling_results.append(sampling_result)
+
+ # bbox head forward and loss
+ bbox_results = self._bbox_forward_train(i, x, sampling_results,
+ gt_bboxes, gt_labels,
+ rcnn_train_cfg)
+
+ for name, value in bbox_results['loss_bbox'].items():
+ losses[f's{i}.{name}'] = (
+ value * lw if 'loss' in name else value)
+
+ # mask head forward and loss
+ if self.with_mask:
+ mask_results = self._mask_forward_train(
+ i, x, sampling_results, gt_masks, rcnn_train_cfg,
+ bbox_results['bbox_feats'])
+ for name, value in mask_results['loss_mask'].items():
+ losses[f's{i}.{name}'] = (
+ value * lw if 'loss' in name else value)
+
+ # refine bboxes
+ if i < self.num_stages - 1:
+ pos_is_gts = [res.pos_is_gt for res in sampling_results]
+ # bbox_targets is a tuple
+ roi_labels = bbox_results['bbox_targets'][0]
+ with torch.no_grad():
+ roi_labels = torch.where(
+ roi_labels == self.bbox_head[i].num_classes,
+ bbox_results['cls_score'][:, :-1].argmax(1),
+ roi_labels)
+ proposal_list = self.bbox_head[i].refine_bboxes(
+ bbox_results['rois'], roi_labels,
+ bbox_results['bbox_pred'], pos_is_gts, img_metas)
+
+ return losses
+
+ def simple_test(self, x, proposal_list, img_metas, rescale=False):
+ """Test without augmentation."""
+ assert self.with_bbox, 'Bbox head must be implemented.'
+ num_imgs = len(proposal_list)
+ img_shapes = tuple(meta['img_shape'] for meta in img_metas)
+ ori_shapes = tuple(meta['ori_shape'] for meta in img_metas)
+ scale_factors = tuple(meta['scale_factor'] for meta in img_metas)
+
+ # "ms" in variable names means multi-stage
+ ms_bbox_result = {}
+ ms_segm_result = {}
+ ms_scores = []
+ rcnn_test_cfg = self.test_cfg
+
+ rois = bbox2roi(proposal_list)
+ for i in range(self.num_stages):
+ bbox_results = self._bbox_forward(i, x, rois)
+
+ # split batch bbox prediction back to each image
+ cls_score = bbox_results['cls_score']
+ bbox_pred = bbox_results['bbox_pred']
+ num_proposals_per_img = tuple(
+ len(proposals) for proposals in proposal_list)
+ rois = rois.split(num_proposals_per_img, 0)
+ cls_score = cls_score.split(num_proposals_per_img, 0)
+ if isinstance(bbox_pred, torch.Tensor):
+ bbox_pred = bbox_pred.split(num_proposals_per_img, 0)
+ else:
+ bbox_pred = self.bbox_head[i].bbox_pred_split(
+ bbox_pred, num_proposals_per_img)
+ ms_scores.append(cls_score)
+
+ if i < self.num_stages - 1:
+ bbox_label = [s[:, :-1].argmax(dim=1) for s in cls_score]
+ rois = torch.cat([
+ self.bbox_head[i].regress_by_class(rois[j], bbox_label[j],
+ bbox_pred[j],
+ img_metas[j])
+ for j in range(num_imgs)
+ ])
+
+ # average scores of each image by stages
+ cls_score = [
+ sum([score[i] for score in ms_scores]) / float(len(ms_scores))
+ for i in range(num_imgs)
+ ]
+
+ # apply bbox post-processing to each image individually
+ det_bboxes = []
+ det_labels = []
+ for i in range(num_imgs):
+ det_bbox, det_label = self.bbox_head[-1].get_bboxes(
+ rois[i],
+ cls_score[i],
+ bbox_pred[i],
+ img_shapes[i],
+ scale_factors[i],
+ rescale=rescale,
+ cfg=rcnn_test_cfg)
+ det_bboxes.append(det_bbox)
+ det_labels.append(det_label)
+
+ if torch.onnx.is_in_onnx_export():
+ return det_bboxes, det_labels
+ bbox_results = [
+ bbox2result(det_bboxes[i], det_labels[i],
+ self.bbox_head[-1].num_classes)
+ for i in range(num_imgs)
+ ]
+ ms_bbox_result['ensemble'] = bbox_results
+
+ if self.with_mask:
+ if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes):
+ mask_classes = self.mask_head[-1].num_classes
+ segm_results = [[[] for _ in range(mask_classes)]
+ for _ in range(num_imgs)]
+ else:
+ if rescale and not isinstance(scale_factors[0], float):
+ scale_factors = [
+ torch.from_numpy(scale_factor).to(det_bboxes[0].device)
+ for scale_factor in scale_factors
+ ]
+ _bboxes = [
+ det_bboxes[i][:, :4] *
+ scale_factors[i] if rescale else det_bboxes[i][:, :4]
+ for i in range(len(det_bboxes))
+ ]
+ mask_rois = bbox2roi(_bboxes)
+ num_mask_rois_per_img = tuple(
+ _bbox.size(0) for _bbox in _bboxes)
+ aug_masks = []
+ for i in range(self.num_stages):
+ mask_results = self._mask_forward(i, x, mask_rois)
+ mask_pred = mask_results['mask_pred']
+ # split batch mask prediction back to each image
+ mask_pred = mask_pred.split(num_mask_rois_per_img, 0)
+ aug_masks.append(
+ [m.sigmoid().cpu().numpy() for m in mask_pred])
+
+ # apply mask post-processing to each image individually
+ segm_results = []
+ for i in range(num_imgs):
+ if det_bboxes[i].shape[0] == 0:
+ segm_results.append(
+ [[]
+ for _ in range(self.mask_head[-1].num_classes)])
+ else:
+ aug_mask = [mask[i] for mask in aug_masks]
+ merged_masks = merge_aug_masks(
+ aug_mask, [[img_metas[i]]] * self.num_stages,
+ rcnn_test_cfg)
+ segm_result = self.mask_head[-1].get_seg_masks(
+ merged_masks, _bboxes[i], det_labels[i],
+ rcnn_test_cfg, ori_shapes[i], scale_factors[i],
+ rescale)
+ segm_results.append(segm_result)
+ ms_segm_result['ensemble'] = segm_results
+
+ if self.with_mask:
+ results = list(
+ zip(ms_bbox_result['ensemble'], ms_segm_result['ensemble']))
+ else:
+ results = ms_bbox_result['ensemble']
+
+ return results
+
+ def aug_test(self, features, proposal_list, img_metas, rescale=False):
+ """Test with augmentations.
+
+ If rescale is False, then returned bboxes and masks will fit the scale
+ of imgs[0].
+ """
+ rcnn_test_cfg = self.test_cfg
+ aug_bboxes = []
+ aug_scores = []
+ for x, img_meta in zip(features, img_metas):
+ # only one image in the batch
+ img_shape = img_meta[0]['img_shape']
+ scale_factor = img_meta[0]['scale_factor']
+ flip = img_meta[0]['flip']
+ flip_direction = img_meta[0]['flip_direction']
+
+ proposals = bbox_mapping(proposal_list[0][:, :4], img_shape,
+ scale_factor, flip, flip_direction)
+ # "ms" in variable names means multi-stage
+ ms_scores = []
+
+ rois = bbox2roi([proposals])
+ for i in range(self.num_stages):
+ bbox_results = self._bbox_forward(i, x, rois)
+ ms_scores.append(bbox_results['cls_score'])
+
+ if i < self.num_stages - 1:
+ bbox_label = bbox_results['cls_score'][:, :-1].argmax(
+ dim=1)
+ rois = self.bbox_head[i].regress_by_class(
+ rois, bbox_label, bbox_results['bbox_pred'],
+ img_meta[0])
+
+ cls_score = sum(ms_scores) / float(len(ms_scores))
+ bboxes, scores = self.bbox_head[-1].get_bboxes(
+ rois,
+ cls_score,
+ bbox_results['bbox_pred'],
+ img_shape,
+ scale_factor,
+ rescale=False,
+ cfg=None)
+ aug_bboxes.append(bboxes)
+ aug_scores.append(scores)
+
+ # after merging, bboxes will be rescaled to the original image size
+ merged_bboxes, merged_scores = merge_aug_bboxes(
+ aug_bboxes, aug_scores, img_metas, rcnn_test_cfg)
+ det_bboxes, det_labels = multiclass_nms(merged_bboxes, merged_scores,
+ rcnn_test_cfg.score_thr,
+ rcnn_test_cfg.nms,
+ rcnn_test_cfg.max_per_img)
+
+ bbox_result = bbox2result(det_bboxes, det_labels,
+ self.bbox_head[-1].num_classes)
+
+ if self.with_mask:
+ if det_bboxes.shape[0] == 0:
+ segm_result = [[[]
+ for _ in range(self.mask_head[-1].num_classes)]
+ ]
+ else:
+ aug_masks = []
+ aug_img_metas = []
+ for x, img_meta in zip(features, img_metas):
+ img_shape = img_meta[0]['img_shape']
+ scale_factor = img_meta[0]['scale_factor']
+ flip = img_meta[0]['flip']
+ flip_direction = img_meta[0]['flip_direction']
+ _bboxes = bbox_mapping(det_bboxes[:, :4], img_shape,
+ scale_factor, flip, flip_direction)
+ mask_rois = bbox2roi([_bboxes])
+ for i in range(self.num_stages):
+ mask_results = self._mask_forward(i, x, mask_rois)
+ aug_masks.append(
+ mask_results['mask_pred'].sigmoid().cpu().numpy())
+ aug_img_metas.append(img_meta)
+ merged_masks = merge_aug_masks(aug_masks, aug_img_metas,
+ self.test_cfg)
+
+ ori_shape = img_metas[0][0]['ori_shape']
+ segm_result = self.mask_head[-1].get_seg_masks(
+ merged_masks,
+ det_bboxes,
+ det_labels,
+ rcnn_test_cfg,
+ ori_shape,
+ scale_factor=1.0,
+ rescale=False)
+ return [(bbox_result, segm_result)]
+ else:
+ return [bbox_result]
diff --git a/annotator/uniformer/mmdet/models/roi_heads/double_roi_head.py b/annotator/uniformer/mmdet/models/roi_heads/double_roi_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..a1aa6c8244a889fbbed312a89574c3e11be294f0
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/roi_heads/double_roi_head.py
@@ -0,0 +1,33 @@
+from ..builder import HEADS
+from .standard_roi_head import StandardRoIHead
+
+
+@HEADS.register_module()
+class DoubleHeadRoIHead(StandardRoIHead):
+ """RoI head for Double Head RCNN.
+
+ https://arxiv.org/abs/1904.06493
+ """
+
+ def __init__(self, reg_roi_scale_factor, **kwargs):
+ super(DoubleHeadRoIHead, self).__init__(**kwargs)
+ self.reg_roi_scale_factor = reg_roi_scale_factor
+
+ def _bbox_forward(self, x, rois):
+ """Box head forward function used in both training and testing time."""
+ bbox_cls_feats = self.bbox_roi_extractor(
+ x[:self.bbox_roi_extractor.num_inputs], rois)
+ bbox_reg_feats = self.bbox_roi_extractor(
+ x[:self.bbox_roi_extractor.num_inputs],
+ rois,
+ roi_scale_factor=self.reg_roi_scale_factor)
+ if self.with_shared_head:
+ bbox_cls_feats = self.shared_head(bbox_cls_feats)
+ bbox_reg_feats = self.shared_head(bbox_reg_feats)
+ cls_score, bbox_pred = self.bbox_head(bbox_cls_feats, bbox_reg_feats)
+
+ bbox_results = dict(
+ cls_score=cls_score,
+ bbox_pred=bbox_pred,
+ bbox_feats=bbox_cls_feats)
+ return bbox_results
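+
+
+# Hedged note (not part of upstream mmdet): reg_roi_scale_factor rescales the
+# RoIs used by the regression branch only (a value above 1 pools features from
+# a larger context around each box), e.g.
+#   roi_head = dict(type='DoubleHeadRoIHead', reg_roi_scale_factor=1.3, ...)
+# so classification and regression pool from differently sized regions, which
+# is the main behavioural difference from StandardRoIHead here.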
diff --git a/annotator/uniformer/mmdet/models/roi_heads/dynamic_roi_head.py b/annotator/uniformer/mmdet/models/roi_heads/dynamic_roi_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..89427a931f45f5a920c0e66fd88058bf9fa05f5c
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/roi_heads/dynamic_roi_head.py
@@ -0,0 +1,154 @@
+import numpy as np
+import torch
+
+from mmdet.core import bbox2roi
+from mmdet.models.losses import SmoothL1Loss
+from ..builder import HEADS
+from .standard_roi_head import StandardRoIHead
+
+EPS = 1e-15
+
+
+@HEADS.register_module()
+class DynamicRoIHead(StandardRoIHead):
+ """RoI head for `Dynamic R-CNN `_."""
+
+ def __init__(self, **kwargs):
+ super(DynamicRoIHead, self).__init__(**kwargs)
+ assert isinstance(self.bbox_head.loss_bbox, SmoothL1Loss)
+ # the IoU history of the past `update_iter_interval` iterations
+ self.iou_history = []
+ # the beta history of the past `update_iter_interval` iterations
+ self.beta_history = []
+
+ def forward_train(self,
+ x,
+ img_metas,
+ proposal_list,
+ gt_bboxes,
+ gt_labels,
+ gt_bboxes_ignore=None,
+ gt_masks=None):
+ """Forward function for training.
+
+ Args:
+ x (list[Tensor]): list of multi-level img features.
+
+ img_metas (list[dict]): list of image info dict where each dict
+ has: 'img_shape', 'scale_factor', 'flip', and may also contain
+ 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
+ For details on the values of these keys see
+ `mmdet/datasets/pipelines/formatting.py:Collect`.
+
+ proposals (list[Tensors]): list of region proposals.
+
+ gt_bboxes (list[Tensor]): each item are the truth boxes for each
+ image in [tl_x, tl_y, br_x, br_y] format.
+
+ gt_labels (list[Tensor]): class indices corresponding to each box
+
+ gt_bboxes_ignore (None | list[Tensor]): specify which bounding
+ boxes can be ignored when computing the loss.
+
+ gt_masks (None | Tensor) : true segmentation masks for each box
+ used if the architecture supports a segmentation task.
+
+ Returns:
+ dict[str, Tensor]: a dictionary of loss components
+ """
+ # assign gts and sample proposals
+ if self.with_bbox or self.with_mask:
+ num_imgs = len(img_metas)
+ if gt_bboxes_ignore is None:
+ gt_bboxes_ignore = [None for _ in range(num_imgs)]
+ sampling_results = []
+ cur_iou = []
+ for i in range(num_imgs):
+ assign_result = self.bbox_assigner.assign(
+ proposal_list[i], gt_bboxes[i], gt_bboxes_ignore[i],
+ gt_labels[i])
+ sampling_result = self.bbox_sampler.sample(
+ assign_result,
+ proposal_list[i],
+ gt_bboxes[i],
+ gt_labels[i],
+ feats=[lvl_feat[i][None] for lvl_feat in x])
+ # record the `iou_topk`-th largest IoU in an image
+ iou_topk = min(self.train_cfg.dynamic_rcnn.iou_topk,
+ len(assign_result.max_overlaps))
+ ious, _ = torch.topk(assign_result.max_overlaps, iou_topk)
+ cur_iou.append(ious[-1].item())
+ sampling_results.append(sampling_result)
+ # average the current IoUs over images
+ cur_iou = np.mean(cur_iou)
+ self.iou_history.append(cur_iou)
+
+ losses = dict()
+ # bbox head forward and loss
+ if self.with_bbox:
+ bbox_results = self._bbox_forward_train(x, sampling_results,
+ gt_bboxes, gt_labels,
+ img_metas)
+ losses.update(bbox_results['loss_bbox'])
+
+ # mask head forward and loss
+ if self.with_mask:
+ mask_results = self._mask_forward_train(x, sampling_results,
+ bbox_results['bbox_feats'],
+ gt_masks, img_metas)
+ losses.update(mask_results['loss_mask'])
+
+ # update IoU threshold and SmoothL1 beta
+ update_iter_interval = self.train_cfg.dynamic_rcnn.update_iter_interval
+ if len(self.iou_history) % update_iter_interval == 0:
+ new_iou_thr, new_beta = self.update_hyperparameters()
+
+ return losses
+
+ def _bbox_forward_train(self, x, sampling_results, gt_bboxes, gt_labels,
+ img_metas):
+ num_imgs = len(img_metas)
+ rois = bbox2roi([res.bboxes for res in sampling_results])
+ bbox_results = self._bbox_forward(x, rois)
+
+ bbox_targets = self.bbox_head.get_targets(sampling_results, gt_bboxes,
+ gt_labels, self.train_cfg)
+ # record the `beta_topk`-th smallest target
+ # `bbox_targets[2]` and `bbox_targets[3]` stand for bbox_targets
+ # and bbox_weights, respectively
+ pos_inds = bbox_targets[3][:, 0].nonzero().squeeze(1)
+ num_pos = len(pos_inds)
+ cur_target = bbox_targets[2][pos_inds, :2].abs().mean(dim=1)
+ beta_topk = min(self.train_cfg.dynamic_rcnn.beta_topk * num_imgs,
+ num_pos)
+ cur_target = torch.kthvalue(cur_target, beta_topk)[0].item()
+ self.beta_history.append(cur_target)
+ loss_bbox = self.bbox_head.loss(bbox_results['cls_score'],
+ bbox_results['bbox_pred'], rois,
+ *bbox_targets)
+
+ bbox_results.update(loss_bbox=loss_bbox)
+ return bbox_results
+
+ def update_hyperparameters(self):
+ """Update hyperparameters like IoU thresholds for assigner and beta for
+ SmoothL1 loss based on the training statistics.
+
+ Returns:
+ tuple[float]: the updated ``iou_thr`` and ``beta``.
+ """
+ new_iou_thr = max(self.train_cfg.dynamic_rcnn.initial_iou,
+ np.mean(self.iou_history))
+ self.iou_history = []
+ self.bbox_assigner.pos_iou_thr = new_iou_thr
+ self.bbox_assigner.neg_iou_thr = new_iou_thr
+ self.bbox_assigner.min_pos_iou = new_iou_thr
+ if (np.median(self.beta_history) < EPS):
+ # avoid 0 or too small value for new_beta
+ new_beta = self.bbox_head.loss_bbox.beta
+ else:
+ new_beta = min(self.train_cfg.dynamic_rcnn.initial_beta,
+ np.median(self.beta_history))
+ self.beta_history = []
+ self.bbox_head.loss_bbox.beta = new_beta
+ return new_iou_thr, new_beta
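+
+
+# Hedged config sketch (illustrative values taken from typical Dynamic R-CNN
+# configs, not from this repo): the head reads a `dynamic_rcnn` entry from the
+# rcnn train_cfg, e.g.
+#   dynamic_rcnn=dict(
+#       iou_topk=75,               # k-th largest IoU recorded per image
+#       beta_topk=10,              # k-th smallest regression target per image
+#       update_iter_interval=100,  # how often thresholds / beta are updated
+#       initial_iou=0.4,
+#       initial_beta=1.0)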
diff --git a/annotator/uniformer/mmdet/models/roi_heads/grid_roi_head.py b/annotator/uniformer/mmdet/models/roi_heads/grid_roi_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..4c52c79863ebaf17bd023382c7e5d4c237b4da77
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/roi_heads/grid_roi_head.py
@@ -0,0 +1,176 @@
+import torch
+
+from mmdet.core import bbox2result, bbox2roi
+from ..builder import HEADS, build_head, build_roi_extractor
+from .standard_roi_head import StandardRoIHead
+
+
+@HEADS.register_module()
+class GridRoIHead(StandardRoIHead):
+ """Grid roi head for Grid R-CNN.
+
+ https://arxiv.org/abs/1811.12030
+ """
+
+ def __init__(self, grid_roi_extractor, grid_head, **kwargs):
+ assert grid_head is not None
+ super(GridRoIHead, self).__init__(**kwargs)
+ if grid_roi_extractor is not None:
+ self.grid_roi_extractor = build_roi_extractor(grid_roi_extractor)
+ self.share_roi_extractor = False
+ else:
+ self.share_roi_extractor = True
+ self.grid_roi_extractor = self.bbox_roi_extractor
+ self.grid_head = build_head(grid_head)
+
+ def init_weights(self, pretrained):
+ """Initialize the weights in head.
+
+ Args:
+ pretrained (str, optional): Path to pre-trained weights.
+ Defaults to None.
+ """
+ super(GridRoIHead, self).init_weights(pretrained)
+ self.grid_head.init_weights()
+ if not self.share_roi_extractor:
+ self.grid_roi_extractor.init_weights()
+
+ def _random_jitter(self, sampling_results, img_metas, amplitude=0.15):
+ """Ramdom jitter positive proposals for training."""
+ for sampling_result, img_meta in zip(sampling_results, img_metas):
+ bboxes = sampling_result.pos_bboxes
+ random_offsets = bboxes.new_empty(bboxes.shape[0], 4).uniform_(
+ -amplitude, amplitude)
+ # before jittering
+ cxcy = (bboxes[:, 2:4] + bboxes[:, :2]) / 2
+ wh = (bboxes[:, 2:4] - bboxes[:, :2]).abs()
+ # after jittering
+ new_cxcy = cxcy + wh * random_offsets[:, :2]
+ new_wh = wh * (1 + random_offsets[:, 2:])
+ # xywh to xyxy
+ new_x1y1 = (new_cxcy - new_wh / 2)
+ new_x2y2 = (new_cxcy + new_wh / 2)
+ new_bboxes = torch.cat([new_x1y1, new_x2y2], dim=1)
+ # clip bboxes
+ max_shape = img_meta['img_shape']
+ if max_shape is not None:
+ new_bboxes[:, 0::2].clamp_(min=0, max=max_shape[1] - 1)
+ new_bboxes[:, 1::2].clamp_(min=0, max=max_shape[0] - 1)
+
+ sampling_result.pos_bboxes = new_bboxes
+ return sampling_results
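+
+    # Hedged numeric example (not from this repo): with amplitude=0.15, a
+    # positive box of size (w, h) = (100, 40) can have its centre shifted by
+    # up to (+/-15, +/-6) pixels and its width/height rescaled by a factor in
+    # [0.85, 1.15], after which the jittered box is clipped to the image.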
+
+ def forward_dummy(self, x, proposals):
+ """Dummy forward function."""
+ # bbox head
+ outs = ()
+ rois = bbox2roi([proposals])
+ if self.with_bbox:
+ bbox_results = self._bbox_forward(x, rois)
+ outs = outs + (bbox_results['cls_score'],
+ bbox_results['bbox_pred'])
+
+ # grid head
+ grid_rois = rois[:100]
+ grid_feats = self.grid_roi_extractor(
+ x[:self.grid_roi_extractor.num_inputs], grid_rois)
+ if self.with_shared_head:
+ grid_feats = self.shared_head(grid_feats)
+ grid_pred = self.grid_head(grid_feats)
+ outs = outs + (grid_pred, )
+
+ # mask head
+ if self.with_mask:
+ mask_rois = rois[:100]
+ mask_results = self._mask_forward(x, mask_rois)
+ outs = outs + (mask_results['mask_pred'], )
+ return outs
+
+ def _bbox_forward_train(self, x, sampling_results, gt_bboxes, gt_labels,
+ img_metas):
+ """Run forward function and calculate loss for box head in training."""
+ bbox_results = super(GridRoIHead,
+ self)._bbox_forward_train(x, sampling_results,
+ gt_bboxes, gt_labels,
+ img_metas)
+
+ # Grid head forward and loss
+ sampling_results = self._random_jitter(sampling_results, img_metas)
+ pos_rois = bbox2roi([res.pos_bboxes for res in sampling_results])
+
+ # GN in head does not support zero shape input
+ if pos_rois.shape[0] == 0:
+ return bbox_results
+
+ grid_feats = self.grid_roi_extractor(
+ x[:self.grid_roi_extractor.num_inputs], pos_rois)
+ if self.with_shared_head:
+ grid_feats = self.shared_head(grid_feats)
+ # Accelerate training
+ max_sample_num_grid = self.train_cfg.get('max_num_grid', 192)
+ sample_idx = torch.randperm(
+ grid_feats.shape[0])[:min(grid_feats.shape[0], max_sample_num_grid
+ )]
+ grid_feats = grid_feats[sample_idx]
+
+ grid_pred = self.grid_head(grid_feats)
+
+ grid_targets = self.grid_head.get_targets(sampling_results,
+ self.train_cfg)
+ grid_targets = grid_targets[sample_idx]
+
+ loss_grid = self.grid_head.loss(grid_pred, grid_targets)
+
+ bbox_results['loss_bbox'].update(loss_grid)
+ return bbox_results
+
+ def simple_test(self,
+ x,
+ proposal_list,
+ img_metas,
+ proposals=None,
+ rescale=False):
+ """Test without augmentation."""
+ assert self.with_bbox, 'Bbox head must be implemented.'
+
+ det_bboxes, det_labels = self.simple_test_bboxes(
+ x, img_metas, proposal_list, self.test_cfg, rescale=False)
+ # pack rois into bboxes
+ grid_rois = bbox2roi([det_bbox[:, :4] for det_bbox in det_bboxes])
+ if grid_rois.shape[0] != 0:
+ grid_feats = self.grid_roi_extractor(
+ x[:len(self.grid_roi_extractor.featmap_strides)], grid_rois)
+ self.grid_head.test_mode = True
+ grid_pred = self.grid_head(grid_feats)
+ # split batch grid head prediction back to each image
+ num_roi_per_img = tuple(len(det_bbox) for det_bbox in det_bboxes)
+ grid_pred = {
+ k: v.split(num_roi_per_img, 0)
+ for k, v in grid_pred.items()
+ }
+
+ # apply bbox post-processing to each image individually
+ bbox_results = []
+ num_imgs = len(det_bboxes)
+ for i in range(num_imgs):
+ if det_bboxes[i].shape[0] == 0:
+ bbox_results.append(grid_rois.new_tensor([]))
+ else:
+ det_bbox = self.grid_head.get_bboxes(
+ det_bboxes[i], grid_pred['fused'][i], [img_metas[i]])
+ if rescale:
+ det_bbox[:, :4] /= img_metas[i]['scale_factor']
+ bbox_results.append(
+ bbox2result(det_bbox, det_labels[i],
+ self.bbox_head.num_classes))
+ else:
+ bbox_results = [
+ grid_rois.new_tensor([]) for _ in range(len(det_bboxes))
+ ]
+
+ if not self.with_mask:
+ return bbox_results
+ else:
+ segm_results = self.simple_test_mask(
+ x, img_metas, det_bboxes, det_labels, rescale=rescale)
+ return list(zip(bbox_results, segm_results))
diff --git a/annotator/uniformer/mmdet/models/roi_heads/htc_roi_head.py b/annotator/uniformer/mmdet/models/roi_heads/htc_roi_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..5b5c2ec3bc9d579061fbd89f8b320e6e59909143
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/roi_heads/htc_roi_head.py
@@ -0,0 +1,589 @@
+import torch
+import torch.nn.functional as F
+
+from mmdet.core import (bbox2result, bbox2roi, bbox_mapping, merge_aug_bboxes,
+ merge_aug_masks, multiclass_nms)
+from ..builder import HEADS, build_head, build_roi_extractor
+from .cascade_roi_head import CascadeRoIHead
+
+
+@HEADS.register_module()
+class HybridTaskCascadeRoIHead(CascadeRoIHead):
+ """Hybrid task cascade roi head including one bbox head and one mask head.
+
+ https://arxiv.org/abs/1901.07518
+ """
+
+ def __init__(self,
+ num_stages,
+ stage_loss_weights,
+ semantic_roi_extractor=None,
+ semantic_head=None,
+ semantic_fusion=('bbox', 'mask'),
+ interleaved=True,
+ mask_info_flow=True,
+ **kwargs):
+ super(HybridTaskCascadeRoIHead,
+ self).__init__(num_stages, stage_loss_weights, **kwargs)
+ assert self.with_bbox and self.with_mask
+ assert not self.with_shared_head # shared head is not supported
+
+ if semantic_head is not None:
+ self.semantic_roi_extractor = build_roi_extractor(
+ semantic_roi_extractor)
+ self.semantic_head = build_head(semantic_head)
+
+ self.semantic_fusion = semantic_fusion
+ self.interleaved = interleaved
+ self.mask_info_flow = mask_info_flow
+
+ def init_weights(self, pretrained):
+ """Initialize the weights in head.
+
+ Args:
+ pretrained (str, optional): Path to pre-trained weights.
+ Defaults to None.
+ """
+ super(HybridTaskCascadeRoIHead, self).init_weights(pretrained)
+ if self.with_semantic:
+ self.semantic_head.init_weights()
+
+ @property
+ def with_semantic(self):
+ """bool: whether the head has semantic head"""
+ if hasattr(self, 'semantic_head') and self.semantic_head is not None:
+ return True
+ else:
+ return False
+
+ def forward_dummy(self, x, proposals):
+ """Dummy forward function."""
+ outs = ()
+ # semantic head
+ if self.with_semantic:
+ _, semantic_feat = self.semantic_head(x)
+ else:
+ semantic_feat = None
+ # bbox heads
+ rois = bbox2roi([proposals])
+ for i in range(self.num_stages):
+ bbox_results = self._bbox_forward(
+ i, x, rois, semantic_feat=semantic_feat)
+ outs = outs + (bbox_results['cls_score'],
+ bbox_results['bbox_pred'])
+ # mask heads
+ if self.with_mask:
+ mask_rois = rois[:100]
+ mask_roi_extractor = self.mask_roi_extractor[-1]
+ mask_feats = mask_roi_extractor(
+ x[:len(mask_roi_extractor.featmap_strides)], mask_rois)
+ if self.with_semantic and 'mask' in self.semantic_fusion:
+ mask_semantic_feat = self.semantic_roi_extractor(
+ [semantic_feat], mask_rois)
+ mask_feats += mask_semantic_feat
+ last_feat = None
+ for i in range(self.num_stages):
+ mask_head = self.mask_head[i]
+ if self.mask_info_flow:
+ mask_pred, last_feat = mask_head(mask_feats, last_feat)
+ else:
+ mask_pred = mask_head(mask_feats)
+ outs = outs + (mask_pred, )
+ return outs
+
+ def _bbox_forward_train(self,
+ stage,
+ x,
+ sampling_results,
+ gt_bboxes,
+ gt_labels,
+ rcnn_train_cfg,
+ semantic_feat=None):
+ """Run forward function and calculate loss for box head in training."""
+ bbox_head = self.bbox_head[stage]
+ rois = bbox2roi([res.bboxes for res in sampling_results])
+ bbox_results = self._bbox_forward(
+ stage, x, rois, semantic_feat=semantic_feat)
+
+ bbox_targets = bbox_head.get_targets(sampling_results, gt_bboxes,
+ gt_labels, rcnn_train_cfg)
+ loss_bbox = bbox_head.loss(bbox_results['cls_score'],
+ bbox_results['bbox_pred'], rois,
+ *bbox_targets)
+
+ bbox_results.update(
+ loss_bbox=loss_bbox,
+ rois=rois,
+ bbox_targets=bbox_targets,
+ )
+ return bbox_results
+
+ def _mask_forward_train(self,
+ stage,
+ x,
+ sampling_results,
+ gt_masks,
+ rcnn_train_cfg,
+ semantic_feat=None):
+ """Run forward function and calculate loss for mask head in
+ training."""
+ mask_roi_extractor = self.mask_roi_extractor[stage]
+ mask_head = self.mask_head[stage]
+ pos_rois = bbox2roi([res.pos_bboxes for res in sampling_results])
+ mask_feats = mask_roi_extractor(x[:mask_roi_extractor.num_inputs],
+ pos_rois)
+
+ # semantic feature fusion
+ # element-wise sum for original features and pooled semantic features
+ if self.with_semantic and 'mask' in self.semantic_fusion:
+ mask_semantic_feat = self.semantic_roi_extractor([semantic_feat],
+ pos_rois)
+ if mask_semantic_feat.shape[-2:] != mask_feats.shape[-2:]:
+ mask_semantic_feat = F.adaptive_avg_pool2d(
+ mask_semantic_feat, mask_feats.shape[-2:])
+ mask_feats += mask_semantic_feat
+
+ # mask information flow
+ # forward all previous mask heads to obtain last_feat, and fuse it
+ # with the normal mask feature
+ if self.mask_info_flow:
+ last_feat = None
+ for i in range(stage):
+ last_feat = self.mask_head[i](
+ mask_feats, last_feat, return_logits=False)
+ mask_pred = mask_head(mask_feats, last_feat, return_feat=False)
+ else:
+ mask_pred = mask_head(mask_feats, return_feat=False)
+
+ mask_targets = mask_head.get_targets(sampling_results, gt_masks,
+ rcnn_train_cfg)
+ pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results])
+ loss_mask = mask_head.loss(mask_pred, mask_targets, pos_labels)
+
+ mask_results = dict(loss_mask=loss_mask)
+ return mask_results
+
+ def _bbox_forward(self, stage, x, rois, semantic_feat=None):
+ """Box head forward function used in both training and testing."""
+ bbox_roi_extractor = self.bbox_roi_extractor[stage]
+ bbox_head = self.bbox_head[stage]
+ bbox_feats = bbox_roi_extractor(
+ x[:len(bbox_roi_extractor.featmap_strides)], rois)
+ if self.with_semantic and 'bbox' in self.semantic_fusion:
+ bbox_semantic_feat = self.semantic_roi_extractor([semantic_feat],
+ rois)
+ if bbox_semantic_feat.shape[-2:] != bbox_feats.shape[-2:]:
+ bbox_semantic_feat = F.adaptive_avg_pool2d(
+ bbox_semantic_feat, bbox_feats.shape[-2:])
+ bbox_feats += bbox_semantic_feat
+ cls_score, bbox_pred = bbox_head(bbox_feats)
+
+ bbox_results = dict(cls_score=cls_score, bbox_pred=bbox_pred)
+ return bbox_results
+
+ def _mask_forward_test(self, stage, x, bboxes, semantic_feat=None):
+ """Mask head forward function for testing."""
+ mask_roi_extractor = self.mask_roi_extractor[stage]
+ mask_head = self.mask_head[stage]
+ mask_rois = bbox2roi([bboxes])
+ mask_feats = mask_roi_extractor(
+ x[:len(mask_roi_extractor.featmap_strides)], mask_rois)
+ if self.with_semantic and 'mask' in self.semantic_fusion:
+ mask_semantic_feat = self.semantic_roi_extractor([semantic_feat],
+ mask_rois)
+ if mask_semantic_feat.shape[-2:] != mask_feats.shape[-2:]:
+ mask_semantic_feat = F.adaptive_avg_pool2d(
+ mask_semantic_feat, mask_feats.shape[-2:])
+ mask_feats += mask_semantic_feat
+ if self.mask_info_flow:
+ last_feat = None
+ last_pred = None
+ for i in range(stage):
+ mask_pred, last_feat = self.mask_head[i](mask_feats, last_feat)
+ if last_pred is not None:
+ mask_pred = mask_pred + last_pred
+ last_pred = mask_pred
+ mask_pred = mask_head(mask_feats, last_feat, return_feat=False)
+ if last_pred is not None:
+ mask_pred = mask_pred + last_pred
+ else:
+ mask_pred = mask_head(mask_feats)
+ return mask_pred
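+
+    # Hedged note: with mask_info_flow enabled, test-time inference relays
+    # last_feat through all previous mask heads and accumulates their logits,
+    # so the prediction of a given stage is the running sum of its own logits
+    # and those of earlier stages (at training time only the feature, not the
+    # logit sum, is relayed; see _mask_forward_train above).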
+
+ def forward_train(self,
+ x,
+ img_metas,
+ proposal_list,
+ gt_bboxes,
+ gt_labels,
+ gt_bboxes_ignore=None,
+ gt_masks=None,
+ gt_semantic_seg=None):
+ """
+ Args:
+ x (list[Tensor]): list of multi-level img features.
+
+ img_metas (list[dict]): list of image info dict where each dict
+ has: 'img_shape', 'scale_factor', 'flip', and may also contain
+ 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
+ For details on the values of these keys see
+ `mmdet/datasets/pipelines/formatting.py:Collect`.
+
+ proposal_list (list[Tensors]): list of region proposals.
+
+ gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
+ shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
+
+ gt_labels (list[Tensor]): class indices corresponding to each box
+
+ gt_bboxes_ignore (None, list[Tensor]): specify which bounding
+ boxes can be ignored when computing the loss.
+
+ gt_masks (None, Tensor) : true segmentation masks for each box
+ used if the architecture supports a segmentation task.
+
+ gt_semantic_seg (None, list[Tensor]): semantic segmentation masks
+ used if the architecture supports semantic segmentation task.
+
+ Returns:
+ dict[str, Tensor]: a dictionary of loss components
+ """
+ # semantic segmentation part
+ # 2 outputs: segmentation prediction and embedded features
+ losses = dict()
+ if self.with_semantic:
+ semantic_pred, semantic_feat = self.semantic_head(x)
+ loss_seg = self.semantic_head.loss(semantic_pred, gt_semantic_seg)
+ losses['loss_semantic_seg'] = loss_seg
+ else:
+ semantic_feat = None
+
+ for i in range(self.num_stages):
+ self.current_stage = i
+ rcnn_train_cfg = self.train_cfg[i]
+ lw = self.stage_loss_weights[i]
+
+ # assign gts and sample proposals
+ sampling_results = []
+ bbox_assigner = self.bbox_assigner[i]
+ bbox_sampler = self.bbox_sampler[i]
+ num_imgs = len(img_metas)
+ if gt_bboxes_ignore is None:
+ gt_bboxes_ignore = [None for _ in range(num_imgs)]
+
+ for j in range(num_imgs):
+ assign_result = bbox_assigner.assign(proposal_list[j],
+ gt_bboxes[j],
+ gt_bboxes_ignore[j],
+ gt_labels[j])
+ sampling_result = bbox_sampler.sample(
+ assign_result,
+ proposal_list[j],
+ gt_bboxes[j],
+ gt_labels[j],
+ feats=[lvl_feat[j][None] for lvl_feat in x])
+ sampling_results.append(sampling_result)
+
+ # bbox head forward and loss
+ bbox_results = \
+ self._bbox_forward_train(
+ i, x, sampling_results, gt_bboxes, gt_labels,
+ rcnn_train_cfg, semantic_feat)
+ roi_labels = bbox_results['bbox_targets'][0]
+
+ for name, value in bbox_results['loss_bbox'].items():
+ losses[f's{i}.{name}'] = (
+ value * lw if 'loss' in name else value)
+
+ # mask head forward and loss
+ if self.with_mask:
+ # interleaved execution: use regressed bboxes by the box branch
+ # to train the mask branch
+ if self.interleaved:
+ pos_is_gts = [res.pos_is_gt for res in sampling_results]
+ with torch.no_grad():
+ proposal_list = self.bbox_head[i].refine_bboxes(
+ bbox_results['rois'], roi_labels,
+ bbox_results['bbox_pred'], pos_is_gts, img_metas)
+ # re-assign and sample 512 RoIs from 512 RoIs
+ sampling_results = []
+ for j in range(num_imgs):
+ assign_result = bbox_assigner.assign(
+ proposal_list[j], gt_bboxes[j],
+ gt_bboxes_ignore[j], gt_labels[j])
+ sampling_result = bbox_sampler.sample(
+ assign_result,
+ proposal_list[j],
+ gt_bboxes[j],
+ gt_labels[j],
+ feats=[lvl_feat[j][None] for lvl_feat in x])
+ sampling_results.append(sampling_result)
+ mask_results = self._mask_forward_train(
+ i, x, sampling_results, gt_masks, rcnn_train_cfg,
+ semantic_feat)
+ for name, value in mask_results['loss_mask'].items():
+ losses[f's{i}.{name}'] = (
+ value * lw if 'loss' in name else value)
+
+ # refine bboxes (same as Cascade R-CNN)
+ if i < self.num_stages - 1 and not self.interleaved:
+ pos_is_gts = [res.pos_is_gt for res in sampling_results]
+ with torch.no_grad():
+ proposal_list = self.bbox_head[i].refine_bboxes(
+ bbox_results['rois'], roi_labels,
+ bbox_results['bbox_pred'], pos_is_gts, img_metas)
+
+ return losses
+
+ def simple_test(self, x, proposal_list, img_metas, rescale=False):
+ """Test without augmentation."""
+ if self.with_semantic:
+ _, semantic_feat = self.semantic_head(x)
+ else:
+ semantic_feat = None
+
+ num_imgs = len(proposal_list)
+ img_shapes = tuple(meta['img_shape'] for meta in img_metas)
+ ori_shapes = tuple(meta['ori_shape'] for meta in img_metas)
+ scale_factors = tuple(meta['scale_factor'] for meta in img_metas)
+
+ # "ms" in variable names means multi-stage
+ ms_bbox_result = {}
+ ms_segm_result = {}
+ ms_scores = []
+ rcnn_test_cfg = self.test_cfg
+
+ rois = bbox2roi(proposal_list)
+ for i in range(self.num_stages):
+ bbox_head = self.bbox_head[i]
+ bbox_results = self._bbox_forward(
+ i, x, rois, semantic_feat=semantic_feat)
+ # split batch bbox prediction back to each image
+ cls_score = bbox_results['cls_score']
+ bbox_pred = bbox_results['bbox_pred']
+ num_proposals_per_img = tuple(len(p) for p in proposal_list)
+ rois = rois.split(num_proposals_per_img, 0)
+ cls_score = cls_score.split(num_proposals_per_img, 0)
+ bbox_pred = bbox_pred.split(num_proposals_per_img, 0)
+ ms_scores.append(cls_score)
+
+ if i < self.num_stages - 1:
+ bbox_label = [s[:, :-1].argmax(dim=1) for s in cls_score]
+ rois = torch.cat([
+ bbox_head.regress_by_class(rois[i], bbox_label[i],
+ bbox_pred[i], img_metas[i])
+ for i in range(num_imgs)
+ ])
+
+ # average scores of each image by stages
+ cls_score = [
+ sum([score[i] for score in ms_scores]) / float(len(ms_scores))
+ for i in range(num_imgs)
+ ]
+
+ # apply bbox post-processing to each image individually
+ det_bboxes = []
+ det_labels = []
+ for i in range(num_imgs):
+ det_bbox, det_label = self.bbox_head[-1].get_bboxes(
+ rois[i],
+ cls_score[i],
+ bbox_pred[i],
+ img_shapes[i],
+ scale_factors[i],
+ rescale=rescale,
+ cfg=rcnn_test_cfg)
+ det_bboxes.append(det_bbox)
+ det_labels.append(det_label)
+ bbox_result = [
+ bbox2result(det_bboxes[i], det_labels[i],
+ self.bbox_head[-1].num_classes)
+ for i in range(num_imgs)
+ ]
+ ms_bbox_result['ensemble'] = bbox_result
+
+ if self.with_mask:
+ if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes):
+ mask_classes = self.mask_head[-1].num_classes
+ segm_results = [[[] for _ in range(mask_classes)]
+ for _ in range(num_imgs)]
+ else:
+ if rescale and not isinstance(scale_factors[0], float):
+ scale_factors = [
+ torch.from_numpy(scale_factor).to(det_bboxes[0].device)
+ for scale_factor in scale_factors
+ ]
+ _bboxes = [
+ det_bboxes[i][:, :4] *
+ scale_factors[i] if rescale else det_bboxes[i]
+ for i in range(num_imgs)
+ ]
+ mask_rois = bbox2roi(_bboxes)
+ aug_masks = []
+ mask_roi_extractor = self.mask_roi_extractor[-1]
+ mask_feats = mask_roi_extractor(
+ x[:len(mask_roi_extractor.featmap_strides)], mask_rois)
+ if self.with_semantic and 'mask' in self.semantic_fusion:
+ mask_semantic_feat = self.semantic_roi_extractor(
+ [semantic_feat], mask_rois)
+ mask_feats += mask_semantic_feat
+ last_feat = None
+
+ num_bbox_per_img = tuple(len(_bbox) for _bbox in _bboxes)
+ for i in range(self.num_stages):
+ mask_head = self.mask_head[i]
+ if self.mask_info_flow:
+ mask_pred, last_feat = mask_head(mask_feats, last_feat)
+ else:
+ mask_pred = mask_head(mask_feats)
+
+ # split batch mask prediction back to each image
+ mask_pred = mask_pred.split(num_bbox_per_img, 0)
+ aug_masks.append(
+ [mask.sigmoid().cpu().numpy() for mask in mask_pred])
+
+ # apply mask post-processing to each image individually
+ segm_results = []
+ for i in range(num_imgs):
+ if det_bboxes[i].shape[0] == 0:
+ segm_results.append(
+ [[]
+ for _ in range(self.mask_head[-1].num_classes)])
+ else:
+ aug_mask = [mask[i] for mask in aug_masks]
+ merged_mask = merge_aug_masks(
+ aug_mask, [[img_metas[i]]] * self.num_stages,
+ rcnn_test_cfg)
+ segm_result = self.mask_head[-1].get_seg_masks(
+ merged_mask, _bboxes[i], det_labels[i],
+ rcnn_test_cfg, ori_shapes[i], scale_factors[i],
+ rescale)
+ segm_results.append(segm_result)
+ ms_segm_result['ensemble'] = segm_results
+
+ if self.with_mask:
+ results = list(
+ zip(ms_bbox_result['ensemble'], ms_segm_result['ensemble']))
+ else:
+ results = ms_bbox_result['ensemble']
+
+ return results
+
+ def aug_test(self, img_feats, proposal_list, img_metas, rescale=False):
+ """Test with augmentations.
+
+ If rescale is False, then returned bboxes and masks will fit the scale
+ of imgs[0].
+ """
+ if self.with_semantic:
+ semantic_feats = [
+ self.semantic_head(feat)[1] for feat in img_feats
+ ]
+ else:
+ semantic_feats = [None] * len(img_metas)
+
+ rcnn_test_cfg = self.test_cfg
+ aug_bboxes = []
+ aug_scores = []
+ for x, img_meta, semantic in zip(img_feats, img_metas, semantic_feats):
+ # only one image in the batch
+ img_shape = img_meta[0]['img_shape']
+ scale_factor = img_meta[0]['scale_factor']
+ flip = img_meta[0]['flip']
+ flip_direction = img_meta[0]['flip_direction']
+
+ proposals = bbox_mapping(proposal_list[0][:, :4], img_shape,
+ scale_factor, flip, flip_direction)
+ # "ms" in variable names means multi-stage
+ ms_scores = []
+
+ rois = bbox2roi([proposals])
+ for i in range(self.num_stages):
+ bbox_head = self.bbox_head[i]
+ bbox_results = self._bbox_forward(
+ i, x, rois, semantic_feat=semantic)
+ ms_scores.append(bbox_results['cls_score'])
+
+ if i < self.num_stages - 1:
+ bbox_label = bbox_results['cls_score'].argmax(dim=1)
+ rois = bbox_head.regress_by_class(
+ rois, bbox_label, bbox_results['bbox_pred'],
+ img_meta[0])
+
+ cls_score = sum(ms_scores) / float(len(ms_scores))
+ bboxes, scores = self.bbox_head[-1].get_bboxes(
+ rois,
+ cls_score,
+ bbox_results['bbox_pred'],
+ img_shape,
+ scale_factor,
+ rescale=False,
+ cfg=None)
+ aug_bboxes.append(bboxes)
+ aug_scores.append(scores)
+
+ # after merging, bboxes will be rescaled to the original image size
+ merged_bboxes, merged_scores = merge_aug_bboxes(
+ aug_bboxes, aug_scores, img_metas, rcnn_test_cfg)
+ det_bboxes, det_labels = multiclass_nms(merged_bboxes, merged_scores,
+ rcnn_test_cfg.score_thr,
+ rcnn_test_cfg.nms,
+ rcnn_test_cfg.max_per_img)
+
+ bbox_result = bbox2result(det_bboxes, det_labels,
+ self.bbox_head[-1].num_classes)
+
+ if self.with_mask:
+ if det_bboxes.shape[0] == 0:
+ segm_result = [[[]
+ for _ in range(self.mask_head[-1].num_classes)]
+ ]
+ else:
+ aug_masks = []
+ aug_img_metas = []
+ for x, img_meta, semantic in zip(img_feats, img_metas,
+ semantic_feats):
+ img_shape = img_meta[0]['img_shape']
+ scale_factor = img_meta[0]['scale_factor']
+ flip = img_meta[0]['flip']
+ flip_direction = img_meta[0]['flip_direction']
+ _bboxes = bbox_mapping(det_bboxes[:, :4], img_shape,
+ scale_factor, flip, flip_direction)
+ mask_rois = bbox2roi([_bboxes])
+ mask_feats = self.mask_roi_extractor[-1](
+ x[:len(self.mask_roi_extractor[-1].featmap_strides)],
+ mask_rois)
+ if self.with_semantic:
+ semantic_feat = semantic
+ mask_semantic_feat = self.semantic_roi_extractor(
+ [semantic_feat], mask_rois)
+ if mask_semantic_feat.shape[-2:] != mask_feats.shape[
+ -2:]:
+ mask_semantic_feat = F.adaptive_avg_pool2d(
+ mask_semantic_feat, mask_feats.shape[-2:])
+ mask_feats += mask_semantic_feat
+ last_feat = None
+ for i in range(self.num_stages):
+ mask_head = self.mask_head[i]
+ if self.mask_info_flow:
+ mask_pred, last_feat = mask_head(
+ mask_feats, last_feat)
+ else:
+ mask_pred = mask_head(mask_feats)
+ aug_masks.append(mask_pred.sigmoid().cpu().numpy())
+ aug_img_metas.append(img_meta)
+ merged_masks = merge_aug_masks(aug_masks, aug_img_metas,
+ self.test_cfg)
+
+ ori_shape = img_metas[0][0]['ori_shape']
+ segm_result = self.mask_head[-1].get_seg_masks(
+ merged_masks,
+ det_bboxes,
+ det_labels,
+ rcnn_test_cfg,
+ ori_shape,
+ scale_factor=1.0,
+ rescale=False)
+ return [(bbox_result, segm_result)]
+ else:
+ return [bbox_result]
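+
+
+if __name__ == '__main__':
+    # Illustrative sketch only, not part of the upstream mmdet code: it
+    # mirrors how forward_train() above prefixes each stage's losses with
+    # 's{i}.' and scales only the entries whose key contains 'loss' by the
+    # per-stage weight. The weights and loss values below are dummies.
+    import torch
+    stage_loss_weights = [1.0, 0.5, 0.25]  # assumed example weights
+    losses = dict()
+    for i, lw in enumerate(stage_loss_weights):
+        loss_bbox = dict(loss_cls=torch.tensor(0.7),
+                         loss_bbox=torch.tensor(0.4),
+                         acc=torch.tensor(91.0))
+        for name, value in loss_bbox.items():
+            # 'acc' entries are logged unweighted; 'loss_*' entries are scaled
+            losses[f's{i}.{name}'] = value * lw if 'loss' in name else value
+    print({k: round(float(v), 3) for k, v in losses.items()})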
diff --git a/annotator/uniformer/mmdet/models/roi_heads/mask_heads/__init__.py b/annotator/uniformer/mmdet/models/roi_heads/mask_heads/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..abfbe2624eecb73b029e9bcb7e2283bbf2a744ea
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/roi_heads/mask_heads/__init__.py
@@ -0,0 +1,17 @@
+from .coarse_mask_head import CoarseMaskHead
+from .fcn_mask_head import FCNMaskHead
+from .feature_relay_head import FeatureRelayHead
+from .fused_semantic_head import FusedSemanticHead
+from .global_context_head import GlobalContextHead
+from .grid_head import GridHead
+from .htc_mask_head import HTCMaskHead
+from .mask_point_head import MaskPointHead
+from .maskiou_head import MaskIoUHead
+from .scnet_mask_head import SCNetMaskHead
+from .scnet_semantic_head import SCNetSemanticHead
+
+__all__ = [
+ 'FCNMaskHead', 'HTCMaskHead', 'FusedSemanticHead', 'GridHead',
+ 'MaskIoUHead', 'CoarseMaskHead', 'MaskPointHead', 'SCNetMaskHead',
+ 'SCNetSemanticHead', 'GlobalContextHead', 'FeatureRelayHead'
+]
diff --git a/annotator/uniformer/mmdet/models/roi_heads/mask_heads/coarse_mask_head.py b/annotator/uniformer/mmdet/models/roi_heads/mask_heads/coarse_mask_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..d665dfff83855e6db3866c681559ccdef09f9999
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/roi_heads/mask_heads/coarse_mask_head.py
@@ -0,0 +1,91 @@
+import torch.nn as nn
+from mmcv.cnn import ConvModule, Linear, constant_init, xavier_init
+from mmcv.runner import auto_fp16
+
+from mmdet.models.builder import HEADS
+from .fcn_mask_head import FCNMaskHead
+
+
+@HEADS.register_module()
+class CoarseMaskHead(FCNMaskHead):
+ """Coarse mask head used in PointRend.
+
+    Compared with the standard ``FCNMaskHead``, ``CoarseMaskHead`` downsamples
+    the input feature map instead of upsampling it.
+
+ Args:
+ num_convs (int): Number of conv layers in the head. Default: 0.
+ num_fcs (int): Number of fc layers in the head. Default: 2.
+ fc_out_channels (int): Number of output channels of fc layer.
+ Default: 1024.
+        downsample_factor (int): The factor by which the feature map is
+            downsampled. Default: 2.
+ """
+
+ def __init__(self,
+ num_convs=0,
+ num_fcs=2,
+ fc_out_channels=1024,
+ downsample_factor=2,
+ *arg,
+ **kwarg):
+ super(CoarseMaskHead, self).__init__(
+ *arg, num_convs=num_convs, upsample_cfg=dict(type=None), **kwarg)
+ self.num_fcs = num_fcs
+ assert self.num_fcs > 0
+ self.fc_out_channels = fc_out_channels
+ self.downsample_factor = downsample_factor
+ assert self.downsample_factor >= 1
+        # remove the inherited conv_logits
+ delattr(self, 'conv_logits')
+
+ if downsample_factor > 1:
+ downsample_in_channels = (
+ self.conv_out_channels
+ if self.num_convs > 0 else self.in_channels)
+ self.downsample_conv = ConvModule(
+ downsample_in_channels,
+ self.conv_out_channels,
+ kernel_size=downsample_factor,
+ stride=downsample_factor,
+ padding=0,
+ conv_cfg=self.conv_cfg,
+ norm_cfg=self.norm_cfg)
+ else:
+ self.downsample_conv = None
+
+ self.output_size = (self.roi_feat_size[0] // downsample_factor,
+ self.roi_feat_size[1] // downsample_factor)
+ self.output_area = self.output_size[0] * self.output_size[1]
+
+ last_layer_dim = self.conv_out_channels * self.output_area
+
+ self.fcs = nn.ModuleList()
+ for i in range(num_fcs):
+ fc_in_channels = (
+ last_layer_dim if i == 0 else self.fc_out_channels)
+ self.fcs.append(Linear(fc_in_channels, self.fc_out_channels))
+ last_layer_dim = self.fc_out_channels
+ output_channels = self.num_classes * self.output_area
+ self.fc_logits = Linear(last_layer_dim, output_channels)
+
+ def init_weights(self):
+ for m in self.fcs.modules():
+ if isinstance(m, nn.Linear):
+ xavier_init(m)
+ constant_init(self.fc_logits, 0.001)
+
+ @auto_fp16()
+ def forward(self, x):
+ for conv in self.convs:
+ x = conv(x)
+
+ if self.downsample_conv is not None:
+ x = self.downsample_conv(x)
+
+ x = x.flatten(1)
+ for fc in self.fcs:
+ x = self.relu(fc(x))
+ mask_pred = self.fc_logits(x).view(
+ x.size(0), self.num_classes, *self.output_size)
+ return mask_pred
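+
+
+if __name__ == '__main__':
+    # Illustrative shape-flow sketch only, not part of the upstream mmdet
+    # code: it reproduces the forward() above with plain torch layers and the
+    # default sizes (in_channels=256, roi_feat_size=14, downsample_factor=2,
+    # num_fcs=2, fc_out_channels=1024, num_classes=80), so each 14x14 RoI
+    # feature is mapped to per-class logits on a 7x7 grid.
+    import torch
+    x = torch.rand(3, 256, 14, 14)                  # 3 sampled RoIs
+    downsample = nn.Conv2d(256, 256, kernel_size=2, stride=2)
+    x = downsample(x)                               # -> (3, 256, 7, 7)
+    x = x.flatten(1)                                # -> (3, 256 * 49)
+    fc1, fc2 = nn.Linear(256 * 49, 1024), nn.Linear(1024, 1024)
+    x = torch.relu(fc2(torch.relu(fc1(x))))
+    fc_logits = nn.Linear(1024, 80 * 49)
+    logits = fc_logits(x).view(3, 80, 7, 7)
+    assert logits.shape == (3, 80, 7, 7)            # coarse per-class masks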
diff --git a/annotator/uniformer/mmdet/models/roi_heads/mask_heads/fcn_mask_head.py b/annotator/uniformer/mmdet/models/roi_heads/mask_heads/fcn_mask_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..be6772fa6c471a7a65b77f2f18dfd217f4bd3289
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/roi_heads/mask_heads/fcn_mask_head.py
@@ -0,0 +1,377 @@
+import numpy as np
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from mmcv.cnn import Conv2d, ConvModule, build_upsample_layer
+from mmcv.ops.carafe import CARAFEPack
+from mmcv.runner import auto_fp16, force_fp32
+from torch.nn.modules.utils import _pair
+
+from mmdet.core import mask_target
+from mmdet.models.builder import HEADS, build_loss
+
+BYTES_PER_FLOAT = 4
+# TODO: This memory limit may be too much or too little. It would be better to
+# determine it based on available resources.
+GPU_MEM_LIMIT = 1024**3 # 1 GB memory limit
+
+
+@HEADS.register_module()
+class FCNMaskHead(nn.Module):
+
+ def __init__(self,
+ num_convs=4,
+ roi_feat_size=14,
+ in_channels=256,
+ conv_kernel_size=3,
+ conv_out_channels=256,
+ num_classes=80,
+ class_agnostic=False,
+ upsample_cfg=dict(type='deconv', scale_factor=2),
+ conv_cfg=None,
+ norm_cfg=None,
+ loss_mask=dict(
+ type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)):
+ super(FCNMaskHead, self).__init__()
+ self.upsample_cfg = upsample_cfg.copy()
+ if self.upsample_cfg['type'] not in [
+ None, 'deconv', 'nearest', 'bilinear', 'carafe'
+ ]:
+ raise ValueError(
+ f'Invalid upsample method {self.upsample_cfg["type"]}, '
+ 'accepted methods are "deconv", "nearest", "bilinear", '
+ '"carafe"')
+ self.num_convs = num_convs
+ # WARN: roi_feat_size is reserved and not used
+ self.roi_feat_size = _pair(roi_feat_size)
+ self.in_channels = in_channels
+ self.conv_kernel_size = conv_kernel_size
+ self.conv_out_channels = conv_out_channels
+ self.upsample_method = self.upsample_cfg.get('type')
+ self.scale_factor = self.upsample_cfg.pop('scale_factor', None)
+ self.num_classes = num_classes
+ self.class_agnostic = class_agnostic
+ self.conv_cfg = conv_cfg
+ self.norm_cfg = norm_cfg
+ self.fp16_enabled = False
+ self.loss_mask = build_loss(loss_mask)
+
+ self.convs = nn.ModuleList()
+ for i in range(self.num_convs):
+ in_channels = (
+ self.in_channels if i == 0 else self.conv_out_channels)
+ padding = (self.conv_kernel_size - 1) // 2
+ self.convs.append(
+ ConvModule(
+ in_channels,
+ self.conv_out_channels,
+ self.conv_kernel_size,
+ padding=padding,
+ conv_cfg=conv_cfg,
+ norm_cfg=norm_cfg))
+ upsample_in_channels = (
+ self.conv_out_channels if self.num_convs > 0 else in_channels)
+ upsample_cfg_ = self.upsample_cfg.copy()
+ if self.upsample_method is None:
+ self.upsample = None
+ elif self.upsample_method == 'deconv':
+ upsample_cfg_.update(
+ in_channels=upsample_in_channels,
+ out_channels=self.conv_out_channels,
+ kernel_size=self.scale_factor,
+ stride=self.scale_factor)
+ self.upsample = build_upsample_layer(upsample_cfg_)
+ elif self.upsample_method == 'carafe':
+ upsample_cfg_.update(
+ channels=upsample_in_channels, scale_factor=self.scale_factor)
+ self.upsample = build_upsample_layer(upsample_cfg_)
+ else:
+ # suppress warnings
+ align_corners = (None
+ if self.upsample_method == 'nearest' else False)
+ upsample_cfg_.update(
+ scale_factor=self.scale_factor,
+ mode=self.upsample_method,
+ align_corners=align_corners)
+ self.upsample = build_upsample_layer(upsample_cfg_)
+
+ out_channels = 1 if self.class_agnostic else self.num_classes
+ logits_in_channel = (
+ self.conv_out_channels
+ if self.upsample_method == 'deconv' else upsample_in_channels)
+ self.conv_logits = Conv2d(logits_in_channel, out_channels, 1)
+ self.relu = nn.ReLU(inplace=True)
+ self.debug_imgs = None
+
+ def init_weights(self):
+ for m in [self.upsample, self.conv_logits]:
+ if m is None:
+ continue
+ elif isinstance(m, CARAFEPack):
+ m.init_weights()
+ else:
+ nn.init.kaiming_normal_(
+ m.weight, mode='fan_out', nonlinearity='relu')
+ nn.init.constant_(m.bias, 0)
+
+ @auto_fp16()
+ def forward(self, x):
+ for conv in self.convs:
+ x = conv(x)
+ if self.upsample is not None:
+ x = self.upsample(x)
+ if self.upsample_method == 'deconv':
+ x = self.relu(x)
+ mask_pred = self.conv_logits(x)
+ return mask_pred
+
+ def get_targets(self, sampling_results, gt_masks, rcnn_train_cfg):
+ pos_proposals = [res.pos_bboxes for res in sampling_results]
+ pos_assigned_gt_inds = [
+ res.pos_assigned_gt_inds for res in sampling_results
+ ]
+ mask_targets = mask_target(pos_proposals, pos_assigned_gt_inds,
+ gt_masks, rcnn_train_cfg)
+ return mask_targets
+
+ @force_fp32(apply_to=('mask_pred', ))
+ def loss(self, mask_pred, mask_targets, labels):
+ """
+ Example:
+ >>> from mmdet.models.roi_heads.mask_heads.fcn_mask_head import * # NOQA
+ >>> N = 7 # N = number of extracted ROIs
+ >>> C, H, W = 11, 32, 32
+ >>> # Create example instance of FCN Mask Head.
+ >>> # There are lots of variations depending on the configuration
+ >>> self = FCNMaskHead(num_classes=C, num_convs=1)
+ >>> inputs = torch.rand(N, self.in_channels, H, W)
+ >>> mask_pred = self.forward(inputs)
+ >>> sf = self.scale_factor
+ >>> labels = torch.randint(0, C, size=(N,))
+ >>> # With the default properties the mask targets should indicate
+ >>> # a (potentially soft) single-class label
+ >>> mask_targets = torch.rand(N, H * sf, W * sf)
+ >>> loss = self.loss(mask_pred, mask_targets, labels)
+ >>> print('loss = {!r}'.format(loss))
+ """
+ loss = dict()
+ if mask_pred.size(0) == 0:
+ loss_mask = mask_pred.sum()
+ else:
+ if self.class_agnostic:
+ loss_mask = self.loss_mask(mask_pred, mask_targets,
+ torch.zeros_like(labels))
+ else:
+ loss_mask = self.loss_mask(mask_pred, mask_targets, labels)
+ loss['loss_mask'] = loss_mask
+ return loss
+
+ def get_seg_masks(self, mask_pred, det_bboxes, det_labels, rcnn_test_cfg,
+ ori_shape, scale_factor, rescale):
+ """Get segmentation masks from mask_pred and bboxes.
+
+ Args:
+            mask_pred (Tensor or ndarray): shape (n, #class, h, w).
+                For single-scale testing, mask_pred is the direct output of
+                the model, whose type is Tensor, while for multi-scale
+                testing it will be converted to a numpy array outside of
+                this method.
+ det_bboxes (Tensor): shape (n, 4/5)
+ det_labels (Tensor): shape (n, )
+ rcnn_test_cfg (dict): rcnn testing config
+ ori_shape (Tuple): original image height and width, shape (2,)
+ scale_factor(float | Tensor): If ``rescale is True``, box
+ coordinates are divided by this scale factor to fit
+ ``ori_shape``.
+ rescale (bool): If True, the resulting masks will be rescaled to
+ ``ori_shape``.
+
+ Returns:
+ list[list]: encoded masks. The c-th item in the outer list
+ corresponds to the c-th class. Given the c-th outer list, the
+ i-th item in that inner list is the mask for the i-th box with
+ class label c.
+
+ Example:
+ >>> import mmcv
+ >>> from mmdet.models.roi_heads.mask_heads.fcn_mask_head import * # NOQA
+ >>> N = 7 # N = number of extracted ROIs
+ >>> C, H, W = 11, 32, 32
+ >>> # Create example instance of FCN Mask Head.
+ >>> self = FCNMaskHead(num_classes=C, num_convs=0)
+ >>> inputs = torch.rand(N, self.in_channels, H, W)
+ >>> mask_pred = self.forward(inputs)
+ >>> # Each input is associated with some bounding box
+ >>> det_bboxes = torch.Tensor([[1, 1, 42, 42 ]] * N)
+ >>> det_labels = torch.randint(0, C, size=(N,))
+ >>> rcnn_test_cfg = mmcv.Config({'mask_thr_binary': 0, })
+ >>> ori_shape = (H * 4, W * 4)
+ >>> scale_factor = torch.FloatTensor((1, 1))
+ >>> rescale = False
+ >>> # Encoded masks are a list for each category.
+ >>> encoded_masks = self.get_seg_masks(
+ >>> mask_pred, det_bboxes, det_labels, rcnn_test_cfg, ori_shape,
+ >>> scale_factor, rescale
+ >>> )
+ >>> assert len(encoded_masks) == C
+ >>> assert sum(list(map(len, encoded_masks))) == N
+ """
+ if isinstance(mask_pred, torch.Tensor):
+ mask_pred = mask_pred.sigmoid()
+ else:
+ mask_pred = det_bboxes.new_tensor(mask_pred)
+
+ device = mask_pred.device
+ cls_segms = [[] for _ in range(self.num_classes)
+ ] # BG is not included in num_classes
+ bboxes = det_bboxes[:, :4]
+ labels = det_labels
+
+ if rescale:
+ img_h, img_w = ori_shape[:2]
+ else:
+ if isinstance(scale_factor, float):
+ img_h = np.round(ori_shape[0] * scale_factor).astype(np.int32)
+ img_w = np.round(ori_shape[1] * scale_factor).astype(np.int32)
+ else:
+ w_scale, h_scale = scale_factor[0], scale_factor[1]
+ img_h = np.round(ori_shape[0] * h_scale.item()).astype(
+ np.int32)
+ img_w = np.round(ori_shape[1] * w_scale.item()).astype(
+ np.int32)
+ scale_factor = 1.0
+
+ if not isinstance(scale_factor, (float, torch.Tensor)):
+ scale_factor = bboxes.new_tensor(scale_factor)
+ bboxes = bboxes / scale_factor
+
+ if torch.onnx.is_in_onnx_export():
+ # TODO: Remove after F.grid_sample is supported.
+ from torchvision.models.detection.roi_heads \
+ import paste_masks_in_image
+ masks = paste_masks_in_image(mask_pred, bboxes, ori_shape[:2])
+ thr = rcnn_test_cfg.get('mask_thr_binary', 0)
+ if thr > 0:
+ masks = masks >= thr
+ return masks
+
+ N = len(mask_pred)
+        # The actual implementation splits the input into chunks
+        # and pastes them chunk by chunk.
+ if device.type == 'cpu':
+            # CPU is most efficient when masks are pasted one by one with
+            # skip_empty=True, so that it performs a minimal number of
+            # operations.
+ num_chunks = N
+ else:
+ # GPU benefits from parallelism for larger chunks,
+ # but may have memory issue
+ num_chunks = int(
+ np.ceil(N * img_h * img_w * BYTES_PER_FLOAT / GPU_MEM_LIMIT))
+ assert (num_chunks <=
+ N), 'Default GPU_MEM_LIMIT is too small; try increasing it'
+ chunks = torch.chunk(torch.arange(N, device=device), num_chunks)
+
+ threshold = rcnn_test_cfg.mask_thr_binary
+ im_mask = torch.zeros(
+ N,
+ img_h,
+ img_w,
+ device=device,
+ dtype=torch.bool if threshold >= 0 else torch.uint8)
+
+ if not self.class_agnostic:
+ mask_pred = mask_pred[range(N), labels][:, None]
+
+ for inds in chunks:
+ masks_chunk, spatial_inds = _do_paste_mask(
+ mask_pred[inds],
+ bboxes[inds],
+ img_h,
+ img_w,
+ skip_empty=device.type == 'cpu')
+
+ if threshold >= 0:
+ masks_chunk = (masks_chunk >= threshold).to(dtype=torch.bool)
+ else:
+ # for visualization and debugging
+ masks_chunk = (masks_chunk * 255).to(dtype=torch.uint8)
+
+ im_mask[(inds, ) + spatial_inds] = masks_chunk
+
+ for i in range(N):
+ cls_segms[labels[i]].append(im_mask[i].detach().cpu().numpy())
+ return cls_segms
+
+
+def _do_paste_mask(masks, boxes, img_h, img_w, skip_empty=True):
+ """Paste instance masks according to boxes.
+
+ This implementation is modified from
+ https://github.com/facebookresearch/detectron2/
+
+ Args:
+ masks (Tensor): N, 1, H, W
+ boxes (Tensor): N, 4
+ img_h (int): Height of the image to be pasted.
+ img_w (int): Width of the image to be pasted.
+        skip_empty (bool): Only paste masks within the region that tightly
+            bounds all boxes, and return the results for this region only.
+            An important optimization for CPU.
+
+ Returns:
+        tuple: (Tensor, tuple). The first item is the mask tensor, the second
+            one is a tuple of slices.
+            If skip_empty == False, the whole image will be pasted. It will
+            return a mask of shape (N, img_h, img_w) and an empty tuple.
+            If skip_empty == True, only the area around the mask will be
+            pasted. A mask of shape (N, h', w') and its start and end
+            coordinates in the original image will be returned.
+ """
+    # On GPU, paste all masks together (up to chunk size) by using the
+    # entire image to sample the masks. Compared to pasting them one by
+    # one, this has more operations but is faster on COCO-scale datasets.
+ device = masks.device
+ if skip_empty:
+ x0_int, y0_int = torch.clamp(
+ boxes.min(dim=0).values.floor()[:2] - 1,
+ min=0).to(dtype=torch.int32)
+ x1_int = torch.clamp(
+ boxes[:, 2].max().ceil() + 1, max=img_w).to(dtype=torch.int32)
+ y1_int = torch.clamp(
+ boxes[:, 3].max().ceil() + 1, max=img_h).to(dtype=torch.int32)
+ else:
+ x0_int, y0_int = 0, 0
+ x1_int, y1_int = img_w, img_h
+ x0, y0, x1, y1 = torch.split(boxes, 1, dim=1) # each is Nx1
+
+ N = masks.shape[0]
+
+ img_y = torch.arange(
+ y0_int, y1_int, device=device, dtype=torch.float32) + 0.5
+ img_x = torch.arange(
+ x0_int, x1_int, device=device, dtype=torch.float32) + 0.5
+ img_y = (img_y - y0) / (y1 - y0) * 2 - 1
+ img_x = (img_x - x0) / (x1 - x0) * 2 - 1
+ # img_x, img_y have shapes (N, w), (N, h)
+ if torch.isinf(img_x).any():
+ inds = torch.where(torch.isinf(img_x))
+ img_x[inds] = 0
+ if torch.isinf(img_y).any():
+ inds = torch.where(torch.isinf(img_y))
+ img_y[inds] = 0
+
+ gx = img_x[:, None, :].expand(N, img_y.size(1), img_x.size(1))
+ gy = img_y[:, :, None].expand(N, img_y.size(1), img_x.size(1))
+ grid = torch.stack([gx, gy], dim=3)
+
+ if torch.onnx.is_in_onnx_export():
+ raise RuntimeError(
+ 'Exporting F.grid_sample from Pytorch to ONNX is not supported.')
+ img_masks = F.grid_sample(
+ masks.to(dtype=torch.float32), grid, align_corners=False)
+
+ if skip_empty:
+ return img_masks[:, 0], (slice(y0_int, y1_int), slice(x0_int, x1_int))
+ else:
+ return img_masks[:, 0], ()
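+
+
+if __name__ == '__main__':
+    # Illustrative sketch only, not part of the upstream mmdet code: paste a
+    # single all-ones RoI mask into a 32x32 image with _do_paste_mask() and
+    # check the pasted values inside and outside the (dummy) box.
+    masks = torch.ones(1, 1, 28, 28)                # one soft mask prediction
+    boxes = torch.tensor([[4.0, 6.0, 20.0, 18.0]])  # x1, y1, x2, y2
+    img_masks, _ = _do_paste_mask(masks, boxes, 32, 32, skip_empty=False)
+    assert img_masks.shape == (1, 32, 32)
+    # values are ~1 inside the box and 0 well outside of it (zero padding)
+    print(img_masks[0, 12, 12].item(), img_masks[0, 0, 0].item())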
diff --git a/annotator/uniformer/mmdet/models/roi_heads/mask_heads/feature_relay_head.py b/annotator/uniformer/mmdet/models/roi_heads/mask_heads/feature_relay_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..a1cfb2ce8631d51e5c465f9bbc4164a37acc4782
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/roi_heads/mask_heads/feature_relay_head.py
@@ -0,0 +1,55 @@
+import torch.nn as nn
+from mmcv.cnn import kaiming_init
+from mmcv.runner import auto_fp16
+
+from mmdet.models.builder import HEADS
+
+
+@HEADS.register_module()
+class FeatureRelayHead(nn.Module):
+    """Feature Relay Head used in SCNet.
+
+ Args:
+        in_channels (int, optional): number of input channels. Default: 1024.
+        out_conv_channels (int, optional): number of output channels of the
+            relayed feature map. Default: 256.
+ roi_feat_size (int, optional): roi feat size at box head. Default: 7.
+ scale_factor (int, optional): scale factor to match roi feat size
+ at mask head. Default: 2.
+ """
+
+ def __init__(self,
+ in_channels=1024,
+ out_conv_channels=256,
+ roi_feat_size=7,
+ scale_factor=2):
+ super(FeatureRelayHead, self).__init__()
+ assert isinstance(roi_feat_size, int)
+
+ self.in_channels = in_channels
+ self.out_conv_channels = out_conv_channels
+ self.roi_feat_size = roi_feat_size
+ self.out_channels = (roi_feat_size**2) * out_conv_channels
+ self.scale_factor = scale_factor
+ self.fp16_enabled = False
+
+ self.fc = nn.Linear(self.in_channels, self.out_channels)
+ self.upsample = nn.Upsample(
+ scale_factor=scale_factor, mode='bilinear', align_corners=True)
+
+ def init_weights(self):
+ """Init weights for the head."""
+ kaiming_init(self.fc)
+
+ @auto_fp16()
+ def forward(self, x):
+ """Forward function."""
+ N, in_C = x.shape
+ if N > 0:
+ out_C = self.out_conv_channels
+ out_HW = self.roi_feat_size
+ x = self.fc(x)
+ x = x.reshape(N, out_C, out_HW, out_HW)
+ x = self.upsample(x)
+ return x
+ return None
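+
+
+if __name__ == '__main__':
+    # Illustrative shape-flow sketch only, not part of the upstream mmdet
+    # code: with the defaults above (in_channels=1024, out_conv_channels=256,
+    # roi_feat_size=7, scale_factor=2) the relay head turns flat box-head
+    # features into a spatial map matching the mask-head RoI size.
+    import torch
+    x = torch.rand(5, 1024)                         # 5 RoIs
+    fc = nn.Linear(1024, 256 * 7 * 7)
+    up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
+    out = up(fc(x).reshape(5, 256, 7, 7))
+    assert out.shape == (5, 256, 14, 14)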
diff --git a/annotator/uniformer/mmdet/models/roi_heads/mask_heads/fused_semantic_head.py b/annotator/uniformer/mmdet/models/roi_heads/mask_heads/fused_semantic_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..2aa6033eec17a30aeb68c0fdd218d8f0d41157e8
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/roi_heads/mask_heads/fused_semantic_head.py
@@ -0,0 +1,107 @@
+import torch.nn as nn
+import torch.nn.functional as F
+from mmcv.cnn import ConvModule, kaiming_init
+from mmcv.runner import auto_fp16, force_fp32
+
+from mmdet.models.builder import HEADS
+
+
+@HEADS.register_module()
+class FusedSemanticHead(nn.Module):
+ r"""Multi-level fused semantic segmentation head.
+
+ .. code-block:: none
+
+ in_1 -> 1x1 conv ---
+ |
+ in_2 -> 1x1 conv -- |
+ ||
+ in_3 -> 1x1 conv - ||
+ ||| /-> 1x1 conv (mask prediction)
+ in_4 -> 1x1 conv -----> 3x3 convs (*4)
+ | \-> 1x1 conv (feature)
+ in_5 -> 1x1 conv ---
+ """ # noqa: W605
+
+ def __init__(self,
+ num_ins,
+ fusion_level,
+ num_convs=4,
+ in_channels=256,
+ conv_out_channels=256,
+ num_classes=183,
+ ignore_label=255,
+ loss_weight=0.2,
+ conv_cfg=None,
+ norm_cfg=None):
+ super(FusedSemanticHead, self).__init__()
+ self.num_ins = num_ins
+ self.fusion_level = fusion_level
+ self.num_convs = num_convs
+ self.in_channels = in_channels
+ self.conv_out_channels = conv_out_channels
+ self.num_classes = num_classes
+ self.ignore_label = ignore_label
+ self.loss_weight = loss_weight
+ self.conv_cfg = conv_cfg
+ self.norm_cfg = norm_cfg
+ self.fp16_enabled = False
+
+ self.lateral_convs = nn.ModuleList()
+ for i in range(self.num_ins):
+ self.lateral_convs.append(
+ ConvModule(
+ self.in_channels,
+ self.in_channels,
+ 1,
+ conv_cfg=self.conv_cfg,
+ norm_cfg=self.norm_cfg,
+ inplace=False))
+
+ self.convs = nn.ModuleList()
+ for i in range(self.num_convs):
+ in_channels = self.in_channels if i == 0 else conv_out_channels
+ self.convs.append(
+ ConvModule(
+ in_channels,
+ conv_out_channels,
+ 3,
+ padding=1,
+ conv_cfg=self.conv_cfg,
+ norm_cfg=self.norm_cfg))
+ self.conv_embedding = ConvModule(
+ conv_out_channels,
+ conv_out_channels,
+ 1,
+ conv_cfg=self.conv_cfg,
+ norm_cfg=self.norm_cfg)
+ self.conv_logits = nn.Conv2d(conv_out_channels, self.num_classes, 1)
+
+ self.criterion = nn.CrossEntropyLoss(ignore_index=ignore_label)
+
+ def init_weights(self):
+ kaiming_init(self.conv_logits)
+
+ @auto_fp16()
+ def forward(self, feats):
+ x = self.lateral_convs[self.fusion_level](feats[self.fusion_level])
+ fused_size = tuple(x.shape[-2:])
+ for i, feat in enumerate(feats):
+ if i != self.fusion_level:
+ feat = F.interpolate(
+ feat, size=fused_size, mode='bilinear', align_corners=True)
+ x += self.lateral_convs[i](feat)
+
+ for i in range(self.num_convs):
+ x = self.convs[i](x)
+
+ mask_pred = self.conv_logits(x)
+ x = self.conv_embedding(x)
+ return mask_pred, x
+
+ @force_fp32(apply_to=('mask_pred', ))
+ def loss(self, mask_pred, labels):
+ labels = labels.squeeze(1).long()
+ loss_semantic_seg = self.criterion(mask_pred, labels)
+ loss_semantic_seg *= self.loss_weight
+ return loss_semantic_seg
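+
+
+if __name__ == '__main__':
+    # Illustrative sketch only, not part of the upstream mmdet code: fuse
+    # five FPN levels at fusion_level=1 the same way forward() above does,
+    # but with plain 1x1 convs standing in for ConvModule. Sizes are dummies.
+    import torch
+    feats = [torch.rand(1, 256, 64 // 2**i, 64 // 2**i) for i in range(5)]
+    laterals = nn.ModuleList([nn.Conv2d(256, 256, 1) for _ in range(5)])
+    fusion_level = 1
+    x = laterals[fusion_level](feats[fusion_level])
+    for i, feat in enumerate(feats):
+        if i != fusion_level:
+            # resize every other level to the fusion-level resolution
+            feat = F.interpolate(feat, size=x.shape[-2:], mode='bilinear',
+                                 align_corners=True)
+            x = x + laterals[i](feat)
+    assert x.shape == (1, 256, 32, 32)              # all levels fused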
diff --git a/annotator/uniformer/mmdet/models/roi_heads/mask_heads/global_context_head.py b/annotator/uniformer/mmdet/models/roi_heads/mask_heads/global_context_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..d8e8cbca95d69e86ec7a2a1e7ed7f158be1b5753
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/roi_heads/mask_heads/global_context_head.py
@@ -0,0 +1,102 @@
+import torch.nn as nn
+from mmcv.cnn import ConvModule
+from mmcv.runner import auto_fp16, force_fp32
+
+from mmdet.models.builder import HEADS
+from mmdet.models.utils import ResLayer, SimplifiedBasicBlock
+
+
+@HEADS.register_module()
+class GlobalContextHead(nn.Module):
+    """Global context head used in SCNet.
+
+ Args:
+        num_convs (int, optional): number of convolutional layers in the head.
+            Default: 4.
+ in_channels (int, optional): number of input channels. Default: 256.
+ conv_out_channels (int, optional): number of output channels before
+ classification layer. Default: 256.
+ num_classes (int, optional): number of classes. Default: 80.
+ loss_weight (float, optional): global context loss weight. Default: 1.
+ conv_cfg (dict, optional): config to init conv layer. Default: None.
+ norm_cfg (dict, optional): config to init norm layer. Default: None.
+ conv_to_res (bool, optional): if True, 2 convs will be grouped into
+ 1 `SimplifiedBasicBlock` using a skip connection. Default: False.
+ """
+
+ def __init__(self,
+ num_convs=4,
+ in_channels=256,
+ conv_out_channels=256,
+ num_classes=80,
+ loss_weight=1.0,
+ conv_cfg=None,
+ norm_cfg=None,
+ conv_to_res=False):
+ super(GlobalContextHead, self).__init__()
+ self.num_convs = num_convs
+ self.in_channels = in_channels
+ self.conv_out_channels = conv_out_channels
+ self.num_classes = num_classes
+ self.loss_weight = loss_weight
+ self.conv_cfg = conv_cfg
+ self.norm_cfg = norm_cfg
+ self.conv_to_res = conv_to_res
+ self.fp16_enabled = False
+
+ if self.conv_to_res:
+ num_res_blocks = num_convs // 2
+ self.convs = ResLayer(
+ SimplifiedBasicBlock,
+ in_channels,
+ self.conv_out_channels,
+ num_res_blocks,
+ conv_cfg=self.conv_cfg,
+ norm_cfg=self.norm_cfg)
+ self.num_convs = num_res_blocks
+ else:
+ self.convs = nn.ModuleList()
+ for i in range(self.num_convs):
+ in_channels = self.in_channels if i == 0 else conv_out_channels
+ self.convs.append(
+ ConvModule(
+ in_channels,
+ conv_out_channels,
+ 3,
+ padding=1,
+ conv_cfg=self.conv_cfg,
+ norm_cfg=self.norm_cfg))
+
+ self.pool = nn.AdaptiveAvgPool2d(1)
+ self.fc = nn.Linear(conv_out_channels, num_classes)
+
+ self.criterion = nn.BCEWithLogitsLoss()
+
+ def init_weights(self):
+ """Init weights for the head."""
+ nn.init.normal_(self.fc.weight, 0, 0.01)
+ nn.init.constant_(self.fc.bias, 0)
+
+ @auto_fp16()
+ def forward(self, feats):
+ """Forward function."""
+ x = feats[-1]
+ for i in range(self.num_convs):
+ x = self.convs[i](x)
+ x = self.pool(x)
+
+ # multi-class prediction
+ mc_pred = x.reshape(x.size(0), -1)
+ mc_pred = self.fc(mc_pred)
+
+ return mc_pred, x
+
+ @force_fp32(apply_to=('pred', ))
+ def loss(self, pred, labels):
+ """Loss function."""
+ labels = [lbl.unique() for lbl in labels]
+ targets = pred.new_zeros(pred.size())
+ for i, label in enumerate(labels):
+ targets[i, label] = 1.0
+ loss = self.loss_weight * self.criterion(pred, targets)
+ return loss
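+
+
+if __name__ == '__main__':
+    # Illustrative sketch only, not part of the upstream mmdet code: how
+    # loss() above builds multi-label targets from the per-image gt labels
+    # before applying BCEWithLogitsLoss. All numbers are dummies.
+    import torch
+    num_classes = 5
+    pred = torch.randn(2, num_classes)              # global context logits
+    gt_labels = [torch.tensor([0, 2, 2]), torch.tensor([4])]
+    labels = [lbl.unique() for lbl in gt_labels]
+    targets = pred.new_zeros(pred.size())
+    for i, label in enumerate(labels):
+        targets[i, label] = 1.0                     # mark present classes
+    loss = nn.BCEWithLogitsLoss()(pred, targets)
+    print(targets.tolist(), float(loss))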
diff --git a/annotator/uniformer/mmdet/models/roi_heads/mask_heads/grid_head.py b/annotator/uniformer/mmdet/models/roi_heads/mask_heads/grid_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..83058cbdda934ebfc3a76088e1820848ac01b78b
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/roi_heads/mask_heads/grid_head.py
@@ -0,0 +1,359 @@
+import numpy as np
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from mmcv.cnn import ConvModule, kaiming_init, normal_init
+
+from mmdet.models.builder import HEADS, build_loss
+
+
+@HEADS.register_module()
+class GridHead(nn.Module):
+
+ def __init__(self,
+ grid_points=9,
+ num_convs=8,
+ roi_feat_size=14,
+ in_channels=256,
+ conv_kernel_size=3,
+ point_feat_channels=64,
+ deconv_kernel_size=4,
+ class_agnostic=False,
+ loss_grid=dict(
+ type='CrossEntropyLoss', use_sigmoid=True,
+ loss_weight=15),
+ conv_cfg=None,
+ norm_cfg=dict(type='GN', num_groups=36)):
+ super(GridHead, self).__init__()
+ self.grid_points = grid_points
+ self.num_convs = num_convs
+ self.roi_feat_size = roi_feat_size
+ self.in_channels = in_channels
+ self.conv_kernel_size = conv_kernel_size
+ self.point_feat_channels = point_feat_channels
+ self.conv_out_channels = self.point_feat_channels * self.grid_points
+ self.class_agnostic = class_agnostic
+ self.conv_cfg = conv_cfg
+ self.norm_cfg = norm_cfg
+ if isinstance(norm_cfg, dict) and norm_cfg['type'] == 'GN':
+ assert self.conv_out_channels % norm_cfg['num_groups'] == 0
+
+ assert self.grid_points >= 4
+ self.grid_size = int(np.sqrt(self.grid_points))
+ if self.grid_size * self.grid_size != self.grid_points:
+ raise ValueError('grid_points must be a square number')
+
+ # the predicted heatmap is half of whole_map_size
+ if not isinstance(self.roi_feat_size, int):
+            raise ValueError('Only square RoIs are supported in Grid R-CNN')
+ self.whole_map_size = self.roi_feat_size * 4
+
+ # compute point-wise sub-regions
+ self.sub_regions = self.calc_sub_regions()
+
+ self.convs = []
+ for i in range(self.num_convs):
+ in_channels = (
+ self.in_channels if i == 0 else self.conv_out_channels)
+ stride = 2 if i == 0 else 1
+ padding = (self.conv_kernel_size - 1) // 2
+ self.convs.append(
+ ConvModule(
+ in_channels,
+ self.conv_out_channels,
+ self.conv_kernel_size,
+ stride=stride,
+ padding=padding,
+ conv_cfg=self.conv_cfg,
+ norm_cfg=self.norm_cfg,
+ bias=True))
+ self.convs = nn.Sequential(*self.convs)
+
+ self.deconv1 = nn.ConvTranspose2d(
+ self.conv_out_channels,
+ self.conv_out_channels,
+ kernel_size=deconv_kernel_size,
+ stride=2,
+ padding=(deconv_kernel_size - 2) // 2,
+ groups=grid_points)
+ self.norm1 = nn.GroupNorm(grid_points, self.conv_out_channels)
+ self.deconv2 = nn.ConvTranspose2d(
+ self.conv_out_channels,
+ grid_points,
+ kernel_size=deconv_kernel_size,
+ stride=2,
+ padding=(deconv_kernel_size - 2) // 2,
+ groups=grid_points)
+
+ # find the 4-neighbor of each grid point
+ self.neighbor_points = []
+ grid_size = self.grid_size
+ for i in range(grid_size): # i-th column
+ for j in range(grid_size): # j-th row
+ neighbors = []
+ if i > 0: # left: (i - 1, j)
+ neighbors.append((i - 1) * grid_size + j)
+ if j > 0: # up: (i, j - 1)
+ neighbors.append(i * grid_size + j - 1)
+ if j < grid_size - 1: # down: (i, j + 1)
+ neighbors.append(i * grid_size + j + 1)
+ if i < grid_size - 1: # right: (i + 1, j)
+ neighbors.append((i + 1) * grid_size + j)
+ self.neighbor_points.append(tuple(neighbors))
+ # total edges in the grid
+ self.num_edges = sum([len(p) for p in self.neighbor_points])
+
+ self.forder_trans = nn.ModuleList() # first-order feature transition
+ self.sorder_trans = nn.ModuleList() # second-order feature transition
+ for neighbors in self.neighbor_points:
+ fo_trans = nn.ModuleList()
+ so_trans = nn.ModuleList()
+ for _ in range(len(neighbors)):
+ # each transition module consists of a 5x5 depth-wise conv and
+ # 1x1 conv.
+ fo_trans.append(
+ nn.Sequential(
+ nn.Conv2d(
+ self.point_feat_channels,
+ self.point_feat_channels,
+ 5,
+ stride=1,
+ padding=2,
+ groups=self.point_feat_channels),
+ nn.Conv2d(self.point_feat_channels,
+ self.point_feat_channels, 1)))
+ so_trans.append(
+ nn.Sequential(
+ nn.Conv2d(
+ self.point_feat_channels,
+ self.point_feat_channels,
+ 5,
+ 1,
+ 2,
+ groups=self.point_feat_channels),
+ nn.Conv2d(self.point_feat_channels,
+ self.point_feat_channels, 1)))
+ self.forder_trans.append(fo_trans)
+ self.sorder_trans.append(so_trans)
+
+ self.loss_grid = build_loss(loss_grid)
+
+ def init_weights(self):
+ for m in self.modules():
+ if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
+ # TODO: compare mode = "fan_in" or "fan_out"
+ kaiming_init(m)
+ for m in self.modules():
+ if isinstance(m, nn.ConvTranspose2d):
+ normal_init(m, std=0.001)
+ nn.init.constant_(self.deconv2.bias, -np.log(0.99 / 0.01))
+
+ def forward(self, x):
+ assert x.shape[-1] == x.shape[-2] == self.roi_feat_size
+ # RoI feature transformation, downsample 2x
+ x = self.convs(x)
+
+ c = self.point_feat_channels
+ # first-order fusion
+ x_fo = [None for _ in range(self.grid_points)]
+ for i, points in enumerate(self.neighbor_points):
+ x_fo[i] = x[:, i * c:(i + 1) * c]
+ for j, point_idx in enumerate(points):
+ x_fo[i] = x_fo[i] + self.forder_trans[i][j](
+ x[:, point_idx * c:(point_idx + 1) * c])
+
+ # second-order fusion
+ x_so = [None for _ in range(self.grid_points)]
+ for i, points in enumerate(self.neighbor_points):
+ x_so[i] = x[:, i * c:(i + 1) * c]
+ for j, point_idx in enumerate(points):
+ x_so[i] = x_so[i] + self.sorder_trans[i][j](x_fo[point_idx])
+
+ # predicted heatmap with fused features
+ x2 = torch.cat(x_so, dim=1)
+ x2 = self.deconv1(x2)
+ x2 = F.relu(self.norm1(x2), inplace=True)
+ heatmap = self.deconv2(x2)
+
+ # predicted heatmap with original features (applicable during training)
+ if self.training:
+ x1 = x
+ x1 = self.deconv1(x1)
+ x1 = F.relu(self.norm1(x1), inplace=True)
+ heatmap_unfused = self.deconv2(x1)
+ else:
+ heatmap_unfused = heatmap
+
+ return dict(fused=heatmap, unfused=heatmap_unfused)
+
+ def calc_sub_regions(self):
+ """Compute point specific representation regions.
+
+ See Grid R-CNN Plus (https://arxiv.org/abs/1906.05688) for details.
+ """
+ # to make it consistent with the original implementation, half_size
+ # is computed as 2 * quarter_size, which is smaller
+ half_size = self.whole_map_size // 4 * 2
+ sub_regions = []
+ for i in range(self.grid_points):
+ x_idx = i // self.grid_size
+ y_idx = i % self.grid_size
+ if x_idx == 0:
+ sub_x1 = 0
+ elif x_idx == self.grid_size - 1:
+ sub_x1 = half_size
+ else:
+ ratio = x_idx / (self.grid_size - 1) - 0.25
+ sub_x1 = max(int(ratio * self.whole_map_size), 0)
+
+ if y_idx == 0:
+ sub_y1 = 0
+ elif y_idx == self.grid_size - 1:
+ sub_y1 = half_size
+ else:
+ ratio = y_idx / (self.grid_size - 1) - 0.25
+ sub_y1 = max(int(ratio * self.whole_map_size), 0)
+ sub_regions.append(
+ (sub_x1, sub_y1, sub_x1 + half_size, sub_y1 + half_size))
+ return sub_regions
+
+ def get_targets(self, sampling_results, rcnn_train_cfg):
+ # mix all samples (across images) together.
+ pos_bboxes = torch.cat([res.pos_bboxes for res in sampling_results],
+ dim=0).cpu()
+ pos_gt_bboxes = torch.cat(
+ [res.pos_gt_bboxes for res in sampling_results], dim=0).cpu()
+ assert pos_bboxes.shape == pos_gt_bboxes.shape
+
+ # expand pos_bboxes to 2x of original size
+ x1 = pos_bboxes[:, 0] - (pos_bboxes[:, 2] - pos_bboxes[:, 0]) / 2
+ y1 = pos_bboxes[:, 1] - (pos_bboxes[:, 3] - pos_bboxes[:, 1]) / 2
+ x2 = pos_bboxes[:, 2] + (pos_bboxes[:, 2] - pos_bboxes[:, 0]) / 2
+ y2 = pos_bboxes[:, 3] + (pos_bboxes[:, 3] - pos_bboxes[:, 1]) / 2
+ pos_bboxes = torch.stack([x1, y1, x2, y2], dim=-1)
+ pos_bbox_ws = (pos_bboxes[:, 2] - pos_bboxes[:, 0]).unsqueeze(-1)
+ pos_bbox_hs = (pos_bboxes[:, 3] - pos_bboxes[:, 1]).unsqueeze(-1)
+
+ num_rois = pos_bboxes.shape[0]
+ map_size = self.whole_map_size
+ # this is not the final target shape
+ targets = torch.zeros((num_rois, self.grid_points, map_size, map_size),
+ dtype=torch.float)
+
+ # pre-compute interpolation factors for all grid points.
+ # the first item is the factor of x-dim, and the second is y-dim.
+ # for a 9-point grid, factors are like (1, 0), (0.5, 0.5), (0, 1)
+ factors = []
+ for j in range(self.grid_points):
+ x_idx = j // self.grid_size
+ y_idx = j % self.grid_size
+ factors.append((1 - x_idx / (self.grid_size - 1),
+ 1 - y_idx / (self.grid_size - 1)))
+
+ radius = rcnn_train_cfg.pos_radius
+ radius2 = radius**2
+ for i in range(num_rois):
+ # ignore small bboxes
+ if (pos_bbox_ws[i] <= self.grid_size
+ or pos_bbox_hs[i] <= self.grid_size):
+ continue
+ # for each grid point, mark a small circle as positive
+ for j in range(self.grid_points):
+ factor_x, factor_y = factors[j]
+ gridpoint_x = factor_x * pos_gt_bboxes[i, 0] + (
+ 1 - factor_x) * pos_gt_bboxes[i, 2]
+ gridpoint_y = factor_y * pos_gt_bboxes[i, 1] + (
+ 1 - factor_y) * pos_gt_bboxes[i, 3]
+
+ cx = int((gridpoint_x - pos_bboxes[i, 0]) / pos_bbox_ws[i] *
+ map_size)
+ cy = int((gridpoint_y - pos_bboxes[i, 1]) / pos_bbox_hs[i] *
+ map_size)
+
+ for x in range(cx - radius, cx + radius + 1):
+ for y in range(cy - radius, cy + radius + 1):
+ if x >= 0 and x < map_size and y >= 0 and y < map_size:
+ if (x - cx)**2 + (y - cy)**2 <= radius2:
+ targets[i, j, y, x] = 1
+ # reduce the target heatmap size by a half
+ # proposed in Grid R-CNN Plus (https://arxiv.org/abs/1906.05688).
+ sub_targets = []
+ for i in range(self.grid_points):
+ sub_x1, sub_y1, sub_x2, sub_y2 = self.sub_regions[i]
+ sub_targets.append(targets[:, [i], sub_y1:sub_y2, sub_x1:sub_x2])
+ sub_targets = torch.cat(sub_targets, dim=1)
+ sub_targets = sub_targets.to(sampling_results[0].pos_bboxes.device)
+ return sub_targets
+
+ def loss(self, grid_pred, grid_targets):
+ loss_fused = self.loss_grid(grid_pred['fused'], grid_targets)
+ loss_unfused = self.loss_grid(grid_pred['unfused'], grid_targets)
+ loss_grid = loss_fused + loss_unfused
+ return dict(loss_grid=loss_grid)
+
+ def get_bboxes(self, det_bboxes, grid_pred, img_metas):
+ # TODO: refactoring
+ assert det_bboxes.shape[0] == grid_pred.shape[0]
+ det_bboxes = det_bboxes.cpu()
+ cls_scores = det_bboxes[:, [4]]
+ det_bboxes = det_bboxes[:, :4]
+ grid_pred = grid_pred.sigmoid().cpu()
+
+ R, c, h, w = grid_pred.shape
+ half_size = self.whole_map_size // 4 * 2
+ assert h == w == half_size
+ assert c == self.grid_points
+
+ # find the point with max scores in the half-sized heatmap
+ grid_pred = grid_pred.view(R * c, h * w)
+ pred_scores, pred_position = grid_pred.max(dim=1)
+ xs = pred_position % w
+ ys = pred_position // w
+
+ # get the position in the whole heatmap instead of half-sized heatmap
+ for i in range(self.grid_points):
+ xs[i::self.grid_points] += self.sub_regions[i][0]
+ ys[i::self.grid_points] += self.sub_regions[i][1]
+
+ # reshape to (num_rois, grid_points)
+ pred_scores, xs, ys = tuple(
+ map(lambda x: x.view(R, c), [pred_scores, xs, ys]))
+
+ # get expanded pos_bboxes
+ widths = (det_bboxes[:, 2] - det_bboxes[:, 0]).unsqueeze(-1)
+ heights = (det_bboxes[:, 3] - det_bboxes[:, 1]).unsqueeze(-1)
+ x1 = (det_bboxes[:, 0, None] - widths / 2)
+ y1 = (det_bboxes[:, 1, None] - heights / 2)
+ # map the grid point to the absolute coordinates
+ abs_xs = (xs.float() + 0.5) / w * widths + x1
+ abs_ys = (ys.float() + 0.5) / h * heights + y1
+
+ # get the grid points indices that fall on the bbox boundaries
+ x1_inds = [i for i in range(self.grid_size)]
+ y1_inds = [i * self.grid_size for i in range(self.grid_size)]
+ x2_inds = [
+ self.grid_points - self.grid_size + i
+ for i in range(self.grid_size)
+ ]
+ y2_inds = [(i + 1) * self.grid_size - 1 for i in range(self.grid_size)]
+
+ # voting of all grid points on some boundary
+ bboxes_x1 = (abs_xs[:, x1_inds] * pred_scores[:, x1_inds]).sum(
+ dim=1, keepdim=True) / (
+ pred_scores[:, x1_inds].sum(dim=1, keepdim=True))
+ bboxes_y1 = (abs_ys[:, y1_inds] * pred_scores[:, y1_inds]).sum(
+ dim=1, keepdim=True) / (
+ pred_scores[:, y1_inds].sum(dim=1, keepdim=True))
+ bboxes_x2 = (abs_xs[:, x2_inds] * pred_scores[:, x2_inds]).sum(
+ dim=1, keepdim=True) / (
+ pred_scores[:, x2_inds].sum(dim=1, keepdim=True))
+ bboxes_y2 = (abs_ys[:, y2_inds] * pred_scores[:, y2_inds]).sum(
+ dim=1, keepdim=True) / (
+ pred_scores[:, y2_inds].sum(dim=1, keepdim=True))
+
+ bbox_res = torch.cat(
+ [bboxes_x1, bboxes_y1, bboxes_x2, bboxes_y2, cls_scores], dim=1)
+ bbox_res[:, [0, 2]].clamp_(min=0, max=img_metas[0]['img_shape'][1])
+ bbox_res[:, [1, 3]].clamp_(min=0, max=img_metas[0]['img_shape'][0])
+
+ return bbox_res
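+
+
+if __name__ == '__main__':
+    # Illustrative sketch only, not part of the upstream mmdet code: the
+    # 4-neighbour bookkeeping built in __init__ above, shown for the default
+    # 3x3 (9-point) grid.
+    grid_size = 3
+    neighbor_points = []
+    for i in range(grid_size):          # i-th column
+        for j in range(grid_size):      # j-th row
+            neighbors = []
+            if i > 0:                   # left
+                neighbors.append((i - 1) * grid_size + j)
+            if j > 0:                   # up
+                neighbors.append(i * grid_size + j - 1)
+            if j < grid_size - 1:       # down
+                neighbors.append(i * grid_size + j + 1)
+            if i < grid_size - 1:       # right
+                neighbors.append((i + 1) * grid_size + j)
+            neighbor_points.append(tuple(neighbors))
+    # corner points have 2 neighbours, edge points 3, the centre point 4
+    assert sorted(len(p) for p in neighbor_points) == [2, 2, 2, 2, 3, 3, 3, 3, 4]
+    print(sum(len(p) for p in neighbor_points))     # num_edges = 24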
diff --git a/annotator/uniformer/mmdet/models/roi_heads/mask_heads/htc_mask_head.py b/annotator/uniformer/mmdet/models/roi_heads/mask_heads/htc_mask_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..330b778ebad8d48d55d09ddd42baa70ec10ae463
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/roi_heads/mask_heads/htc_mask_head.py
@@ -0,0 +1,43 @@
+from mmcv.cnn import ConvModule
+
+from mmdet.models.builder import HEADS
+from .fcn_mask_head import FCNMaskHead
+
+
+@HEADS.register_module()
+class HTCMaskHead(FCNMaskHead):
+
+ def __init__(self, with_conv_res=True, *args, **kwargs):
+ super(HTCMaskHead, self).__init__(*args, **kwargs)
+ self.with_conv_res = with_conv_res
+ if self.with_conv_res:
+ self.conv_res = ConvModule(
+ self.conv_out_channels,
+ self.conv_out_channels,
+ 1,
+ conv_cfg=self.conv_cfg,
+ norm_cfg=self.norm_cfg)
+
+ def init_weights(self):
+ super(HTCMaskHead, self).init_weights()
+ if self.with_conv_res:
+ self.conv_res.init_weights()
+
+ def forward(self, x, res_feat=None, return_logits=True, return_feat=True):
+ if res_feat is not None:
+ assert self.with_conv_res
+ res_feat = self.conv_res(res_feat)
+ x = x + res_feat
+ for conv in self.convs:
+ x = conv(x)
+ res_feat = x
+ outs = []
+ if return_logits:
+ x = self.upsample(x)
+ if self.upsample_method == 'deconv':
+ x = self.relu(x)
+ mask_pred = self.conv_logits(x)
+ outs.append(mask_pred)
+ if return_feat:
+ outs.append(res_feat)
+ return outs if len(outs) > 1 else outs[0]
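+
+
+if __name__ == '__main__':
+    # Illustrative sketch only, not part of the upstream mmdet code: the mask
+    # information flow that HTCMaskHead.forward() supports. Stage i receives
+    # the RoI features plus a 1x1-conv projection of the previous stage's
+    # residual feature; plain convs stand in for the real heads here.
+    import torch
+    import torch.nn as nn
+    mask_feats = torch.rand(2, 256, 14, 14)
+    stage_convs = [nn.Conv2d(256, 256, 3, padding=1) for _ in range(3)]
+    res_convs = [nn.Conv2d(256, 256, 1) for _ in range(3)]
+    last_feat = None
+    for conv, conv_res in zip(stage_convs, res_convs):
+        x = mask_feats
+        if last_feat is not None:
+            x = x + conv_res(last_feat)   # inject previous-stage feature
+        last_feat = conv(x)               # becomes res_feat for next stage
+    assert last_feat.shape == (2, 256, 14, 14)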
diff --git a/annotator/uniformer/mmdet/models/roi_heads/mask_heads/mask_point_head.py b/annotator/uniformer/mmdet/models/roi_heads/mask_heads/mask_point_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..fb92903a9488a44b984a489a354d838cc88f8ad4
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/roi_heads/mask_heads/mask_point_head.py
@@ -0,0 +1,300 @@
+# Modified from https://github.com/facebookresearch/detectron2/tree/master/projects/PointRend/point_head/point_head.py # noqa
+
+import torch
+import torch.nn as nn
+from mmcv.cnn import ConvModule, normal_init
+from mmcv.ops import point_sample, rel_roi_point_to_rel_img_point
+
+from mmdet.models.builder import HEADS, build_loss
+
+
+@HEADS.register_module()
+class MaskPointHead(nn.Module):
+    """A mask point head used in PointRend.
+
+    ``MaskPointHead`` uses a shared multi-layer perceptron (equivalent to
+    nn.Conv1d) to predict the logits of input points. The fine-grained
+    feature and the coarse feature are concatenated for prediction.
+
+ Args:
+ num_fcs (int): Number of fc layers in the head. Default: 3.
+ in_channels (int): Number of input channels. Default: 256.
+ fc_channels (int): Number of fc channels. Default: 256.
+ num_classes (int): Number of classes for logits. Default: 80.
+        class_agnostic (bool): Whether to use class-agnostic classification.
+            If so, the output channels of logits will be 1. Default: False.
+        coarse_pred_each_layer (bool): Whether to concatenate the coarse
+            feature with the output of each fc layer. Default: True.
+ conv_cfg (dict | None): Dictionary to construct and config conv layer.
+ Default: dict(type='Conv1d'))
+ norm_cfg (dict | None): Dictionary to construct and config norm layer.
+ Default: None.
+ loss_point (dict): Dictionary to construct and config loss layer of
+ point head. Default: dict(type='CrossEntropyLoss', use_mask=True,
+ loss_weight=1.0).
+ """
+
+ def __init__(self,
+ num_classes,
+ num_fcs=3,
+ in_channels=256,
+ fc_channels=256,
+ class_agnostic=False,
+ coarse_pred_each_layer=True,
+ conv_cfg=dict(type='Conv1d'),
+ norm_cfg=None,
+ act_cfg=dict(type='ReLU'),
+ loss_point=dict(
+ type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)):
+ super().__init__()
+ self.num_fcs = num_fcs
+ self.in_channels = in_channels
+ self.fc_channels = fc_channels
+ self.num_classes = num_classes
+ self.class_agnostic = class_agnostic
+ self.coarse_pred_each_layer = coarse_pred_each_layer
+ self.conv_cfg = conv_cfg
+ self.norm_cfg = norm_cfg
+ self.loss_point = build_loss(loss_point)
+
+ fc_in_channels = in_channels + num_classes
+ self.fcs = nn.ModuleList()
+ for _ in range(num_fcs):
+ fc = ConvModule(
+ fc_in_channels,
+ fc_channels,
+ kernel_size=1,
+ stride=1,
+ padding=0,
+ conv_cfg=conv_cfg,
+ norm_cfg=norm_cfg,
+ act_cfg=act_cfg)
+ self.fcs.append(fc)
+ fc_in_channels = fc_channels
+ fc_in_channels += num_classes if self.coarse_pred_each_layer else 0
+
+ out_channels = 1 if self.class_agnostic else self.num_classes
+ self.fc_logits = nn.Conv1d(
+ fc_in_channels, out_channels, kernel_size=1, stride=1, padding=0)
+
+ def init_weights(self):
+ """Initialize last classification layer of MaskPointHead, conv layers
+ are already initialized by ConvModule."""
+ normal_init(self.fc_logits, std=0.001)
+
+ def forward(self, fine_grained_feats, coarse_feats):
+        """Classify each point based on fine-grained and coarse feats.
+
+ Args:
+ fine_grained_feats (Tensor): Fine grained feature sampled from FPN,
+ shape (num_rois, in_channels, num_points).
+ coarse_feats (Tensor): Coarse feature sampled from CoarseMaskHead,
+ shape (num_rois, num_classes, num_points).
+
+ Returns:
+ Tensor: Point classification results,
+ shape (num_rois, num_class, num_points).
+ """
+
+ x = torch.cat([fine_grained_feats, coarse_feats], dim=1)
+ for fc in self.fcs:
+ x = fc(x)
+ if self.coarse_pred_each_layer:
+ x = torch.cat((x, coarse_feats), dim=1)
+ return self.fc_logits(x)
+
+ def get_targets(self, rois, rel_roi_points, sampling_results, gt_masks,
+ cfg):
+ """Get training targets of MaskPointHead for all images.
+
+ Args:
+ rois (Tensor): Region of Interest, shape (num_rois, 5).
+ rel_roi_points: Points coordinates relative to RoI, shape
+ (num_rois, num_points, 2).
+ sampling_results (:obj:`SamplingResult`): Sampling result after
+ sampling and assignment.
+            gt_masks (Tensor): Ground truth segmentation masks of
+ corresponding boxes, shape (num_rois, height, width).
+ cfg (dict): Training cfg.
+
+ Returns:
+ Tensor: Point target, shape (num_rois, num_points).
+ """
+
+ num_imgs = len(sampling_results)
+ rois_list = []
+ rel_roi_points_list = []
+ for batch_ind in range(num_imgs):
+ inds = (rois[:, 0] == batch_ind)
+ rois_list.append(rois[inds])
+ rel_roi_points_list.append(rel_roi_points[inds])
+ pos_assigned_gt_inds_list = [
+ res.pos_assigned_gt_inds for res in sampling_results
+ ]
+ cfg_list = [cfg for _ in range(num_imgs)]
+
+ point_targets = map(self._get_target_single, rois_list,
+ rel_roi_points_list, pos_assigned_gt_inds_list,
+ gt_masks, cfg_list)
+ point_targets = list(point_targets)
+
+ if len(point_targets) > 0:
+ point_targets = torch.cat(point_targets)
+
+ return point_targets
+
+ def _get_target_single(self, rois, rel_roi_points, pos_assigned_gt_inds,
+ gt_masks, cfg):
+ """Get training target of MaskPointHead for each image."""
+ num_pos = rois.size(0)
+ num_points = cfg.num_points
+ if num_pos > 0:
+ gt_masks_th = (
+ gt_masks.to_tensor(rois.dtype, rois.device).index_select(
+ 0, pos_assigned_gt_inds))
+ gt_masks_th = gt_masks_th.unsqueeze(1)
+ rel_img_points = rel_roi_point_to_rel_img_point(
+ rois, rel_roi_points, gt_masks_th.shape[2:])
+ point_targets = point_sample(gt_masks_th,
+ rel_img_points).squeeze(1)
+ else:
+ point_targets = rois.new_zeros((0, num_points))
+ return point_targets
+
+ def loss(self, point_pred, point_targets, labels):
+ """Calculate loss for MaskPointHead.
+
+ Args:
+            point_pred (Tensor): Point prediction result, shape
+ (num_rois, num_classes, num_points).
+ point_targets (Tensor): Point targets, shape (num_roi, num_points).
+ labels (Tensor): Class label of corresponding boxes,
+ shape (num_rois, )
+
+ Returns:
+ dict[str, Tensor]: a dictionary of point loss components
+ """
+
+ loss = dict()
+ if self.class_agnostic:
+ loss_point = self.loss_point(point_pred, point_targets,
+ torch.zeros_like(labels))
+ else:
+ loss_point = self.loss_point(point_pred, point_targets, labels)
+ loss['loss_point'] = loss_point
+ return loss
+
+ def _get_uncertainty(self, mask_pred, labels):
+ """Estimate uncertainty based on pred logits.
+
+        We estimate uncertainty as the L1 distance between 0.0 and the logit
+        prediction in 'mask_pred' for the foreground class given by ``labels``.
+
+ Args:
+            mask_pred (Tensor): mask prediction logits, shape (num_rois,
+ num_classes, mask_height, mask_width).
+
+ labels (list[Tensor]): Either predicted or ground truth label for
+ each predicted mask, of length num_rois.
+
+ Returns:
+ scores (Tensor): Uncertainty scores with the most uncertain
+ locations having the highest uncertainty score,
+ shape (num_rois, 1, mask_height, mask_width)
+ """
+ if mask_pred.shape[1] == 1:
+ gt_class_logits = mask_pred.clone()
+ else:
+ inds = torch.arange(mask_pred.shape[0], device=mask_pred.device)
+ gt_class_logits = mask_pred[inds, labels].unsqueeze(1)
+ return -torch.abs(gt_class_logits)
+
+ def get_roi_rel_points_train(self, mask_pred, labels, cfg):
+ """Get ``num_points`` most uncertain points with random points during
+ train.
+
+ Sample points in [0, 1] x [0, 1] coordinate space based on their
+ uncertainty. The uncertainties are calculated for each point using
+ '_get_uncertainty()' function that takes point's logit prediction as
+ input.
+
+ Args:
+ mask_pred (Tensor): A tensor of shape (num_rois, num_classes,
+ mask_height, mask_width) for class-specific or class-agnostic
+ prediction.
+ labels (list): The ground truth class for each instance.
+ cfg (dict): Training config of point head.
+
+ Returns:
+ point_coords (Tensor): A tensor of shape (num_rois, num_points, 2)
+                that contains the coordinates of the sampled points.
+ """
+ num_points = cfg.num_points
+ oversample_ratio = cfg.oversample_ratio
+ importance_sample_ratio = cfg.importance_sample_ratio
+ assert oversample_ratio >= 1
+ assert 0 <= importance_sample_ratio <= 1
+ batch_size = mask_pred.shape[0]
+ num_sampled = int(num_points * oversample_ratio)
+ point_coords = torch.rand(
+ batch_size, num_sampled, 2, device=mask_pred.device)
+ point_logits = point_sample(mask_pred, point_coords)
+ # It is crucial to calculate uncertainty based on the sampled
+ # prediction value for the points. Calculating uncertainties of the
+ # coarse predictions first and sampling them for points leads to
+ # incorrect results. To illustrate this: assume the uncertainty
+ # function is func(logits) = -abs(logits). A sampled point between two
+ # coarse predictions with -1 and 1 logits has 0 logits, and therefore
+ # 0 uncertainty, i.e. it is maximally uncertain. However, if we
+ # calculate uncertainties for the coarse predictions first, both will
+ # have -1 uncertainty, and the sampled point will also get -1.
+ point_uncertainties = self._get_uncertainty(point_logits, labels)
+ num_uncertain_points = int(importance_sample_ratio * num_points)
+ num_random_points = num_points - num_uncertain_points
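+ # e.g. with the typical PointRend training setting (num_points=196,
+ # oversample_ratio=3, importance_sample_ratio=0.75), 588 candidate
+ # points are sampled, the 147 most uncertain of them are selected by
+ # the top-k below, and the remaining 49 are drawn uniformly at random.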
+ idx = torch.topk(
+ point_uncertainties[:, 0, :], k=num_uncertain_points, dim=1)[1]
+ shift = num_sampled * torch.arange(
+ batch_size, dtype=torch.long, device=mask_pred.device)
+ idx += shift[:, None]
+ point_coords = point_coords.view(-1, 2)[idx.view(-1), :].view(
+ batch_size, num_uncertain_points, 2)
+ if num_random_points > 0:
+ rand_roi_coords = torch.rand(
+ batch_size, num_random_points, 2, device=mask_pred.device)
+ point_coords = torch.cat((point_coords, rand_roi_coords), dim=1)
+ return point_coords
+
+ def get_roi_rel_points_test(self, mask_pred, pred_label, cfg):
+ """Get ``num_points`` most uncertain points during test.
+
+ Args:
+ mask_pred (Tensor): A tensor of shape (num_rois, num_classes,
+ mask_height, mask_width) for class-specific or class-agnostic
+ prediction.
+ pred_label (list): The predicted class for each instance.
+ cfg (dict): Testing config of point head.
+
+ Returns:
+ point_indices (Tensor): A tensor of shape (num_rois, num_points)
+ that contains indices from [0, mask_height x mask_width) of the
+ most uncertain points.
+ point_coords (Tensor): A tensor of shape (num_rois, num_points, 2)
+ that contains [0, 1] x [0, 1] normalized coordinates of the
+ most uncertain points from the [mask_height, mask_width] grid.
+ """
+ num_points = cfg.subdivision_num_points
+ uncertainty_map = self._get_uncertainty(mask_pred, pred_label)
+ num_rois, _, mask_height, mask_width = uncertainty_map.shape
+ h_step = 1.0 / mask_height
+ w_step = 1.0 / mask_width
+
+ uncertainty_map = uncertainty_map.view(num_rois,
+ mask_height * mask_width)
+ num_points = min(mask_height * mask_width, num_points)
+ point_indices = uncertainty_map.topk(num_points, dim=1)[1]
+ point_coords = uncertainty_map.new_zeros(num_rois, num_points, 2)
+ point_coords[:, :, 0] = w_step / 2.0 + (point_indices %
+ mask_width).float() * w_step
+ point_coords[:, :, 1] = h_step / 2.0 + (point_indices //
+ mask_width).float() * h_step
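+ # point_indices index into the flattened mask_height * mask_width
+ # grid; the coordinates above are the normalized centers of the
+ # selected cells: x = (idx % W + 0.5) * w_step,
+ # y = (idx // W + 0.5) * h_step.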
+ return point_indices, point_coords
diff --git a/annotator/uniformer/mmdet/models/roi_heads/mask_heads/maskiou_head.py b/annotator/uniformer/mmdet/models/roi_heads/mask_heads/maskiou_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..39bcd6a7dbdb089cd19cef811038e0b6a80ab89a
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/roi_heads/mask_heads/maskiou_head.py
@@ -0,0 +1,186 @@
+import numpy as np
+import torch
+import torch.nn as nn
+from mmcv.cnn import Conv2d, Linear, MaxPool2d, kaiming_init, normal_init
+from mmcv.runner import force_fp32
+from torch.nn.modules.utils import _pair
+
+from mmdet.models.builder import HEADS, build_loss
+
+
+@HEADS.register_module()
+class MaskIoUHead(nn.Module):
+ """Mask IoU Head.
+
+ This head predicts the IoU of predicted masks and corresponding gt masks.
+ """
+
+ def __init__(self,
+ num_convs=4,
+ num_fcs=2,
+ roi_feat_size=14,
+ in_channels=256,
+ conv_out_channels=256,
+ fc_out_channels=1024,
+ num_classes=80,
+ loss_iou=dict(type='MSELoss', loss_weight=0.5)):
+ super(MaskIoUHead, self).__init__()
+ self.in_channels = in_channels
+ self.conv_out_channels = conv_out_channels
+ self.fc_out_channels = fc_out_channels
+ self.num_classes = num_classes
+ self.fp16_enabled = False
+
+ self.convs = nn.ModuleList()
+ for i in range(num_convs):
+ if i == 0:
+ # concatenation of mask feature and mask prediction
+ in_channels = self.in_channels + 1
+ else:
+ in_channels = self.conv_out_channels
+ stride = 2 if i == num_convs - 1 else 1
+ self.convs.append(
+ Conv2d(
+ in_channels,
+ self.conv_out_channels,
+ 3,
+ stride=stride,
+ padding=1))
+
+ roi_feat_size = _pair(roi_feat_size)
+ pooled_area = (roi_feat_size[0] // 2) * (roi_feat_size[1] // 2)
+ self.fcs = nn.ModuleList()
+ for i in range(num_fcs):
+ in_channels = (
+ self.conv_out_channels *
+ pooled_area if i == 0 else self.fc_out_channels)
+ self.fcs.append(Linear(in_channels, self.fc_out_channels))
+
+ self.fc_mask_iou = Linear(self.fc_out_channels, self.num_classes)
+ self.relu = nn.ReLU()
+ self.max_pool = MaxPool2d(2, 2)
+ self.loss_iou = build_loss(loss_iou)
+
+ def init_weights(self):
+ for conv in self.convs:
+ kaiming_init(conv)
+ for fc in self.fcs:
+ kaiming_init(
+ fc,
+ a=1,
+ mode='fan_in',
+ nonlinearity='leaky_relu',
+ distribution='uniform')
+ normal_init(self.fc_mask_iou, std=0.01)
+
+ def forward(self, mask_feat, mask_pred):
+ mask_pred = mask_pred.sigmoid()
+ mask_pred_pooled = self.max_pool(mask_pred.unsqueeze(1))
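+ # The mask prediction is max-pooled with stride 2 so its spatial size
+ # matches mask_feat before concatenation (e.g. a 28x28 mask
+ # prediction is reduced to 14x14 for the default roi_feat_size=14).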
+
+ x = torch.cat((mask_feat, mask_pred_pooled), 1)
+
+ for conv in self.convs:
+ x = self.relu(conv(x))
+ x = x.flatten(1)
+ for fc in self.fcs:
+ x = self.relu(fc(x))
+ mask_iou = self.fc_mask_iou(x)
+ return mask_iou
+
+ @force_fp32(apply_to=('mask_iou_pred', ))
+ def loss(self, mask_iou_pred, mask_iou_targets):
+ pos_inds = mask_iou_targets > 0
+ if pos_inds.sum() > 0:
+ loss_mask_iou = self.loss_iou(mask_iou_pred[pos_inds],
+ mask_iou_targets[pos_inds])
+ else:
+ loss_mask_iou = mask_iou_pred.sum() * 0
+ return dict(loss_mask_iou=loss_mask_iou)
+
+ @force_fp32(apply_to=('mask_pred', ))
+ def get_targets(self, sampling_results, gt_masks, mask_pred, mask_targets,
+ rcnn_train_cfg):
+ """Compute target of mask IoU.
+
+ Mask IoU target is the IoU between the predicted mask (inside a bbox)
+ and the gt mask of the corresponding instance (the whole instance).
+ The intersection area is computed inside the bbox, and the gt mask
+ area is estimated in two steps: first we compute the gt area inside
+ the bbox, then divide it by the ratio of the gt area inside the bbox
+ to the gt area of the whole instance.
+
+ Args:
+ sampling_results (list[:obj:`SamplingResult`]): sampling results.
+ gt_masks (BitmapMask | PolygonMask): Gt masks (the whole instance)
+ of each image, with the same shape of the input image.
+ mask_pred (Tensor): Predicted masks of each positive proposal,
+ shape (num_pos, h, w).
+ mask_targets (Tensor): Gt mask of each positive proposal,
+ binary map of the shape (num_pos, h, w).
+ rcnn_train_cfg (dict): Training config for R-CNN part.
+
+ Returns:
+ Tensor: mask iou target (length == num positive).
+ """
+ pos_proposals = [res.pos_bboxes for res in sampling_results]
+ pos_assigned_gt_inds = [
+ res.pos_assigned_gt_inds for res in sampling_results
+ ]
+
+ # compute the area ratio of gt areas inside the proposals and
+ # the whole instance
+ area_ratios = map(self._get_area_ratio, pos_proposals,
+ pos_assigned_gt_inds, gt_masks)
+ area_ratios = torch.cat(list(area_ratios))
+ assert mask_targets.size(0) == area_ratios.size(0)
+
+ mask_pred = (mask_pred > rcnn_train_cfg.mask_thr_binary).float()
+ mask_pred_areas = mask_pred.sum((-1, -2))
+
+ # mask_pred and mask_targets are binary maps
+ overlap_areas = (mask_pred * mask_targets).sum((-1, -2))
+
+ # compute the mask area of the whole instance
+ gt_full_areas = mask_targets.sum((-1, -2)) / (area_ratios + 1e-7)
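+ # area_ratios = (gt area inside the bbox) / (gt area of the whole
+ # instance), so dividing the in-bbox gt area by it recovers an
+ # estimate of the full-instance gt mask area used in the union below.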
+
+ mask_iou_targets = overlap_areas / (
+ mask_pred_areas + gt_full_areas - overlap_areas)
+ return mask_iou_targets
+
+ def _get_area_ratio(self, pos_proposals, pos_assigned_gt_inds, gt_masks):
+ """Compute area ratio of the gt mask inside the proposal and the gt
+ mask of the corresponding instance."""
+ num_pos = pos_proposals.size(0)
+ if num_pos > 0:
+ area_ratios = []
+ proposals_np = pos_proposals.cpu().numpy()
+ pos_assigned_gt_inds = pos_assigned_gt_inds.cpu().numpy()
+ # compute mask areas of gt instances (batch processing for speedup)
+ gt_instance_mask_area = gt_masks.areas
+ for i in range(num_pos):
+ gt_mask = gt_masks[pos_assigned_gt_inds[i]]
+
+ # crop the gt mask inside the proposal
+ bbox = proposals_np[i, :].astype(np.int32)
+ gt_mask_in_proposal = gt_mask.crop(bbox)
+
+ ratio = gt_mask_in_proposal.areas[0] / (
+ gt_instance_mask_area[pos_assigned_gt_inds[i]] + 1e-7)
+ area_ratios.append(ratio)
+ area_ratios = torch.from_numpy(np.stack(area_ratios)).float().to(
+ pos_proposals.device)
+ else:
+ area_ratios = pos_proposals.new_zeros((0, ))
+ return area_ratios
+
+ @force_fp32(apply_to=('mask_iou_pred', ))
+ def get_mask_scores(self, mask_iou_pred, det_bboxes, det_labels):
+ """Get the mask scores.
+
+ mask_score = bbox_score * mask_iou
+ """
+ inds = range(det_labels.size(0))
+ mask_scores = mask_iou_pred[inds, det_labels] * det_bboxes[inds, -1]
+ mask_scores = mask_scores.cpu().numpy()
+ det_labels = det_labels.cpu().numpy()
+ return [mask_scores[det_labels == i] for i in range(self.num_classes)]
diff --git a/annotator/uniformer/mmdet/models/roi_heads/mask_heads/scnet_mask_head.py b/annotator/uniformer/mmdet/models/roi_heads/mask_heads/scnet_mask_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..983a2d9db71a3b2b4980996725fdafb0b412b413
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/roi_heads/mask_heads/scnet_mask_head.py
@@ -0,0 +1,27 @@
+from mmdet.models.builder import HEADS
+from mmdet.models.utils import ResLayer, SimplifiedBasicBlock
+from .fcn_mask_head import FCNMaskHead
+
+
+@HEADS.register_module()
+class SCNetMaskHead(FCNMaskHead):
+ """Mask head for `SCNet `_.
+
+ Args:
+ conv_to_res (bool, optional): if True, change the conv layers to
+ ``SimplifiedBasicBlock``.
+ """
+
+ def __init__(self, conv_to_res=True, **kwargs):
+ super(SCNetMaskHead, self).__init__(**kwargs)
+ self.conv_to_res = conv_to_res
+ if conv_to_res:
+ assert self.conv_kernel_size == 3
+ self.num_res_blocks = self.num_convs // 2
+ self.convs = ResLayer(
+ SimplifiedBasicBlock,
+ self.in_channels,
+ self.conv_out_channels,
+ self.num_res_blocks,
+ conv_cfg=self.conv_cfg,
+ norm_cfg=self.norm_cfg)
diff --git a/annotator/uniformer/mmdet/models/roi_heads/mask_heads/scnet_semantic_head.py b/annotator/uniformer/mmdet/models/roi_heads/mask_heads/scnet_semantic_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..df85a0112d27d97301fff56189f99bee0bf8efa5
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/roi_heads/mask_heads/scnet_semantic_head.py
@@ -0,0 +1,27 @@
+from mmdet.models.builder import HEADS
+from mmdet.models.utils import ResLayer, SimplifiedBasicBlock
+from .fused_semantic_head import FusedSemanticHead
+
+
+@HEADS.register_module()
+class SCNetSemanticHead(FusedSemanticHead):
+ """Mask head for `SCNet `_.
+
+ Args:
+ conv_to_res (bool, optional): if True, change the conv layers to
+ ``SimplifiedBasicBlock``.
+ """
+
+ def __init__(self, conv_to_res=True, **kwargs):
+ super(SCNetSemanticHead, self).__init__(**kwargs)
+ self.conv_to_res = conv_to_res
+ if self.conv_to_res:
+ num_res_blocks = self.num_convs // 2
+ self.convs = ResLayer(
+ SimplifiedBasicBlock,
+ self.in_channels,
+ self.conv_out_channels,
+ num_res_blocks,
+ conv_cfg=self.conv_cfg,
+ norm_cfg=self.norm_cfg)
+ self.num_convs = num_res_blocks
diff --git a/annotator/uniformer/mmdet/models/roi_heads/mask_scoring_roi_head.py b/annotator/uniformer/mmdet/models/roi_heads/mask_scoring_roi_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..c6e55c7752209cb5c15eab689ad9e8ac1fef1b66
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/roi_heads/mask_scoring_roi_head.py
@@ -0,0 +1,122 @@
+import torch
+
+from mmdet.core import bbox2roi
+from ..builder import HEADS, build_head
+from .standard_roi_head import StandardRoIHead
+
+
+@HEADS.register_module()
+class MaskScoringRoIHead(StandardRoIHead):
+ """Mask Scoring RoIHead for Mask Scoring RCNN.
+
+ https://arxiv.org/abs/1903.00241
+ """
+
+ def __init__(self, mask_iou_head, **kwargs):
+ assert mask_iou_head is not None
+ super(MaskScoringRoIHead, self).__init__(**kwargs)
+ self.mask_iou_head = build_head(mask_iou_head)
+
+ def init_weights(self, pretrained):
+ """Initialize the weights in head.
+
+ Args:
+ pretrained (str, optional): Path to pre-trained weights.
+ Defaults to None.
+ """
+ super(MaskScoringRoIHead, self).init_weights(pretrained)
+ self.mask_iou_head.init_weights()
+
+ def _mask_forward_train(self, x, sampling_results, bbox_feats, gt_masks,
+ img_metas):
+ """Run forward function and calculate loss for Mask head in
+ training."""
+ pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results])
+ mask_results = super(MaskScoringRoIHead,
+ self)._mask_forward_train(x, sampling_results,
+ bbox_feats, gt_masks,
+ img_metas)
+ if mask_results['loss_mask'] is None:
+ return mask_results
+
+ # mask iou head forward and loss
+ pos_mask_pred = mask_results['mask_pred'][
+ range(mask_results['mask_pred'].size(0)), pos_labels]
+ mask_iou_pred = self.mask_iou_head(mask_results['mask_feats'],
+ pos_mask_pred)
+ pos_mask_iou_pred = mask_iou_pred[range(mask_iou_pred.size(0)),
+ pos_labels]
+
+ mask_iou_targets = self.mask_iou_head.get_targets(
+ sampling_results, gt_masks, pos_mask_pred,
+ mask_results['mask_targets'], self.train_cfg)
+ loss_mask_iou = self.mask_iou_head.loss(pos_mask_iou_pred,
+ mask_iou_targets)
+ mask_results['loss_mask'].update(loss_mask_iou)
+ return mask_results
+
+ def simple_test_mask(self,
+ x,
+ img_metas,
+ det_bboxes,
+ det_labels,
+ rescale=False):
+ """Obtain mask prediction without augmentation."""
+ # image shapes of images in the batch
+ ori_shapes = tuple(meta['ori_shape'] for meta in img_metas)
+ scale_factors = tuple(meta['scale_factor'] for meta in img_metas)
+
+ num_imgs = len(det_bboxes)
+ if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes):
+ num_classes = self.mask_head.num_classes
+ segm_results = [[[] for _ in range(num_classes)]
+ for _ in range(num_imgs)]
+ mask_scores = [[[] for _ in range(num_classes)]
+ for _ in range(num_imgs)]
+ else:
+ # if det_bboxes is rescaled to the original image size, we need to
+ # rescale it back to the testing scale to obtain RoIs.
+ if rescale and not isinstance(scale_factors[0], float):
+ scale_factors = [
+ torch.from_numpy(scale_factor).to(det_bboxes[0].device)
+ for scale_factor in scale_factors
+ ]
+ _bboxes = [
+ det_bboxes[i][:, :4] *
+ scale_factors[i] if rescale else det_bboxes[i]
+ for i in range(num_imgs)
+ ]
+ mask_rois = bbox2roi(_bboxes)
+ mask_results = self._mask_forward(x, mask_rois)
+ concat_det_labels = torch.cat(det_labels)
+ # get mask scores with mask iou head
+ mask_feats = mask_results['mask_feats']
+ mask_pred = mask_results['mask_pred']
+ mask_iou_pred = self.mask_iou_head(
+ mask_feats, mask_pred[range(concat_det_labels.size(0)),
+ concat_det_labels])
+ # split batch mask prediction back to each image
+ num_bboxes_per_img = tuple(len(_bbox) for _bbox in _bboxes)
+ mask_preds = mask_pred.split(num_bboxes_per_img, 0)
+ mask_iou_preds = mask_iou_pred.split(num_bboxes_per_img, 0)
+
+ # apply mask post-processing to each image individually
+ segm_results = []
+ mask_scores = []
+ for i in range(num_imgs):
+ if det_bboxes[i].shape[0] == 0:
+ segm_results.append(
+ [[] for _ in range(self.mask_head.num_classes)])
+ mask_scores.append(
+ [[] for _ in range(self.mask_head.num_classes)])
+ else:
+ segm_result = self.mask_head.get_seg_masks(
+ mask_preds[i], _bboxes[i], det_labels[i],
+ self.test_cfg, ori_shapes[i], scale_factors[i],
+ rescale)
+ # get mask scores with mask iou head
+ mask_score = self.mask_iou_head.get_mask_scores(
+ mask_iou_preds[i], det_bboxes[i], det_labels[i])
+ segm_results.append(segm_result)
+ mask_scores.append(mask_score)
+ return list(zip(segm_results, mask_scores))
diff --git a/annotator/uniformer/mmdet/models/roi_heads/pisa_roi_head.py b/annotator/uniformer/mmdet/models/roi_heads/pisa_roi_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..e01113629837eb9c065ba40cd4025899b7bd0172
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/roi_heads/pisa_roi_head.py
@@ -0,0 +1,159 @@
+from mmdet.core import bbox2roi
+from ..builder import HEADS
+from ..losses.pisa_loss import carl_loss, isr_p
+from .standard_roi_head import StandardRoIHead
+
+
+@HEADS.register_module()
+class PISARoIHead(StandardRoIHead):
+ r"""The RoI head for `Prime Sample Attention in Object Detection
+ <https://arxiv.org/abs/1904.04821>`_."""
+
+ def forward_train(self,
+ x,
+ img_metas,
+ proposal_list,
+ gt_bboxes,
+ gt_labels,
+ gt_bboxes_ignore=None,
+ gt_masks=None):
+ """Forward function for training.
+
+ Args:
+ x (list[Tensor]): List of multi-level img features.
+ img_metas (list[dict]): List of image info dict where each dict
+ has: 'img_shape', 'scale_factor', 'flip', and may also contain
+ 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
+ For details on the values of these keys see
+ `mmdet/datasets/pipelines/formatting.py:Collect`.
+ proposal_list (list[Tensor]): List of region proposals.
+ gt_bboxes (list[Tensor]): Ground truth bboxes for each image,
+ in [tl_x, tl_y, br_x, br_y] format.
+ gt_labels (list[Tensor]): Class indices corresponding to each box.
+ gt_bboxes_ignore (list[Tensor], optional): Specify which bounding
+ boxes can be ignored when computing the loss.
+ gt_masks (None | Tensor): True segmentation masks for each box
+ used if the architecture supports a segmentation task.
+
+ Returns:
+ dict[str, Tensor]: a dictionary of loss components
+ """
+ # assign gts and sample proposals
+ if self.with_bbox or self.with_mask:
+ num_imgs = len(img_metas)
+ if gt_bboxes_ignore is None:
+ gt_bboxes_ignore = [None for _ in range(num_imgs)]
+ sampling_results = []
+ neg_label_weights = []
+ for i in range(num_imgs):
+ assign_result = self.bbox_assigner.assign(
+ proposal_list[i], gt_bboxes[i], gt_bboxes_ignore[i],
+ gt_labels[i])
+ sampling_result = self.bbox_sampler.sample(
+ assign_result,
+ proposal_list[i],
+ gt_bboxes[i],
+ gt_labels[i],
+ feats=[lvl_feat[i][None] for lvl_feat in x])
+ # neg label weight is obtained by sampling when using ISR-N
+ neg_label_weight = None
+ if isinstance(sampling_result, tuple):
+ sampling_result, neg_label_weight = sampling_result
+ sampling_results.append(sampling_result)
+ neg_label_weights.append(neg_label_weight)
+
+ losses = dict()
+ # bbox head forward and loss
+ if self.with_bbox:
+ bbox_results = self._bbox_forward_train(
+ x,
+ sampling_results,
+ gt_bboxes,
+ gt_labels,
+ img_metas,
+ neg_label_weights=neg_label_weights)
+ losses.update(bbox_results['loss_bbox'])
+
+ # mask head forward and loss
+ if self.with_mask:
+ mask_results = self._mask_forward_train(x, sampling_results,
+ bbox_results['bbox_feats'],
+ gt_masks, img_metas)
+ losses.update(mask_results['loss_mask'])
+
+ return losses
+
+ def _bbox_forward(self, x, rois):
+ """Box forward function used in both training and testing."""
+ # TODO: a more flexible way to decide which feature maps to use
+ bbox_feats = self.bbox_roi_extractor(
+ x[:self.bbox_roi_extractor.num_inputs], rois)
+ if self.with_shared_head:
+ bbox_feats = self.shared_head(bbox_feats)
+ cls_score, bbox_pred = self.bbox_head(bbox_feats)
+
+ bbox_results = dict(
+ cls_score=cls_score, bbox_pred=bbox_pred, bbox_feats=bbox_feats)
+ return bbox_results
+
+ def _bbox_forward_train(self,
+ x,
+ sampling_results,
+ gt_bboxes,
+ gt_labels,
+ img_metas,
+ neg_label_weights=None):
+ """Run forward function and calculate loss for box head in training."""
+ rois = bbox2roi([res.bboxes for res in sampling_results])
+
+ bbox_results = self._bbox_forward(x, rois)
+
+ bbox_targets = self.bbox_head.get_targets(sampling_results, gt_bboxes,
+ gt_labels, self.train_cfg)
+
+ # neg_label_weights obtained by the sampler are image-wise; map them
+ # back to the corresponding locations in label_weights
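+ # bbox_targets concatenates rois image by image as
+ # [pos_0, neg_0, pos_1, neg_1, ...]; the sampled per-image negative
+ # weights are written back into the matching negative slots.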
+ if neg_label_weights[0] is not None:
+ label_weights = bbox_targets[1]
+ cur_num_rois = 0
+ for i in range(len(sampling_results)):
+ num_pos = sampling_results[i].pos_inds.size(0)
+ num_neg = sampling_results[i].neg_inds.size(0)
+ label_weights[cur_num_rois + num_pos:cur_num_rois + num_pos +
+ num_neg] = neg_label_weights[i]
+ cur_num_rois += num_pos + num_neg
+
+ cls_score = bbox_results['cls_score']
+ bbox_pred = bbox_results['bbox_pred']
+
+ # Apply ISR-P
+ isr_cfg = self.train_cfg.get('isr', None)
+ if isr_cfg is not None:
+ bbox_targets = isr_p(
+ cls_score,
+ bbox_pred,
+ bbox_targets,
+ rois,
+ sampling_results,
+ self.bbox_head.loss_cls,
+ self.bbox_head.bbox_coder,
+ **isr_cfg,
+ num_class=self.bbox_head.num_classes)
+ loss_bbox = self.bbox_head.loss(cls_score, bbox_pred, rois,
+ *bbox_targets)
+
+ # Add CARL Loss
+ carl_cfg = self.train_cfg.get('carl', None)
+ if carl_cfg is not None:
+ loss_carl = carl_loss(
+ cls_score,
+ bbox_targets[0],
+ bbox_pred,
+ bbox_targets[2],
+ self.bbox_head.loss_bbox,
+ **carl_cfg,
+ num_class=self.bbox_head.num_classes)
+ loss_bbox.update(loss_carl)
+
+ bbox_results.update(loss_bbox=loss_bbox)
+ return bbox_results
diff --git a/annotator/uniformer/mmdet/models/roi_heads/point_rend_roi_head.py b/annotator/uniformer/mmdet/models/roi_heads/point_rend_roi_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..478cdf5bff6779e9291f94c543205289036ea2c6
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/roi_heads/point_rend_roi_head.py
@@ -0,0 +1,218 @@
+# Modified from https://github.com/facebookresearch/detectron2/tree/master/projects/PointRend # noqa
+
+import torch
+import torch.nn.functional as F
+from mmcv.ops import point_sample, rel_roi_point_to_rel_img_point
+
+from mmdet.core import bbox2roi, bbox_mapping, merge_aug_masks
+from .. import builder
+from ..builder import HEADS
+from .standard_roi_head import StandardRoIHead
+
+
+@HEADS.register_module()
+class PointRendRoIHead(StandardRoIHead):
+ """`PointRend `_."""
+
+ def __init__(self, point_head, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ assert self.with_bbox and self.with_mask
+ self.init_point_head(point_head)
+
+ def init_point_head(self, point_head):
+ """Initialize ``point_head``"""
+ self.point_head = builder.build_head(point_head)
+
+ def init_weights(self, pretrained):
+ """Initialize the weights in head.
+
+ Args:
+ pretrained (str, optional): Path to pre-trained weights.
+ """
+ super().init_weights(pretrained)
+ self.point_head.init_weights()
+
+ def _mask_forward_train(self, x, sampling_results, bbox_feats, gt_masks,
+ img_metas):
+ """Run forward function and calculate loss for mask head and point head
+ in training."""
+ mask_results = super()._mask_forward_train(x, sampling_results,
+ bbox_feats, gt_masks,
+ img_metas)
+ if mask_results['loss_mask'] is not None:
+ loss_point = self._mask_point_forward_train(
+ x, sampling_results, mask_results['mask_pred'], gt_masks,
+ img_metas)
+ mask_results['loss_mask'].update(loss_point)
+
+ return mask_results
+
+ def _mask_point_forward_train(self, x, sampling_results, mask_pred,
+ gt_masks, img_metas):
+ """Run forward function and calculate loss for point head in
+ training."""
+ pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results])
+ rel_roi_points = self.point_head.get_roi_rel_points_train(
+ mask_pred, pos_labels, cfg=self.train_cfg)
+ rois = bbox2roi([res.pos_bboxes for res in sampling_results])
+
+ fine_grained_point_feats = self._get_fine_grained_point_feats(
+ x, rois, rel_roi_points, img_metas)
+ coarse_point_feats = point_sample(mask_pred, rel_roi_points)
+ mask_point_pred = self.point_head(fine_grained_point_feats,
+ coarse_point_feats)
+ mask_point_target = self.point_head.get_targets(
+ rois, rel_roi_points, sampling_results, gt_masks, self.train_cfg)
+ loss_mask_point = self.point_head.loss(mask_point_pred,
+ mask_point_target, pos_labels)
+
+ return loss_mask_point
+
+ def _get_fine_grained_point_feats(self, x, rois, rel_roi_points,
+ img_metas):
+ """Sample fine grained feats from each level feature map and
+ concatenate them together."""
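+ # For each FPN level, the per-RoI normalized points are converted to
+ # image-level coordinates at that level's resolution and bilinearly
+ # sampled with point_sample; the per-level features are finally
+ # concatenated along the channel dimension.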
+ num_imgs = len(img_metas)
+ fine_grained_feats = []
+ for idx in range(self.mask_roi_extractor.num_inputs):
+ feats = x[idx]
+ spatial_scale = 1. / float(
+ self.mask_roi_extractor.featmap_strides[idx])
+ point_feats = []
+ for batch_ind in range(num_imgs):
+ # unravel batch dim
+ feat = feats[batch_ind].unsqueeze(0)
+ inds = (rois[:, 0].long() == batch_ind)
+ if inds.any():
+ rel_img_points = rel_roi_point_to_rel_img_point(
+ rois[inds], rel_roi_points[inds], feat.shape[2:],
+ spatial_scale).unsqueeze(0)
+ point_feat = point_sample(feat, rel_img_points)
+ point_feat = point_feat.squeeze(0).transpose(0, 1)
+ point_feats.append(point_feat)
+ fine_grained_feats.append(torch.cat(point_feats, dim=0))
+ return torch.cat(fine_grained_feats, dim=1)
+
+ def _mask_point_forward_test(self, x, rois, label_pred, mask_pred,
+ img_metas):
+ """Mask refining process with point head in testing."""
+ refined_mask_pred = mask_pred.clone()
+ for subdivision_step in range(self.test_cfg.subdivision_steps):
+ refined_mask_pred = F.interpolate(
+ refined_mask_pred,
+ scale_factor=self.test_cfg.scale_factor,
+ mode='bilinear',
+ align_corners=False)
+ # If `subdivision_num_points` is greater than or equal to the
+ # resolution of the next step, then we can skip this step
+ num_rois, channels, mask_height, mask_width = \
+ refined_mask_pred.shape
+ if (self.test_cfg.subdivision_num_points >=
+ self.test_cfg.scale_factor**2 * mask_height * mask_width
+ and
+ subdivision_step < self.test_cfg.subdivision_steps - 1):
+ continue
+ point_indices, rel_roi_points = \
+ self.point_head.get_roi_rel_points_test(
+ refined_mask_pred, label_pred, cfg=self.test_cfg)
+ fine_grained_point_feats = self._get_fine_grained_point_feats(
+ x, rois, rel_roi_points, img_metas)
+ coarse_point_feats = point_sample(mask_pred, rel_roi_points)
+ mask_point_pred = self.point_head(fine_grained_point_feats,
+ coarse_point_feats)
+
+ point_indices = point_indices.unsqueeze(1).expand(-1, channels, -1)
+ refined_mask_pred = refined_mask_pred.reshape(
+ num_rois, channels, mask_height * mask_width)
+ refined_mask_pred = refined_mask_pred.scatter_(
+ 2, point_indices, mask_point_pred)
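+ # Only the selected (most uncertain) grid cells are overwritten with
+ # the point head predictions; all other cells keep the bilinearly
+ # upsampled coarse values.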
+ refined_mask_pred = refined_mask_pred.view(num_rois, channels,
+ mask_height, mask_width)
+
+ return refined_mask_pred
+
+ def simple_test_mask(self,
+ x,
+ img_metas,
+ det_bboxes,
+ det_labels,
+ rescale=False):
+ """Obtain mask prediction without augmentation."""
+ ori_shapes = tuple(meta['ori_shape'] for meta in img_metas)
+ scale_factors = tuple(meta['scale_factor'] for meta in img_metas)
+ num_imgs = len(det_bboxes)
+ if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes):
+ segm_results = [[[] for _ in range(self.mask_head.num_classes)]
+ for _ in range(num_imgs)]
+ else:
+ # if det_bboxes is rescaled to the original image size, we need to
+ # rescale it back to the testing scale to obtain RoIs.
+ if rescale and not isinstance(scale_factors[0], float):
+ scale_factors = [
+ torch.from_numpy(scale_factor).to(det_bboxes[0].device)
+ for scale_factor in scale_factors
+ ]
+ _bboxes = [
+ det_bboxes[i][:, :4] *
+ scale_factors[i] if rescale else det_bboxes[i][:, :4]
+ for i in range(len(det_bboxes))
+ ]
+ mask_rois = bbox2roi(_bboxes)
+ mask_results = self._mask_forward(x, mask_rois)
+ # split batch mask prediction back to each image
+ mask_pred = mask_results['mask_pred']
+ num_mask_roi_per_img = [len(det_bbox) for det_bbox in det_bboxes]
+ mask_preds = mask_pred.split(num_mask_roi_per_img, 0)
+ mask_rois = mask_rois.split(num_mask_roi_per_img, 0)
+
+ # apply mask post-processing to each image individually
+ segm_results = []
+ for i in range(num_imgs):
+ if det_bboxes[i].shape[0] == 0:
+ segm_results.append(
+ [[] for _ in range(self.mask_head.num_classes)])
+ else:
+ x_i = [xx[[i]] for xx in x]
+ mask_rois_i = mask_rois[i]
+ mask_rois_i[:, 0] = 0 # TODO: remove this hack
+ mask_pred_i = self._mask_point_forward_test(
+ x_i, mask_rois_i, det_labels[i], mask_preds[i],
+ [img_metas])
+ segm_result = self.mask_head.get_seg_masks(
+ mask_pred_i, _bboxes[i], det_labels[i], self.test_cfg,
+ ori_shapes[i], scale_factors[i], rescale)
+ segm_results.append(segm_result)
+ return segm_results
+
+ def aug_test_mask(self, feats, img_metas, det_bboxes, det_labels):
+ """Test for mask head with test time augmentation."""
+ if det_bboxes.shape[0] == 0:
+ segm_result = [[] for _ in range(self.mask_head.num_classes)]
+ else:
+ aug_masks = []
+ for x, img_meta in zip(feats, img_metas):
+ img_shape = img_meta[0]['img_shape']
+ scale_factor = img_meta[0]['scale_factor']
+ flip = img_meta[0]['flip']
+ _bboxes = bbox_mapping(det_bboxes[:, :4], img_shape,
+ scale_factor, flip)
+ mask_rois = bbox2roi([_bboxes])
+ mask_results = self._mask_forward(x, mask_rois)
+ mask_results['mask_pred'] = self._mask_point_forward_test(
+ x, mask_rois, det_labels, mask_results['mask_pred'],
+ img_metas)
+ # convert to numpy array to save memory
+ aug_masks.append(
+ mask_results['mask_pred'].sigmoid().cpu().numpy())
+ merged_masks = merge_aug_masks(aug_masks, img_metas, self.test_cfg)
+
+ ori_shape = img_metas[0][0]['ori_shape']
+ segm_result = self.mask_head.get_seg_masks(
+ merged_masks,
+ det_bboxes,
+ det_labels,
+ self.test_cfg,
+ ori_shape,
+ scale_factor=1.0,
+ rescale=False)
+ return segm_result
diff --git a/annotator/uniformer/mmdet/models/roi_heads/roi_extractors/__init__.py b/annotator/uniformer/mmdet/models/roi_heads/roi_extractors/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..a6ec0ecc3063cd23c2463f2f53f1c2a83b04d43b
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/roi_heads/roi_extractors/__init__.py
@@ -0,0 +1,7 @@
+from .generic_roi_extractor import GenericRoIExtractor
+from .single_level_roi_extractor import SingleRoIExtractor
+
+__all__ = [
+ 'SingleRoIExtractor',
+ 'GenericRoIExtractor',
+]
diff --git a/annotator/uniformer/mmdet/models/roi_heads/roi_extractors/base_roi_extractor.py b/annotator/uniformer/mmdet/models/roi_heads/roi_extractors/base_roi_extractor.py
new file mode 100644
index 0000000000000000000000000000000000000000..847932547c6c309ae38b45dc43ac0ef8ca66d347
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/roi_heads/roi_extractors/base_roi_extractor.py
@@ -0,0 +1,83 @@
+from abc import ABCMeta, abstractmethod
+
+import torch
+import torch.nn as nn
+from mmcv import ops
+
+
+class BaseRoIExtractor(nn.Module, metaclass=ABCMeta):
+ """Base class for RoI extractor.
+
+ Args:
+ roi_layer (dict): Specify RoI layer type and arguments.
+ out_channels (int): Output channels of RoI layers.
+ featmap_strides (List[int]): Strides of input feature maps.
+ """
+
+ def __init__(self, roi_layer, out_channels, featmap_strides):
+ super(BaseRoIExtractor, self).__init__()
+ self.roi_layers = self.build_roi_layers(roi_layer, featmap_strides)
+ self.out_channels = out_channels
+ self.featmap_strides = featmap_strides
+ self.fp16_enabled = False
+
+ @property
+ def num_inputs(self):
+ """int: Number of input feature maps."""
+ return len(self.featmap_strides)
+
+ def init_weights(self):
+ pass
+
+ def build_roi_layers(self, layer_cfg, featmap_strides):
+ """Build RoI operator to extract feature from each level feature map.
+
+ Args:
+ layer_cfg (dict): Dictionary to construct and config RoI layer
+ operation. Options are modules under ``mmcv/ops`` such as
+ ``RoIAlign``.
+ featmap_strides (List[int]): The stride of input feature map w.r.t.
+ the original image size, which is used to scale RoI
+ coordinate (original image coordinate system) to feature
+ coordinate system.
+
+ Returns:
+ nn.ModuleList: The RoI extractor modules for each level feature
+ map.
+ """
+
+ cfg = layer_cfg.copy()
+ layer_type = cfg.pop('type')
+ assert hasattr(ops, layer_type)
+ layer_cls = getattr(ops, layer_type)
+ roi_layers = nn.ModuleList(
+ [layer_cls(spatial_scale=1 / s, **cfg) for s in featmap_strides])
+ return roi_layers
+
+ def roi_rescale(self, rois, scale_factor):
+ """Scale RoI coordinates by scale factor.
+
+ Args:
+ rois (torch.Tensor): RoI (Region of Interest), shape (n, 5)
+ scale_factor (float): Scale factor that RoI will be multiplied by.
+
+ Returns:
+ torch.Tensor: Scaled RoI.
+ """
+
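+ # The RoI is rescaled about its own center; the batch index in
+ # column 0 is kept unchanged.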
+ cx = (rois[:, 1] + rois[:, 3]) * 0.5
+ cy = (rois[:, 2] + rois[:, 4]) * 0.5
+ w = rois[:, 3] - rois[:, 1]
+ h = rois[:, 4] - rois[:, 2]
+ new_w = w * scale_factor
+ new_h = h * scale_factor
+ x1 = cx - new_w * 0.5
+ x2 = cx + new_w * 0.5
+ y1 = cy - new_h * 0.5
+ y2 = cy + new_h * 0.5
+ new_rois = torch.stack((rois[:, 0], x1, y1, x2, y2), dim=-1)
+ return new_rois
+
+ @abstractmethod
+ def forward(self, feats, rois, roi_scale_factor=None):
+ pass
diff --git a/annotator/uniformer/mmdet/models/roi_heads/roi_extractors/generic_roi_extractor.py b/annotator/uniformer/mmdet/models/roi_heads/roi_extractors/generic_roi_extractor.py
new file mode 100644
index 0000000000000000000000000000000000000000..80c25bb8fde7844c994bfc1f4ae1a2d960cbf3d6
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/roi_heads/roi_extractors/generic_roi_extractor.py
@@ -0,0 +1,83 @@
+from mmcv.cnn.bricks import build_plugin_layer
+from mmcv.runner import force_fp32
+
+from mmdet.models.builder import ROI_EXTRACTORS
+from .base_roi_extractor import BaseRoIExtractor
+
+
+@ROI_EXTRACTORS.register_module()
+class GenericRoIExtractor(BaseRoIExtractor):
+ """Extract RoI features from all level feature maps levels.
+
+ This is the implementation of `A novel Region of Interest Extraction Layer
+ for Instance Segmentation <https://arxiv.org/abs/2004.13665>`_.
+
+ Args:
+ aggregation (str): The method to aggregate multiple feature maps.
+ Options are 'sum', 'concat'. Default: 'sum'.
+ pre_cfg (dict | None): Specify pre-processing modules. Default: None.
+ post_cfg (dict | None): Specify post-processing modules. Default: None.
+ kwargs (keyword arguments): Arguments that are the same
+ as :class:`BaseRoIExtractor`.
+ """
+
+ def __init__(self,
+ aggregation='sum',
+ pre_cfg=None,
+ post_cfg=None,
+ **kwargs):
+ super(GenericRoIExtractor, self).__init__(**kwargs)
+
+ assert aggregation in ['sum', 'concat']
+
+ self.aggregation = aggregation
+ self.with_post = post_cfg is not None
+ self.with_pre = pre_cfg is not None
+ # build pre/post processing modules
+ if self.with_post:
+ self.post_module = build_plugin_layer(post_cfg, '_post_module')[1]
+ if self.with_pre:
+ self.pre_module = build_plugin_layer(pre_cfg, '_pre_module')[1]
+
+ @force_fp32(apply_to=('feats', ), out_fp16=True)
+ def forward(self, feats, rois, roi_scale_factor=None):
+ """Forward function."""
+ if len(feats) == 1:
+ return self.roi_layers[0](feats[0], rois)
+
+ out_size = self.roi_layers[0].output_size
+ num_levels = len(feats)
+ roi_feats = feats[0].new_zeros(
+ rois.size(0), self.out_channels, *out_size)
+
+ # sometimes rois is an empty tensor
+ if roi_feats.shape[0] == 0:
+ return roi_feats
+
+ if roi_scale_factor is not None:
+ rois = self.roi_rescale(rois, roi_scale_factor)
+
+ # mark the starting channels for concat mode
+ start_channels = 0
+ for i in range(num_levels):
+ roi_feats_t = self.roi_layers[i](feats[i], rois)
+ end_channels = start_channels + roi_feats_t.size(1)
+ if self.with_pre:
+ # apply pre-processing to a RoI extracted from each layer
+ roi_feats_t = self.pre_module(roi_feats_t)
+ if self.aggregation == 'sum':
+ # and sum them all
+ roi_feats += roi_feats_t
+ else:
+ # and concat them along channel dimension
+ roi_feats[:, start_channels:end_channels] = roi_feats_t
+ # update channels starting position
+ start_channels = end_channels
+ # check if concat channels match at the end
+ if self.aggregation == 'concat':
+ assert start_channels == self.out_channels
+
+ if self.with_post:
+ # apply post-processing before returning the result
+ roi_feats = self.post_module(roi_feats)
+ return roi_feats
diff --git a/annotator/uniformer/mmdet/models/roi_heads/roi_extractors/single_level_roi_extractor.py b/annotator/uniformer/mmdet/models/roi_heads/roi_extractors/single_level_roi_extractor.py
new file mode 100644
index 0000000000000000000000000000000000000000..cfc838f23270a1ae4d70f90059b67a890850e981
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/roi_heads/roi_extractors/single_level_roi_extractor.py
@@ -0,0 +1,108 @@
+import torch
+from mmcv.runner import force_fp32
+
+from mmdet.models.builder import ROI_EXTRACTORS
+from .base_roi_extractor import BaseRoIExtractor
+
+
+@ROI_EXTRACTORS.register_module()
+class SingleRoIExtractor(BaseRoIExtractor):
+ """Extract RoI features from a single level feature map.
+
+ If there are multiple input feature levels, each RoI is mapped to a level
+ according to its scale. The mapping rule is proposed in
+ `FPN <https://arxiv.org/abs/1612.03144>`_.
+
+ Args:
+ roi_layer (dict): Specify RoI layer type and arguments.
+ out_channels (int): Output channels of RoI layers.
+ featmap_strides (List[int]): Strides of input feature maps.
+ finest_scale (int): Scale threshold of mapping to level 0. Default: 56.
+ """
+
+ def __init__(self,
+ roi_layer,
+ out_channels,
+ featmap_strides,
+ finest_scale=56):
+ super(SingleRoIExtractor, self).__init__(roi_layer, out_channels,
+ featmap_strides)
+ self.finest_scale = finest_scale
+
+ def map_roi_levels(self, rois, num_levels):
+ """Map rois to corresponding feature levels by scales.
+
+ - scale < finest_scale * 2: level 0
+ - finest_scale * 2 <= scale < finest_scale * 4: level 1
+ - finest_scale * 4 <= scale < finest_scale * 8: level 2
+ - scale >= finest_scale * 8: level 3
+
+ Args:
+ rois (Tensor): Input RoIs, shape (k, 5).
+ num_levels (int): Total level number.
+
+ Returns:
+ Tensor: Level index (0-based) of each RoI, shape (k, )
+ """
+ scale = torch.sqrt(
+ (rois[:, 3] - rois[:, 1]) * (rois[:, 4] - rois[:, 2]))
+ target_lvls = torch.floor(torch.log2(scale / self.finest_scale + 1e-6))
+ target_lvls = target_lvls.clamp(min=0, max=num_levels - 1).long()
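+ # e.g. with finest_scale=56, a 224x112 RoI has scale
+ # sqrt(224 * 112) ~= 158 and log2(158 / 56) ~= 1.5, so it is assigned
+ # to level 1.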
+ return target_lvls
+
+ @force_fp32(apply_to=('feats', ), out_fp16=True)
+ def forward(self, feats, rois, roi_scale_factor=None):
+ """Forward function."""
+ out_size = self.roi_layers[0].output_size
+ num_levels = len(feats)
+ expand_dims = (-1, self.out_channels * out_size[0] * out_size[1])
+ if torch.onnx.is_in_onnx_export():
+ # Workaround to export Mask R-CNN to ONNX
+ roi_feats = rois[:, :1].clone().detach()
+ roi_feats = roi_feats.expand(*expand_dims)
+ roi_feats = roi_feats.reshape(-1, self.out_channels, *out_size)
+ roi_feats = roi_feats * 0
+ else:
+ roi_feats = feats[0].new_zeros(
+ rois.size(0), self.out_channels, *out_size)
+ # TODO: remove this when parrots supports
+ if torch.__version__ == 'parrots':
+ roi_feats.requires_grad = True
+
+ if num_levels == 1:
+ if len(rois) == 0:
+ return roi_feats
+ return self.roi_layers[0](feats[0], rois)
+
+ target_lvls = self.map_roi_levels(rois, num_levels)
+
+ if roi_scale_factor is not None:
+ rois = self.roi_rescale(rois, roi_scale_factor)
+
+ for i in range(num_levels):
+ mask = target_lvls == i
+ if torch.onnx.is_in_onnx_export():
+ # To keep all roi_align nodes exported to onnx
+ # and skip nonzero op
+ mask = mask.float().unsqueeze(-1).expand(*expand_dims).reshape(
+ roi_feats.shape)
+ roi_feats_t = self.roi_layers[i](feats[i], rois)
+ roi_feats_t *= mask
+ roi_feats += roi_feats_t
+ continue
+ inds = mask.nonzero(as_tuple=False).squeeze(1)
+ if inds.numel() > 0:
+ rois_ = rois[inds]
+ roi_feats_t = self.roi_layers[i](feats[i], rois_)
+ roi_feats[inds] = roi_feats_t
+ else:
+ # Sometimes some pyramid levels will not be used for RoI
+ # feature extraction and this will cause an incomplete
+ # computation graph in one GPU, which is different from those
+ # in other GPUs and will cause a hanging error.
+ # Therefore, we add it to ensure each feature pyramid is
+ # included in the computation graph to avoid runtime bugs.
+ roi_feats += sum(
+ x.view(-1)[0]
+ for x in self.parameters()) * 0. + feats[i].sum() * 0.
+ return roi_feats
diff --git a/annotator/uniformer/mmdet/models/roi_heads/scnet_roi_head.py b/annotator/uniformer/mmdet/models/roi_heads/scnet_roi_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..85aaa2f0600afbdfc8b0917cb5f341740776a603
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/roi_heads/scnet_roi_head.py
@@ -0,0 +1,582 @@
+import torch
+import torch.nn.functional as F
+
+from mmdet.core import (bbox2result, bbox2roi, bbox_mapping, merge_aug_bboxes,
+ merge_aug_masks, multiclass_nms)
+from ..builder import HEADS, build_head, build_roi_extractor
+from .cascade_roi_head import CascadeRoIHead
+
+
+@HEADS.register_module()
+class SCNetRoIHead(CascadeRoIHead):
+ """RoIHead for `SCNet `_.
+
+ Args:
+ num_stages (int): number of cascade stages.
+ stage_loss_weights (list): loss weight of cascade stages.
+ semantic_roi_extractor (dict): config to init semantic roi extractor.
+ semantic_head (dict): config to init semantic head.
+ feat_relay_head (dict): config to init feature_relay_head.
+ glbctx_head (dict): config to init global context head.
+ """
+
+ def __init__(self,
+ num_stages,
+ stage_loss_weights,
+ semantic_roi_extractor=None,
+ semantic_head=None,
+ feat_relay_head=None,
+ glbctx_head=None,
+ **kwargs):
+ super(SCNetRoIHead, self).__init__(num_stages, stage_loss_weights,
+ **kwargs)
+ assert self.with_bbox and self.with_mask
+ assert not self.with_shared_head # shared head is not supported
+
+ if semantic_head is not None:
+ self.semantic_roi_extractor = build_roi_extractor(
+ semantic_roi_extractor)
+ self.semantic_head = build_head(semantic_head)
+
+ if feat_relay_head is not None:
+ self.feat_relay_head = build_head(feat_relay_head)
+
+ if glbctx_head is not None:
+ self.glbctx_head = build_head(glbctx_head)
+
+ def init_mask_head(self, mask_roi_extractor, mask_head):
+ """Initialize ``mask_head``"""
+ if mask_roi_extractor is not None:
+ self.mask_roi_extractor = build_roi_extractor(mask_roi_extractor)
+ self.mask_head = build_head(mask_head)
+
+ def init_weights(self, pretrained):
+ """Initialize the weights in head.
+
+ Args:
+ pretrained (str, optional): Path to pre-trained weights.
+ Defaults to None.
+ """
+ for i in range(self.num_stages):
+ if self.with_bbox:
+ self.bbox_roi_extractor[i].init_weights()
+ self.bbox_head[i].init_weights()
+ if self.with_mask:
+ self.mask_roi_extractor.init_weights()
+ self.mask_head.init_weights()
+ if self.with_semantic:
+ self.semantic_head.init_weights()
+ if self.with_glbctx:
+ self.glbctx_head.init_weights()
+ if self.with_feat_relay:
+ self.feat_relay_head.init_weights()
+
+ @property
+ def with_semantic(self):
+ """bool: whether the head has semantic head"""
+ return hasattr(self,
+ 'semantic_head') and self.semantic_head is not None
+
+ @property
+ def with_feat_relay(self):
+ """bool: whether the head has feature relay head"""
+ return (hasattr(self, 'feat_relay_head')
+ and self.feat_relay_head is not None)
+
+ @property
+ def with_glbctx(self):
+ """bool: whether the head has global context head"""
+ return hasattr(self, 'glbctx_head') and self.glbctx_head is not None
+
+ def _fuse_glbctx(self, roi_feats, glbctx_feat, rois):
+ """Fuse global context feats with roi feats."""
+ assert roi_feats.size(0) == rois.size(0)
+ img_inds = torch.unique(rois[:, 0].cpu(), sorted=True).long()
+ fused_feats = torch.zeros_like(roi_feats)
+ for img_id in img_inds:
+ inds = (rois[:, 0] == img_id.item())
+ fused_feats[inds] = roi_feats[inds] + glbctx_feat[img_id]
+ return fused_feats
+
+ def _slice_pos_feats(self, feats, sampling_results):
+ """Get features from pos rois."""
+ num_rois = [res.bboxes.size(0) for res in sampling_results]
+ num_pos_rois = [res.pos_bboxes.size(0) for res in sampling_results]
+ inds = torch.zeros(sum(num_rois), dtype=torch.bool)
+ start = 0
+ for i in range(len(num_rois)):
+ start = 0 if i == 0 else start + num_rois[i - 1]
+ stop = start + num_pos_rois[i]
+ inds[start:stop] = 1
+ sliced_feats = feats[inds]
+ return sliced_feats
+
+ def _bbox_forward(self,
+ stage,
+ x,
+ rois,
+ semantic_feat=None,
+ glbctx_feat=None):
+ """Box head forward function used in both training and testing."""
+ bbox_roi_extractor = self.bbox_roi_extractor[stage]
+ bbox_head = self.bbox_head[stage]
+ bbox_feats = bbox_roi_extractor(
+ x[:len(bbox_roi_extractor.featmap_strides)], rois)
+ if self.with_semantic and semantic_feat is not None:
+ bbox_semantic_feat = self.semantic_roi_extractor([semantic_feat],
+ rois)
+ if bbox_semantic_feat.shape[-2:] != bbox_feats.shape[-2:]:
+ bbox_semantic_feat = F.adaptive_avg_pool2d(
+ bbox_semantic_feat, bbox_feats.shape[-2:])
+ bbox_feats += bbox_semantic_feat
+ if self.with_glbctx and glbctx_feat is not None:
+ bbox_feats = self._fuse_glbctx(bbox_feats, glbctx_feat, rois)
+ cls_score, bbox_pred, relayed_feat = bbox_head(
+ bbox_feats, return_shared_feat=True)
+
+ bbox_results = dict(
+ cls_score=cls_score,
+ bbox_pred=bbox_pred,
+ relayed_feat=relayed_feat)
+ return bbox_results
+
+ def _mask_forward(self,
+ x,
+ rois,
+ semantic_feat=None,
+ glbctx_feat=None,
+ relayed_feat=None):
+ """Mask head forward function used in both training and testing."""
+ mask_feats = self.mask_roi_extractor(
+ x[:self.mask_roi_extractor.num_inputs], rois)
+ if self.with_semantic and semantic_feat is not None:
+ mask_semantic_feat = self.semantic_roi_extractor([semantic_feat],
+ rois)
+ if mask_semantic_feat.shape[-2:] != mask_feats.shape[-2:]:
+ mask_semantic_feat = F.adaptive_avg_pool2d(
+ mask_semantic_feat, mask_feats.shape[-2:])
+ mask_feats += mask_semantic_feat
+ if self.with_glbctx and glbctx_feat is not None:
+ mask_feats = self._fuse_glbctx(mask_feats, glbctx_feat, rois)
+ if self.with_feat_relay and relayed_feat is not None:
+ mask_feats = mask_feats + relayed_feat
+ mask_pred = self.mask_head(mask_feats)
+ mask_results = dict(mask_pred=mask_pred)
+
+ return mask_results
+
+ def _bbox_forward_train(self,
+ stage,
+ x,
+ sampling_results,
+ gt_bboxes,
+ gt_labels,
+ rcnn_train_cfg,
+ semantic_feat=None,
+ glbctx_feat=None):
+ """Run forward function and calculate loss for box head in training."""
+ bbox_head = self.bbox_head[stage]
+ rois = bbox2roi([res.bboxes for res in sampling_results])
+ bbox_results = self._bbox_forward(
+ stage,
+ x,
+ rois,
+ semantic_feat=semantic_feat,
+ glbctx_feat=glbctx_feat)
+
+ bbox_targets = bbox_head.get_targets(sampling_results, gt_bboxes,
+ gt_labels, rcnn_train_cfg)
+ loss_bbox = bbox_head.loss(bbox_results['cls_score'],
+ bbox_results['bbox_pred'], rois,
+ *bbox_targets)
+
+ bbox_results.update(
+ loss_bbox=loss_bbox, rois=rois, bbox_targets=bbox_targets)
+ return bbox_results
+
+ def _mask_forward_train(self,
+ x,
+ sampling_results,
+ gt_masks,
+ rcnn_train_cfg,
+ semantic_feat=None,
+ glbctx_feat=None,
+ relayed_feat=None):
+ """Run forward function and calculate loss for mask head in
+ training."""
+ pos_rois = bbox2roi([res.pos_bboxes for res in sampling_results])
+ mask_results = self._mask_forward(
+ x,
+ pos_rois,
+ semantic_feat=semantic_feat,
+ glbctx_feat=glbctx_feat,
+ relayed_feat=relayed_feat)
+
+ mask_targets = self.mask_head.get_targets(sampling_results, gt_masks,
+ rcnn_train_cfg)
+ pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results])
+ loss_mask = self.mask_head.loss(mask_results['mask_pred'],
+ mask_targets, pos_labels)
+
+ mask_results = loss_mask
+ return mask_results
+
+ def forward_train(self,
+ x,
+ img_metas,
+ proposal_list,
+ gt_bboxes,
+ gt_labels,
+ gt_bboxes_ignore=None,
+ gt_masks=None,
+ gt_semantic_seg=None):
+ """
+ Args:
+ x (list[Tensor]): list of multi-level img features.
+
+ img_metas (list[dict]): list of image info dict where each dict
+ has: 'img_shape', 'scale_factor', 'flip', and may also contain
+ 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
+ For details on the values of these keys see
+ `mmdet/datasets/pipelines/formatting.py:Collect`.
+
+ proposal_list (list[Tensors]): list of region proposals.
+
+ gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
+ shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
+
+ gt_labels (list[Tensor]): class indices corresponding to each box
+
+ gt_bboxes_ignore (None, list[Tensor]): specify which bounding
+ boxes can be ignored when computing the loss.
+
+ gt_masks (None, Tensor): true segmentation masks for each box
+ used if the architecture supports a segmentation task.
+
+ gt_semantic_seg (None, list[Tensor]): semantic segmentation masks
+ used if the architecture supports semantic segmentation task.
+
+ Returns:
+ dict[str, Tensor]: a dictionary of loss components
+ """
+ losses = dict()
+
+ # semantic segmentation branch
+ if self.with_semantic:
+ semantic_pred, semantic_feat = self.semantic_head(x)
+ loss_seg = self.semantic_head.loss(semantic_pred, gt_semantic_seg)
+ losses['loss_semantic_seg'] = loss_seg
+ else:
+ semantic_feat = None
+
+ # global context branch
+ if self.with_glbctx:
+ mc_pred, glbctx_feat = self.glbctx_head(x)
+ loss_glbctx = self.glbctx_head.loss(mc_pred, gt_labels)
+ losses['loss_glbctx'] = loss_glbctx
+ else:
+ glbctx_feat = None
+
+ for i in range(self.num_stages):
+ self.current_stage = i
+ rcnn_train_cfg = self.train_cfg[i]
+ lw = self.stage_loss_weights[i]
+
+ # assign gts and sample proposals
+ sampling_results = []
+ bbox_assigner = self.bbox_assigner[i]
+ bbox_sampler = self.bbox_sampler[i]
+ num_imgs = len(img_metas)
+ if gt_bboxes_ignore is None:
+ gt_bboxes_ignore = [None for _ in range(num_imgs)]
+
+ for j in range(num_imgs):
+ assign_result = bbox_assigner.assign(proposal_list[j],
+ gt_bboxes[j],
+ gt_bboxes_ignore[j],
+ gt_labels[j])
+ sampling_result = bbox_sampler.sample(
+ assign_result,
+ proposal_list[j],
+ gt_bboxes[j],
+ gt_labels[j],
+ feats=[lvl_feat[j][None] for lvl_feat in x])
+ sampling_results.append(sampling_result)
+
+ bbox_results = \
+ self._bbox_forward_train(
+ i, x, sampling_results, gt_bboxes, gt_labels,
+ rcnn_train_cfg, semantic_feat, glbctx_feat)
+ roi_labels = bbox_results['bbox_targets'][0]
+
+ for name, value in bbox_results['loss_bbox'].items():
+ losses[f's{i}.{name}'] = (
+ value * lw if 'loss' in name else value)
+
+ # refine boxes
+ if i < self.num_stages - 1:
+ pos_is_gts = [res.pos_is_gt for res in sampling_results]
+ with torch.no_grad():
+ proposal_list = self.bbox_head[i].refine_bboxes(
+ bbox_results['rois'], roi_labels,
+ bbox_results['bbox_pred'], pos_is_gts, img_metas)
+
+ if self.with_feat_relay:
+ relayed_feat = self._slice_pos_feats(bbox_results['relayed_feat'],
+ sampling_results)
+ relayed_feat = self.feat_relay_head(relayed_feat)
+ else:
+ relayed_feat = None
+
+ mask_results = self._mask_forward_train(x, sampling_results, gt_masks,
+ rcnn_train_cfg, semantic_feat,
+ glbctx_feat, relayed_feat)
+ mask_lw = sum(self.stage_loss_weights)
+ losses['loss_mask'] = mask_lw * mask_results['loss_mask']
+
+ return losses
+
+ def simple_test(self, x, proposal_list, img_metas, rescale=False):
+ """Test without augmentation."""
+ if self.with_semantic:
+ _, semantic_feat = self.semantic_head(x)
+ else:
+ semantic_feat = None
+
+ if self.with_glbctx:
+ mc_pred, glbctx_feat = self.glbctx_head(x)
+ else:
+ glbctx_feat = None
+
+ num_imgs = len(proposal_list)
+ img_shapes = tuple(meta['img_shape'] for meta in img_metas)
+ ori_shapes = tuple(meta['ori_shape'] for meta in img_metas)
+ scale_factors = tuple(meta['scale_factor'] for meta in img_metas)
+
+ # "ms" in variable names means multi-stage
+ ms_scores = []
+ rcnn_test_cfg = self.test_cfg
+
+ rois = bbox2roi(proposal_list)
+ for i in range(self.num_stages):
+ bbox_head = self.bbox_head[i]
+ bbox_results = self._bbox_forward(
+ i,
+ x,
+ rois,
+ semantic_feat=semantic_feat,
+ glbctx_feat=glbctx_feat)
+ # split batch bbox prediction back to each image
+ cls_score = bbox_results['cls_score']
+ bbox_pred = bbox_results['bbox_pred']
+ num_proposals_per_img = tuple(len(p) for p in proposal_list)
+ rois = rois.split(num_proposals_per_img, 0)
+ cls_score = cls_score.split(num_proposals_per_img, 0)
+ bbox_pred = bbox_pred.split(num_proposals_per_img, 0)
+ ms_scores.append(cls_score)
+
+ if i < self.num_stages - 1:
+ bbox_label = [s[:, :-1].argmax(dim=1) for s in cls_score]
+ rois = torch.cat([
+ bbox_head.regress_by_class(rois[i], bbox_label[i],
+ bbox_pred[i], img_metas[i])
+ for i in range(num_imgs)
+ ])
+
+ # average scores of each image by stages
+ cls_score = [
+ sum([score[i] for score in ms_scores]) / float(len(ms_scores))
+ for i in range(num_imgs)
+ ]
+
+ # apply bbox post-processing to each image individually
+ det_bboxes = []
+ det_labels = []
+ for i in range(num_imgs):
+ det_bbox, det_label = self.bbox_head[-1].get_bboxes(
+ rois[i],
+ cls_score[i],
+ bbox_pred[i],
+ img_shapes[i],
+ scale_factors[i],
+ rescale=rescale,
+ cfg=rcnn_test_cfg)
+ det_bboxes.append(det_bbox)
+ det_labels.append(det_label)
+ det_bbox_results = [
+ bbox2result(det_bboxes[i], det_labels[i],
+ self.bbox_head[-1].num_classes)
+ for i in range(num_imgs)
+ ]
+
+ if self.with_mask:
+ if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes):
+ mask_classes = self.mask_head.num_classes
+ det_segm_results = [[[] for _ in range(mask_classes)]
+ for _ in range(num_imgs)]
+ else:
+ if rescale and not isinstance(scale_factors[0], float):
+ scale_factors = [
+ torch.from_numpy(scale_factor).to(det_bboxes[0].device)
+ for scale_factor in scale_factors
+ ]
+ _bboxes = [
+ det_bboxes[i][:, :4] *
+ scale_factors[i] if rescale else det_bboxes[i]
+ for i in range(num_imgs)
+ ]
+ mask_rois = bbox2roi(_bboxes)
+
+ # get relay feature on mask_rois
+ bbox_results = self._bbox_forward(
+ -1,
+ x,
+ mask_rois,
+ semantic_feat=semantic_feat,
+ glbctx_feat=glbctx_feat)
+ relayed_feat = bbox_results['relayed_feat']
+ relayed_feat = self.feat_relay_head(relayed_feat)
+
+ mask_results = self._mask_forward(
+ x,
+ mask_rois,
+ semantic_feat=semantic_feat,
+ glbctx_feat=glbctx_feat,
+ relayed_feat=relayed_feat)
+ mask_pred = mask_results['mask_pred']
+
+ # split batch mask prediction back to each image
+ num_bbox_per_img = tuple(len(_bbox) for _bbox in _bboxes)
+ mask_preds = mask_pred.split(num_bbox_per_img, 0)
+
+ # apply mask post-processing to each image individually
+ det_segm_results = []
+ for i in range(num_imgs):
+ if det_bboxes[i].shape[0] == 0:
+ det_segm_results.append(
+ [[] for _ in range(self.mask_head.num_classes)])
+ else:
+ segm_result = self.mask_head.get_seg_masks(
+ mask_preds[i], _bboxes[i], det_labels[i],
+ self.test_cfg, ori_shapes[i], scale_factors[i],
+ rescale)
+ det_segm_results.append(segm_result)
+
+ # return results
+ if self.with_mask:
+ return list(zip(det_bbox_results, det_segm_results))
+ else:
+ return det_bbox_results
+
+ def aug_test(self, img_feats, proposal_list, img_metas, rescale=False):
+ if self.with_semantic:
+ semantic_feats = [
+ self.semantic_head(feat)[1] for feat in img_feats
+ ]
+ else:
+ semantic_feats = [None] * len(img_metas)
+
+ if self.with_glbctx:
+ glbctx_feats = [self.glbctx_head(feat)[1] for feat in img_feats]
+ else:
+ glbctx_feats = [None] * len(img_metas)
+
+ rcnn_test_cfg = self.test_cfg
+ aug_bboxes = []
+ aug_scores = []
+ for x, img_meta, semantic_feat, glbctx_feat in zip(
+ img_feats, img_metas, semantic_feats, glbctx_feats):
+ # only one image in the batch
+ img_shape = img_meta[0]['img_shape']
+ scale_factor = img_meta[0]['scale_factor']
+ flip = img_meta[0]['flip']
+
+ proposals = bbox_mapping(proposal_list[0][:, :4], img_shape,
+ scale_factor, flip)
+ # "ms" in variable names means multi-stage
+ ms_scores = []
+
+ rois = bbox2roi([proposals])
+ for i in range(self.num_stages):
+ bbox_head = self.bbox_head[i]
+ bbox_results = self._bbox_forward(
+ i,
+ x,
+ rois,
+ semantic_feat=semantic_feat,
+ glbctx_feat=glbctx_feat)
+ ms_scores.append(bbox_results['cls_score'])
+ if i < self.num_stages - 1:
+ bbox_label = bbox_results['cls_score'].argmax(dim=1)
+ rois = bbox_head.regress_by_class(
+ rois, bbox_label, bbox_results['bbox_pred'],
+ img_meta[0])
+
+ cls_score = sum(ms_scores) / float(len(ms_scores))
+ bboxes, scores = self.bbox_head[-1].get_bboxes(
+ rois,
+ cls_score,
+ bbox_results['bbox_pred'],
+ img_shape,
+ scale_factor,
+ rescale=False,
+ cfg=None)
+ aug_bboxes.append(bboxes)
+ aug_scores.append(scores)
+
+ # after merging, bboxes will be rescaled to the original image size
+ merged_bboxes, merged_scores = merge_aug_bboxes(
+ aug_bboxes, aug_scores, img_metas, rcnn_test_cfg)
+ det_bboxes, det_labels = multiclass_nms(merged_bboxes, merged_scores,
+ rcnn_test_cfg.score_thr,
+ rcnn_test_cfg.nms,
+ rcnn_test_cfg.max_per_img)
+
+ det_bbox_results = bbox2result(det_bboxes, det_labels,
+ self.bbox_head[-1].num_classes)
+
+ if self.with_mask:
+ if det_bboxes.shape[0] == 0:
+ det_segm_results = [[]
+ for _ in range(self.mask_head.num_classes)]
+ else:
+ aug_masks = []
+ for x, img_meta, semantic_feat, glbctx_feat in zip(
+ img_feats, img_metas, semantic_feats, glbctx_feats):
+ img_shape = img_meta[0]['img_shape']
+ scale_factor = img_meta[0]['scale_factor']
+ flip = img_meta[0]['flip']
+ _bboxes = bbox_mapping(det_bboxes[:, :4], img_shape,
+ scale_factor, flip)
+ mask_rois = bbox2roi([_bboxes])
+ # get relay feature on mask_rois
+ bbox_results = self._bbox_forward(
+ -1,
+ x,
+ mask_rois,
+ semantic_feat=semantic_feat,
+ glbctx_feat=glbctx_feat)
+ relayed_feat = bbox_results['relayed_feat']
+ relayed_feat = self.feat_relay_head(relayed_feat)
+ mask_results = self._mask_forward(
+ x,
+ mask_rois,
+ semantic_feat=semantic_feat,
+ glbctx_feat=glbctx_feat,
+ relayed_feat=relayed_feat)
+ mask_pred = mask_results['mask_pred']
+ aug_masks.append(mask_pred.sigmoid().cpu().numpy())
+ merged_masks = merge_aug_masks(aug_masks, img_metas,
+ self.test_cfg)
+ ori_shape = img_metas[0][0]['ori_shape']
+ det_segm_results = self.mask_head.get_seg_masks(
+ merged_masks,
+ det_bboxes,
+ det_labels,
+ rcnn_test_cfg,
+ ori_shape,
+ scale_factor=1.0,
+ rescale=False)
+ return [(det_bbox_results, det_segm_results)]
+ else:
+ return [det_bbox_results]
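+
+
+# --- Illustrative sketch (not part of upstream mmdet) ---
+# A tiny, self-contained demonstration of the multi-stage score averaging
+# performed in `aug_test` above: the per-stage classification scores are
+# summed and divided by the number of cascade stages before the final
+# bbox decoding. The tensors below are dummies for illustration only.
+if __name__ == '__main__':
+    import torch
+    ms_scores = [torch.rand(4, 81) for _ in range(3)]  # 3 stages, 80 cls + bg
+    cls_score = sum(ms_scores) / float(len(ms_scores))
+    assert cls_score.shape == (4, 81)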
diff --git a/annotator/uniformer/mmdet/models/roi_heads/shared_heads/__init__.py b/annotator/uniformer/mmdet/models/roi_heads/shared_heads/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..bbe70145b8bf7c304370f725f5afa8db98666679
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/roi_heads/shared_heads/__init__.py
@@ -0,0 +1,3 @@
+from .res_layer import ResLayer
+
+__all__ = ['ResLayer']
diff --git a/annotator/uniformer/mmdet/models/roi_heads/shared_heads/res_layer.py b/annotator/uniformer/mmdet/models/roi_heads/shared_heads/res_layer.py
new file mode 100644
index 0000000000000000000000000000000000000000..b5c343258b079a0dd832d4f999c18d002b06efac
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/roi_heads/shared_heads/res_layer.py
@@ -0,0 +1,77 @@
+import torch.nn as nn
+from mmcv.cnn import constant_init, kaiming_init
+from mmcv.runner import auto_fp16, load_checkpoint
+
+from mmdet.models.backbones import ResNet
+from mmdet.models.builder import SHARED_HEADS
+from mmdet.models.utils import ResLayer as _ResLayer
+from mmdet.utils import get_root_logger
+
+
+@SHARED_HEADS.register_module()
+class ResLayer(nn.Module):
+
+ def __init__(self,
+ depth,
+ stage=3,
+ stride=2,
+ dilation=1,
+ style='pytorch',
+ norm_cfg=dict(type='BN', requires_grad=True),
+ norm_eval=True,
+ with_cp=False,
+ dcn=None):
+ super(ResLayer, self).__init__()
+ self.norm_eval = norm_eval
+ self.norm_cfg = norm_cfg
+ self.stage = stage
+ self.fp16_enabled = False
+ block, stage_blocks = ResNet.arch_settings[depth]
+ stage_block = stage_blocks[stage]
+ planes = 64 * 2**stage
+ inplanes = 64 * 2**(stage - 1) * block.expansion
+
+ res_layer = _ResLayer(
+ block,
+ inplanes,
+ planes,
+ stage_block,
+ stride=stride,
+ dilation=dilation,
+ style=style,
+ with_cp=with_cp,
+ norm_cfg=self.norm_cfg,
+ dcn=dcn)
+ self.add_module(f'layer{stage + 1}', res_layer)
+
+ def init_weights(self, pretrained=None):
+ """Initialize the weights in the module.
+
+ Args:
+ pretrained (str, optional): Path to pre-trained weights.
+ Defaults to None.
+ """
+ if isinstance(pretrained, str):
+ logger = get_root_logger()
+ load_checkpoint(self, pretrained, strict=False, logger=logger)
+ elif pretrained is None:
+ for m in self.modules():
+ if isinstance(m, nn.Conv2d):
+ kaiming_init(m)
+ elif isinstance(m, nn.BatchNorm2d):
+ constant_init(m, 1)
+ else:
+ raise TypeError('pretrained must be a str or None')
+
+ @auto_fp16()
+ def forward(self, x):
+ res_layer = getattr(self, f'layer{self.stage + 1}')
+ out = res_layer(x)
+ return out
+
+ def train(self, mode=True):
+ super(ResLayer, self).train(mode)
+ if self.norm_eval:
+ for m in self.modules():
+ if isinstance(m, nn.BatchNorm2d):
+ m.eval()
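+
+
+# --- Illustrative config sketch (not part of upstream mmdet) ---
+# A hedged example of how the shared `ResLayer` head defined above is
+# typically referenced from a detector config (e.g. a C4-style R-CNN).
+# The surrounding detector config is omitted; the values mirror the
+# class defaults and are not prescriptions.
+EXAMPLE_SHARED_HEAD_CFG = dict(
+    type='ResLayer',
+    depth=50,    # use ResNet-50 arch settings
+    stage=3,     # builds `layer4` (stage index is 0-based)
+    stride=2,
+    norm_cfg=dict(type='BN', requires_grad=True),
+    norm_eval=True)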
diff --git a/annotator/uniformer/mmdet/models/roi_heads/sparse_roi_head.py b/annotator/uniformer/mmdet/models/roi_heads/sparse_roi_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..8d85ebc4698f3fc0b974e680c343f91deff4bb50
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/roi_heads/sparse_roi_head.py
@@ -0,0 +1,311 @@
+import torch
+
+from mmdet.core import bbox2result, bbox2roi, bbox_xyxy_to_cxcywh
+from mmdet.core.bbox.samplers import PseudoSampler
+from ..builder import HEADS
+from .cascade_roi_head import CascadeRoIHead
+
+
+@HEADS.register_module()
+class SparseRoIHead(CascadeRoIHead):
+ r"""The RoIHead for `Sparse R-CNN: End-to-End Object Detection with
+    Learnable Proposals <https://arxiv.org/abs/2011.12450>`_
+
+ Args:
+        num_stages (int): Number of stages in the whole iterative process.
+ Defaults to 6.
+ stage_loss_weights (Tuple[float]): The loss
+ weight of each stage. By default all stages have
+ the same weight 1.
+ bbox_roi_extractor (dict): Config of box roi extractor.
+ bbox_head (dict): Config of box head.
+ train_cfg (dict, optional): Configuration information in train stage.
+ Defaults to None.
+ test_cfg (dict, optional): Configuration information in test stage.
+ Defaults to None.
+
+ """
+
+ def __init__(self,
+ num_stages=6,
+ stage_loss_weights=(1, 1, 1, 1, 1, 1),
+ proposal_feature_channel=256,
+ bbox_roi_extractor=dict(
+ type='SingleRoIExtractor',
+ roi_layer=dict(
+ type='RoIAlign', output_size=7, sampling_ratio=2),
+ out_channels=256,
+ featmap_strides=[4, 8, 16, 32]),
+ bbox_head=dict(
+ type='DIIHead',
+ num_classes=80,
+ num_fcs=2,
+ num_heads=8,
+ num_cls_fcs=1,
+ num_reg_fcs=3,
+ feedforward_channels=2048,
+ hidden_channels=256,
+ dropout=0.0,
+ roi_feat_size=7,
+ ffn_act_cfg=dict(type='ReLU', inplace=True)),
+ train_cfg=None,
+ test_cfg=None):
+ assert bbox_roi_extractor is not None
+ assert bbox_head is not None
+ assert len(stage_loss_weights) == num_stages
+ self.num_stages = num_stages
+ self.stage_loss_weights = stage_loss_weights
+ self.proposal_feature_channel = proposal_feature_channel
+ super(SparseRoIHead, self).__init__(
+ num_stages,
+ stage_loss_weights,
+ bbox_roi_extractor=bbox_roi_extractor,
+ bbox_head=bbox_head,
+ train_cfg=train_cfg,
+ test_cfg=test_cfg)
+        # train_cfg would be None when running test.py
+ if train_cfg is not None:
+ for stage in range(num_stages):
+ assert isinstance(self.bbox_sampler[stage], PseudoSampler), \
+ 'Sparse R-CNN only support `PseudoSampler`'
+
+ def _bbox_forward(self, stage, x, rois, object_feats, img_metas):
+        """Box head forward function used in both training and testing. Returns
+        all regression, classification results and an intermediate feature.
+
+ Args:
+ stage (int): The index of current stage in
+ iterative process.
+ x (List[Tensor]): List of FPN features
+ rois (Tensor): Rois in total batch. With shape (num_proposal, 5).
+ the last dimension 5 represents (img_index, x1, y1, x2, y2).
+ object_feats (Tensor): The object feature extracted from
+ the previous stage.
+ img_metas (dict): meta information of images.
+
+ Returns:
+ dict[str, Tensor]: a dictionary of bbox head outputs,
+                containing the following results:
+
+ - cls_score (Tensor): The score of each class, has
+ shape (batch_size, num_proposals, num_classes)
+ when use focal loss or
+ (batch_size, num_proposals, num_classes+1)
+ otherwise.
+ - decode_bbox_pred (Tensor): The regression results
+ with shape (batch_size, num_proposal, 4).
+ The last dimension 4 represents
+ [tl_x, tl_y, br_x, br_y].
+ - object_feats (Tensor): The object feature extracted
+ from current stage
+ - detach_cls_score_list (list[Tensor]): The detached
+ classification results, length is batch_size, and
+ each tensor has shape (num_proposal, num_classes).
+ - detach_proposal_list (list[tensor]): The detached
+ regression results, length is batch_size, and each
+ tensor has shape (num_proposal, 4). The last
+ dimension 4 represents [tl_x, tl_y, br_x, br_y].
+ """
+ num_imgs = len(img_metas)
+ bbox_roi_extractor = self.bbox_roi_extractor[stage]
+ bbox_head = self.bbox_head[stage]
+ bbox_feats = bbox_roi_extractor(x[:bbox_roi_extractor.num_inputs],
+ rois)
+ cls_score, bbox_pred, object_feats = bbox_head(bbox_feats,
+ object_feats)
+ proposal_list = self.bbox_head[stage].refine_bboxes(
+ rois,
+ rois.new_zeros(len(rois)), # dummy arg
+ bbox_pred.view(-1, bbox_pred.size(-1)),
+ [rois.new_zeros(object_feats.size(1)) for _ in range(num_imgs)],
+ img_metas)
+ bbox_results = dict(
+ cls_score=cls_score,
+ decode_bbox_pred=torch.cat(proposal_list),
+ object_feats=object_feats,
+ # detach then use it in label assign
+ detach_cls_score_list=[
+ cls_score[i].detach() for i in range(num_imgs)
+ ],
+ detach_proposal_list=[item.detach() for item in proposal_list])
+
+ return bbox_results
+
+ def forward_train(self,
+ x,
+ proposal_boxes,
+ proposal_features,
+ img_metas,
+ gt_bboxes,
+ gt_labels,
+ gt_bboxes_ignore=None,
+ imgs_whwh=None,
+ gt_masks=None):
+ """Forward function in training stage.
+
+ Args:
+ x (list[Tensor]): list of multi-level img features.
+            proposal_boxes (Tensor): Decoded proposal bboxes, has shape
+ (batch_size, num_proposals, 4)
+ proposal_features (Tensor): Expanded proposal
+ features, has shape
+ (batch_size, num_proposals, proposal_feature_channel)
+ img_metas (list[dict]): list of image info dict where
+ each dict has: 'img_shape', 'scale_factor', 'flip',
+ and may also contain 'filename', 'ori_shape',
+ 'pad_shape', and 'img_norm_cfg'. For details on the
+ values of these keys see
+ `mmdet/datasets/pipelines/formatting.py:Collect`.
+ gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
+ shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
+ gt_labels (list[Tensor]): class indices corresponding to each box
+ gt_bboxes_ignore (None | list[Tensor]): specify which bounding
+ boxes can be ignored when computing the loss.
+ imgs_whwh (Tensor): Tensor with shape (batch_size, 4),
+ the dimension means
+ [img_width,img_height, img_width, img_height].
+ gt_masks (None | Tensor) : true segmentation masks for each box
+ used if the architecture supports a segmentation task.
+
+ Returns:
+ dict[str, Tensor]: a dictionary of loss components of all stage.
+ """
+
+ num_imgs = len(img_metas)
+ num_proposals = proposal_boxes.size(1)
+ imgs_whwh = imgs_whwh.repeat(1, num_proposals, 1)
+ all_stage_bbox_results = []
+ proposal_list = [proposal_boxes[i] for i in range(len(proposal_boxes))]
+ object_feats = proposal_features
+ all_stage_loss = {}
+ for stage in range(self.num_stages):
+ rois = bbox2roi(proposal_list)
+ bbox_results = self._bbox_forward(stage, x, rois, object_feats,
+ img_metas)
+ all_stage_bbox_results.append(bbox_results)
+ if gt_bboxes_ignore is None:
+ # TODO support ignore
+ gt_bboxes_ignore = [None for _ in range(num_imgs)]
+ sampling_results = []
+ cls_pred_list = bbox_results['detach_cls_score_list']
+ proposal_list = bbox_results['detach_proposal_list']
+ for i in range(num_imgs):
+ normalize_bbox_ccwh = bbox_xyxy_to_cxcywh(proposal_list[i] /
+ imgs_whwh[i])
+ assign_result = self.bbox_assigner[stage].assign(
+ normalize_bbox_ccwh, cls_pred_list[i], gt_bboxes[i],
+ gt_labels[i], img_metas[i])
+ sampling_result = self.bbox_sampler[stage].sample(
+ assign_result, proposal_list[i], gt_bboxes[i])
+ sampling_results.append(sampling_result)
+ bbox_targets = self.bbox_head[stage].get_targets(
+ sampling_results, gt_bboxes, gt_labels, self.train_cfg[stage],
+ True)
+ cls_score = bbox_results['cls_score']
+ decode_bbox_pred = bbox_results['decode_bbox_pred']
+
+ single_stage_loss = self.bbox_head[stage].loss(
+ cls_score.view(-1, cls_score.size(-1)),
+ decode_bbox_pred.view(-1, 4),
+ *bbox_targets,
+ imgs_whwh=imgs_whwh)
+ for key, value in single_stage_loss.items():
+ all_stage_loss[f'stage{stage}_{key}'] = value * \
+ self.stage_loss_weights[stage]
+ object_feats = bbox_results['object_feats']
+
+ return all_stage_loss
+
+ def simple_test(self,
+ x,
+ proposal_boxes,
+ proposal_features,
+ img_metas,
+ imgs_whwh,
+ rescale=False):
+ """Test without augmentation.
+
+ Args:
+ x (list[Tensor]): list of multi-level img features.
+ proposal_boxes (Tensor): Decoded proposal bboxes, has shape
+ (batch_size, num_proposals, 4)
+ proposal_features (Tensor): Expanded proposal
+ features, has shape
+ (batch_size, num_proposals, proposal_feature_channel)
+ img_metas (dict): meta information of images.
+ imgs_whwh (Tensor): Tensor with shape (batch_size, 4),
+ the dimension means
+ [img_width,img_height, img_width, img_height].
+ rescale (bool): If True, return boxes in original image
+ space. Defaults to False.
+
+ Returns:
+ bbox_results (list[tuple[np.ndarray]]): \
+ [[cls1_det, cls2_det, ...], ...]. \
+ The outer list indicates images, and the inner \
+ list indicates per-class detected bboxes. The \
+ np.ndarray has shape (num_det, 5) and the last \
+ dimension 5 represents (x1, y1, x2, y2, score).
+ """
+ assert self.with_bbox, 'Bbox head must be implemented.'
+ # Decode initial proposals
+ num_imgs = len(img_metas)
+ proposal_list = [proposal_boxes[i] for i in range(num_imgs)]
+ object_feats = proposal_features
+ for stage in range(self.num_stages):
+ rois = bbox2roi(proposal_list)
+ bbox_results = self._bbox_forward(stage, x, rois, object_feats,
+ img_metas)
+ object_feats = bbox_results['object_feats']
+ cls_score = bbox_results['cls_score']
+ proposal_list = bbox_results['detach_proposal_list']
+
+ num_classes = self.bbox_head[-1].num_classes
+ det_bboxes = []
+ det_labels = []
+
+ if self.bbox_head[-1].loss_cls.use_sigmoid:
+ cls_score = cls_score.sigmoid()
+ else:
+ cls_score = cls_score.softmax(-1)[..., :-1]
+
+ for img_id in range(num_imgs):
+ cls_score_per_img = cls_score[img_id]
+ scores_per_img, topk_indices = cls_score_per_img.flatten(
+ 0, 1).topk(
+ self.test_cfg.max_per_img, sorted=False)
+ labels_per_img = topk_indices % num_classes
+ bbox_pred_per_img = proposal_list[img_id][topk_indices //
+ num_classes]
+ if rescale:
+ scale_factor = img_metas[img_id]['scale_factor']
+ bbox_pred_per_img /= bbox_pred_per_img.new_tensor(scale_factor)
+ det_bboxes.append(
+ torch.cat([bbox_pred_per_img, scores_per_img[:, None]], dim=1))
+ det_labels.append(labels_per_img)
+
+ bbox_results = [
+ bbox2result(det_bboxes[i], det_labels[i], num_classes)
+ for i in range(num_imgs)
+ ]
+
+ return bbox_results
+
+ def aug_test(self, features, proposal_list, img_metas, rescale=False):
+ raise NotImplementedError('Sparse R-CNN does not support `aug_test`')
+
+ def forward_dummy(self, x, proposal_boxes, proposal_features, img_metas):
+ """Dummy forward function when do the flops computing."""
+ all_stage_bbox_results = []
+ proposal_list = [proposal_boxes[i] for i in range(len(proposal_boxes))]
+ object_feats = proposal_features
+ if self.with_bbox:
+ for stage in range(self.num_stages):
+ rois = bbox2roi(proposal_list)
+ bbox_results = self._bbox_forward(stage, x, rois, object_feats,
+ img_metas)
+
+ all_stage_bbox_results.append(bbox_results)
+ proposal_list = bbox_results['detach_proposal_list']
+ object_feats = bbox_results['object_feats']
+ return all_stage_bbox_results
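+
+
+# --- Illustrative sketch (not part of upstream mmdet) ---
+# A self-contained demonstration of the flattened top-k decoding used in
+# `simple_test` above: per-image scores of shape
+# (num_proposals, num_classes) are flattened, the global top-k is taken,
+# and the flat indices are mapped back to (proposal, class) pairs.
+# All tensors are dummies.
+if __name__ == '__main__':
+    num_proposals, num_classes, max_per_img = 100, 80, 10
+    cls_score = torch.rand(num_proposals, num_classes)
+    proposals = torch.rand(num_proposals, 4)
+    scores, topk_indices = cls_score.flatten(0, 1).topk(
+        max_per_img, sorted=False)
+    labels = topk_indices % num_classes
+    bboxes = proposals[topk_indices // num_classes]
+    assert bboxes.shape == (max_per_img, 4)
+    assert labels.shape == (max_per_img, )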
diff --git a/annotator/uniformer/mmdet/models/roi_heads/standard_roi_head.py b/annotator/uniformer/mmdet/models/roi_heads/standard_roi_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..c530f2a5ce904439492de12ff7d267cc1e757d3a
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/roi_heads/standard_roi_head.py
@@ -0,0 +1,295 @@
+import torch
+
+from mmdet.core import bbox2result, bbox2roi, build_assigner, build_sampler
+from ..builder import HEADS, build_head, build_roi_extractor
+from .base_roi_head import BaseRoIHead
+from .test_mixins import BBoxTestMixin, MaskTestMixin
+
+
+@HEADS.register_module()
+class StandardRoIHead(BaseRoIHead, BBoxTestMixin, MaskTestMixin):
+ """Simplest base roi head including one bbox head and one mask head."""
+
+ def init_assigner_sampler(self):
+ """Initialize assigner and sampler."""
+ self.bbox_assigner = None
+ self.bbox_sampler = None
+ if self.train_cfg:
+ self.bbox_assigner = build_assigner(self.train_cfg.assigner)
+ self.bbox_sampler = build_sampler(
+ self.train_cfg.sampler, context=self)
+
+ def init_bbox_head(self, bbox_roi_extractor, bbox_head):
+ """Initialize ``bbox_head``"""
+ self.bbox_roi_extractor = build_roi_extractor(bbox_roi_extractor)
+ self.bbox_head = build_head(bbox_head)
+
+ def init_mask_head(self, mask_roi_extractor, mask_head):
+ """Initialize ``mask_head``"""
+ if mask_roi_extractor is not None:
+ self.mask_roi_extractor = build_roi_extractor(mask_roi_extractor)
+ self.share_roi_extractor = False
+ else:
+ self.share_roi_extractor = True
+ self.mask_roi_extractor = self.bbox_roi_extractor
+ self.mask_head = build_head(mask_head)
+
+ def init_weights(self, pretrained):
+ """Initialize the weights in head.
+
+ Args:
+ pretrained (str, optional): Path to pre-trained weights.
+ Defaults to None.
+ """
+ if self.with_shared_head:
+ self.shared_head.init_weights(pretrained=pretrained)
+ if self.with_bbox:
+ self.bbox_roi_extractor.init_weights()
+ self.bbox_head.init_weights()
+ if self.with_mask:
+ self.mask_head.init_weights()
+ if not self.share_roi_extractor:
+ self.mask_roi_extractor.init_weights()
+
+ def forward_dummy(self, x, proposals):
+ """Dummy forward function."""
+ # bbox head
+ outs = ()
+ rois = bbox2roi([proposals])
+ if self.with_bbox:
+ bbox_results = self._bbox_forward(x, rois)
+ outs = outs + (bbox_results['cls_score'],
+ bbox_results['bbox_pred'])
+ # mask head
+ if self.with_mask:
+ mask_rois = rois[:100]
+ mask_results = self._mask_forward(x, mask_rois)
+ outs = outs + (mask_results['mask_pred'], )
+ return outs
+
+ def forward_train(self,
+ x,
+ img_metas,
+ proposal_list,
+ gt_bboxes,
+ gt_labels,
+ gt_bboxes_ignore=None,
+ gt_masks=None):
+ """
+ Args:
+ x (list[Tensor]): list of multi-level img features.
+ img_metas (list[dict]): list of image info dict where each dict
+ has: 'img_shape', 'scale_factor', 'flip', and may also contain
+ 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
+ For details on the values of these keys see
+ `mmdet/datasets/pipelines/formatting.py:Collect`.
+            proposal_list (list[Tensor]): list of region proposals.
+ gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
+ shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
+ gt_labels (list[Tensor]): class indices corresponding to each box
+ gt_bboxes_ignore (None | list[Tensor]): specify which bounding
+ boxes can be ignored when computing the loss.
+ gt_masks (None | Tensor) : true segmentation masks for each box
+ used if the architecture supports a segmentation task.
+
+ Returns:
+ dict[str, Tensor]: a dictionary of loss components
+ """
+ # assign gts and sample proposals
+ if self.with_bbox or self.with_mask:
+ num_imgs = len(img_metas)
+ if gt_bboxes_ignore is None:
+ gt_bboxes_ignore = [None for _ in range(num_imgs)]
+ sampling_results = []
+ for i in range(num_imgs):
+ assign_result = self.bbox_assigner.assign(
+ proposal_list[i], gt_bboxes[i], gt_bboxes_ignore[i],
+ gt_labels[i])
+ sampling_result = self.bbox_sampler.sample(
+ assign_result,
+ proposal_list[i],
+ gt_bboxes[i],
+ gt_labels[i],
+ feats=[lvl_feat[i][None] for lvl_feat in x])
+ sampling_results.append(sampling_result)
+
+ losses = dict()
+ # bbox head forward and loss
+ if self.with_bbox:
+ bbox_results = self._bbox_forward_train(x, sampling_results,
+ gt_bboxes, gt_labels,
+ img_metas)
+ losses.update(bbox_results['loss_bbox'])
+
+ # mask head forward and loss
+ if self.with_mask:
+ mask_results = self._mask_forward_train(x, sampling_results,
+ bbox_results['bbox_feats'],
+ gt_masks, img_metas)
+ losses.update(mask_results['loss_mask'])
+
+ return losses
+
+ def _bbox_forward(self, x, rois):
+ """Box head forward function used in both training and testing."""
+ # TODO: a more flexible way to decide which feature maps to use
+ bbox_feats = self.bbox_roi_extractor(
+ x[:self.bbox_roi_extractor.num_inputs], rois)
+ if self.with_shared_head:
+ bbox_feats = self.shared_head(bbox_feats)
+ cls_score, bbox_pred = self.bbox_head(bbox_feats)
+
+ bbox_results = dict(
+ cls_score=cls_score, bbox_pred=bbox_pred, bbox_feats=bbox_feats)
+ return bbox_results
+
+ def _bbox_forward_train(self, x, sampling_results, gt_bboxes, gt_labels,
+ img_metas):
+ """Run forward function and calculate loss for box head in training."""
+ rois = bbox2roi([res.bboxes for res in sampling_results])
+ bbox_results = self._bbox_forward(x, rois)
+
+ bbox_targets = self.bbox_head.get_targets(sampling_results, gt_bboxes,
+ gt_labels, self.train_cfg)
+ loss_bbox = self.bbox_head.loss(bbox_results['cls_score'],
+ bbox_results['bbox_pred'], rois,
+ *bbox_targets)
+
+ bbox_results.update(loss_bbox=loss_bbox)
+ return bbox_results
+
+ def _mask_forward_train(self, x, sampling_results, bbox_feats, gt_masks,
+ img_metas):
+ """Run forward function and calculate loss for mask head in
+ training."""
+ if not self.share_roi_extractor:
+ pos_rois = bbox2roi([res.pos_bboxes for res in sampling_results])
+ mask_results = self._mask_forward(x, pos_rois)
+ else:
+ pos_inds = []
+ device = bbox_feats.device
+ for res in sampling_results:
+ pos_inds.append(
+ torch.ones(
+ res.pos_bboxes.shape[0],
+ device=device,
+ dtype=torch.uint8))
+ pos_inds.append(
+ torch.zeros(
+ res.neg_bboxes.shape[0],
+ device=device,
+ dtype=torch.uint8))
+ pos_inds = torch.cat(pos_inds)
+
+ mask_results = self._mask_forward(
+ x, pos_inds=pos_inds, bbox_feats=bbox_feats)
+
+ mask_targets = self.mask_head.get_targets(sampling_results, gt_masks,
+ self.train_cfg)
+ pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results])
+ loss_mask = self.mask_head.loss(mask_results['mask_pred'],
+ mask_targets, pos_labels)
+
+ mask_results.update(loss_mask=loss_mask, mask_targets=mask_targets)
+ return mask_results
+
+ def _mask_forward(self, x, rois=None, pos_inds=None, bbox_feats=None):
+ """Mask head forward function used in both training and testing."""
+ assert ((rois is not None) ^
+ (pos_inds is not None and bbox_feats is not None))
+ if rois is not None:
+ mask_feats = self.mask_roi_extractor(
+ x[:self.mask_roi_extractor.num_inputs], rois)
+ if self.with_shared_head:
+ mask_feats = self.shared_head(mask_feats)
+ else:
+ assert bbox_feats is not None
+ mask_feats = bbox_feats[pos_inds]
+
+ mask_pred = self.mask_head(mask_feats)
+ mask_results = dict(mask_pred=mask_pred, mask_feats=mask_feats)
+ return mask_results
+
+ async def async_simple_test(self,
+ x,
+ proposal_list,
+ img_metas,
+ proposals=None,
+ rescale=False):
+ """Async test without augmentation."""
+ assert self.with_bbox, 'Bbox head must be implemented.'
+
+ det_bboxes, det_labels = await self.async_test_bboxes(
+ x, img_metas, proposal_list, self.test_cfg, rescale=rescale)
+ bbox_results = bbox2result(det_bboxes, det_labels,
+ self.bbox_head.num_classes)
+ if not self.with_mask:
+ return bbox_results
+ else:
+ segm_results = await self.async_test_mask(
+ x,
+ img_metas,
+ det_bboxes,
+ det_labels,
+ rescale=rescale,
+ mask_test_cfg=self.test_cfg.get('mask'))
+ return bbox_results, segm_results
+
+ def simple_test(self,
+ x,
+ proposal_list,
+ img_metas,
+ proposals=None,
+ rescale=False):
+ """Test without augmentation."""
+ assert self.with_bbox, 'Bbox head must be implemented.'
+
+ det_bboxes, det_labels = self.simple_test_bboxes(
+ x, img_metas, proposal_list, self.test_cfg, rescale=rescale)
+ if torch.onnx.is_in_onnx_export():
+ if self.with_mask:
+ segm_results = self.simple_test_mask(
+ x, img_metas, det_bboxes, det_labels, rescale=rescale)
+ return det_bboxes, det_labels, segm_results
+ else:
+ return det_bboxes, det_labels
+
+ bbox_results = [
+ bbox2result(det_bboxes[i], det_labels[i],
+ self.bbox_head.num_classes)
+ for i in range(len(det_bboxes))
+ ]
+
+ if not self.with_mask:
+ return bbox_results
+ else:
+ segm_results = self.simple_test_mask(
+ x, img_metas, det_bboxes, det_labels, rescale=rescale)
+ return list(zip(bbox_results, segm_results))
+
+ def aug_test(self, x, proposal_list, img_metas, rescale=False):
+ """Test with augmentations.
+
+ If rescale is False, then returned bboxes and masks will fit the scale
+ of imgs[0].
+ """
+ det_bboxes, det_labels = self.aug_test_bboxes(x, img_metas,
+ proposal_list,
+ self.test_cfg)
+
+ if rescale:
+ _det_bboxes = det_bboxes
+ else:
+ _det_bboxes = det_bboxes.clone()
+ _det_bboxes[:, :4] *= det_bboxes.new_tensor(
+ img_metas[0][0]['scale_factor'])
+ bbox_results = bbox2result(_det_bboxes, det_labels,
+ self.bbox_head.num_classes)
+
+ # det_bboxes always keep the original scale
+ if self.with_mask:
+ segm_results = self.aug_test_mask(x, img_metas, det_bboxes,
+ det_labels)
+ return [(bbox_results, segm_results)]
+ else:
+ return [bbox_results]
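+
+
+# --- Illustrative config sketch (not part of upstream mmdet) ---
+# A hedged example of a typical `StandardRoIHead` configuration in the
+# Faster/Mask R-CNN style. The head types and channel sizes are common
+# mmdet choices shown only for orientation, not a prescribed setup.
+EXAMPLE_ROI_HEAD_CFG = dict(
+    type='StandardRoIHead',
+    bbox_roi_extractor=dict(
+        type='SingleRoIExtractor',
+        roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
+        out_channels=256,
+        featmap_strides=[4, 8, 16, 32]),
+    bbox_head=dict(
+        type='Shared2FCBBoxHead',
+        in_channels=256,
+        fc_out_channels=1024,
+        roi_feat_size=7,
+        num_classes=80),
+    mask_roi_extractor=dict(
+        type='SingleRoIExtractor',
+        roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
+        out_channels=256,
+        featmap_strides=[4, 8, 16, 32]),
+    mask_head=dict(
+        type='FCNMaskHead',
+        num_convs=4,
+        in_channels=256,
+        conv_out_channels=256,
+        num_classes=80))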
diff --git a/annotator/uniformer/mmdet/models/roi_heads/test_mixins.py b/annotator/uniformer/mmdet/models/roi_heads/test_mixins.py
new file mode 100644
index 0000000000000000000000000000000000000000..78a092a431aa884ab7dfd08346f79a4ccf8303bf
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/roi_heads/test_mixins.py
@@ -0,0 +1,348 @@
+import logging
+import sys
+
+import torch
+
+from mmdet.core import (bbox2roi, bbox_mapping, merge_aug_bboxes,
+ merge_aug_masks, multiclass_nms)
+
+logger = logging.getLogger(__name__)
+
+if sys.version_info >= (3, 7):
+ from mmdet.utils.contextmanagers import completed
+
+
+class BBoxTestMixin(object):
+
+ if sys.version_info >= (3, 7):
+
+ async def async_test_bboxes(self,
+ x,
+ img_metas,
+ proposals,
+ rcnn_test_cfg,
+ rescale=False,
+ bbox_semaphore=None,
+ global_lock=None):
+ """Asynchronized test for box head without augmentation."""
+ rois = bbox2roi(proposals)
+ roi_feats = self.bbox_roi_extractor(
+ x[:len(self.bbox_roi_extractor.featmap_strides)], rois)
+ if self.with_shared_head:
+ roi_feats = self.shared_head(roi_feats)
+ sleep_interval = rcnn_test_cfg.get('async_sleep_interval', 0.017)
+
+ async with completed(
+ __name__, 'bbox_head_forward',
+ sleep_interval=sleep_interval):
+ cls_score, bbox_pred = self.bbox_head(roi_feats)
+
+ img_shape = img_metas[0]['img_shape']
+ scale_factor = img_metas[0]['scale_factor']
+ det_bboxes, det_labels = self.bbox_head.get_bboxes(
+ rois,
+ cls_score,
+ bbox_pred,
+ img_shape,
+ scale_factor,
+ rescale=rescale,
+ cfg=rcnn_test_cfg)
+ return det_bboxes, det_labels
+
+ def simple_test_bboxes(self,
+ x,
+ img_metas,
+ proposals,
+ rcnn_test_cfg,
+ rescale=False):
+ """Test only det bboxes without augmentation.
+
+ Args:
+ x (tuple[Tensor]): Feature maps of all scale level.
+ img_metas (list[dict]): Image meta info.
+ proposals (Tensor or List[Tensor]): Region proposals.
+ rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of R-CNN.
+ rescale (bool): If True, return boxes in original image space.
+ Default: False.
+
+ Returns:
+ tuple[list[Tensor], list[Tensor]]: The first list contains
+ the boxes of the corresponding image in a batch, each
+ tensor has the shape (num_boxes, 5) and last dimension
+ 5 represent (tl_x, tl_y, br_x, br_y, score). Each Tensor
+                5 represents (tl_x, tl_y, br_x, br_y, score). Each Tensor
+ The length of both lists should be equal to batch_size.
+ """
+ # get origin input shape to support onnx dynamic input shape
+ if torch.onnx.is_in_onnx_export():
+ assert len(
+ img_metas
+            ) == 1, 'Only support one input image while exporting to ONNX'
+ img_shapes = img_metas[0]['img_shape_for_onnx']
+ else:
+ img_shapes = tuple(meta['img_shape'] for meta in img_metas)
+ scale_factors = tuple(meta['scale_factor'] for meta in img_metas)
+
+ # The length of proposals of different batches may be different.
+ # In order to form a batch, a padding operation is required.
+ if isinstance(proposals, list):
+ # padding to form a batch
+ max_size = max([proposal.size(0) for proposal in proposals])
+ for i, proposal in enumerate(proposals):
+ supplement = proposal.new_full(
+ (max_size - proposal.size(0), proposal.size(1)), 0)
+ proposals[i] = torch.cat((supplement, proposal), dim=0)
+ rois = torch.stack(proposals, dim=0)
+ else:
+ rois = proposals
+
+ batch_index = torch.arange(
+ rois.size(0), device=rois.device).float().view(-1, 1, 1).expand(
+ rois.size(0), rois.size(1), 1)
+ rois = torch.cat([batch_index, rois[..., :4]], dim=-1)
+ batch_size = rois.shape[0]
+ num_proposals_per_img = rois.shape[1]
+
+ # Eliminate the batch dimension
+ rois = rois.view(-1, 5)
+ bbox_results = self._bbox_forward(x, rois)
+ cls_score = bbox_results['cls_score']
+ bbox_pred = bbox_results['bbox_pred']
+
+ # Recover the batch dimension
+ rois = rois.reshape(batch_size, num_proposals_per_img, -1)
+ cls_score = cls_score.reshape(batch_size, num_proposals_per_img, -1)
+
+ if not torch.onnx.is_in_onnx_export():
+ # remove padding
+ supplement_mask = rois[..., -1] == 0
+ cls_score[supplement_mask, :] = 0
+
+ # bbox_pred would be None in some detector when with_reg is False,
+ # e.g. Grid R-CNN.
+ if bbox_pred is not None:
+ # the bbox prediction of some detectors like SABL is not Tensor
+ if isinstance(bbox_pred, torch.Tensor):
+ bbox_pred = bbox_pred.reshape(batch_size,
+ num_proposals_per_img, -1)
+ if not torch.onnx.is_in_onnx_export():
+ bbox_pred[supplement_mask, :] = 0
+ else:
+ # TODO: Looking forward to a better way
+ # For SABL
+ bbox_preds = self.bbox_head.bbox_pred_split(
+ bbox_pred, num_proposals_per_img)
+ # apply bbox post-processing to each image individually
+ det_bboxes = []
+ det_labels = []
+ for i in range(len(proposals)):
+ # remove padding
+ supplement_mask = proposals[i][..., -1] == 0
+ for bbox in bbox_preds[i]:
+ bbox[supplement_mask] = 0
+ det_bbox, det_label = self.bbox_head.get_bboxes(
+ rois[i],
+ cls_score[i],
+ bbox_preds[i],
+ img_shapes[i],
+ scale_factors[i],
+ rescale=rescale,
+ cfg=rcnn_test_cfg)
+ det_bboxes.append(det_bbox)
+ det_labels.append(det_label)
+ return det_bboxes, det_labels
+ else:
+ bbox_pred = None
+
+ return self.bbox_head.get_bboxes(
+ rois,
+ cls_score,
+ bbox_pred,
+ img_shapes,
+ scale_factors,
+ rescale=rescale,
+ cfg=rcnn_test_cfg)
+
+ def aug_test_bboxes(self, feats, img_metas, proposal_list, rcnn_test_cfg):
+ """Test det bboxes with test time augmentation."""
+ aug_bboxes = []
+ aug_scores = []
+ for x, img_meta in zip(feats, img_metas):
+ # only one image in the batch
+ img_shape = img_meta[0]['img_shape']
+ scale_factor = img_meta[0]['scale_factor']
+ flip = img_meta[0]['flip']
+ flip_direction = img_meta[0]['flip_direction']
+ # TODO more flexible
+ proposals = bbox_mapping(proposal_list[0][:, :4], img_shape,
+ scale_factor, flip, flip_direction)
+ rois = bbox2roi([proposals])
+ bbox_results = self._bbox_forward(x, rois)
+ bboxes, scores = self.bbox_head.get_bboxes(
+ rois,
+ bbox_results['cls_score'],
+ bbox_results['bbox_pred'],
+ img_shape,
+ scale_factor,
+ rescale=False,
+ cfg=None)
+ aug_bboxes.append(bboxes)
+ aug_scores.append(scores)
+ # after merging, bboxes will be rescaled to the original image size
+ merged_bboxes, merged_scores = merge_aug_bboxes(
+ aug_bboxes, aug_scores, img_metas, rcnn_test_cfg)
+ det_bboxes, det_labels = multiclass_nms(merged_bboxes, merged_scores,
+ rcnn_test_cfg.score_thr,
+ rcnn_test_cfg.nms,
+ rcnn_test_cfg.max_per_img)
+ return det_bboxes, det_labels
+
+
+class MaskTestMixin(object):
+
+ if sys.version_info >= (3, 7):
+
+ async def async_test_mask(self,
+ x,
+ img_metas,
+ det_bboxes,
+ det_labels,
+ rescale=False,
+ mask_test_cfg=None):
+ """Asynchronized test for mask head without augmentation."""
+ # image shape of the first image in the batch (only one)
+ ori_shape = img_metas[0]['ori_shape']
+ scale_factor = img_metas[0]['scale_factor']
+ if det_bboxes.shape[0] == 0:
+ segm_result = [[] for _ in range(self.mask_head.num_classes)]
+ else:
+ if rescale and not isinstance(scale_factor,
+ (float, torch.Tensor)):
+ scale_factor = det_bboxes.new_tensor(scale_factor)
+ _bboxes = (
+ det_bboxes[:, :4] *
+ scale_factor if rescale else det_bboxes)
+ mask_rois = bbox2roi([_bboxes])
+ mask_feats = self.mask_roi_extractor(
+ x[:len(self.mask_roi_extractor.featmap_strides)],
+ mask_rois)
+
+ if self.with_shared_head:
+ mask_feats = self.shared_head(mask_feats)
+ if mask_test_cfg and mask_test_cfg.get('async_sleep_interval'):
+ sleep_interval = mask_test_cfg['async_sleep_interval']
+ else:
+ sleep_interval = 0.035
+ async with completed(
+ __name__,
+ 'mask_head_forward',
+ sleep_interval=sleep_interval):
+ mask_pred = self.mask_head(mask_feats)
+ segm_result = self.mask_head.get_seg_masks(
+ mask_pred, _bboxes, det_labels, self.test_cfg, ori_shape,
+ scale_factor, rescale)
+ return segm_result
+
+ def simple_test_mask(self,
+ x,
+ img_metas,
+ det_bboxes,
+ det_labels,
+ rescale=False):
+ """Simple test for mask head without augmentation."""
+ # image shapes of images in the batch
+ ori_shapes = tuple(meta['ori_shape'] for meta in img_metas)
+ scale_factors = tuple(meta['scale_factor'] for meta in img_metas)
+
+ # The length of proposals of different batches may be different.
+ # In order to form a batch, a padding operation is required.
+ if isinstance(det_bboxes, list):
+ # padding to form a batch
+ max_size = max([bboxes.size(0) for bboxes in det_bboxes])
+ for i, (bbox, label) in enumerate(zip(det_bboxes, det_labels)):
+ supplement_bbox = bbox.new_full(
+ (max_size - bbox.size(0), bbox.size(1)), 0)
+ supplement_label = label.new_full((max_size - label.size(0), ),
+ 0)
+ det_bboxes[i] = torch.cat((supplement_bbox, bbox), dim=0)
+ det_labels[i] = torch.cat((supplement_label, label), dim=0)
+ det_bboxes = torch.stack(det_bboxes, dim=0)
+ det_labels = torch.stack(det_labels, dim=0)
+
+ batch_size = det_bboxes.size(0)
+ num_proposals_per_img = det_bboxes.shape[1]
+
+ # if det_bboxes is rescaled to the original image size, we need to
+ # rescale it back to the testing scale to obtain RoIs.
+ det_bboxes = det_bboxes[..., :4]
+ if rescale:
+ if not isinstance(scale_factors[0], float):
+ scale_factors = det_bboxes.new_tensor(scale_factors)
+ det_bboxes = det_bboxes * scale_factors.unsqueeze(1)
+
+ batch_index = torch.arange(
+ det_bboxes.size(0), device=det_bboxes.device).float().view(
+ -1, 1, 1).expand(det_bboxes.size(0), det_bboxes.size(1), 1)
+ mask_rois = torch.cat([batch_index, det_bboxes], dim=-1)
+ mask_rois = mask_rois.view(-1, 5)
+ mask_results = self._mask_forward(x, mask_rois)
+ mask_pred = mask_results['mask_pred']
+
+ # Recover the batch dimension
+ mask_preds = mask_pred.reshape(batch_size, num_proposals_per_img,
+ *mask_pred.shape[1:])
+
+ # apply mask post-processing to each image individually
+ segm_results = []
+ for i in range(batch_size):
+ mask_pred = mask_preds[i]
+ det_bbox = det_bboxes[i]
+ det_label = det_labels[i]
+
+ # remove padding
+ supplement_mask = det_bbox[..., -1] != 0
+ mask_pred = mask_pred[supplement_mask]
+ det_bbox = det_bbox[supplement_mask]
+ det_label = det_label[supplement_mask]
+
+ if det_label.shape[0] == 0:
+ segm_results.append([[]
+ for _ in range(self.mask_head.num_classes)
+ ])
+ else:
+ segm_result = self.mask_head.get_seg_masks(
+ mask_pred, det_bbox, det_label, self.test_cfg,
+ ori_shapes[i], scale_factors[i], rescale)
+ segm_results.append(segm_result)
+ return segm_results
+
+ def aug_test_mask(self, feats, img_metas, det_bboxes, det_labels):
+ """Test for mask head with test time augmentation."""
+ if det_bboxes.shape[0] == 0:
+ segm_result = [[] for _ in range(self.mask_head.num_classes)]
+ else:
+ aug_masks = []
+ for x, img_meta in zip(feats, img_metas):
+ img_shape = img_meta[0]['img_shape']
+ scale_factor = img_meta[0]['scale_factor']
+ flip = img_meta[0]['flip']
+ flip_direction = img_meta[0]['flip_direction']
+ _bboxes = bbox_mapping(det_bboxes[:, :4], img_shape,
+ scale_factor, flip, flip_direction)
+ mask_rois = bbox2roi([_bboxes])
+ mask_results = self._mask_forward(x, mask_rois)
+ # convert to numpy array to save memory
+ aug_masks.append(
+ mask_results['mask_pred'].sigmoid().cpu().numpy())
+ merged_masks = merge_aug_masks(aug_masks, img_metas, self.test_cfg)
+
+ ori_shape = img_metas[0][0]['ori_shape']
+ segm_result = self.mask_head.get_seg_masks(
+ merged_masks,
+ det_bboxes,
+ det_labels,
+ self.test_cfg,
+ ori_shape,
+ scale_factor=1.0,
+ rescale=False)
+ return segm_result
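+
+
+# --- Illustrative sketch (not part of upstream mmdet) ---
+# A self-contained demonstration of the padding scheme used in
+# `simple_test_bboxes` above: shorter proposal sets are padded with
+# all-zero rows at the front so a batch tensor can be stacked; the
+# padded rows are removed later via the `rois[..., -1] == 0` mask.
+# The tensors are dummies.
+if __name__ == '__main__':
+    proposals = [torch.rand(3, 5), torch.rand(5, 5)]
+    max_size = max(p.size(0) for p in proposals)
+    padded = []
+    for p in proposals:
+        supplement = p.new_full((max_size - p.size(0), p.size(1)), 0)
+        padded.append(torch.cat((supplement, p), dim=0))
+    rois = torch.stack(padded, dim=0)
+    assert rois.shape == (2, max_size, 5)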
diff --git a/annotator/uniformer/mmdet/models/roi_heads/trident_roi_head.py b/annotator/uniformer/mmdet/models/roi_heads/trident_roi_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..245569e50b45cc8e21ba8e7210edf4bd0c7f27c5
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/roi_heads/trident_roi_head.py
@@ -0,0 +1,119 @@
+import torch
+from mmcv.ops import batched_nms
+
+from mmdet.core import (bbox2result, bbox2roi, bbox_mapping, merge_aug_bboxes,
+ multiclass_nms)
+from mmdet.models.roi_heads.standard_roi_head import StandardRoIHead
+from ..builder import HEADS
+
+
+@HEADS.register_module()
+class TridentRoIHead(StandardRoIHead):
+ """Trident roi head.
+
+ Args:
+ num_branch (int): Number of branches in TridentNet.
+ test_branch_idx (int): In inference, all 3 branches will be used
+ if `test_branch_idx==-1`, otherwise only branch with index
+ `test_branch_idx` will be used.
+ """
+
+ def __init__(self, num_branch, test_branch_idx, **kwargs):
+ self.num_branch = num_branch
+ self.test_branch_idx = test_branch_idx
+ super(TridentRoIHead, self).__init__(**kwargs)
+
+ def merge_trident_bboxes(self, trident_det_bboxes, trident_det_labels):
+ """Merge bbox predictions of each branch."""
+ if trident_det_bboxes.numel() == 0:
+ det_bboxes = trident_det_bboxes.new_zeros((0, 5))
+ det_labels = trident_det_bboxes.new_zeros((0, ), dtype=torch.long)
+ else:
+ nms_bboxes = trident_det_bboxes[:, :4]
+ nms_scores = trident_det_bboxes[:, 4].contiguous()
+ nms_inds = trident_det_labels
+ nms_cfg = self.test_cfg['nms']
+ det_bboxes, keep = batched_nms(nms_bboxes, nms_scores, nms_inds,
+ nms_cfg)
+ det_labels = trident_det_labels[keep]
+ if self.test_cfg['max_per_img'] > 0:
+ det_labels = det_labels[:self.test_cfg['max_per_img']]
+ det_bboxes = det_bboxes[:self.test_cfg['max_per_img']]
+
+ return det_bboxes, det_labels
+
+ def simple_test(self,
+ x,
+ proposal_list,
+ img_metas,
+ proposals=None,
+ rescale=False):
+ """Test without augmentation as follows:
+
+ 1. Compute prediction bbox and label per branch.
+ 2. Merge predictions of each branch according to scores of
+ bboxes, i.e., bboxes with higher score are kept to give
+ top-k prediction.
+ """
+ assert self.with_bbox, 'Bbox head must be implemented.'
+ det_bboxes_list, det_labels_list = self.simple_test_bboxes(
+ x, img_metas, proposal_list, self.test_cfg, rescale=rescale)
+ num_branch = self.num_branch if self.test_branch_idx == -1 else 1
+ for _ in range(len(det_bboxes_list)):
+ if det_bboxes_list[_].shape[0] == 0:
+ det_bboxes_list[_] = det_bboxes_list[_].new_empty((0, 5))
+ det_bboxes, det_labels = [], []
+ for i in range(len(img_metas) // num_branch):
+ det_result = self.merge_trident_bboxes(
+ torch.cat(det_bboxes_list[i * num_branch:(i + 1) *
+ num_branch]),
+ torch.cat(det_labels_list[i * num_branch:(i + 1) *
+ num_branch]))
+ det_bboxes.append(det_result[0])
+ det_labels.append(det_result[1])
+
+ bbox_results = [
+ bbox2result(det_bboxes[i], det_labels[i],
+ self.bbox_head.num_classes)
+ for i in range(len(det_bboxes))
+ ]
+ return bbox_results
+
+ def aug_test_bboxes(self, feats, img_metas, proposal_list, rcnn_test_cfg):
+ """Test det bboxes with test time augmentation."""
+ aug_bboxes = []
+ aug_scores = []
+ for x, img_meta in zip(feats, img_metas):
+ # only one image in the batch
+ img_shape = img_meta[0]['img_shape']
+ scale_factor = img_meta[0]['scale_factor']
+ flip = img_meta[0]['flip']
+ flip_direction = img_meta[0]['flip_direction']
+
+ trident_bboxes, trident_scores = [], []
+ for branch_idx in range(len(proposal_list)):
+ proposals = bbox_mapping(proposal_list[0][:, :4], img_shape,
+ scale_factor, flip, flip_direction)
+ rois = bbox2roi([proposals])
+ bbox_results = self._bbox_forward(x, rois)
+ bboxes, scores = self.bbox_head.get_bboxes(
+ rois,
+ bbox_results['cls_score'],
+ bbox_results['bbox_pred'],
+ img_shape,
+ scale_factor,
+ rescale=False,
+ cfg=None)
+ trident_bboxes.append(bboxes)
+ trident_scores.append(scores)
+
+ aug_bboxes.append(torch.cat(trident_bboxes, 0))
+ aug_scores.append(torch.cat(trident_scores, 0))
+ # after merging, bboxes will be rescaled to the original image size
+ merged_bboxes, merged_scores = merge_aug_bboxes(
+ aug_bboxes, aug_scores, img_metas, rcnn_test_cfg)
+ det_bboxes, det_labels = multiclass_nms(merged_bboxes, merged_scores,
+ rcnn_test_cfg.score_thr,
+ rcnn_test_cfg.nms,
+ rcnn_test_cfg.max_per_img)
+ return det_bboxes, det_labels
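+
+
+# --- Illustrative note (not part of upstream mmdet) ---
+# `merge_trident_bboxes` above concatenates the detections of all
+# branches and reduces them with class-aware `batched_nms`, using the
+# predicted labels as the NMS group index so different classes never
+# suppress each other. The fragment below lists the two test-time keys
+# it reads; the values are common defaults, not prescriptions.
+EXAMPLE_TRIDENT_TEST_CFG = dict(
+    nms=dict(type='nms', iou_threshold=0.5),
+    max_per_img=100)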
diff --git a/annotator/uniformer/mmdet/models/utils/__init__.py b/annotator/uniformer/mmdet/models/utils/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..5165b22ce57d17f28392213e0f1b055c2b9360c1
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/utils/__init__.py
@@ -0,0 +1,16 @@
+from .builder import build_positional_encoding, build_transformer
+from .gaussian_target import gaussian_radius, gen_gaussian_target
+from .positional_encoding import (LearnedPositionalEncoding,
+ SinePositionalEncoding)
+from .res_layer import ResLayer, SimplifiedBasicBlock
+from .transformer import (FFN, DynamicConv, MultiheadAttention, Transformer,
+ TransformerDecoder, TransformerDecoderLayer,
+ TransformerEncoder, TransformerEncoderLayer)
+
+__all__ = [
+ 'ResLayer', 'gaussian_radius', 'gen_gaussian_target', 'MultiheadAttention',
+ 'FFN', 'TransformerEncoderLayer', 'TransformerEncoder',
+ 'TransformerDecoderLayer', 'TransformerDecoder', 'Transformer',
+ 'build_transformer', 'build_positional_encoding', 'SinePositionalEncoding',
+ 'LearnedPositionalEncoding', 'DynamicConv', 'SimplifiedBasicBlock'
+]
diff --git a/annotator/uniformer/mmdet/models/utils/builder.py b/annotator/uniformer/mmdet/models/utils/builder.py
new file mode 100644
index 0000000000000000000000000000000000000000..f362d1c92ca9d4ed95a2b3d28d3e6baedd14e462
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/utils/builder.py
@@ -0,0 +1,14 @@
+from mmcv.utils import Registry, build_from_cfg
+
+TRANSFORMER = Registry('Transformer')
+POSITIONAL_ENCODING = Registry('Position encoding')
+
+
+def build_transformer(cfg, default_args=None):
+ """Builder for Transformer."""
+ return build_from_cfg(cfg, TRANSFORMER, default_args)
+
+
+def build_positional_encoding(cfg, default_args=None):
+ """Builder for Position Encoding."""
+ return build_from_cfg(cfg, POSITIONAL_ENCODING, default_args)
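+
+
+# --- Illustrative sketch (not part of upstream mmdet) ---
+# A minimal round-trip through the registry defined above: register a
+# dummy module, then rebuild it from a plain config dict. The
+# `DummyTransformer` class is invented for this sketch only and is not
+# an mmdet component.
+if __name__ == '__main__':
+    import torch.nn as nn
+
+    @TRANSFORMER.register_module()
+    class DummyTransformer(nn.Module):
+
+        def forward(self, x):
+            return x
+
+    print(build_transformer(dict(type='DummyTransformer')))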
diff --git a/annotator/uniformer/mmdet/models/utils/gaussian_target.py b/annotator/uniformer/mmdet/models/utils/gaussian_target.py
new file mode 100644
index 0000000000000000000000000000000000000000..7bb7160cb4bf2f47876f6e8373142aa5846920a9
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/utils/gaussian_target.py
@@ -0,0 +1,185 @@
+from math import sqrt
+
+import torch
+
+
+def gaussian2D(radius, sigma=1, dtype=torch.float32, device='cpu'):
+ """Generate 2D gaussian kernel.
+
+ Args:
+ radius (int): Radius of gaussian kernel.
+ sigma (int): Sigma of gaussian function. Default: 1.
+ dtype (torch.dtype): Dtype of gaussian tensor. Default: torch.float32.
+ device (str): Device of gaussian tensor. Default: 'cpu'.
+
+ Returns:
+ h (Tensor): Gaussian kernel with a
+ ``(2 * radius + 1) * (2 * radius + 1)`` shape.
+ """
+ x = torch.arange(
+ -radius, radius + 1, dtype=dtype, device=device).view(1, -1)
+ y = torch.arange(
+ -radius, radius + 1, dtype=dtype, device=device).view(-1, 1)
+
+ h = (-(x * x + y * y) / (2 * sigma * sigma)).exp()
+
+ h[h < torch.finfo(h.dtype).eps * h.max()] = 0
+ return h
+
+
+def gen_gaussian_target(heatmap, center, radius, k=1):
+ """Generate 2D gaussian heatmap.
+
+ Args:
+ heatmap (Tensor): Input heatmap, the gaussian kernel will cover on
+ it and maintain the max value.
+ center (list[int]): Coord of gaussian kernel's center.
+ radius (int): Radius of gaussian kernel.
+ k (int): Coefficient of gaussian kernel. Default: 1.
+
+ Returns:
+ out_heatmap (Tensor): Updated heatmap covered by gaussian kernel.
+ """
+ diameter = 2 * radius + 1
+ gaussian_kernel = gaussian2D(
+ radius, sigma=diameter / 6, dtype=heatmap.dtype, device=heatmap.device)
+
+ x, y = center
+
+ height, width = heatmap.shape[:2]
+
+ left, right = min(x, radius), min(width - x, radius + 1)
+ top, bottom = min(y, radius), min(height - y, radius + 1)
+
+ masked_heatmap = heatmap[y - top:y + bottom, x - left:x + right]
+ masked_gaussian = gaussian_kernel[radius - top:radius + bottom,
+ radius - left:radius + right]
+ out_heatmap = heatmap
+ torch.max(
+ masked_heatmap,
+ masked_gaussian * k,
+ out=out_heatmap[y - top:y + bottom, x - left:x + right])
+
+ return out_heatmap
+
+
+def gaussian_radius(det_size, min_overlap):
+ r"""Generate 2D gaussian radius.
+
+    This function is modified from the `official github repo
+    <https://github.com/princeton-vl/CornerNet-Lite>`_.
+
+    Given ``min_overlap``, the radius can be computed by a quadratic equation
+ according to Vieta's formulas.
+
+ There are 3 cases for computing gaussian radius, details are following:
+
+ - Explanation of figure: ``lt`` and ``br`` indicates the left-top and
+ bottom-right corner of ground truth box. ``x`` indicates the
+ generated corner at the limited position when ``radius=r``.
+
+ - Case1: one corner is inside the gt box and the other is outside.
+
+ .. code:: text
+
+ |< width >|
+
+ lt-+----------+ -
+ | | | ^
+ +--x----------+--+
+ | | | |
+ | | | | height
+ | | overlap | |
+ | | | |
+ | | | | v
+ +--+---------br--+ -
+ | | |
+ +----------+--x
+
+ To ensure IoU of generated box and gt box is larger than ``min_overlap``:
+
+ .. math::
+ \cfrac{(w-r)*(h-r)}{w*h+(w+h)r-r^2} \ge {iou} \quad\Rightarrow\quad
+ {r^2-(w+h)r+\cfrac{1-iou}{1+iou}*w*h} \ge 0 \\
+ {a} = 1,\quad{b} = {-(w+h)},\quad{c} = {\cfrac{1-iou}{1+iou}*w*h}
+ {r} \le \cfrac{-b-\sqrt{b^2-4*a*c}}{2*a}
+
+ - Case2: both two corners are inside the gt box.
+
+ .. code:: text
+
+ |< width >|
+
+ lt-+----------+ -
+ | | | ^
+ +--x-------+ |
+ | | | |
+ | |overlap| | height
+ | | | |
+ | +-------x--+
+ | | | v
+ +----------+-br -
+
+ To ensure IoU of generated box and gt box is larger than ``min_overlap``:
+
+ .. math::
+ \cfrac{(w-2*r)*(h-2*r)}{w*h} \ge {iou} \quad\Rightarrow\quad
+ {4r^2-2(w+h)r+(1-iou)*w*h} \ge 0 \\
+ {a} = 4,\quad {b} = {-2(w+h)},\quad {c} = {(1-iou)*w*h}
+ {r} \le \cfrac{-b-\sqrt{b^2-4*a*c}}{2*a}
+
+ - Case3: both two corners are outside the gt box.
+
+ .. code:: text
+
+ |< width >|
+
+ x--+----------------+
+ | | |
+ +-lt-------------+ | -
+ | | | | ^
+ | | | |
+ | | overlap | | height
+ | | | |
+ | | | | v
+ | +------------br--+ -
+ | | |
+ +----------------+--x
+
+ To ensure IoU of generated box and gt box is larger than ``min_overlap``:
+
+ .. math::
+ \cfrac{w*h}{(w+2*r)*(h+2*r)} \ge {iou} \quad\Rightarrow\quad
+ {4*iou*r^2+2*iou*(w+h)r+(iou-1)*w*h} \le 0 \\
+ {a} = {4*iou},\quad {b} = {2*iou*(w+h)},\quad {c} = {(iou-1)*w*h} \\
+ {r} \le \cfrac{-b+\sqrt{b^2-4*a*c}}{2*a}
+
+ Args:
+ det_size (list[int]): Shape of object.
+ min_overlap (float): Min IoU with ground truth for boxes generated by
+ keypoints inside the gaussian kernel.
+
+ Returns:
+ radius (int): Radius of gaussian kernel.
+ """
+ height, width = det_size
+
+ a1 = 1
+ b1 = (height + width)
+ c1 = width * height * (1 - min_overlap) / (1 + min_overlap)
+ sq1 = sqrt(b1**2 - 4 * a1 * c1)
+ r1 = (b1 - sq1) / (2 * a1)
+
+ a2 = 4
+ b2 = 2 * (height + width)
+ c2 = (1 - min_overlap) * width * height
+ sq2 = sqrt(b2**2 - 4 * a2 * c2)
+ r2 = (b2 - sq2) / (2 * a2)
+
+ a3 = 4 * min_overlap
+ b3 = -2 * min_overlap * (height + width)
+ c3 = (min_overlap - 1) * width * height
+ sq3 = sqrt(b3**2 - 4 * a3 * c3)
+ r3 = (b3 + sq3) / (2 * a3)
+ return min(r1, r2, r3)
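+
+
+# --- Illustrative sketch (not part of upstream mmdet) ---
+# Minimal end-to-end use of the two helpers above: choose a radius from
+# a box size and a minimum IoU, then splat a gaussian peak onto a
+# heatmap, as CornerNet/CenterNet style heads do. The sizes are dummies.
+if __name__ == '__main__':
+    heatmap = torch.zeros(128, 128)
+    radius = max(0, int(gaussian_radius((24, 32), min_overlap=0.7)))
+    heatmap = gen_gaussian_target(heatmap, center=[64, 64], radius=radius)
+    assert heatmap.max() <= 1.0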
diff --git a/annotator/uniformer/mmdet/models/utils/positional_encoding.py b/annotator/uniformer/mmdet/models/utils/positional_encoding.py
new file mode 100644
index 0000000000000000000000000000000000000000..9bda2bbdbfcc28ba6304b6325ae556fa02554ac1
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/utils/positional_encoding.py
@@ -0,0 +1,150 @@
+import math
+
+import torch
+import torch.nn as nn
+from mmcv.cnn import uniform_init
+
+from .builder import POSITIONAL_ENCODING
+
+
+@POSITIONAL_ENCODING.register_module()
+class SinePositionalEncoding(nn.Module):
+ """Position encoding with sine and cosine functions.
+
+ See `End-to-End Object Detection with Transformers
+    <https://arxiv.org/abs/2005.12872>`_ for details.
+
+ Args:
+ num_feats (int): The feature dimension for each position
+ along x-axis or y-axis. Note the final returned dimension
+ for each position is 2 times of this value.
+ temperature (int, optional): The temperature used for scaling
+ the position embedding. Default 10000.
+ normalize (bool, optional): Whether to normalize the position
+ embedding. Default False.
+ scale (float, optional): A scale factor that scales the position
+ embedding. The scale will be used only when `normalize` is True.
+ Default 2*pi.
+ eps (float, optional): A value added to the denominator for
+ numerical stability. Default 1e-6.
+ """
+
+ def __init__(self,
+ num_feats,
+ temperature=10000,
+ normalize=False,
+ scale=2 * math.pi,
+ eps=1e-6):
+ super(SinePositionalEncoding, self).__init__()
+ if normalize:
+ assert isinstance(scale, (float, int)), 'when normalize is set,' \
+ 'scale should be provided and in float or int type, ' \
+ f'found {type(scale)}'
+ self.num_feats = num_feats
+ self.temperature = temperature
+ self.normalize = normalize
+ self.scale = scale
+ self.eps = eps
+
+ def forward(self, mask):
+ """Forward function for `SinePositionalEncoding`.
+
+ Args:
+            mask (Tensor): ByteTensor mask. Non-zero values represent
+                ignored positions, while zero values mean valid positions
+ for this image. Shape [bs, h, w].
+
+ Returns:
+ pos (Tensor): Returned position embedding with shape
+ [bs, num_feats*2, h, w].
+ """
+ not_mask = ~mask
+ y_embed = not_mask.cumsum(1, dtype=torch.float32)
+ x_embed = not_mask.cumsum(2, dtype=torch.float32)
+ if self.normalize:
+ y_embed = y_embed / (y_embed[:, -1:, :] + self.eps) * self.scale
+ x_embed = x_embed / (x_embed[:, :, -1:] + self.eps) * self.scale
+ dim_t = torch.arange(
+ self.num_feats, dtype=torch.float32, device=mask.device)
+ dim_t = self.temperature**(2 * (dim_t // 2) / self.num_feats)
+ pos_x = x_embed[:, :, :, None] / dim_t
+ pos_y = y_embed[:, :, :, None] / dim_t
+ pos_x = torch.stack(
+ (pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()),
+ dim=4).flatten(3)
+ pos_y = torch.stack(
+ (pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()),
+ dim=4).flatten(3)
+ pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
+ return pos
+
+ def __repr__(self):
+ """str: a string that describes the module"""
+ repr_str = self.__class__.__name__
+ repr_str += f'(num_feats={self.num_feats}, '
+ repr_str += f'temperature={self.temperature}, '
+ repr_str += f'normalize={self.normalize}, '
+ repr_str += f'scale={self.scale}, '
+ repr_str += f'eps={self.eps})'
+ return repr_str
+
+
+@POSITIONAL_ENCODING.register_module()
+class LearnedPositionalEncoding(nn.Module):
+ """Position embedding with learnable embedding weights.
+
+ Args:
+ num_feats (int): The feature dimension for each position
+ along x-axis or y-axis. The final returned dimension for
+ each position is 2 times of this value.
+ row_num_embed (int, optional): The dictionary size of row embeddings.
+ Default 50.
+ col_num_embed (int, optional): The dictionary size of col embeddings.
+ Default 50.
+ """
+
+ def __init__(self, num_feats, row_num_embed=50, col_num_embed=50):
+ super(LearnedPositionalEncoding, self).__init__()
+ self.row_embed = nn.Embedding(row_num_embed, num_feats)
+ self.col_embed = nn.Embedding(col_num_embed, num_feats)
+ self.num_feats = num_feats
+ self.row_num_embed = row_num_embed
+ self.col_num_embed = col_num_embed
+ self.init_weights()
+
+ def init_weights(self):
+ """Initialize the learnable weights."""
+ uniform_init(self.row_embed)
+ uniform_init(self.col_embed)
+
+ def forward(self, mask):
+ """Forward function for `LearnedPositionalEncoding`.
+
+ Args:
+            mask (Tensor): ByteTensor mask. Non-zero values represent
+                ignored positions, while zero values mean valid positions
+ for this image. Shape [bs, h, w].
+
+ Returns:
+ pos (Tensor): Returned position embedding with shape
+ [bs, num_feats*2, h, w].
+ """
+ h, w = mask.shape[-2:]
+ x = torch.arange(w, device=mask.device)
+ y = torch.arange(h, device=mask.device)
+ x_embed = self.col_embed(x)
+ y_embed = self.row_embed(y)
+ pos = torch.cat(
+ (x_embed.unsqueeze(0).repeat(h, 1, 1), y_embed.unsqueeze(1).repeat(
+ 1, w, 1)),
+ dim=-1).permute(2, 0,
+ 1).unsqueeze(0).repeat(mask.shape[0], 1, 1, 1)
+ return pos
+
+ def __repr__(self):
+ """str: a string that describes the module"""
+ repr_str = self.__class__.__name__
+ repr_str += f'(num_feats={self.num_feats}, '
+ repr_str += f'row_num_embed={self.row_num_embed}, '
+ repr_str += f'col_num_embed={self.col_num_embed})'
+ return repr_str
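+
+
+# --- Illustrative sketch (not part of upstream mmdet) ---
+# Minimal usage of the sine variant above: a padding mask of shape
+# [bs, h, w] (non-zero = padded) yields an embedding of shape
+# [bs, 2 * num_feats, h, w]. The shapes are dummies.
+if __name__ == '__main__':
+    pos_enc = SinePositionalEncoding(num_feats=128, normalize=True)
+    mask = torch.zeros(2, 32, 32, dtype=torch.bool)  # no padded pixels
+    pos = pos_enc(mask)
+    assert pos.shape == (2, 256, 32, 32)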
diff --git a/annotator/uniformer/mmdet/models/utils/res_layer.py b/annotator/uniformer/mmdet/models/utils/res_layer.py
new file mode 100644
index 0000000000000000000000000000000000000000..4a4efd3dd30b30123ed5135eac080ad9f7f7b448
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/utils/res_layer.py
@@ -0,0 +1,187 @@
+from mmcv.cnn import build_conv_layer, build_norm_layer
+from torch import nn as nn
+
+
+class ResLayer(nn.Sequential):
+ """ResLayer to build ResNet style backbone.
+
+ Args:
+ block (nn.Module): block used to build ResLayer.
+ inplanes (int): inplanes of block.
+ planes (int): planes of block.
+ num_blocks (int): number of blocks.
+ stride (int): stride of the first block. Default: 1
+ avg_down (bool): Use AvgPool instead of stride conv when
+ downsampling in the bottleneck. Default: False
+ conv_cfg (dict): dictionary to construct and config conv layer.
+ Default: None
+ norm_cfg (dict): dictionary to construct and config norm layer.
+ Default: dict(type='BN')
+ downsample_first (bool): Downsample at the first block or last block.
+ False for Hourglass, True for ResNet. Default: True
+ """
+
+ def __init__(self,
+ block,
+ inplanes,
+ planes,
+ num_blocks,
+ stride=1,
+ avg_down=False,
+ conv_cfg=None,
+ norm_cfg=dict(type='BN'),
+ downsample_first=True,
+ **kwargs):
+ self.block = block
+
+ downsample = None
+ if stride != 1 or inplanes != planes * block.expansion:
+ downsample = []
+ conv_stride = stride
+ if avg_down:
+ conv_stride = 1
+ downsample.append(
+ nn.AvgPool2d(
+ kernel_size=stride,
+ stride=stride,
+ ceil_mode=True,
+ count_include_pad=False))
+ downsample.extend([
+ build_conv_layer(
+ conv_cfg,
+ inplanes,
+ planes * block.expansion,
+ kernel_size=1,
+ stride=conv_stride,
+ bias=False),
+ build_norm_layer(norm_cfg, planes * block.expansion)[1]
+ ])
+ downsample = nn.Sequential(*downsample)
+
+ layers = []
+ if downsample_first:
+ layers.append(
+ block(
+ inplanes=inplanes,
+ planes=planes,
+ stride=stride,
+ downsample=downsample,
+ conv_cfg=conv_cfg,
+ norm_cfg=norm_cfg,
+ **kwargs))
+ inplanes = planes * block.expansion
+ for _ in range(1, num_blocks):
+ layers.append(
+ block(
+ inplanes=inplanes,
+ planes=planes,
+ stride=1,
+ conv_cfg=conv_cfg,
+ norm_cfg=norm_cfg,
+ **kwargs))
+
+ else: # downsample_first=False is for HourglassModule
+ for _ in range(num_blocks - 1):
+ layers.append(
+ block(
+ inplanes=inplanes,
+ planes=inplanes,
+ stride=1,
+ conv_cfg=conv_cfg,
+ norm_cfg=norm_cfg,
+ **kwargs))
+ layers.append(
+ block(
+ inplanes=inplanes,
+ planes=planes,
+ stride=stride,
+ downsample=downsample,
+ conv_cfg=conv_cfg,
+ norm_cfg=norm_cfg,
+ **kwargs))
+ super(ResLayer, self).__init__(*layers)
+
+
+class SimplifiedBasicBlock(nn.Module):
+    """Simplified version of the original basic residual block. This is used in
+    `SCNet <https://arxiv.org/abs/2012.10150>`_.
+
+ - Norm layer is now optional
+ - Last ReLU in forward function is removed
+ """
+ expansion = 1
+
+ def __init__(self,
+ inplanes,
+ planes,
+ stride=1,
+ dilation=1,
+ downsample=None,
+ style='pytorch',
+ with_cp=False,
+ conv_cfg=None,
+ norm_cfg=dict(type='BN'),
+ dcn=None,
+ plugins=None):
+ super(SimplifiedBasicBlock, self).__init__()
+ assert dcn is None, 'Not implemented yet.'
+ assert plugins is None, 'Not implemented yet.'
+ assert not with_cp, 'Not implemented yet.'
+ self.with_norm = norm_cfg is not None
+ with_bias = True if norm_cfg is None else False
+ self.conv1 = build_conv_layer(
+ conv_cfg,
+ inplanes,
+ planes,
+ 3,
+ stride=stride,
+ padding=dilation,
+ dilation=dilation,
+ bias=with_bias)
+ if self.with_norm:
+ self.norm1_name, norm1 = build_norm_layer(
+ norm_cfg, planes, postfix=1)
+ self.add_module(self.norm1_name, norm1)
+ self.conv2 = build_conv_layer(
+ conv_cfg, planes, planes, 3, padding=1, bias=with_bias)
+ if self.with_norm:
+ self.norm2_name, norm2 = build_norm_layer(
+ norm_cfg, planes, postfix=2)
+ self.add_module(self.norm2_name, norm2)
+
+ self.relu = nn.ReLU(inplace=True)
+ self.downsample = downsample
+ self.stride = stride
+ self.dilation = dilation
+ self.with_cp = with_cp
+
+ @property
+ def norm1(self):
+ """nn.Module: normalization layer after the first convolution layer"""
+ return getattr(self, self.norm1_name) if self.with_norm else None
+
+ @property
+ def norm2(self):
+ """nn.Module: normalization layer after the second convolution layer"""
+ return getattr(self, self.norm2_name) if self.with_norm else None
+
+ def forward(self, x):
+ """Forward function."""
+
+ identity = x
+
+ out = self.conv1(x)
+ if self.with_norm:
+ out = self.norm1(out)
+ out = self.relu(out)
+
+ out = self.conv2(out)
+ if self.with_norm:
+ out = self.norm2(out)
+
+ if self.downsample is not None:
+ identity = self.downsample(x)
+
+ out += identity
+
+ return out
diff --git a/annotator/uniformer/mmdet/models/utils/transformer.py b/annotator/uniformer/mmdet/models/utils/transformer.py
new file mode 100644
index 0000000000000000000000000000000000000000..83870eead42f4b0bf73c9e19248d5512d3d044c5
--- /dev/null
+++ b/annotator/uniformer/mmdet/models/utils/transformer.py
@@ -0,0 +1,860 @@
+import torch
+import torch.nn as nn
+from mmcv.cnn import (Linear, build_activation_layer, build_norm_layer,
+ xavier_init)
+
+from .builder import TRANSFORMER
+
+
+class MultiheadAttention(nn.Module):
+ """A warpper for torch.nn.MultiheadAttention.
+
+ This module implements MultiheadAttention with residual connection,
+ and positional encoding used in DETR is also passed as input.
+
+ Args:
+ embed_dims (int): The embedding dimension.
+ num_heads (int): Parallel attention heads. Same as
+ `nn.MultiheadAttention`.
+ dropout (float): Dropout probability on attn_output_weights. Default 0.0.
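+
+ Example (an added usage sketch; tensor shapes are illustrative only):
+ >>> self_attn = MultiheadAttention(embed_dims=256, num_heads=8)
+ >>> x = torch.rand(100, 2, 256) # [num_query, bs, embed_dims]
+ >>> pos = torch.rand(100, 2, 256) # positional encoding for x
+ >>> self_attn(x, query_pos=pos).shape
+ torch.Size([100, 2, 256])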
+ """
+
+ def __init__(self, embed_dims, num_heads, dropout=0.0):
+ super(MultiheadAttention, self).__init__()
+ assert embed_dims % num_heads == 0, 'embed_dims must be ' \
+ f'divisible by num_heads. got {embed_dims} and {num_heads}.'
+ self.embed_dims = embed_dims
+ self.num_heads = num_heads
+ self.dropout = dropout
+ self.attn = nn.MultiheadAttention(embed_dims, num_heads, dropout)
+ self.dropout = nn.Dropout(dropout)
+
+ def forward(self,
+ x,
+ key=None,
+ value=None,
+ residual=None,
+ query_pos=None,
+ key_pos=None,
+ attn_mask=None,
+ key_padding_mask=None):
+ """Forward function for `MultiheadAttention`.
+
+ Args:
+ x (Tensor): The input query with shape [num_query, bs,
+ embed_dims]. Same in `nn.MultiheadAttention.forward`.
+ key (Tensor): The key tensor with shape [num_key, bs,
+ embed_dims]. Same in `nn.MultiheadAttention.forward`.
+ Default None. If None, the `query` will be used.
+ value (Tensor): The value tensor with same shape as `key`.
+ Same in `nn.MultiheadAttention.forward`. Default None.
+ If None, the `key` will be used.
+ residual (Tensor): The tensor used for addition, with the
+ same shape as `x`. Default None. If None, `x` will be used.
+ query_pos (Tensor): The positional encoding for query, with
+ the same shape as `x`. Default None. If not None, it will
+ be added to `x` before forward function.
+ key_pos (Tensor): The positional encoding for `key`, with the
+ same shape as `key`. Default None. If not None, it will
+ be added to `key` before forward function. If None, and
+ `query_pos` has the same shape as `key`, then `query_pos`
+ will be used for `key_pos`.
+ attn_mask (Tensor): ByteTensor mask with shape [num_query,
+ num_key]. Same in `nn.MultiheadAttention.forward`.
+ Default None.
+ key_padding_mask (Tensor): ByteTensor with shape [bs, num_key].
+ Same in `nn.MultiheadAttention.forward`. Default None.
+
+ Returns:
+ Tensor: forwarded results with shape [num_query, bs, embed_dims].
+ """
+ query = x
+ if key is None:
+ key = query
+ if value is None:
+ value = key
+ if residual is None:
+ residual = x
+ if key_pos is None:
+ if query_pos is not None and key is not None:
+ if query_pos.shape == key.shape:
+ key_pos = query_pos
+ if query_pos is not None:
+ query = query + query_pos
+ if key_pos is not None:
+ key = key + key_pos
+ out = self.attn(
+ query,
+ key,
+ value=value,
+ attn_mask=attn_mask,
+ key_padding_mask=key_padding_mask)[0]
+
+ return residual + self.dropout(out)
+
+ def __repr__(self):
+ """str: a string that describes the module"""
+ repr_str = self.__class__.__name__
+ repr_str += f'(embed_dims={self.embed_dims}, '
+ repr_str += f'num_heads={self.num_heads}, '
+ repr_str += f'dropout={self.dropout})'
+ return repr_str
+
+
+class FFN(nn.Module):
+ """Implements feed-forward networks (FFNs) with residual connection.
+
+ Args:
+ embed_dims (int): The feature dimension. Same as
+ `MultiheadAttention`.
+ feedforward_channels (int): The hidden dimension of FFNs.
+ num_fcs (int, optional): The number of fully-connected layers in
+ FFNs. Defaults to 2.
+ act_cfg (dict, optional): The activation config for FFNs.
+ dropout (float, optional): Probability of an element to be
+ zeroed. Default 0.0.
+ add_residual (bool, optional): Add residual connection.
+ Defaults to True.
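+
+ Example (an added usage sketch; tensor shapes are illustrative only):
+ >>> ffn = FFN(embed_dims=256, feedforward_channels=1024)
+ >>> x = torch.rand(100, 2, 256)
+ >>> ffn(x).shape # residual connection keeps the input shape
+ torch.Size([100, 2, 256])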
+ """
+
+ def __init__(self,
+ embed_dims,
+ feedforward_channels,
+ num_fcs=2,
+ act_cfg=dict(type='ReLU', inplace=True),
+ dropout=0.0,
+ add_residual=True):
+ super(FFN, self).__init__()
+ assert num_fcs >= 2, 'num_fcs should be no less ' \
+ f'than 2. got {num_fcs}.'
+ self.embed_dims = embed_dims
+ self.feedforward_channels = feedforward_channels
+ self.num_fcs = num_fcs
+ self.act_cfg = act_cfg
+ self.dropout = dropout
+ self.activate = build_activation_layer(act_cfg)
+
+ layers = nn.ModuleList()
+ in_channels = embed_dims
+ for _ in range(num_fcs - 1):
+ layers.append(
+ nn.Sequential(
+ Linear(in_channels, feedforward_channels), self.activate,
+ nn.Dropout(dropout)))
+ in_channels = feedforward_channels
+ layers.append(Linear(feedforward_channels, embed_dims))
+ self.layers = nn.Sequential(*layers)
+ self.dropout = nn.Dropout(dropout)
+ self.add_residual = add_residual
+
+ def forward(self, x, residual=None):
+ """Forward function for `FFN`."""
+ out = self.layers(x)
+ if not self.add_residual:
+ return out
+ if residual is None:
+ residual = x
+ return residual + self.dropout(out)
+
+ def __repr__(self):
+ """str: a string that describes the module"""
+ repr_str = self.__class__.__name__
+ repr_str += f'(embed_dims={self.embed_dims}, '
+ repr_str += f'feedforward_channels={self.feedforward_channels}, '
+ repr_str += f'num_fcs={self.num_fcs}, '
+ repr_str += f'act_cfg={self.act_cfg}, '
+ repr_str += f'dropout={self.dropout}, '
+ repr_str += f'add_residual={self.add_residual})'
+ return repr_str
+
+
+class TransformerEncoderLayer(nn.Module):
+ """Implements one encoder layer in DETR transformer.
+
+ Args:
+ embed_dims (int): The feature dimension. Same as `FFN`.
+ num_heads (int): Parallel attention heads.
+ feedforward_channels (int): The hidden dimension for FFNs.
+ dropout (float): Probability of an element to be zeroed. Default 0.0.
+ order (tuple[str]): The order for encoder layer. Valid examples are
+ ('selfattn', 'norm', 'ffn', 'norm') and ('norm', 'selfattn',
+ 'norm', 'ffn'). Default ('selfattn', 'norm', 'ffn', 'norm').
+ act_cfg (dict): The activation config for FFNs. Default ReLU.
+ norm_cfg (dict): Config dict for normalization layer. Default
+ layer normalization.
+ num_fcs (int): The number of fully-connected layers for FFNs.
+ Default 2.
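+
+ Example (an added usage sketch; tensor shapes are illustrative only):
+ >>> layer = TransformerEncoderLayer(256, 8, 1024)
+ >>> x = torch.rand(100, 2, 256) # [num_key, bs, embed_dims]
+ >>> layer(x).shape
+ torch.Size([100, 2, 256])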
+ """
+
+ def __init__(self,
+ embed_dims,
+ num_heads,
+ feedforward_channels,
+ dropout=0.0,
+ order=('selfattn', 'norm', 'ffn', 'norm'),
+ act_cfg=dict(type='ReLU', inplace=True),
+ norm_cfg=dict(type='LN'),
+ num_fcs=2):
+ super(TransformerEncoderLayer, self).__init__()
+ assert isinstance(order, tuple) and len(order) == 4
+ assert set(order) == set(['selfattn', 'norm', 'ffn'])
+ self.embed_dims = embed_dims
+ self.num_heads = num_heads
+ self.feedforward_channels = feedforward_channels
+ self.dropout = dropout
+ self.order = order
+ self.act_cfg = act_cfg
+ self.norm_cfg = norm_cfg
+ self.num_fcs = num_fcs
+ self.pre_norm = order[0] == 'norm'
+ self.self_attn = MultiheadAttention(embed_dims, num_heads, dropout)
+ self.ffn = FFN(embed_dims, feedforward_channels, num_fcs, act_cfg,
+ dropout)
+ self.norms = nn.ModuleList()
+ self.norms.append(build_norm_layer(norm_cfg, embed_dims)[1])
+ self.norms.append(build_norm_layer(norm_cfg, embed_dims)[1])
+
+ def forward(self, x, pos=None, attn_mask=None, key_padding_mask=None):
+ """Forward function for `TransformerEncoderLayer`.
+
+ Args:
+ x (Tensor): The input query with shape [num_key, bs,
+ embed_dims]. Same in `MultiheadAttention.forward`.
+ pos (Tensor): The positional encoding for query. Default None.
+ Same as `query_pos` in `MultiheadAttention.forward`.
+ attn_mask (Tensor): ByteTensor mask with shape [num_key,
+ num_key]. Same in `MultiheadAttention.forward`. Default None.
+ key_padding_mask (Tensor): ByteTensor with shape [bs, num_key].
+ Same in `MultiheadAttention.forward`. Default None.
+
+ Returns:
+ Tensor: forwarded results with shape [num_key, bs, embed_dims].
+ """
+ norm_cnt = 0
+ inp_residual = x
+ for layer in self.order:
+ if layer == 'selfattn':
+ # self attention
+ query = key = value = x
+ x = self.self_attn(
+ query,
+ key,
+ value,
+ inp_residual if self.pre_norm else None,
+ query_pos=pos,
+ key_pos=pos,
+ attn_mask=attn_mask,
+ key_padding_mask=key_padding_mask)
+ inp_residual = x
+ elif layer == 'norm':
+ x = self.norms[norm_cnt](x)
+ norm_cnt += 1
+ elif layer == 'ffn':
+ x = self.ffn(x, inp_residual if self.pre_norm else None)
+ return x
+
+ def __repr__(self):
+ """str: a string that describes the module"""
+ repr_str = self.__class__.__name__
+ repr_str += f'(embed_dims={self.embed_dims}, '
+ repr_str += f'num_heads={self.num_heads}, '
+ repr_str += f'feedforward_channels={self.feedforward_channels}, '
+ repr_str += f'dropout={self.dropout}, '
+ repr_str += f'order={self.order}, '
+ repr_str += f'act_cfg={self.act_cfg}, '
+ repr_str += f'norm_cfg={self.norm_cfg}, '
+ repr_str += f'num_fcs={self.num_fcs})'
+ return repr_str
+
+
+class TransformerDecoderLayer(nn.Module):
+ """Implements one decoder layer in DETR transformer.
+
+ Args:
+ embed_dims (int): The feature dimension. Same as
+ `TransformerEncoderLayer`.
+ num_heads (int): Parallel attention heads.
+ feedforward_channels (int): Same as `TransformerEncoderLayer`.
+ dropout (float): Same as `TransformerEncoderLayer`. Default 0.0.
+ order (tuple[str]): The order for decoder layer. Valid examples are
+ ('selfattn', 'norm', 'multiheadattn', 'norm', 'ffn', 'norm') and
+ ('norm', 'selfattn', 'norm', 'multiheadattn', 'norm', 'ffn').
+ Default the former.
+ act_cfg (dict): Same as `TransformerEncoderLayer`. Default ReLU.
+ norm_cfg (dict): Config dict for normalization layer. Default
+ layer normalization.
+ num_fcs (int): The number of fully-connected layers in FFNs.
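+
+ Example (an added usage sketch; tensor shapes are illustrative only):
+ >>> layer = TransformerDecoderLayer(256, 8, 1024)
+ >>> target = torch.rand(50, 2, 256) # [num_query, bs, embed_dims]
+ >>> memory = torch.rand(100, 2, 256) # [num_key, bs, embed_dims]
+ >>> layer(target, memory).shape
+ torch.Size([50, 2, 256])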
+ """
+
+ def __init__(self,
+ embed_dims,
+ num_heads,
+ feedforward_channels,
+ dropout=0.0,
+ order=('selfattn', 'norm', 'multiheadattn', 'norm', 'ffn',
+ 'norm'),
+ act_cfg=dict(type='ReLU', inplace=True),
+ norm_cfg=dict(type='LN'),
+ num_fcs=2):
+ super(TransformerDecoderLayer, self).__init__()
+ assert isinstance(order, tuple) and len(order) == 6
+ assert set(order) == set(['selfattn', 'norm', 'multiheadattn', 'ffn'])
+ self.embed_dims = embed_dims
+ self.num_heads = num_heads
+ self.feedforward_channels = feedforward_channels
+ self.dropout = dropout
+ self.order = order
+ self.act_cfg = act_cfg
+ self.norm_cfg = norm_cfg
+ self.num_fcs = num_fcs
+ self.pre_norm = order[0] == 'norm'
+ self.self_attn = MultiheadAttention(embed_dims, num_heads, dropout)
+ self.multihead_attn = MultiheadAttention(embed_dims, num_heads,
+ dropout)
+ self.ffn = FFN(embed_dims, feedforward_channels, num_fcs, act_cfg,
+ dropout)
+ self.norms = nn.ModuleList()
+ # 3 norm layers in official DETR's TransformerDecoderLayer
+ for _ in range(3):
+ self.norms.append(build_norm_layer(norm_cfg, embed_dims)[1])
+
+ def forward(self,
+ x,
+ memory,
+ memory_pos=None,
+ query_pos=None,
+ memory_attn_mask=None,
+ target_attn_mask=None,
+ memory_key_padding_mask=None,
+ target_key_padding_mask=None):
+ """Forward function for `TransformerDecoderLayer`.
+
+ Args:
+ x (Tensor): Input query with shape [num_query, bs, embed_dims].
+ memory (Tensor): Tensor got from `TransformerEncoder`, with shape
+ [num_key, bs, embed_dims].
+ memory_pos (Tensor): The positional encoding for `memory`. Default
+ None. Same as `key_pos` in `MultiheadAttention.forward`.
+ query_pos (Tensor): The positional encoding for `query`. Default
+ None. Same as `query_pos` in `MultiheadAttention.forward`.
+ memory_attn_mask (Tensor): ByteTensor mask for `memory`, with
+ shape [num_key, num_key]. Same as `attn_mask` in
+ `MultiheadAttention.forward`. Default None.
+ target_attn_mask (Tensor): ByteTensor mask for `x`, with shape
+ [num_query, num_query]. Same as `attn_mask` in
+ `MultiheadAttention.forward`. Default None.
+ memory_key_padding_mask (Tensor): ByteTensor for `memory`, with
+ shape [bs, num_key]. Same as `key_padding_mask` in
+ `MultiheadAttention.forward`. Default None.
+ target_key_padding_mask (Tensor): ByteTensor for `x`, with shape
+ [bs, num_query]. Same as `key_padding_mask` in
+ `MultiheadAttention.forward`. Default None.
+
+ Returns:
+ Tensor: forwarded results with shape [num_query, bs, embed_dims].
+ """
+ norm_cnt = 0
+ inp_residual = x
+ for layer in self.order:
+ if layer == 'selfattn':
+ query = key = value = x
+ x = self.self_attn(
+ query,
+ key,
+ value,
+ inp_residual if self.pre_norm else None,
+ query_pos,
+ key_pos=query_pos,
+ attn_mask=target_attn_mask,
+ key_padding_mask=target_key_padding_mask)
+ inp_residual = x
+ elif layer == 'norm':
+ x = self.norms[norm_cnt](x)
+ norm_cnt += 1
+ elif layer == 'multiheadattn':
+ query = x
+ key = value = memory
+ x = self.multihead_attn(
+ query,
+ key,
+ value,
+ inp_residual if self.pre_norm else None,
+ query_pos,
+ key_pos=memory_pos,
+ attn_mask=memory_attn_mask,
+ key_padding_mask=memory_key_padding_mask)
+ inp_residual = x
+ elif layer == 'ffn':
+ x = self.ffn(x, inp_residual if self.pre_norm else None)
+ return x
+
+ def __repr__(self):
+ """str: a string that describes the module"""
+ repr_str = self.__class__.__name__
+ repr_str += f'(embed_dims={self.embed_dims}, '
+ repr_str += f'num_heads={self.num_heads}, '
+ repr_str += f'feedforward_channels={self.feedforward_channels}, '
+ repr_str += f'dropout={self.dropout}, '
+ repr_str += f'order={self.order}, '
+ repr_str += f'act_cfg={self.act_cfg}, '
+ repr_str += f'norm_cfg={self.norm_cfg}, '
+ repr_str += f'num_fcs={self.num_fcs})'
+ return repr_str
+
+
+class TransformerEncoder(nn.Module):
+ """Implements the encoder in DETR transformer.
+
+ Args:
+ num_layers (int): The number of `TransformerEncoderLayer`.
+ embed_dims (int): Same as `TransformerEncoderLayer`.
+ num_heads (int): Same as `TransformerEncoderLayer`.
+ feedforward_channels (int): Same as `TransformerEncoderLayer`.
+ dropout (float): Same as `TransformerEncoderLayer`. Default 0.0.
+ order (tuple[str]): Same as `TransformerEncoderLayer`.
+ act_cfg (dict): Same as `TransformerEncoderLayer`. Default ReLU.
+ norm_cfg (dict): Same as `TransformerEncoderLayer`. Default
+ layer normalization.
+ num_fcs (int): Same as `TransformerEncoderLayer`. Default 2.
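+
+ Example (an added usage sketch; tensor shapes are illustrative only):
+ >>> encoder = TransformerEncoder(2, 256, 8, 1024)
+ >>> x = torch.rand(100, 2, 256)
+ >>> encoder(x).shape
+ torch.Size([100, 2, 256])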
+ """
+
+ def __init__(self,
+ num_layers,
+ embed_dims,
+ num_heads,
+ feedforward_channels,
+ dropout=0.0,
+ order=('selfattn', 'norm', 'ffn', 'norm'),
+ act_cfg=dict(type='ReLU', inplace=True),
+ norm_cfg=dict(type='LN'),
+ num_fcs=2):
+ super(TransformerEncoder, self).__init__()
+ assert isinstance(order, tuple) and len(order) == 4
+ assert set(order) == set(['selfattn', 'norm', 'ffn'])
+ self.num_layers = num_layers
+ self.embed_dims = embed_dims
+ self.num_heads = num_heads
+ self.feedforward_channels = feedforward_channels
+ self.dropout = dropout
+ self.order = order
+ self.act_cfg = act_cfg
+ self.norm_cfg = norm_cfg
+ self.num_fcs = num_fcs
+ self.pre_norm = order[0] == 'norm'
+ self.layers = nn.ModuleList()
+ for _ in range(num_layers):
+ self.layers.append(
+ TransformerEncoderLayer(embed_dims, num_heads,
+ feedforward_channels, dropout, order,
+ act_cfg, norm_cfg, num_fcs))
+ self.norm = build_norm_layer(norm_cfg,
+ embed_dims)[1] if self.pre_norm else None
+
+ def forward(self, x, pos=None, attn_mask=None, key_padding_mask=None):
+ """Forward function for `TransformerEncoder`.
+
+ Args:
+ x (Tensor): Input query. Same in `TransformerEncoderLayer.forward`.
+ pos (Tensor): Positional encoding for query. Default None.
+ Same in `TransformerEncoderLayer.forward`.
+ attn_mask (Tensor): ByteTensor attention mask. Default None.
+ Same in `TransformerEncoderLayer.forward`.
+ key_padding_mask (Tensor): Same in
+ `TransformerEncoderLayer.forward`. Default None.
+
+ Returns:
+ Tensor: Results with shape [num_key, bs, embed_dims].
+ """
+ for layer in self.layers:
+ x = layer(x, pos, attn_mask, key_padding_mask)
+ if self.norm is not None:
+ x = self.norm(x)
+ return x
+
+ def __repr__(self):
+ """str: a string that describes the module"""
+ repr_str = self.__class__.__name__
+ repr_str += f'(num_layers={self.num_layers}, '
+ repr_str += f'embed_dims={self.embed_dims}, '
+ repr_str += f'num_heads={self.num_heads}, '
+ repr_str += f'feedforward_channels={self.feedforward_channels}, '
+ repr_str += f'dropout={self.dropout}, '
+ repr_str += f'order={self.order}, '
+ repr_str += f'act_cfg={self.act_cfg}, '
+ repr_str += f'norm_cfg={self.norm_cfg}, '
+ repr_str += f'num_fcs={self.num_fcs})'
+ return repr_str
+
+
+class TransformerDecoder(nn.Module):
+ """Implements the decoder in DETR transformer.
+
+ Args:
+ num_layers (int): The number of `TransformerDecoderLayer`.
+ embed_dims (int): Same as `TransformerDecoderLayer`.
+ num_heads (int): Same as `TransformerDecoderLayer`.
+ feedforward_channels (int): Same as `TransformerDecoderLayer`.
+ dropout (float): Same as `TransformerDecoderLayer`. Default 0.0.
+ order (tuple[str]): Same as `TransformerDecoderLayer`.
+ act_cfg (dict): Same as `TransformerDecoderLayer`. Default ReLU.
+ norm_cfg (dict): Same as `TransformerDecoderLayer`. Default
+ layer normalization.
+ num_fcs (int): Same as `TransformerDecoderLayer`. Default 2.
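+
+ Example (an added usage sketch; tensor shapes are illustrative only):
+ >>> decoder = TransformerDecoder(2, 256, 8, 1024,
+ ... return_intermediate=True)
+ >>> target = torch.zeros(50, 2, 256)
+ >>> memory = torch.rand(100, 2, 256)
+ >>> decoder(target, memory).shape # one output per decoder layer
+ torch.Size([2, 50, 2, 256])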
+ """
+
+ def __init__(self,
+ num_layers,
+ embed_dims,
+ num_heads,
+ feedforward_channels,
+ dropout=0.0,
+ order=('selfattn', 'norm', 'multiheadattn', 'norm', 'ffn',
+ 'norm'),
+ act_cfg=dict(type='ReLU', inplace=True),
+ norm_cfg=dict(type='LN'),
+ num_fcs=2,
+ return_intermediate=False):
+ super(TransformerDecoder, self).__init__()
+ assert isinstance(order, tuple) and len(order) == 6
+ assert set(order) == set(['selfattn', 'norm', 'multiheadattn', 'ffn'])
+ self.num_layers = num_layers
+ self.embed_dims = embed_dims
+ self.num_heads = num_heads
+ self.feedforward_channels = feedforward_channels
+ self.dropout = dropout
+ self.order = order
+ self.act_cfg = act_cfg
+ self.norm_cfg = norm_cfg
+ self.num_fcs = num_fcs
+ self.return_intermediate = return_intermediate
+ self.layers = nn.ModuleList()
+ for _ in range(num_layers):
+ self.layers.append(
+ TransformerDecoderLayer(embed_dims, num_heads,
+ feedforward_channels, dropout, order,
+ act_cfg, norm_cfg, num_fcs))
+ self.norm = build_norm_layer(norm_cfg, embed_dims)[1]
+
+ def forward(self,
+ x,
+ memory,
+ memory_pos=None,
+ query_pos=None,
+ memory_attn_mask=None,
+ target_attn_mask=None,
+ memory_key_padding_mask=None,
+ target_key_padding_mask=None):
+ """Forward function for `TransformerDecoder`.
+
+ Args:
+ x (Tensor): Input query. Same in `TransformerDecoderLayer.forward`.
+ memory (Tensor): Same in `TransformerDecoderLayer.forward`.
+ memory_pos (Tensor): Same in `TransformerDecoderLayer.forward`.
+ Default None.
+ query_pos (Tensor): Same in `TransformerDecoderLayer.forward`.
+ Default None.
+ memory_attn_mask (Tensor): Same in
+ `TransformerDecoderLayer.forward`. Default None.
+ target_attn_mask (Tensor): Same in
+ `TransformerDecoderLayer.forward`. Default None.
+ memory_key_padding_mask (Tensor): Same in
+ `TransformerDecoderLayer.forward`. Default None.
+ target_key_padding_mask (Tensor): Same in
+ `TransformerDecoderLayer.forward`. Default None.
+
+ Returns:
+ Tensor: Results with shape [num_query, bs, embed_dims].
+ """
+ intermediate = []
+ for layer in self.layers:
+ x = layer(x, memory, memory_pos, query_pos, memory_attn_mask,
+ target_attn_mask, memory_key_padding_mask,
+ target_key_padding_mask)
+ if self.return_intermediate:
+ intermediate.append(self.norm(x))
+ if self.norm is not None:
+ x = self.norm(x)
+ if self.return_intermediate:
+ intermediate.pop()
+ intermediate.append(x)
+ if self.return_intermediate:
+ return torch.stack(intermediate)
+ return x.unsqueeze(0)
+
+ def __repr__(self):
+ """str: a string that describes the module"""
+ repr_str = self.__class__.__name__
+ repr_str += f'(num_layers={self.num_layers}, '
+ repr_str += f'embed_dims={self.embed_dims}, '
+ repr_str += f'num_heads={self.num_heads}, '
+ repr_str += f'feedforward_channels={self.feedforward_channels}, '
+ repr_str += f'dropout={self.dropout}, '
+ repr_str += f'order={self.order}, '
+ repr_str += f'act_cfg={self.act_cfg}, '
+ repr_str += f'norm_cfg={self.norm_cfg}, '
+ repr_str += f'num_fcs={self.num_fcs}, '
+ repr_str += f'return_intermediate={self.return_intermediate})'
+ return repr_str
+
+
+@TRANSFORMER.register_module()
+class Transformer(nn.Module):
+ """Implements the DETR transformer.
+
+ Following the official DETR implementation, this module is copy-pasted
+ from torch.nn.Transformer with modifications:
+
+ * positional encodings are passed in MultiheadAttention
+ * extra LN at the end of encoder is removed
+ * decoder returns a stack of activations from all decoding layers
+
+ See the paper "End-to-End Object Detection with Transformers" (DETR)
+ for details.
+
+ Args:
+ embed_dims (int): The feature dimension.
+ num_heads (int): Parallel attention heads. Same as
+ `nn.MultiheadAttention`.
+ num_encoder_layers (int): Number of `TransformerEncoderLayer`.
+ num_decoder_layers (int): Number of `TransformerDecoderLayer`.
+ feedforward_channels (int): The hidden dimension for FFNs used in both
+ encoder and decoder.
+ dropout (float): Probability of an element to be zeroed. Default 0.0.
+ act_cfg (dict): Activation config for FFNs used in both encoder
+ and decoder. Default ReLU.
+ norm_cfg (dict): Config dict for normalization used in both encoder
+ and decoder. Default layer normalization.
+ num_fcs (int): The number of fully-connected layers in FFNs, which is
+ used for both encoder and decoder.
+ pre_norm (bool): Whether the normalization layer is ordered
+ first in the encoder and decoder. Default False.
+ return_intermediate_dec (bool): Whether to return the intermediate
+ output from each TransformerDecoderLayer or only the last
+ TransformerDecoderLayer. Default False. If True, the returned
+ `hs` has shape [num_decoder_layers, bs, num_query, embed_dims].
+ If False, the returned `hs` will have shape [1, bs, num_query,
+ embed_dims].
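+
+ Example (an added usage sketch; all shapes below are illustrative only):
+ >>> transformer = Transformer(embed_dims=256, num_heads=8,
+ ... num_encoder_layers=2, num_decoder_layers=2,
+ ... feedforward_channels=512)
+ >>> x = torch.rand(2, 256, 10, 12) # [bs, c, h, w]
+ >>> mask = torch.zeros(2, 10, 12).bool() # no padded pixels
+ >>> query_embed = torch.rand(100, 256) # [num_query, c]
+ >>> pos_embed = torch.rand(2, 256, 10, 12) # same shape as x
+ >>> out_dec, memory = transformer(x, mask, query_embed, pos_embed)
+ >>> out_dec.shape, memory.shape
+ (torch.Size([1, 2, 100, 256]), torch.Size([2, 256, 10, 12]))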
+ """
+
+ def __init__(self,
+ embed_dims=512,
+ num_heads=8,
+ num_encoder_layers=6,
+ num_decoder_layers=6,
+ feedforward_channels=2048,
+ dropout=0.0,
+ act_cfg=dict(type='ReLU', inplace=True),
+ norm_cfg=dict(type='LN'),
+ num_fcs=2,
+ pre_norm=False,
+ return_intermediate_dec=False):
+ super(Transformer, self).__init__()
+ self.embed_dims = embed_dims
+ self.num_heads = num_heads
+ self.num_encoder_layers = num_encoder_layers
+ self.num_decoder_layers = num_decoder_layers
+ self.feedforward_channels = feedforward_channels
+ self.dropout = dropout
+ self.act_cfg = act_cfg
+ self.norm_cfg = norm_cfg
+ self.num_fcs = num_fcs
+ self.pre_norm = pre_norm
+ self.return_intermediate_dec = return_intermediate_dec
+ if self.pre_norm:
+ encoder_order = ('norm', 'selfattn', 'norm', 'ffn')
+ decoder_order = ('norm', 'selfattn', 'norm', 'multiheadattn',
+ 'norm', 'ffn')
+ else:
+ encoder_order = ('selfattn', 'norm', 'ffn', 'norm')
+ decoder_order = ('selfattn', 'norm', 'multiheadattn', 'norm',
+ 'ffn', 'norm')
+ self.encoder = TransformerEncoder(num_encoder_layers, embed_dims,
+ num_heads, feedforward_channels,
+ dropout, encoder_order, act_cfg,
+ norm_cfg, num_fcs)
+ self.decoder = TransformerDecoder(num_decoder_layers, embed_dims,
+ num_heads, feedforward_channels,
+ dropout, decoder_order, act_cfg,
+ norm_cfg, num_fcs,
+ return_intermediate_dec)
+
+ def init_weights(self, distribution='uniform'):
+ """Initialize the transformer weights."""
+ # follow the official DETR to init parameters
+ for m in self.modules():
+ if hasattr(m, 'weight') and m.weight.dim() > 1:
+ xavier_init(m, distribution=distribution)
+
+ def forward(self, x, mask, query_embed, pos_embed):
+ """Forward function for `Transformer`.
+
+ Args:
+ x (Tensor): Input query with shape [bs, c, h, w] where
+ c = embed_dims.
+ mask (Tensor): The key_padding_mask used for encoder and decoder,
+ with shape [bs, h, w].
+ query_embed (Tensor): The query embedding for decoder, with shape
+ [num_query, c].
+ pos_embed (Tensor): The positional encoding for encoder and
+ decoder, with the same shape as `x`.
+
+ Returns:
+ tuple[Tensor]: results of decoder containing the following tensor.
+
+ - out_dec: Output from decoder. If return_intermediate_dec \
+ is True output has shape [num_dec_layers, bs,
+ num_query, embed_dims], else has shape [1, bs, \
+ num_query, embed_dims].
+ - memory: Output results from encoder, with shape \
+ [bs, embed_dims, h, w].
+ """
+ bs, c, h, w = x.shape
+ x = x.flatten(2).permute(2, 0, 1) # [bs, c, h, w] -> [h*w, bs, c]
+ pos_embed = pos_embed.flatten(2).permute(2, 0, 1)
+ query_embed = query_embed.unsqueeze(1).repeat(
+ 1, bs, 1) # [num_query, dim] -> [num_query, bs, dim]
+ mask = mask.flatten(1) # [bs, h, w] -> [bs, h*w]
+ memory = self.encoder(
+ x, pos=pos_embed, attn_mask=None, key_padding_mask=mask)
+ target = torch.zeros_like(query_embed)
+ # out_dec: [num_layers, num_query, bs, dim]
+ out_dec = self.decoder(
+ target,
+ memory,
+ memory_pos=pos_embed,
+ query_pos=query_embed,
+ memory_attn_mask=None,
+ target_attn_mask=None,
+ memory_key_padding_mask=mask,
+ target_key_padding_mask=None)
+ out_dec = out_dec.transpose(1, 2)
+ memory = memory.permute(1, 2, 0).reshape(bs, c, h, w)
+ return out_dec, memory
+
+ def __repr__(self):
+ """str: a string that describes the module"""
+ repr_str = self.__class__.__name__
+ repr_str += f'(embed_dims={self.embed_dims}, '
+ repr_str += f'num_heads={self.num_heads}, '
+ repr_str += f'num_encoder_layers={self.num_encoder_layers}, '
+ repr_str += f'num_decoder_layers={self.num_decoder_layers}, '
+ repr_str += f'feedforward_channels={self.feedforward_channels}, '
+ repr_str += f'dropout={self.dropout}, '
+ repr_str += f'act_cfg={self.act_cfg}, '
+ repr_str += f'norm_cfg={self.norm_cfg}, '
+ repr_str += f'num_fcs={self.num_fcs}, '
+ repr_str += f'pre_norm={self.pre_norm}, '
+ repr_str += f'return_intermediate_dec={self.return_intermediate_dec})'
+ return repr_str
+
+
+@TRANSFORMER.register_module()
+class DynamicConv(nn.Module):
+ """Implements Dynamic Convolution.
+
+ This module generates parameters for each sample and
+ uses bmm to implement 1x1 convolution. Code is modified
+ from the official GitHub repo.
+
+ Args:
+ in_channels (int): The input feature channel.
+ Defaults to 256.
+ feat_channels (int): The inner feature channel.
+ Defaults to 64.
+ out_channels (int, optional): The output feature channel.
+ When not specified, it will be set to `in_channels`
+ by default
+ input_feat_shape (int): The shape of input feature.
+ Defaults to 7.
+ act_cfg (dict): The activation config for DynamicConv.
+ norm_cfg (dict): Config dict for normalization layer. Default
+ layer normalization.
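+
+ Example (an added usage sketch; tensor shapes are illustrative only):
+ >>> dynamic_conv = DynamicConv(in_channels=256, feat_channels=64,
+ ... input_feat_shape=7)
+ >>> param_feature = torch.rand(10, 256) # (num_proposals, in_channels)
+ >>> input_feature = torch.rand(10, 256, 7, 7) # RoI features
+ >>> dynamic_conv(param_feature, input_feature).shape
+ torch.Size([10, 256])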
+ """
+
+ def __init__(self,
+ in_channels=256,
+ feat_channels=64,
+ out_channels=None,
+ input_feat_shape=7,
+ act_cfg=dict(type='ReLU', inplace=True),
+ norm_cfg=dict(type='LN')):
+ super(DynamicConv, self).__init__()
+ self.in_channels = in_channels
+ self.feat_channels = feat_channels
+ self.out_channels_raw = out_channels
+ self.input_feat_shape = input_feat_shape
+ self.act_cfg = act_cfg
+ self.norm_cfg = norm_cfg
+ self.out_channels = out_channels if out_channels else in_channels
+
+ self.num_params_in = self.in_channels * self.feat_channels
+ self.num_params_out = self.out_channels * self.feat_channels
+ self.dynamic_layer = nn.Linear(
+ self.in_channels, self.num_params_in + self.num_params_out)
+
+ self.norm_in = build_norm_layer(norm_cfg, self.feat_channels)[1]
+ self.norm_out = build_norm_layer(norm_cfg, self.out_channels)[1]
+
+ self.activation = build_activation_layer(act_cfg)
+
+ num_output = self.out_channels * input_feat_shape**2
+ self.fc_layer = nn.Linear(num_output, self.out_channels)
+ self.fc_norm = build_norm_layer(norm_cfg, self.out_channels)[1]
+
+ def forward(self, param_feature, input_feature):
+ """Forward function for `DynamicConv`.
+
+ Args:
+ param_feature (Tensor): The feature can be used
+ to generate the parameter, has shape
+ (num_all_proposals, in_channels).
+ input_feature (Tensor): Feature that
+ interact with parameters, has shape
+ (num_all_proposals, in_channels, H, W).
+
+ Returns:
+ Tensor: The output feature has shape
+ (num_all_proposals, out_channels).
+ """
+ num_proposals = param_feature.size(0)
+ input_feature = input_feature.view(num_proposals, self.in_channels,
+ -1).permute(2, 0, 1)
+
+ input_feature = input_feature.permute(1, 0, 2)
+ parameters = self.dynamic_layer(param_feature)
+
+ param_in = parameters[:, :self.num_params_in].view(
+ -1, self.in_channels, self.feat_channels)
+ param_out = parameters[:, -self.num_params_out:].view(
+ -1, self.feat_channels, self.out_channels)
+
+ # input_feature has shape (num_all_proposals, H*W, in_channels)
+ # param_in has shape (num_all_proposals, in_channels, feat_channels)
+ # feature has shape (num_all_proposals, H*W, feat_channels)
+ features = torch.bmm(input_feature, param_in)
+ features = self.norm_in(features)
+ features = self.activation(features)
+
+ # param_out has shape (batch_size, feat_channels, out_channels)
+ features = torch.bmm(features, param_out)
+ features = self.norm_out(features)
+ features = self.activation(features)
+
+ features = features.flatten(1)
+ features = self.fc_layer(features)
+ features = self.fc_norm(features)
+ features = self.activation(features)
+
+ return features
+
+ def __repr__(self):
+ """str: a string that describes the module"""
+ repr_str = self.__class__.__name__
+ repr_str += f'(in_channels={self.in_channels}, '
+ repr_str += f'feat_channels={self.feat_channels}, '
+ repr_str += f'out_channels={self.out_channels_raw}, '
+ repr_str += f'input_feat_shape={self.input_feat_shape}, '
+ repr_str += f'act_cfg={self.act_cfg}, '
+ repr_str += f'norm_cfg={self.norm_cfg})'
+ return repr_str
diff --git a/annotator/uniformer/mmdet/utils/__init__.py b/annotator/uniformer/mmdet/utils/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e79ad8c02a2d465f0690a4aa80683a5c6d784d52
--- /dev/null
+++ b/annotator/uniformer/mmdet/utils/__init__.py
@@ -0,0 +1,5 @@
+from .collect_env import collect_env
+from .logger import get_root_logger
+from .optimizer import DistOptimizerHook
+
+__all__ = ['get_root_logger', 'collect_env', 'DistOptimizerHook']
diff --git a/annotator/uniformer/mmdet/utils/collect_env.py b/annotator/uniformer/mmdet/utils/collect_env.py
new file mode 100644
index 0000000000000000000000000000000000000000..89c064accdb10abec4a03de04f601d27aab2da70
--- /dev/null
+++ b/annotator/uniformer/mmdet/utils/collect_env.py
@@ -0,0 +1,16 @@
+from mmcv.utils import collect_env as collect_base_env
+from mmcv.utils import get_git_hash
+
+import mmdet
+
+
+def collect_env():
+ """Collect the information of the running environments."""
+ env_info = collect_base_env()
+ env_info['MMDetection'] = mmdet.__version__ + '+' + get_git_hash()[:7]
+ return env_info
+
+
+if __name__ == '__main__':
+ for name, val in collect_env().items():
+ print(f'{name}: {val}')
diff --git a/annotator/uniformer/mmdet/utils/contextmanagers.py b/annotator/uniformer/mmdet/utils/contextmanagers.py
new file mode 100644
index 0000000000000000000000000000000000000000..38a639262d949b5754dedf12f33fa814b030ea38
--- /dev/null
+++ b/annotator/uniformer/mmdet/utils/contextmanagers.py
@@ -0,0 +1,121 @@
+import asyncio
+import contextlib
+import logging
+import os
+import time
+from typing import List
+
+import torch
+
+logger = logging.getLogger(__name__)
+
+DEBUG_COMPLETED_TIME = bool(os.environ.get('DEBUG_COMPLETED_TIME', False))
+
+
+@contextlib.asynccontextmanager
+async def completed(trace_name='',
+ name='',
+ sleep_interval=0.05,
+ streams: List[torch.cuda.Stream] = None):
+ """Async context manager that waits for work to complete on given CUDA
+ streams."""
+ if not torch.cuda.is_available():
+ yield
+ return
+
+ stream_before_context_switch = torch.cuda.current_stream()
+ if not streams:
+ streams = [stream_before_context_switch]
+ else:
+ streams = [s if s else stream_before_context_switch for s in streams]
+
+ end_events = [
+ torch.cuda.Event(enable_timing=DEBUG_COMPLETED_TIME) for _ in streams
+ ]
+
+ if DEBUG_COMPLETED_TIME:
+ start = torch.cuda.Event(enable_timing=True)
+ stream_before_context_switch.record_event(start)
+
+ cpu_start = time.monotonic()
+ logger.debug('%s %s starting, streams: %s', trace_name, name, streams)
+ grad_enabled_before = torch.is_grad_enabled()
+ try:
+ yield
+ finally:
+ current_stream = torch.cuda.current_stream()
+ assert current_stream == stream_before_context_switch
+
+ if DEBUG_COMPLETED_TIME:
+ cpu_end = time.monotonic()
+ for i, stream in enumerate(streams):
+ event = end_events[i]
+ stream.record_event(event)
+
+ grad_enabled_after = torch.is_grad_enabled()
+
+ # observed change of torch.is_grad_enabled() during concurrent run of
+ # async_test_bboxes code
+ assert (grad_enabled_before == grad_enabled_after
+ ), 'Unexpected is_grad_enabled() value change'
+
+ are_done = [e.query() for e in end_events]
+ logger.debug('%s %s completed: %s streams: %s', trace_name, name,
+ are_done, streams)
+ with torch.cuda.stream(stream_before_context_switch):
+ while not all(are_done):
+ await asyncio.sleep(sleep_interval)
+ are_done = [e.query() for e in end_events]
+ logger.debug(
+ '%s %s completed: %s streams: %s',
+ trace_name,
+ name,
+ are_done,
+ streams,
+ )
+
+ current_stream = torch.cuda.current_stream()
+ assert current_stream == stream_before_context_switch
+
+ if DEBUG_COMPLETED_TIME:
+ cpu_time = (cpu_end - cpu_start) * 1000
+ stream_times_ms = ''
+ for i, stream in enumerate(streams):
+ elapsed_time = start.elapsed_time(end_events[i])
+ stream_times_ms += f' {stream} {elapsed_time:.2f} ms'
+ logger.info('%s %s %.2f ms %s', trace_name, name, cpu_time,
+ stream_times_ms)
+
+
+@contextlib.asynccontextmanager
+async def concurrent(streamqueue: asyncio.Queue,
+ trace_name='concurrent',
+ name='stream'):
+ """Run code concurrently in different streams.
+
+ :param streamqueue: asyncio.Queue instance.
+
+ Queue tasks define the pool of streams used for concurrent execution.
+ """
+ if not torch.cuda.is_available():
+ yield
+ return
+
+ initial_stream = torch.cuda.current_stream()
+
+ with torch.cuda.stream(initial_stream):
+ stream = await streamqueue.get()
+ assert isinstance(stream, torch.cuda.Stream)
+
+ try:
+ with torch.cuda.stream(stream):
+ logger.debug('%s %s is starting, stream: %s', trace_name, name,
+ stream)
+ yield
+ current = torch.cuda.current_stream()
+ assert current == stream
+ logger.debug('%s %s has finished, stream: %s', trace_name,
+ name, stream)
+ finally:
+ streamqueue.task_done()
+ streamqueue.put_nowait(stream)
diff --git a/annotator/uniformer/mmdet/utils/logger.py b/annotator/uniformer/mmdet/utils/logger.py
new file mode 100644
index 0000000000000000000000000000000000000000..6fc6e6b438a73e857ba6f173594985807cb88b30
--- /dev/null
+++ b/annotator/uniformer/mmdet/utils/logger.py
@@ -0,0 +1,19 @@
+import logging
+
+from mmcv.utils import get_logger
+
+
+def get_root_logger(log_file=None, log_level=logging.INFO):
+ """Get root logger.
+
+ Args:
+ log_file (str, optional): File path of log. Defaults to None.
+ log_level (int, optional): The level of logger.
+ Defaults to logging.INFO.
+
+ Returns:
+ :obj:`logging.Logger`: The obtained logger
+ """
+ logger = get_logger(name='mmdet', log_file=log_file, log_level=log_level)
+
+ return logger
diff --git a/annotator/uniformer/mmdet/utils/optimizer.py b/annotator/uniformer/mmdet/utils/optimizer.py
new file mode 100644
index 0000000000000000000000000000000000000000..9c9d11941c0b43d42bd6daad1e4b927eaca3e675
--- /dev/null
+++ b/annotator/uniformer/mmdet/utils/optimizer.py
@@ -0,0 +1,33 @@
+from mmcv.runner import OptimizerHook, HOOKS
+try:
+ import apex
+except ImportError:
+ print('apex is not installed')
+
+
+@HOOKS.register_module()
+class DistOptimizerHook(OptimizerHook):
+ """Optimizer hook for distributed training."""
+
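+ # Example optimizer_config (illustrative values only, not taken from this
+ # repo's configs): accumulate gradients over 2 iterations and enable apex
+ # fp16, assuming apex is installed.
+ #
+ # optimizer_config = dict(
+ # type='DistOptimizerHook', update_interval=2, grad_clip=None,
+ # coalesce=True, bucket_size_mb=-1, use_fp16=True)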
+ def __init__(self, update_interval=1, grad_clip=None, coalesce=True, bucket_size_mb=-1, use_fp16=False):
+ self.grad_clip = grad_clip
+ self.coalesce = coalesce
+ self.bucket_size_mb = bucket_size_mb
+ self.update_interval = update_interval
+ self.use_fp16 = use_fp16
+
+ def before_run(self, runner):
+ runner.optimizer.zero_grad()
+
+ def after_train_iter(self, runner):
+ runner.outputs['loss'] /= self.update_interval
+ if self.use_fp16:
+ with apex.amp.scale_loss(runner.outputs['loss'], runner.optimizer) as scaled_loss:
+ scaled_loss.backward()
+ else:
+ runner.outputs['loss'].backward()
+ if self.every_n_iters(runner, self.update_interval):
+ if self.grad_clip is not None:
+ self.clip_grads(runner.model.parameters())
+ runner.optimizer.step()
+ runner.optimizer.zero_grad()
diff --git a/annotator/uniformer/mmdet/utils/profiling.py b/annotator/uniformer/mmdet/utils/profiling.py
new file mode 100644
index 0000000000000000000000000000000000000000..4be9222c37e922329d537f883f5587995e27efc6
--- /dev/null
+++ b/annotator/uniformer/mmdet/utils/profiling.py
@@ -0,0 +1,39 @@
+import contextlib
+import sys
+import time
+
+import torch
+
+if sys.version_info >= (3, 7):
+
+ @contextlib.contextmanager
+ def profile_time(trace_name,
+ name,
+ enabled=True,
+ stream=None,
+ end_stream=None):
+ """Print time spent by CPU and GPU.
+
+ Useful as a temporary context manager to find sweet spots of code
+ suitable for async implementation.
+ """
+ if (not enabled) or not torch.cuda.is_available():
+ yield
+ return
+ stream = stream if stream else torch.cuda.current_stream()
+ end_stream = end_stream if end_stream else stream
+ start = torch.cuda.Event(enable_timing=True)
+ end = torch.cuda.Event(enable_timing=True)
+ stream.record_event(start)
+ try:
+ cpu_start = time.monotonic()
+ yield
+ finally:
+ cpu_end = time.monotonic()
+ end_stream.record_event(end)
+ end.synchronize()
+ cpu_time = (cpu_end - cpu_start) * 1000
+ gpu_time = start.elapsed_time(end)
+ msg = f'{trace_name} {name} cpu_time {cpu_time:.2f} ms '
+ msg += f'gpu_time {gpu_time:.2f} ms stream {stream}'
+ print(msg, end_stream)
diff --git a/annotator/uniformer/mmdet/utils/util_mixins.py b/annotator/uniformer/mmdet/utils/util_mixins.py
new file mode 100644
index 0000000000000000000000000000000000000000..69669a3ca943eebe0f138b2784c5b61724196bbe
--- /dev/null
+++ b/annotator/uniformer/mmdet/utils/util_mixins.py
@@ -0,0 +1,104 @@
+"""This module defines the :class:`NiceRepr` mixin class, which defines a
+``__repr__`` and ``__str__`` method that only depend on a custom ``__nice__``
+method, which you must define. This means you only have to overload one
+function instead of two. Furthermore, if the object defines a ``__len__``
+method, then the ``__nice__`` method defaults to something sensible, otherwise
+it is treated as abstract and raises ``NotImplementedError``.
+
+To use simply have your object inherit from :class:`NiceRepr`
+(multi-inheritance should be ok).
+
+This code was copied from the ubelt library: https://github.com/Erotemic/ubelt
+
+Example:
+ >>> # Objects that define __nice__ have a default __str__ and __repr__
+ >>> class Student(NiceRepr):
+ ... def __init__(self, name):
+ ... self.name = name
+ ... def __nice__(self):
+ ... return self.name
+ >>> s1 = Student('Alice')
+ >>> s2 = Student('Bob')
+ >>> print(f's1 = {s1}')
+ >>> print(f's2 = {s2}')
+ s1 = <Student(Alice)>
+ s2 = <Student(Bob)>
+
+Example:
+ >>> # Objects that define __len__ have a default __nice__
+ >>> class Group(NiceRepr):
+ ... def __init__(self, data):
+ ... self.data = data
+ ... def __len__(self):
+ ... return len(self.data)
+ >>> g = Group([1, 2, 3])
+ >>> print(f'g = {g}')
+ g = <Group(3)>
+"""
+import warnings
+
+
+class NiceRepr(object):
+ """Inherit from this class and define ``__nice__`` to "nicely" print your
+ objects.
+
+ Defines ``__str__`` and ``__repr__`` in terms of ``__nice__`` function
+ Classes that inherit from :class:`NiceRepr` should redefine ``__nice__``.
+ If the inheriting class has a ``__len__`` method, then the default
+ ``__nice__`` method will return its length.
+
+ Example:
+ >>> class Foo(NiceRepr):
+ ... def __nice__(self):
+ ... return 'info'
+ >>> foo = Foo()
+ >>> assert str(foo) == '<Foo(info)>'
+ >>> assert repr(foo).startswith('<Foo(info) at ')
+
+ Example:
+ >>> class Bar(NiceRepr):
+ ... pass
+ >>> bar = Bar()
+ >>> import pytest
+ >>> with pytest.warns(None) as record:
+ >>> assert 'object at' in str(bar)
+ >>> assert 'object at' in repr(bar)
+
+ Example:
+ >>> class Baz(NiceRepr):
+ ... def __len__(self):
+ ... return 5
+ >>> baz = Baz()
+ >>> assert str(baz) == '<Baz(5)>'
+ """
+
+ def __nice__(self):
+ """str: a "nice" summary string describing this module"""
+ if hasattr(self, '__len__'):
+ # It is a common pattern for objects to use __len__ in __nice__
+ # As a convenience we define a default __nice__ for these objects
+ return str(len(self))
+ else:
+ # In all other cases force the subclass to overload __nice__
+ raise NotImplementedError(
+ f'Define the __nice__ method for {self.__class__!r}')
+
+ def __repr__(self):
+ """str: the string of the module"""
+ try:
+ nice = self.__nice__()
+ classname = self.__class__.__name__
+ return f'<{classname}({nice}) at {hex(id(self))}>'
+ except NotImplementedError as ex:
+ warnings.warn(str(ex), category=RuntimeWarning)
+ return object.__repr__(self)
+
+ def __str__(self):
+ """str: the string of the module"""
+ try:
+ classname = self.__class__.__name__
+ nice = self.__nice__()
+ return f'<{classname}({nice})>'
+ except NotImplementedError as ex:
+ warnings.warn(str(ex), category=RuntimeWarning)
+ return object.__repr__(self)
diff --git a/annotator/uniformer/mmdet/utils/util_random.py b/annotator/uniformer/mmdet/utils/util_random.py
new file mode 100644
index 0000000000000000000000000000000000000000..e313e9947bb3232a9458878fd219e1594ab93d57
--- /dev/null
+++ b/annotator/uniformer/mmdet/utils/util_random.py
@@ -0,0 +1,33 @@
+"""Helpers for random number generators."""
+import numpy as np
+
+
+def ensure_rng(rng=None):
+ """Coerces input into a random number generator.
+
+ If the input is None, then a global random state is returned.
+
+ If the input is a numeric value, then that is used as a seed to construct a
+ random state. Otherwise the input is returned as-is.
+
+ Adapted from [1]_.
+
+ Args:
+ rng (int | numpy.random.RandomState | None):
+ if None, then defaults to the global rng. Otherwise this can be an
+ integer or a RandomState class
+ Returns:
+ numpy.random.RandomState: a numpy random number generator
+
+ References:
+ .. [1] https://gitlab.kitware.com/computer-vision/kwarray/blob/master/kwarray/util_random.py#L270 # noqa: E501
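+
+ Example (an added usage sketch):
+ >>> rng = ensure_rng(0)
+ >>> isinstance(rng, np.random.RandomState)
+ True
+ >>> ensure_rng(rng) is rng
+ True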
+ """
+
+ if rng is None:
+ rng = np.random.mtrand._rand
+ elif isinstance(rng, int):
+ rng = np.random.RandomState(rng)
+ else:
+ rng = rng
+ return rng
diff --git a/annotator/uniformer/mmdet/version.py b/annotator/uniformer/mmdet/version.py
new file mode 100644
index 0000000000000000000000000000000000000000..a3b741aed16212ad1dee277d519b259ae3184b19
--- /dev/null
+++ b/annotator/uniformer/mmdet/version.py
@@ -0,0 +1,19 @@
+# Copyright (c) Open-MMLab. All rights reserved.
+
+__version__ = '2.11.0'
+short_version = __version__
+
+
+def parse_version_info(version_str):
+ version_info = []
+ for x in version_str.split('.'):
+ if x.isdigit():
+ version_info.append(int(x))
+ elif x.find('rc') != -1:
+ patch_version = x.split('rc')
+ version_info.append(int(patch_version[0]))
+ version_info.append(f'rc{patch_version[1]}')
+ return tuple(version_info)
+
+
+version_info = parse_version_info(__version__)
diff --git a/annotator/uniformer/mmdet_null/apis/__init__.py b/annotator/uniformer/mmdet_null/apis/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..1d8035b74877fdeccaa41cbc10a9f1f9924eac85
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/apis/__init__.py
@@ -0,0 +1,10 @@
+from .inference import (async_inference_detector, inference_detector,
+ init_detector, show_result_pyplot)
+from .test import multi_gpu_test, single_gpu_test
+from .train import get_root_logger, set_random_seed, train_detector
+
+__all__ = [
+ 'get_root_logger', 'set_random_seed', 'train_detector', 'init_detector',
+ 'async_inference_detector', 'inference_detector', 'show_result_pyplot',
+ 'multi_gpu_test', 'single_gpu_test'
+]
diff --git a/annotator/uniformer/mmdet_null/apis/inference.py b/annotator/uniformer/mmdet_null/apis/inference.py
new file mode 100644
index 0000000000000000000000000000000000000000..f4e5fc760a8eb7488c07d7ed83433a97ab03a45a
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/apis/inference.py
@@ -0,0 +1,218 @@
+import warnings
+
+import annotator.uniformer.mmcv as mmcv
+
+import numpy as np
+import torch
+# from annotator.uniformer.mmcv.ops import RoIPool
+from annotator.uniformer.mmcv.parallel import collate, scatter
+from annotator.uniformer.mmcv.runner import load_checkpoint
+
+from annotator.uniformer.mmdet.core import get_classes
+from annotator.uniformer.mmdet.datasets import replace_ImageToTensor
+from annotator.uniformer.mmdet.datasets.pipelines import Compose
+from annotator.uniformer.mmdet.models import build_detector
+
+
+def init_detector(config, checkpoint=None, device='cuda:0', cfg_options=None):
+ """Initialize a detector from config file.
+
+ Args:
+ config (str or :obj:`mmcv.Config`): Config file path or the config
+ object.
+ checkpoint (str, optional): Checkpoint path. If left as None, the model
+ will not load any weights.
+ cfg_options (dict): Options to override some settings in the used
+ config.
+
+ Returns:
+ nn.Module: The constructed detector.
+ """
+ if isinstance(config, str):
+ config = mmcv.Config.fromfile(config)
+ elif not isinstance(config, mmcv.Config):
+ raise TypeError('config must be a filename or Config object, '
+ f'but got {type(config)}')
+ if cfg_options is not None:
+ config.merge_from_dict(cfg_options)
+ config.model.pretrained = None
+ config.model.train_cfg = None
+ model = build_detector(config.model, test_cfg=config.get('test_cfg'))
+ if checkpoint is not None:
+ map_loc = 'cpu' if device == 'cpu' else None
+ checkpoint = load_checkpoint(model, checkpoint, map_location=map_loc)
+ if 'CLASSES' in checkpoint.get('meta', {}):
+ model.CLASSES = checkpoint['meta']['CLASSES']
+ else:
+ warnings.simplefilter('once')
+ warnings.warn('Class names are not saved in the checkpoint\'s '
+ 'meta data, use COCO classes by default.')
+ model.CLASSES = get_classes('coco')
+ model.cfg = config # save the config in the model for convenience
+ model.to(device)
+ model.eval()
+ return model
+
+
+class LoadImage(object):
+ """Deprecated.
+
+ A simple pipeline to load image.
+ """
+
+ def __call__(self, results):
+ """Call function to load images into results.
+
+ Args:
+ results (dict): A result dict contains the file name
+ of the image to be read.
+ Returns:
+ dict: ``results`` will be returned containing loaded image.
+ """
+ warnings.simplefilter('once')
+ warnings.warn('`LoadImage` is deprecated and will be removed in '
+ 'future releases. You may use `LoadImageFromWebcam` '
+ 'from `mmdet.datasets.pipelines.` instead.')
+ if isinstance(results['img'], str):
+ results['filename'] = results['img']
+ results['ori_filename'] = results['img']
+ else:
+ results['filename'] = None
+ results['ori_filename'] = None
+ img = mmcv.imread(results['img'])
+ results['img'] = img
+ results['img_fields'] = ['img']
+ results['img_shape'] = img.shape
+ results['ori_shape'] = img.shape
+ return results
+
+
+def inference_detector(model, imgs):
+ """Inference image(s) with the detector.
+
+ Args:
+ model (nn.Module): The loaded detector.
+ imgs (str/ndarray or list[str/ndarray] or tuple[str/ndarray]):
+ Either image files or loaded images.
+
+ Returns:
+ If imgs is a list or tuple, the same length list type results
+ will be returned, otherwise return the detection results directly.
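+
+ Example (an added sketch; the config, checkpoint and image paths below
+ are placeholders rather than files shipped with this repo):
+ >>> model = init_detector('some_config.py', 'some_ckpt.pth') # doctest: +SKIP
+ >>> result = inference_detector(model, 'some_image.jpg') # doctest: +SKIP
+ >>> show_result_pyplot(model, 'some_image.jpg', result) # doctest: +SKIP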
+ """
+
+ if isinstance(imgs, (list, tuple)):
+ is_batch = True
+ else:
+ imgs = [imgs]
+ is_batch = False
+
+ cfg = model.cfg
+ device = next(model.parameters()).device # model device
+
+ if isinstance(imgs[0], np.ndarray):
+ cfg = cfg.copy()
+ # set loading pipeline type
+ cfg.data.test.pipeline[0].type = 'LoadImageFromWebcam'
+
+ cfg.data.test.pipeline = replace_ImageToTensor(cfg.data.test.pipeline)
+ test_pipeline = Compose(cfg.data.test.pipeline)
+
+ datas = []
+ for img in imgs:
+ # prepare data
+ if isinstance(img, np.ndarray):
+ # directly add img
+ data = dict(img=img)
+ else:
+ # add information into dict
+ data = dict(img_info=dict(filename=img), img_prefix=None)
+ # build the data pipeline
+ data = test_pipeline(data)
+ datas.append(data)
+
+ data = collate(datas, samples_per_gpu=len(imgs))
+ # just get the actual data from DataContainer
+ data['img_metas'] = [img_metas.data[0] for img_metas in data['img_metas']]
+ data['img'] = [img.data[0] for img in data['img']]
+ if next(model.parameters()).is_cuda:
+ # scatter to specified GPU
+ data = scatter(data, [device])[0]
+ else:
+ # `RoIPool` is not imported in this file (the mmcv.ops import above is
+ # commented out), so compare by class name to avoid a NameError on the
+ # CPU-only path.
+ for m in model.modules():
+ assert type(m).__name__ != 'RoIPool', \
+ 'CPU inference with RoIPool is not supported currently.'
+
+ # forward the model
+ with torch.no_grad():
+ results = model(return_loss=False, rescale=True, **data)
+
+ if not is_batch:
+ return results[0]
+ else:
+ return results
+
+
+async def async_inference_detector(model, img):
+ """Async inference image(s) with the detector.
+
+ Args:
+ model (nn.Module): The loaded detector.
+ img (str | ndarray): Either image files or loaded images.
+
+ Returns:
+ Awaitable detection results.
+ """
+ cfg = model.cfg
+ device = next(model.parameters()).device # model device
+ # prepare data
+ if isinstance(img, np.ndarray):
+ # directly add img
+ data = dict(img=img)
+ cfg = cfg.copy()
+ # set loading pipeline type
+ cfg.data.test.pipeline[0].type = 'LoadImageFromWebcam'
+ else:
+ # add information into dict
+ data = dict(img_info=dict(filename=img), img_prefix=None)
+ # build the data pipeline
+ test_pipeline = Compose(cfg.data.test.pipeline)
+ data = test_pipeline(data)
+ data = scatter(collate([data], samples_per_gpu=1), [device])[0]
+
+ # We don't restore `torch.is_grad_enabled()` value during concurrent
+ # inference since execution can overlap
+ torch.set_grad_enabled(False)
+ result = await model.aforward_test(rescale=True, **data)
+ return result
+
+
+def show_result_pyplot(model,
+ img,
+ result,
+ score_thr=0.3,
+ title='result',
+ wait_time=0):
+ """Visualize the detection results on the image.
+
+ Args:
+ model (nn.Module): The loaded detector.
+ img (str or np.ndarray): Image filename or loaded image.
+ result (tuple[list] or list): The detection result, can be either
+ (bbox, segm) or just bbox.
+ score_thr (float): The threshold to visualize the bboxes and masks.
+ title (str): Title of the pyplot figure.
+ wait_time (float): Value of waitKey param.
+ Default: 0.
+ """
+ if hasattr(model, 'module'):
+ model = model.module
+ model.show_result(
+ img,
+ result,
+ score_thr=score_thr,
+ show=True,
+ wait_time=wait_time,
+ win_name=title,
+ bbox_color=(72, 101, 241),
+ text_color=(72, 101, 241))
diff --git a/annotator/uniformer/mmdet_null/apis/test.py b/annotator/uniformer/mmdet_null/apis/test.py
new file mode 100644
index 0000000000000000000000000000000000000000..e54b1b8c24efc448972c31ee5da63041d7f97a47
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/apis/test.py
@@ -0,0 +1,190 @@
+import os.path as osp
+import pickle
+import shutil
+import tempfile
+import time
+
+import mmcv
+import torch
+import torch.distributed as dist
+from mmcv.image import tensor2imgs
+from mmcv.runner import get_dist_info
+
+from mmdet.core import encode_mask_results
+
+
+def single_gpu_test(model,
+ data_loader,
+ show=False,
+ out_dir=None,
+ show_score_thr=0.3):
+ model.eval()
+ results = []
+ dataset = data_loader.dataset
+ prog_bar = mmcv.ProgressBar(len(dataset))
+ for i, data in enumerate(data_loader):
+ with torch.no_grad():
+ result = model(return_loss=False, rescale=True, **data)
+
+ batch_size = len(result)
+ if show or out_dir:
+ if batch_size == 1 and isinstance(data['img'][0], torch.Tensor):
+ img_tensor = data['img'][0]
+ else:
+ img_tensor = data['img'][0].data[0]
+ img_metas = data['img_metas'][0].data[0]
+ imgs = tensor2imgs(img_tensor, **img_metas[0]['img_norm_cfg'])
+ assert len(imgs) == len(img_metas)
+
+ for i, (img, img_meta) in enumerate(zip(imgs, img_metas)):
+ h, w, _ = img_meta['img_shape']
+ img_show = img[:h, :w, :]
+
+ ori_h, ori_w = img_meta['ori_shape'][:-1]
+ img_show = mmcv.imresize(img_show, (ori_w, ori_h))
+
+ if out_dir:
+ out_file = osp.join(out_dir, img_meta['ori_filename'])
+ else:
+ out_file = None
+
+ model.module.show_result(
+ img_show,
+ result[i],
+ show=show,
+ out_file=out_file,
+ score_thr=show_score_thr)
+
+ # encode mask results
+ if isinstance(result[0], tuple):
+ result = [(bbox_results, encode_mask_results(mask_results))
+ for bbox_results, mask_results in result]
+ results.extend(result)
+
+ for _ in range(batch_size):
+ prog_bar.update()
+ return results
+
+
+def multi_gpu_test(model, data_loader, tmpdir=None, gpu_collect=False):
+ """Test model with multiple gpus.
+
+ This method tests model with multiple gpus and collects the results
+ under two different modes: gpu and cpu modes. By setting 'gpu_collect=True'
+ it encodes results to gpu tensors and uses gpu communication for results
+ collection. In cpu mode it saves the results on different gpus to 'tmpdir'
+ and collects them with the rank 0 worker.
+
+ Args:
+ model (nn.Module): Model to be tested.
+ data_loader (nn.Dataloader): Pytorch data loader.
+ tmpdir (str): Path of directory to save the temporary results from
+ different gpus under cpu mode.
+ gpu_collect (bool): Option to use either gpu or cpu to collect results.
+
+ Returns:
+ list: The prediction results.
+ """
+ model.eval()
+ results = []
+ dataset = data_loader.dataset
+ rank, world_size = get_dist_info()
+ if rank == 0:
+ prog_bar = mmcv.ProgressBar(len(dataset))
+ time.sleep(2) # This line can prevent deadlock problem in some cases.
+ for i, data in enumerate(data_loader):
+ with torch.no_grad():
+ result = model(return_loss=False, rescale=True, **data)
+ # encode mask results
+ if isinstance(result[0], tuple):
+ result = [(bbox_results, encode_mask_results(mask_results))
+ for bbox_results, mask_results in result]
+ results.extend(result)
+
+ if rank == 0:
+ batch_size = len(result)
+ for _ in range(batch_size * world_size):
+ prog_bar.update()
+
+ # collect results from all ranks
+ if gpu_collect:
+ results = collect_results_gpu(results, len(dataset))
+ else:
+ results = collect_results_cpu(results, len(dataset), tmpdir)
+ return results
+
+
+def collect_results_cpu(result_part, size, tmpdir=None):
+ rank, world_size = get_dist_info()
+ # create a tmp dir if it is not specified
+ if tmpdir is None:
+ MAX_LEN = 512
+ # 32 is whitespace
+ dir_tensor = torch.full((MAX_LEN, ),
+ 32,
+ dtype=torch.uint8,
+ device='cuda')
+ if rank == 0:
+ mmcv.mkdir_or_exist('.dist_test')
+ tmpdir = tempfile.mkdtemp(dir='.dist_test')
+ tmpdir = torch.tensor(
+ bytearray(tmpdir.encode()), dtype=torch.uint8, device='cuda')
+ dir_tensor[:len(tmpdir)] = tmpdir
+ dist.broadcast(dir_tensor, 0)
+ tmpdir = dir_tensor.cpu().numpy().tobytes().decode().rstrip()
+ else:
+ mmcv.mkdir_or_exist(tmpdir)
+ # dump the part result to the dir
+ mmcv.dump(result_part, osp.join(tmpdir, f'part_{rank}.pkl'))
+ dist.barrier()
+ # collect all parts
+ if rank != 0:
+ return None
+ else:
+ # load results of all parts from tmp dir
+ part_list = []
+ for i in range(world_size):
+ part_file = osp.join(tmpdir, f'part_{i}.pkl')
+ part_list.append(mmcv.load(part_file))
+ # sort the results
+ ordered_results = []
+ for res in zip(*part_list):
+ ordered_results.extend(list(res))
+ # the dataloader may pad some samples
+ ordered_results = ordered_results[:size]
+ # remove tmp dir
+ shutil.rmtree(tmpdir)
+ return ordered_results
+
+
+def collect_results_gpu(result_part, size):
+ rank, world_size = get_dist_info()
+ # dump result part to tensor with pickle
+ part_tensor = torch.tensor(
+ bytearray(pickle.dumps(result_part)), dtype=torch.uint8, device='cuda')
+ # gather all result part tensor shape
+ shape_tensor = torch.tensor(part_tensor.shape, device='cuda')
+ shape_list = [shape_tensor.clone() for _ in range(world_size)]
+ dist.all_gather(shape_list, shape_tensor)
+ # padding result part tensor to max length
+ shape_max = torch.tensor(shape_list).max()
+ part_send = torch.zeros(shape_max, dtype=torch.uint8, device='cuda')
+ part_send[:shape_tensor[0]] = part_tensor
+ part_recv_list = [
+ part_tensor.new_zeros(shape_max) for _ in range(world_size)
+ ]
+ # gather all result part
+ dist.all_gather(part_recv_list, part_send)
+
+ if rank == 0:
+ part_list = []
+ for recv, shape in zip(part_recv_list, shape_list):
+ part_list.append(
+ pickle.loads(recv[:shape[0]].cpu().numpy().tobytes()))
+ # sort the results
+ ordered_results = []
+ for res in zip(*part_list):
+ ordered_results.extend(list(res))
+ # the dataloader may pad some samples
+ ordered_results = ordered_results[:size]
+ return ordered_results
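+
+# Worked example of the gather-with-padding scheme above (illustrative sizes):
+# if rank 0 pickles its partial results into 10 bytes and rank 1 into 7 bytes,
+# the all_gather of shapes yields [10, 7]; both byte tensors are zero-padded to
+# length 10 before being all_gathered, and rank 0 slices each received buffer
+# back to its true length (10 and 7) before unpickling and interleaving.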
diff --git a/annotator/uniformer/mmdet_null/apis/train.py b/annotator/uniformer/mmdet_null/apis/train.py
new file mode 100644
index 0000000000000000000000000000000000000000..7f2f1f95c0a8e7c9232f7aa490e8104f8e37c4f5
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/apis/train.py
@@ -0,0 +1,185 @@
+import random
+import warnings
+
+import numpy as np
+import torch
+from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
+from mmcv.runner import (HOOKS, DistSamplerSeedHook, EpochBasedRunner,
+ Fp16OptimizerHook, OptimizerHook, build_optimizer,
+ build_runner)
+from mmcv.utils import build_from_cfg
+
+from mmdet.core import DistEvalHook, EvalHook
+from mmdet.datasets import (build_dataloader, build_dataset,
+ replace_ImageToTensor)
+from mmdet.utils import get_root_logger
+from mmcv_custom.runner import EpochBasedRunnerAmp
+try:
+ import apex
+except ImportError:
+ print('apex is not installed')
+
+
+def set_random_seed(seed, deterministic=False):
+ """Set random seed.
+
+ Args:
+ seed (int): Seed to be used.
+ deterministic (bool): Whether to set the deterministic option for
+ CUDNN backend, i.e., set `torch.backends.cudnn.deterministic`
+ to True and `torch.backends.cudnn.benchmark` to False.
+ Default: False.
+ """
+ random.seed(seed)
+ np.random.seed(seed)
+ torch.manual_seed(seed)
+ torch.cuda.manual_seed_all(seed)
+ if deterministic:
+ torch.backends.cudnn.deterministic = True
+ torch.backends.cudnn.benchmark = False
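+
+# Example (illustrative): seed every library once per process before building
+# the dataloaders and model; `deterministic=True` trades speed for CUDNN
+# reproducibility.
+#
+#     set_random_seed(42, deterministic=True)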
+
+
+def train_detector(model,
+ dataset,
+ cfg,
+ distributed=False,
+ validate=False,
+ timestamp=None,
+ meta=None):
+ logger = get_root_logger(cfg.log_level)
+
+ # prepare data loaders
+ dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset]
+ if 'imgs_per_gpu' in cfg.data:
+ logger.warning('"imgs_per_gpu" is deprecated in MMDet V2.0. '
+ 'Please use "samples_per_gpu" instead')
+ if 'samples_per_gpu' in cfg.data:
+ logger.warning(
+ f'Got "imgs_per_gpu"={cfg.data.imgs_per_gpu} and '
+ f'"samples_per_gpu"={cfg.data.samples_per_gpu}, "imgs_per_gpu"'
+                f'={cfg.data.imgs_per_gpu} is used in this experiment')
+ else:
+ logger.warning(
+ 'Automatically set "samples_per_gpu"="imgs_per_gpu"='
+                f'{cfg.data.imgs_per_gpu} in this experiment')
+ cfg.data.samples_per_gpu = cfg.data.imgs_per_gpu
+
+ data_loaders = [
+ build_dataloader(
+ ds,
+ cfg.data.samples_per_gpu,
+ cfg.data.workers_per_gpu,
+ # cfg.gpus will be ignored if distributed
+ len(cfg.gpu_ids),
+ dist=distributed,
+ seed=cfg.seed) for ds in dataset
+ ]
+
+ # build optimizer
+ optimizer = build_optimizer(model, cfg.optimizer)
+
+ # use apex fp16 optimizer
+ if cfg.optimizer_config.get("type", None) and cfg.optimizer_config["type"] == "DistOptimizerHook":
+ if cfg.optimizer_config.get("use_fp16", False):
+ model, optimizer = apex.amp.initialize(
+ model.cuda(), optimizer, opt_level="O1")
+ for m in model.modules():
+ if hasattr(m, "fp16_enabled"):
+ m.fp16_enabled = True
+
+ # put model on gpus
+ if distributed:
+ find_unused_parameters = cfg.get('find_unused_parameters', False)
+ # Sets the `find_unused_parameters` parameter in
+ # torch.nn.parallel.DistributedDataParallel
+ model = MMDistributedDataParallel(
+ model.cuda(),
+ device_ids=[torch.cuda.current_device()],
+ broadcast_buffers=False,
+ find_unused_parameters=find_unused_parameters)
+ else:
+ model = MMDataParallel(
+ model.cuda(cfg.gpu_ids[0]), device_ids=cfg.gpu_ids)
+
+ if 'runner' not in cfg:
+ cfg.runner = {
+ 'type': 'EpochBasedRunner',
+ 'max_epochs': cfg.total_epochs
+ }
+ warnings.warn(
+ 'config is now expected to have a `runner` section, '
+ 'please set `runner` in your config.', UserWarning)
+ else:
+ if 'total_epochs' in cfg:
+ assert cfg.total_epochs == cfg.runner.max_epochs
+
+ # build runner
+ runner = build_runner(
+ cfg.runner,
+ default_args=dict(
+ model=model,
+ optimizer=optimizer,
+ work_dir=cfg.work_dir,
+ logger=logger,
+ meta=meta))
+
+ # an ugly workaround to make .log and .log.json filenames the same
+ runner.timestamp = timestamp
+
+ # fp16 setting
+ fp16_cfg = cfg.get('fp16', None)
+ if fp16_cfg is not None:
+ optimizer_config = Fp16OptimizerHook(
+ **cfg.optimizer_config, **fp16_cfg, distributed=distributed)
+ elif distributed and 'type' not in cfg.optimizer_config:
+ optimizer_config = OptimizerHook(**cfg.optimizer_config)
+ else:
+ optimizer_config = cfg.optimizer_config
+
+ # register hooks
+ runner.register_training_hooks(cfg.lr_config, optimizer_config,
+ cfg.checkpoint_config, cfg.log_config,
+ cfg.get('momentum_config', None))
+ if distributed:
+ if isinstance(runner, EpochBasedRunner):
+ runner.register_hook(DistSamplerSeedHook())
+
+ # register eval hooks
+ if validate:
+ # Support batch_size > 1 in validation
+ val_samples_per_gpu = cfg.data.val.pop('samples_per_gpu', 1)
+ if val_samples_per_gpu > 1:
+ # Replace 'ImageToTensor' to 'DefaultFormatBundle'
+ cfg.data.val.pipeline = replace_ImageToTensor(
+ cfg.data.val.pipeline)
+ val_dataset = build_dataset(cfg.data.val, dict(test_mode=True))
+ val_dataloader = build_dataloader(
+ val_dataset,
+ samples_per_gpu=val_samples_per_gpu,
+ workers_per_gpu=cfg.data.workers_per_gpu,
+ dist=distributed,
+ shuffle=False)
+ eval_cfg = cfg.get('evaluation', {})
+ eval_cfg['by_epoch'] = cfg.runner['type'] != 'IterBasedRunner'
+ eval_hook = DistEvalHook if distributed else EvalHook
+ runner.register_hook(eval_hook(val_dataloader, **eval_cfg))
+
+ # user-defined hooks
+ if cfg.get('custom_hooks', None):
+ custom_hooks = cfg.custom_hooks
+ assert isinstance(custom_hooks, list), \
+ f'custom_hooks expect list type, but got {type(custom_hooks)}'
+ for hook_cfg in cfg.custom_hooks:
+ assert isinstance(hook_cfg, dict), \
+ 'Each item in custom_hooks expects dict type, but got ' \
+ f'{type(hook_cfg)}'
+ hook_cfg = hook_cfg.copy()
+ priority = hook_cfg.pop('priority', 'NORMAL')
+ hook = build_from_cfg(hook_cfg, HOOKS)
+ runner.register_hook(hook, priority=priority)
+
+ if cfg.resume_from:
+ runner.resume(cfg.resume_from)
+ elif cfg.load_from:
+ runner.load_checkpoint(cfg.load_from)
+ runner.run(data_loaders, cfg.workflow)
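+
+# A minimal call sketch (illustrative, not executed here): `cfg` is assumed to
+# be an mmcv Config with data/optimizer/runner sections and `model` is assumed
+# to be built by the caller, mirroring the usual tools/train.py flow.
+#
+#     datasets = [build_dataset(cfg.data.train)]
+#     train_detector(model, datasets, cfg, distributed=False, validate=True,
+#                    timestamp=None, meta=dict(seed=cfg.seed))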
diff --git a/annotator/uniformer/mmdet_null/core/__init__.py b/annotator/uniformer/mmdet_null/core/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..b7137d60817cd9826e03cc45c8ccf3551b929909
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/core/__init__.py
@@ -0,0 +1,7 @@
+from .anchor import * # noqa: F401, F403
+# from .bbox import * # noqa: F401, F403
+from .evaluation import get_palette, get_classes # noqa: F401, F403
+# from .export import * # noqa: F401, F403
+# from .mask import * # noqa: F401, F403
+# from .post_processing import * # noqa: F401, F403
+# from .utils import * # noqa: F401, F403
diff --git a/annotator/uniformer/mmdet_null/core/anchor/__init__.py b/annotator/uniformer/mmdet_null/core/anchor/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..5838ff3eefb03bc83928fa13848cea9ff8647827
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/core/anchor/__init__.py
@@ -0,0 +1,11 @@
+from .anchor_generator import (AnchorGenerator, LegacyAnchorGenerator,
+ YOLOAnchorGenerator)
+from .builder import ANCHOR_GENERATORS, build_anchor_generator
+from .point_generator import PointGenerator
+from .utils import anchor_inside_flags, calc_region, images_to_levels
+
+__all__ = [
+ 'AnchorGenerator', 'LegacyAnchorGenerator', 'anchor_inside_flags',
+ 'PointGenerator', 'images_to_levels', 'calc_region',
+ 'build_anchor_generator', 'ANCHOR_GENERATORS', 'YOLOAnchorGenerator'
+]
diff --git a/annotator/uniformer/mmdet_null/core/anchor/anchor_generator.py b/annotator/uniformer/mmdet_null/core/anchor/anchor_generator.py
new file mode 100644
index 0000000000000000000000000000000000000000..5b0d0cc0c352bf4599a629979c614616ab383454
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/core/anchor/anchor_generator.py
@@ -0,0 +1,727 @@
+import annotator.uniformer.mmcv as mmcv
+import numpy as np
+import torch
+from torch.nn.modules.utils import _pair
+
+from .builder import ANCHOR_GENERATORS
+
+
+@ANCHOR_GENERATORS.register_module()
+class AnchorGenerator(object):
+ """Standard anchor generator for 2D anchor-based detectors.
+
+ Args:
+ strides (list[int] | list[tuple[int, int]]): Strides of anchors
+ in multiple feature levels in order (w, h).
+ ratios (list[float]): The list of ratios between the height and width
+ of anchors in a single level.
+ scales (list[int] | None): Anchor scales for anchors in a single level.
+            It cannot be set at the same time as `octave_base_scale` and
+            `scales_per_octave`.
+ base_sizes (list[int] | None): The basic sizes
+ of anchors in multiple levels.
+ If None is given, strides will be used as base_sizes.
+            (If the strides are non-square, the shortest stride is taken.)
+ scale_major (bool): Whether to multiply scales first when generating
+ base anchors. If true, the anchors in the same row will have the
+ same scales. By default it is True in V2.0
+ octave_base_scale (int): The base scale of octave.
+ scales_per_octave (int): Number of scales for each octave.
+ `octave_base_scale` and `scales_per_octave` are usually used in
+ retinanet and the `scales` should be None when they are set.
+ centers (list[tuple[float, float]] | None): The centers of the anchor
+ relative to the feature grid center in multiple feature levels.
+ By default it is set to be None and not used. If a list of tuple of
+ float is given, they will be used to shift the centers of anchors.
+ center_offset (float): The offset of center in proportion to anchors'
+ width and height. By default it is 0 in V2.0.
+
+ Examples:
+ >>> from mmdet.core import AnchorGenerator
+ >>> self = AnchorGenerator([16], [1.], [1.], [9])
+ >>> all_anchors = self.grid_anchors([(2, 2)], device='cpu')
+ >>> print(all_anchors)
+ [tensor([[-4.5000, -4.5000, 4.5000, 4.5000],
+ [11.5000, -4.5000, 20.5000, 4.5000],
+ [-4.5000, 11.5000, 4.5000, 20.5000],
+ [11.5000, 11.5000, 20.5000, 20.5000]])]
+ >>> self = AnchorGenerator([16, 32], [1.], [1.], [9, 18])
+ >>> all_anchors = self.grid_anchors([(2, 2), (1, 1)], device='cpu')
+ >>> print(all_anchors)
+ [tensor([[-4.5000, -4.5000, 4.5000, 4.5000],
+ [11.5000, -4.5000, 20.5000, 4.5000],
+ [-4.5000, 11.5000, 4.5000, 20.5000],
+ [11.5000, 11.5000, 20.5000, 20.5000]]), \
+ tensor([[-9., -9., 9., 9.]])]
+ """
+
+ def __init__(self,
+ strides,
+ ratios,
+ scales=None,
+ base_sizes=None,
+ scale_major=True,
+ octave_base_scale=None,
+ scales_per_octave=None,
+ centers=None,
+ center_offset=0.):
+ # check center and center_offset
+ if center_offset != 0:
+ assert centers is None, 'center cannot be set when center_offset' \
+ f'!=0, {centers} is given.'
+ if not (0 <= center_offset <= 1):
+ raise ValueError('center_offset should be in range [0, 1], '
+ f'{center_offset} is given.')
+ if centers is not None:
+ assert len(centers) == len(strides), \
+ 'The number of strides should be the same as centers, got ' \
+ f'{strides} and {centers}'
+
+ # calculate base sizes of anchors
+ self.strides = [_pair(stride) for stride in strides]
+ self.base_sizes = [min(stride) for stride in self.strides
+ ] if base_sizes is None else base_sizes
+ assert len(self.base_sizes) == len(self.strides), \
+ 'The number of strides should be the same as base sizes, got ' \
+ f'{self.strides} and {self.base_sizes}'
+
+ # calculate scales of anchors
+ assert ((octave_base_scale is not None
+ and scales_per_octave is not None) ^ (scales is not None)), \
+ 'scales and octave_base_scale with scales_per_octave cannot' \
+ ' be set at the same time'
+ if scales is not None:
+ self.scales = torch.Tensor(scales)
+ elif octave_base_scale is not None and scales_per_octave is not None:
+ octave_scales = np.array(
+ [2**(i / scales_per_octave) for i in range(scales_per_octave)])
+ scales = octave_scales * octave_base_scale
+ self.scales = torch.Tensor(scales)
+ else:
+ raise ValueError('Either scales or octave_base_scale with '
+ 'scales_per_octave should be set')
+
+ self.octave_base_scale = octave_base_scale
+ self.scales_per_octave = scales_per_octave
+ self.ratios = torch.Tensor(ratios)
+ self.scale_major = scale_major
+ self.centers = centers
+ self.center_offset = center_offset
+ self.base_anchors = self.gen_base_anchors()
+
+ @property
+ def num_base_anchors(self):
+ """list[int]: total number of base anchors in a feature grid"""
+ return [base_anchors.size(0) for base_anchors in self.base_anchors]
+
+ @property
+ def num_levels(self):
+ """int: number of feature levels that the generator will be applied"""
+ return len(self.strides)
+
+ def gen_base_anchors(self):
+ """Generate base anchors.
+
+ Returns:
+ list(torch.Tensor): Base anchors of a feature grid in multiple \
+ feature levels.
+ """
+ multi_level_base_anchors = []
+ for i, base_size in enumerate(self.base_sizes):
+ center = None
+ if self.centers is not None:
+ center = self.centers[i]
+ multi_level_base_anchors.append(
+ self.gen_single_level_base_anchors(
+ base_size,
+ scales=self.scales,
+ ratios=self.ratios,
+ center=center))
+ return multi_level_base_anchors
+
+ def gen_single_level_base_anchors(self,
+ base_size,
+ scales,
+ ratios,
+ center=None):
+ """Generate base anchors of a single level.
+
+ Args:
+ base_size (int | float): Basic size of an anchor.
+ scales (torch.Tensor): Scales of the anchor.
+            ratios (torch.Tensor): The ratio between the height
+ and width of anchors in a single level.
+ center (tuple[float], optional): The center of the base anchor
+ related to a single feature grid. Defaults to None.
+
+ Returns:
+ torch.Tensor: Anchors in a single-level feature maps.
+ """
+ w = base_size
+ h = base_size
+ if center is None:
+ x_center = self.center_offset * w
+ y_center = self.center_offset * h
+ else:
+ x_center, y_center = center
+
+ h_ratios = torch.sqrt(ratios)
+ w_ratios = 1 / h_ratios
+ if self.scale_major:
+ ws = (w * w_ratios[:, None] * scales[None, :]).view(-1)
+ hs = (h * h_ratios[:, None] * scales[None, :]).view(-1)
+ else:
+ ws = (w * scales[:, None] * w_ratios[None, :]).view(-1)
+ hs = (h * scales[:, None] * h_ratios[None, :]).view(-1)
+
+ # use float anchor and the anchor's center is aligned with the
+ # pixel center
+ base_anchors = [
+ x_center - 0.5 * ws, y_center - 0.5 * hs, x_center + 0.5 * ws,
+ y_center + 0.5 * hs
+ ]
+ base_anchors = torch.stack(base_anchors, dim=-1)
+
+ return base_anchors
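+
+    # Worked example (mirrors the class docstring): with base_size=9,
+    # scales=[1.], ratios=[1.] and center_offset=0, the center is (0, 0) and
+    # ws = hs = 9, so the single base anchor is [-4.5, -4.5, 4.5, 4.5].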
+
+ def _meshgrid(self, x, y, row_major=True):
+ """Generate mesh grid of x and y.
+
+ Args:
+ x (torch.Tensor): Grids of x dimension.
+ y (torch.Tensor): Grids of y dimension.
+ row_major (bool, optional): Whether to return y grids first.
+ Defaults to True.
+
+ Returns:
+ tuple[torch.Tensor]: The mesh grids of x and y.
+ """
+ # use shape instead of len to keep tracing while exporting to onnx
+ xx = x.repeat(y.shape[0])
+ yy = y.view(-1, 1).repeat(1, x.shape[0]).view(-1)
+ if row_major:
+ return xx, yy
+ else:
+ return yy, xx
+
+ def grid_anchors(self, featmap_sizes, device='cuda'):
+ """Generate grid anchors in multiple feature levels.
+
+ Args:
+ featmap_sizes (list[tuple]): List of feature map sizes in
+ multiple feature levels.
+ device (str): Device where the anchors will be put on.
+
+ Return:
+ list[torch.Tensor]: Anchors in multiple feature levels. \
+ The sizes of each tensor should be [N, 4], where \
+ N = width * height * num_base_anchors, width and height \
+ are the sizes of the corresponding feature level, \
+ num_base_anchors is the number of anchors for that level.
+ """
+ assert self.num_levels == len(featmap_sizes)
+ multi_level_anchors = []
+ for i in range(self.num_levels):
+ anchors = self.single_level_grid_anchors(
+ self.base_anchors[i].to(device),
+ featmap_sizes[i],
+ self.strides[i],
+ device=device)
+ multi_level_anchors.append(anchors)
+ return multi_level_anchors
+
+ def single_level_grid_anchors(self,
+ base_anchors,
+ featmap_size,
+ stride=(16, 16),
+ device='cuda'):
+ """Generate grid anchors of a single level.
+
+ Note:
+ This function is usually called by method ``self.grid_anchors``.
+
+ Args:
+ base_anchors (torch.Tensor): The base anchors of a feature grid.
+ featmap_size (tuple[int]): Size of the feature maps.
+ stride (tuple[int], optional): Stride of the feature map in order
+ (w, h). Defaults to (16, 16).
+ device (str, optional): Device the tensor will be put on.
+ Defaults to 'cuda'.
+
+ Returns:
+ torch.Tensor: Anchors in the overall feature maps.
+ """
+        # keep as Tensor, so that we can convert to ONNX correctly
+ feat_h, feat_w = featmap_size
+ shift_x = torch.arange(0, feat_w, device=device) * stride[0]
+ shift_y = torch.arange(0, feat_h, device=device) * stride[1]
+
+ shift_xx, shift_yy = self._meshgrid(shift_x, shift_y)
+ shifts = torch.stack([shift_xx, shift_yy, shift_xx, shift_yy], dim=-1)
+ shifts = shifts.type_as(base_anchors)
+ # first feat_w elements correspond to the first row of shifts
+ # add A anchors (1, A, 4) to K shifts (K, 1, 4) to get
+ # shifted anchors (K, A, 4), reshape to (K*A, 4)
+
+ all_anchors = base_anchors[None, :, :] + shifts[:, None, :]
+ all_anchors = all_anchors.view(-1, 4)
+ # first A rows correspond to A anchors of (0, 0) in feature map,
+ # then (0, 1), (0, 2), ...
+ return all_anchors
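+
+    # Worked example (consistent with the class docstring): for a 2x2 feature
+    # map with stride (16, 16), the (x, y) shifts are (0, 0), (16, 0), (0, 16)
+    # and (16, 16); adding each shift to both corners of the base anchor
+    # [-4.5, -4.5, 4.5, 4.5] yields [-4.5, -4.5, 4.5, 4.5],
+    # [11.5, -4.5, 20.5, 4.5], [-4.5, 11.5, 4.5, 20.5] and
+    # [11.5, 11.5, 20.5, 20.5].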
+
+ def valid_flags(self, featmap_sizes, pad_shape, device='cuda'):
+ """Generate valid flags of anchors in multiple feature levels.
+
+ Args:
+ featmap_sizes (list(tuple)): List of feature map sizes in
+ multiple feature levels.
+ pad_shape (tuple): The padded shape of the image.
+ device (str): Device where the anchors will be put on.
+
+ Return:
+ list(torch.Tensor): Valid flags of anchors in multiple levels.
+ """
+ assert self.num_levels == len(featmap_sizes)
+ multi_level_flags = []
+ for i in range(self.num_levels):
+ anchor_stride = self.strides[i]
+ feat_h, feat_w = featmap_sizes[i]
+ h, w = pad_shape[:2]
+ valid_feat_h = min(int(np.ceil(h / anchor_stride[1])), feat_h)
+ valid_feat_w = min(int(np.ceil(w / anchor_stride[0])), feat_w)
+ flags = self.single_level_valid_flags((feat_h, feat_w),
+ (valid_feat_h, valid_feat_w),
+ self.num_base_anchors[i],
+ device=device)
+ multi_level_flags.append(flags)
+ return multi_level_flags
+
+ def single_level_valid_flags(self,
+ featmap_size,
+ valid_size,
+ num_base_anchors,
+ device='cuda'):
+ """Generate the valid flags of anchor in a single feature map.
+
+ Args:
+ featmap_size (tuple[int]): The size of feature maps.
+ valid_size (tuple[int]): The valid size of the feature maps.
+ num_base_anchors (int): The number of base anchors.
+ device (str, optional): Device where the flags will be put on.
+ Defaults to 'cuda'.
+
+ Returns:
+ torch.Tensor: The valid flags of each anchor in a single level \
+ feature map.
+ """
+ feat_h, feat_w = featmap_size
+ valid_h, valid_w = valid_size
+ assert valid_h <= feat_h and valid_w <= feat_w
+ valid_x = torch.zeros(feat_w, dtype=torch.bool, device=device)
+ valid_y = torch.zeros(feat_h, dtype=torch.bool, device=device)
+ valid_x[:valid_w] = 1
+ valid_y[:valid_h] = 1
+ valid_xx, valid_yy = self._meshgrid(valid_x, valid_y)
+ valid = valid_xx & valid_yy
+ valid = valid[:, None].expand(valid.size(0),
+ num_base_anchors).contiguous().view(-1)
+ return valid
+
+ def __repr__(self):
+ """str: a string that describes the module"""
+ indent_str = ' '
+ repr_str = self.__class__.__name__ + '(\n'
+ repr_str += f'{indent_str}strides={self.strides},\n'
+ repr_str += f'{indent_str}ratios={self.ratios},\n'
+ repr_str += f'{indent_str}scales={self.scales},\n'
+ repr_str += f'{indent_str}base_sizes={self.base_sizes},\n'
+ repr_str += f'{indent_str}scale_major={self.scale_major},\n'
+ repr_str += f'{indent_str}octave_base_scale='
+ repr_str += f'{self.octave_base_scale},\n'
+ repr_str += f'{indent_str}scales_per_octave='
+ repr_str += f'{self.scales_per_octave},\n'
+        repr_str += f'{indent_str}num_levels={self.num_levels},\n'
+ repr_str += f'{indent_str}centers={self.centers},\n'
+ repr_str += f'{indent_str}center_offset={self.center_offset})'
+ return repr_str
+
+
+@ANCHOR_GENERATORS.register_module()
+class SSDAnchorGenerator(AnchorGenerator):
+ """Anchor generator for SSD.
+
+ Args:
+ strides (list[int] | list[tuple[int, int]]): Strides of anchors
+ in multiple feature levels.
+ ratios (list[float]): The list of ratios between the height and width
+ of anchors in a single level.
+ basesize_ratio_range (tuple(float)): Ratio range of anchors.
+        input_size (int): Size of the input image, 300 for SSD300,
+ 512 for SSD512.
+ scale_major (bool): Whether to multiply scales first when generating
+ base anchors. If true, the anchors in the same row will have the
+ same scales. It is always set to be False in SSD.
+ """
+
+ def __init__(self,
+ strides,
+ ratios,
+ basesize_ratio_range,
+ input_size=300,
+ scale_major=True):
+ assert len(strides) == len(ratios)
+ assert mmcv.is_tuple_of(basesize_ratio_range, float)
+
+ self.strides = [_pair(stride) for stride in strides]
+ self.input_size = input_size
+ self.centers = [(stride[0] / 2., stride[1] / 2.)
+ for stride in self.strides]
+ self.basesize_ratio_range = basesize_ratio_range
+
+ # calculate anchor ratios and sizes
+ min_ratio, max_ratio = basesize_ratio_range
+ min_ratio = int(min_ratio * 100)
+ max_ratio = int(max_ratio * 100)
+ step = int(np.floor(max_ratio - min_ratio) / (self.num_levels - 2))
+ min_sizes = []
+ max_sizes = []
+ for ratio in range(int(min_ratio), int(max_ratio) + 1, step):
+ min_sizes.append(int(self.input_size * ratio / 100))
+ max_sizes.append(int(self.input_size * (ratio + step) / 100))
+ if self.input_size == 300:
+ if basesize_ratio_range[0] == 0.15: # SSD300 COCO
+ min_sizes.insert(0, int(self.input_size * 7 / 100))
+ max_sizes.insert(0, int(self.input_size * 15 / 100))
+ elif basesize_ratio_range[0] == 0.2: # SSD300 VOC
+ min_sizes.insert(0, int(self.input_size * 10 / 100))
+ max_sizes.insert(0, int(self.input_size * 20 / 100))
+ else:
+ raise ValueError(
+ 'basesize_ratio_range[0] should be either 0.15'
+ 'or 0.2 when input_size is 300, got '
+ f'{basesize_ratio_range[0]}.')
+ elif self.input_size == 512:
+ if basesize_ratio_range[0] == 0.1: # SSD512 COCO
+ min_sizes.insert(0, int(self.input_size * 4 / 100))
+ max_sizes.insert(0, int(self.input_size * 10 / 100))
+ elif basesize_ratio_range[0] == 0.15: # SSD512 VOC
+ min_sizes.insert(0, int(self.input_size * 7 / 100))
+ max_sizes.insert(0, int(self.input_size * 15 / 100))
+ else:
+ raise ValueError('basesize_ratio_range[0] should be either 0.1'
+ 'or 0.15 when input_size is 512, got'
+ f' {basesize_ratio_range[0]}.')
+ else:
+ raise ValueError('Only support 300 or 512 in SSDAnchorGenerator'
+ f', got {self.input_size}.')
+
+ anchor_ratios = []
+ anchor_scales = []
+ for k in range(len(self.strides)):
+ scales = [1., np.sqrt(max_sizes[k] / min_sizes[k])]
+ anchor_ratio = [1.]
+ for r in ratios[k]:
+ anchor_ratio += [1 / r, r] # 4 or 6 ratio
+ anchor_ratios.append(torch.Tensor(anchor_ratio))
+ anchor_scales.append(torch.Tensor(scales))
+
+ self.base_sizes = min_sizes
+ self.scales = anchor_scales
+ self.ratios = anchor_ratios
+ self.scale_major = scale_major
+ self.center_offset = 0
+ self.base_anchors = self.gen_base_anchors()
+
+ def gen_base_anchors(self):
+ """Generate base anchors.
+
+ Returns:
+ list(torch.Tensor): Base anchors of a feature grid in multiple \
+ feature levels.
+ """
+ multi_level_base_anchors = []
+ for i, base_size in enumerate(self.base_sizes):
+ base_anchors = self.gen_single_level_base_anchors(
+ base_size,
+ scales=self.scales[i],
+ ratios=self.ratios[i],
+ center=self.centers[i])
+ indices = list(range(len(self.ratios[i])))
+ indices.insert(1, len(indices))
+ base_anchors = torch.index_select(base_anchors, 0,
+ torch.LongTensor(indices))
+ multi_level_base_anchors.append(base_anchors)
+ return multi_level_base_anchors
+
+ def __repr__(self):
+ """str: a string that describes the module"""
+ indent_str = ' '
+ repr_str = self.__class__.__name__ + '(\n'
+ repr_str += f'{indent_str}strides={self.strides},\n'
+ repr_str += f'{indent_str}scales={self.scales},\n'
+ repr_str += f'{indent_str}scale_major={self.scale_major},\n'
+ repr_str += f'{indent_str}input_size={self.input_size},\n'
+ repr_str += f'{indent_str}scales={self.scales},\n'
+ repr_str += f'{indent_str}ratios={self.ratios},\n'
+ repr_str += f'{indent_str}num_levels={self.num_levels},\n'
+ repr_str += f'{indent_str}base_sizes={self.base_sizes},\n'
+ repr_str += f'{indent_str}basesize_ratio_range='
+ repr_str += f'{self.basesize_ratio_range})'
+ return repr_str
+
+
+@ANCHOR_GENERATORS.register_module()
+class LegacyAnchorGenerator(AnchorGenerator):
+ """Legacy anchor generator used in MMDetection V1.x.
+
+ Note:
+ Difference to the V2.0 anchor generator:
+
+        1. The center offset of V1.x anchors is set to 0.5 rather than 0.
+        2. The width/height are reduced by 1 when calculating the anchors' \
+            centers and corners to meet the V1.x coordinate system.
+ 3. The anchors' corners are quantized.
+
+ Args:
+ strides (list[int] | list[tuple[int]]): Strides of anchors
+ in multiple feature levels.
+ ratios (list[float]): The list of ratios between the height and width
+ of anchors in a single level.
+ scales (list[int] | None): Anchor scales for anchors in a single level.
+            It cannot be set at the same time as `octave_base_scale` and
+            `scales_per_octave`.
+ base_sizes (list[int]): The basic sizes of anchors in multiple levels.
+ If None is given, strides will be used to generate base_sizes.
+ scale_major (bool): Whether to multiply scales first when generating
+ base anchors. If true, the anchors in the same row will have the
+ same scales. By default it is True in V2.0
+ octave_base_scale (int): The base scale of octave.
+ scales_per_octave (int): Number of scales for each octave.
+ `octave_base_scale` and `scales_per_octave` are usually used in
+ retinanet and the `scales` should be None when they are set.
+ centers (list[tuple[float, float]] | None): The centers of the anchor
+ relative to the feature grid center in multiple feature levels.
+            By default it is set to be None and not used. If a list of tuples of
+            float is given, they will be used to shift the centers of anchors.
+        center_offset (float): The offset of center in proportion to anchors'
+            width and height. It is 0 by default in V2.0, but it should be set
+            to 0.5 for v1.x models.
+
+ Examples:
+ >>> from mmdet.core import LegacyAnchorGenerator
+ >>> self = LegacyAnchorGenerator(
+ >>> [16], [1.], [1.], [9], center_offset=0.5)
+ >>> all_anchors = self.grid_anchors(((2, 2),), device='cpu')
+ >>> print(all_anchors)
+ [tensor([[ 0., 0., 8., 8.],
+ [16., 0., 24., 8.],
+ [ 0., 16., 8., 24.],
+ [16., 16., 24., 24.]])]
+ """
+
+ def gen_single_level_base_anchors(self,
+ base_size,
+ scales,
+ ratios,
+ center=None):
+ """Generate base anchors of a single level.
+
+ Note:
+            The width/height of anchors are reduced by 1 when calculating \
+ the centers and corners to meet the V1.x coordinate system.
+
+ Args:
+ base_size (int | float): Basic size of an anchor.
+ scales (torch.Tensor): Scales of the anchor.
+            ratios (torch.Tensor): The ratio between the height
+ and width of anchors in a single level.
+ center (tuple[float], optional): The center of the base anchor
+ related to a single feature grid. Defaults to None.
+
+ Returns:
+ torch.Tensor: Anchors in a single-level feature map.
+ """
+ w = base_size
+ h = base_size
+ if center is None:
+ x_center = self.center_offset * (w - 1)
+ y_center = self.center_offset * (h - 1)
+ else:
+ x_center, y_center = center
+
+ h_ratios = torch.sqrt(ratios)
+ w_ratios = 1 / h_ratios
+ if self.scale_major:
+ ws = (w * w_ratios[:, None] * scales[None, :]).view(-1)
+ hs = (h * h_ratios[:, None] * scales[None, :]).view(-1)
+ else:
+ ws = (w * scales[:, None] * w_ratios[None, :]).view(-1)
+ hs = (h * scales[:, None] * h_ratios[None, :]).view(-1)
+
+ # use float anchor and the anchor's center is aligned with the
+ # pixel center
+ base_anchors = [
+ x_center - 0.5 * (ws - 1), y_center - 0.5 * (hs - 1),
+ x_center + 0.5 * (ws - 1), y_center + 0.5 * (hs - 1)
+ ]
+ base_anchors = torch.stack(base_anchors, dim=-1).round()
+
+ return base_anchors
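+
+    # Worked example (mirrors the class docstring): with base_size=9,
+    # scales=[1.], ratios=[1.] and center_offset=0.5, the center is
+    # (0.5 * (9 - 1), 0.5 * (9 - 1)) = (4, 4) and ws = hs = 9, so the anchor is
+    # [4 - 4, 4 - 4, 4 + 4, 4 + 4] = [0, 0, 8, 8] after rounding.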
+
+
+@ANCHOR_GENERATORS.register_module()
+class LegacySSDAnchorGenerator(SSDAnchorGenerator, LegacyAnchorGenerator):
+ """Legacy anchor generator used in MMDetection V1.x.
+
+ The difference between `LegacySSDAnchorGenerator` and `SSDAnchorGenerator`
+ can be found in `LegacyAnchorGenerator`.
+ """
+
+ def __init__(self,
+ strides,
+ ratios,
+ basesize_ratio_range,
+ input_size=300,
+ scale_major=True):
+ super(LegacySSDAnchorGenerator,
+ self).__init__(strides, ratios, basesize_ratio_range, input_size,
+ scale_major)
+ self.centers = [((stride - 1) / 2., (stride - 1) / 2.)
+ for stride in strides]
+ self.base_anchors = self.gen_base_anchors()
+
+
+@ANCHOR_GENERATORS.register_module()
+class YOLOAnchorGenerator(AnchorGenerator):
+ """Anchor generator for YOLO.
+
+ Args:
+ strides (list[int] | list[tuple[int, int]]): Strides of anchors
+ in multiple feature levels.
+ base_sizes (list[list[tuple[int, int]]]): The basic sizes
+ of anchors in multiple levels.
+ """
+
+ def __init__(self, strides, base_sizes):
+ self.strides = [_pair(stride) for stride in strides]
+ self.centers = [(stride[0] / 2., stride[1] / 2.)
+ for stride in self.strides]
+ self.base_sizes = []
+ num_anchor_per_level = len(base_sizes[0])
+ for base_sizes_per_level in base_sizes:
+ assert num_anchor_per_level == len(base_sizes_per_level)
+ self.base_sizes.append(
+ [_pair(base_size) for base_size in base_sizes_per_level])
+ self.base_anchors = self.gen_base_anchors()
+
+ @property
+ def num_levels(self):
+ """int: number of feature levels that the generator will be applied"""
+ return len(self.base_sizes)
+
+ def gen_base_anchors(self):
+ """Generate base anchors.
+
+ Returns:
+ list(torch.Tensor): Base anchors of a feature grid in multiple \
+ feature levels.
+ """
+ multi_level_base_anchors = []
+ for i, base_sizes_per_level in enumerate(self.base_sizes):
+ center = None
+ if self.centers is not None:
+ center = self.centers[i]
+ multi_level_base_anchors.append(
+ self.gen_single_level_base_anchors(base_sizes_per_level,
+ center))
+ return multi_level_base_anchors
+
+ def gen_single_level_base_anchors(self, base_sizes_per_level, center=None):
+ """Generate base anchors of a single level.
+
+ Args:
+ base_sizes_per_level (list[tuple[int, int]]): Basic sizes of
+ anchors.
+ center (tuple[float], optional): The center of the base anchor
+ related to a single feature grid. Defaults to None.
+
+ Returns:
+ torch.Tensor: Anchors in a single-level feature maps.
+ """
+ x_center, y_center = center
+ base_anchors = []
+ for base_size in base_sizes_per_level:
+ w, h = base_size
+
+ # use float anchor and the anchor's center is aligned with the
+ # pixel center
+ base_anchor = torch.Tensor([
+ x_center - 0.5 * w, y_center - 0.5 * h, x_center + 0.5 * w,
+ y_center + 0.5 * h
+ ])
+ base_anchors.append(base_anchor)
+ base_anchors = torch.stack(base_anchors, dim=0)
+
+ return base_anchors
+
+ def responsible_flags(self, featmap_sizes, gt_bboxes, device='cuda'):
+ """Generate responsible anchor flags of grid cells in multiple scales.
+
+ Args:
+ featmap_sizes (list(tuple)): List of feature map sizes in multiple
+ feature levels.
+ gt_bboxes (Tensor): Ground truth boxes, shape (n, 4).
+ device (str): Device where the anchors will be put on.
+
+ Return:
+            list(torch.Tensor): responsible flags of anchors in multiple levels
+ """
+ assert self.num_levels == len(featmap_sizes)
+ multi_level_responsible_flags = []
+ for i in range(self.num_levels):
+ anchor_stride = self.strides[i]
+ flags = self.single_level_responsible_flags(
+ featmap_sizes[i],
+ gt_bboxes,
+ anchor_stride,
+ self.num_base_anchors[i],
+ device=device)
+ multi_level_responsible_flags.append(flags)
+ return multi_level_responsible_flags
+
+ def single_level_responsible_flags(self,
+ featmap_size,
+ gt_bboxes,
+ stride,
+ num_base_anchors,
+ device='cuda'):
+ """Generate the responsible flags of anchor in a single feature map.
+
+ Args:
+ featmap_size (tuple[int]): The size of feature maps.
+ gt_bboxes (Tensor): Ground truth boxes, shape (n, 4).
+ stride (tuple(int)): stride of current level
+ num_base_anchors (int): The number of base anchors.
+ device (str, optional): Device where the flags will be put on.
+ Defaults to 'cuda'.
+
+ Returns:
+            torch.Tensor: The responsible flags of each anchor in a single \
+                level feature map.
+ """
+ feat_h, feat_w = featmap_size
+ gt_bboxes_cx = ((gt_bboxes[:, 0] + gt_bboxes[:, 2]) * 0.5).to(device)
+ gt_bboxes_cy = ((gt_bboxes[:, 1] + gt_bboxes[:, 3]) * 0.5).to(device)
+ gt_bboxes_grid_x = torch.floor(gt_bboxes_cx / stride[0]).long()
+ gt_bboxes_grid_y = torch.floor(gt_bboxes_cy / stride[1]).long()
+
+ # row major indexing
+ gt_bboxes_grid_idx = gt_bboxes_grid_y * feat_w + gt_bboxes_grid_x
+
+ responsible_grid = torch.zeros(
+ feat_h * feat_w, dtype=torch.uint8, device=device)
+ responsible_grid[gt_bboxes_grid_idx] = 1
+
+ responsible_grid = responsible_grid[:, None].expand(
+ responsible_grid.size(0), num_base_anchors).contiguous().view(-1)
+ return responsible_grid
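+
+# Worked example of the responsible-flag logic above (illustrative numbers):
+# with a 4x4 feature map, stride (8, 8) and a single gt box [4, 4, 12, 12],
+# the gt center is (8, 8), which falls into grid cell (1, 1), i.e. flat index
+# 1 * 4 + 1 = 5; only that cell's anchors are marked as responsible.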
diff --git a/annotator/uniformer/mmdet_null/core/anchor/builder.py b/annotator/uniformer/mmdet_null/core/anchor/builder.py
new file mode 100644
index 0000000000000000000000000000000000000000..154a3c6dcb9ad381be57af7da85dbfef10d783d7
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/core/anchor/builder.py
@@ -0,0 +1,7 @@
+from annotator.uniformer.mmcv.utils import Registry, build_from_cfg
+
+ANCHOR_GENERATORS = Registry('Anchor generator')
+
+
+def build_anchor_generator(cfg, default_args=None):
+ return build_from_cfg(cfg, ANCHOR_GENERATORS, default_args)
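+
+# Example usage sketch (illustrative; assumes the generator classes have been
+# registered by importing the package, and the config values are arbitrary):
+#
+#     anchor_cfg = dict(type='AnchorGenerator',
+#                       strides=[16], ratios=[1.0], scales=[1.0])
+#     anchor_generator = build_anchor_generator(anchor_cfg)
+#     # anchor_generator.num_levels == 1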
diff --git a/annotator/uniformer/mmdet_null/core/anchor/point_generator.py b/annotator/uniformer/mmdet_null/core/anchor/point_generator.py
new file mode 100644
index 0000000000000000000000000000000000000000..e6fbd988c317992c092c68c827dc4c53223b4a4a
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/core/anchor/point_generator.py
@@ -0,0 +1,37 @@
+import torch
+
+from .builder import ANCHOR_GENERATORS
+
+
+@ANCHOR_GENERATORS.register_module()
+class PointGenerator(object):
+
+ def _meshgrid(self, x, y, row_major=True):
+ xx = x.repeat(len(y))
+ yy = y.view(-1, 1).repeat(1, len(x)).view(-1)
+ if row_major:
+ return xx, yy
+ else:
+ return yy, xx
+
+ def grid_points(self, featmap_size, stride=16, device='cuda'):
+ feat_h, feat_w = featmap_size
+ shift_x = torch.arange(0., feat_w, device=device) * stride
+ shift_y = torch.arange(0., feat_h, device=device) * stride
+ shift_xx, shift_yy = self._meshgrid(shift_x, shift_y)
+ stride = shift_x.new_full((shift_xx.shape[0], ), stride)
+ shifts = torch.stack([shift_xx, shift_yy, stride], dim=-1)
+ all_points = shifts.to(device)
+ return all_points
+
+ def valid_flags(self, featmap_size, valid_size, device='cuda'):
+ feat_h, feat_w = featmap_size
+ valid_h, valid_w = valid_size
+ assert valid_h <= feat_h and valid_w <= feat_w
+ valid_x = torch.zeros(feat_w, dtype=torch.bool, device=device)
+ valid_y = torch.zeros(feat_h, dtype=torch.bool, device=device)
+ valid_x[:valid_w] = 1
+ valid_y[:valid_h] = 1
+ valid_xx, valid_yy = self._meshgrid(valid_x, valid_y)
+ valid = valid_xx & valid_yy
+ return valid
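+
+# Worked example for grid_points (illustrative): for a 2x2 feature map with
+# stride 16, the generated points are
+# [[0, 0, 16], [16, 0, 16], [0, 16, 16], [16, 16, 16]],
+# i.e. (x, y, stride) triplets in row-major order.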
diff --git a/annotator/uniformer/mmdet_null/core/anchor/utils.py b/annotator/uniformer/mmdet_null/core/anchor/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..ab9b53f37f7be1f52fe63c5e53df64ac1303b9e0
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/core/anchor/utils.py
@@ -0,0 +1,71 @@
+import torch
+
+
+def images_to_levels(target, num_levels):
+ """Convert targets by image to targets by feature level.
+
+ [target_img0, target_img1] -> [target_level0, target_level1, ...]
+ """
+ target = torch.stack(target, 0)
+ level_targets = []
+ start = 0
+ for n in num_levels:
+ end = start + n
+ # level_targets.append(target[:, start:end].squeeze(0))
+ level_targets.append(target[:, start:end])
+ start = end
+ return level_targets
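+
+# Worked example (illustrative shapes): given targets for two images, each a
+# tensor of shape (6, 4), and num_levels=[4, 2], the stacked tensor has shape
+# (2, 6, 4) and the output is [level_0 of shape (2, 4, 4),
+# level_1 of shape (2, 2, 4)].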
+
+
+def anchor_inside_flags(flat_anchors,
+ valid_flags,
+ img_shape,
+ allowed_border=0):
+ """Check whether the anchors are inside the border.
+
+ Args:
+ flat_anchors (torch.Tensor): Flatten anchors, shape (n, 4).
+ valid_flags (torch.Tensor): An existing valid flags of anchors.
+ img_shape (tuple(int)): Shape of current image.
+ allowed_border (int, optional): The border to allow the valid anchor.
+ Defaults to 0.
+
+ Returns:
+ torch.Tensor: Flags indicating whether the anchors are inside a \
+ valid range.
+ """
+ img_h, img_w = img_shape[:2]
+ if allowed_border >= 0:
+ inside_flags = valid_flags & \
+ (flat_anchors[:, 0] >= -allowed_border) & \
+ (flat_anchors[:, 1] >= -allowed_border) & \
+ (flat_anchors[:, 2] < img_w + allowed_border) & \
+ (flat_anchors[:, 3] < img_h + allowed_border)
+ else:
+ inside_flags = valid_flags
+ return inside_flags
+
+
+def calc_region(bbox, ratio, featmap_size=None):
+ """Calculate a proportional bbox region.
+
+    The bbox center is fixed and the new h' and w' are h * ratio and w * ratio.
+
+ Args:
+ bbox (Tensor): Bboxes to calculate regions, shape (n, 4).
+ ratio (float): Ratio of the output region.
+ featmap_size (tuple): Feature map size used for clipping the boundary.
+
+ Returns:
+ tuple: x1, y1, x2, y2
+ """
+ x1 = torch.round((1 - ratio) * bbox[0] + ratio * bbox[2]).long()
+ y1 = torch.round((1 - ratio) * bbox[1] + ratio * bbox[3]).long()
+ x2 = torch.round(ratio * bbox[0] + (1 - ratio) * bbox[2]).long()
+ y2 = torch.round(ratio * bbox[1] + (1 - ratio) * bbox[3]).long()
+ if featmap_size is not None:
+ x1 = x1.clamp(min=0, max=featmap_size[1])
+ y1 = y1.clamp(min=0, max=featmap_size[0])
+ x2 = x2.clamp(min=0, max=featmap_size[1])
+ y2 = y2.clamp(min=0, max=featmap_size[0])
+ return (x1, y1, x2, y2)
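+
+# Worked example (illustrative): for bbox = [0, 0, 100, 100] and ratio = 0.25,
+# x1 = round(0.75 * 0 + 0.25 * 100) = 25 and x2 = round(0.25 * 0 + 0.75 * 100)
+# = 75 (likewise for y), so the returned region is (25, 25, 75, 75).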
diff --git a/annotator/uniformer/mmdet_null/core/bbox/__init__.py b/annotator/uniformer/mmdet_null/core/bbox/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..a3537297f57e4c3670afdb97b5fcb1b2d775e5f3
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/core/bbox/__init__.py
@@ -0,0 +1,27 @@
+from .assigners import (AssignResult, BaseAssigner, CenterRegionAssigner,
+ MaxIoUAssigner, RegionAssigner)
+from .builder import build_assigner, build_bbox_coder, build_sampler
+from .coder import (BaseBBoxCoder, DeltaXYWHBBoxCoder, PseudoBBoxCoder,
+ TBLRBBoxCoder)
+from .iou_calculators import BboxOverlaps2D, bbox_overlaps
+from .samplers import (BaseSampler, CombinedSampler,
+ InstanceBalancedPosSampler, IoUBalancedNegSampler,
+ OHEMSampler, PseudoSampler, RandomSampler,
+ SamplingResult, ScoreHLRSampler)
+from .transforms import (bbox2distance, bbox2result, bbox2roi,
+ bbox_cxcywh_to_xyxy, bbox_flip, bbox_mapping,
+ bbox_mapping_back, bbox_rescale, bbox_xyxy_to_cxcywh,
+ distance2bbox, roi2bbox)
+
+__all__ = [
+ 'bbox_overlaps', 'BboxOverlaps2D', 'BaseAssigner', 'MaxIoUAssigner',
+ 'AssignResult', 'BaseSampler', 'PseudoSampler', 'RandomSampler',
+ 'InstanceBalancedPosSampler', 'IoUBalancedNegSampler', 'CombinedSampler',
+ 'OHEMSampler', 'SamplingResult', 'ScoreHLRSampler', 'build_assigner',
+ 'build_sampler', 'bbox_flip', 'bbox_mapping', 'bbox_mapping_back',
+ 'bbox2roi', 'roi2bbox', 'bbox2result', 'distance2bbox', 'bbox2distance',
+ 'build_bbox_coder', 'BaseBBoxCoder', 'PseudoBBoxCoder',
+ 'DeltaXYWHBBoxCoder', 'TBLRBBoxCoder', 'CenterRegionAssigner',
+ 'bbox_rescale', 'bbox_cxcywh_to_xyxy', 'bbox_xyxy_to_cxcywh',
+ 'RegionAssigner'
+]
diff --git a/annotator/uniformer/mmdet_null/core/bbox/assigners/__init__.py b/annotator/uniformer/mmdet_null/core/bbox/assigners/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..95e34a848652f2ab3ca6d3489aa2934d24817888
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/core/bbox/assigners/__init__.py
@@ -0,0 +1,16 @@
+from .approx_max_iou_assigner import ApproxMaxIoUAssigner
+from .assign_result import AssignResult
+from .atss_assigner import ATSSAssigner
+from .base_assigner import BaseAssigner
+from .center_region_assigner import CenterRegionAssigner
+from .grid_assigner import GridAssigner
+from .hungarian_assigner import HungarianAssigner
+from .max_iou_assigner import MaxIoUAssigner
+from .point_assigner import PointAssigner
+from .region_assigner import RegionAssigner
+
+__all__ = [
+ 'BaseAssigner', 'MaxIoUAssigner', 'ApproxMaxIoUAssigner', 'AssignResult',
+ 'PointAssigner', 'ATSSAssigner', 'CenterRegionAssigner', 'GridAssigner',
+ 'HungarianAssigner', 'RegionAssigner'
+]
diff --git a/annotator/uniformer/mmdet_null/core/bbox/assigners/approx_max_iou_assigner.py b/annotator/uniformer/mmdet_null/core/bbox/assigners/approx_max_iou_assigner.py
new file mode 100644
index 0000000000000000000000000000000000000000..6d07656d173744426795c81c14c6bcdb4e63a406
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/core/bbox/assigners/approx_max_iou_assigner.py
@@ -0,0 +1,145 @@
+import torch
+
+from ..builder import BBOX_ASSIGNERS
+from ..iou_calculators import build_iou_calculator
+from .max_iou_assigner import MaxIoUAssigner
+
+
+@BBOX_ASSIGNERS.register_module()
+class ApproxMaxIoUAssigner(MaxIoUAssigner):
+ """Assign a corresponding gt bbox or background to each bbox.
+
+    Each proposal will be assigned an integer indicating the ground truth
+    index (semi-positive index: gt index (0-based), -1: background).
+
+ - -1: negative sample, no assigned gt
+ - semi-positive integer: positive sample, index (0-based) of assigned gt
+
+ Args:
+ pos_iou_thr (float): IoU threshold for positive bboxes.
+ neg_iou_thr (float or tuple): IoU threshold for negative bboxes.
+ min_pos_iou (float): Minimum iou for a bbox to be considered as a
+ positive bbox. Positive samples can have smaller IoU than
+            pos_iou_thr due to the last step (assign max IoU sample to each gt).
+ gt_max_assign_all (bool): Whether to assign all bboxes with the same
+ highest overlap with some gt to that gt.
+ ignore_iof_thr (float): IoF threshold for ignoring bboxes (if
+ `gt_bboxes_ignore` is specified). Negative values mean not
+ ignoring any bboxes.
+ ignore_wrt_candidates (bool): Whether to compute the iof between
+ `bboxes` and `gt_bboxes_ignore`, or the contrary.
+        match_low_quality (bool): Whether to allow low quality matches. This is
+ usually allowed for RPN and single stage detectors, but not allowed
+ in the second stage.
+ gpu_assign_thr (int): The upper bound of the number of GT for GPU
+ assign. When the number of gt is above this threshold, will assign
+ on CPU device. Negative values mean not assign on CPU.
+ """
+
+ def __init__(self,
+ pos_iou_thr,
+ neg_iou_thr,
+ min_pos_iou=.0,
+ gt_max_assign_all=True,
+ ignore_iof_thr=-1,
+ ignore_wrt_candidates=True,
+ match_low_quality=True,
+ gpu_assign_thr=-1,
+ iou_calculator=dict(type='BboxOverlaps2D')):
+ self.pos_iou_thr = pos_iou_thr
+ self.neg_iou_thr = neg_iou_thr
+ self.min_pos_iou = min_pos_iou
+ self.gt_max_assign_all = gt_max_assign_all
+ self.ignore_iof_thr = ignore_iof_thr
+ self.ignore_wrt_candidates = ignore_wrt_candidates
+ self.gpu_assign_thr = gpu_assign_thr
+ self.match_low_quality = match_low_quality
+ self.iou_calculator = build_iou_calculator(iou_calculator)
+
+ def assign(self,
+ approxs,
+ squares,
+ approxs_per_octave,
+ gt_bboxes,
+ gt_bboxes_ignore=None,
+ gt_labels=None):
+ """Assign gt to approxs.
+
+        This method assigns a gt bbox to each group of approxs (bboxes);
+        each group of approxs is represented by a base approx (bbox) and
+        will be assigned with -1 or a semi-positive number.
+        background_label (-1) means negative sample, and a
+        semi-positive number is the index (0-based) of the assigned gt.
+        The assignment is done in the following steps, and the order matters.
+
+        1. assign every bbox to background_label (-1)
+        2. use the max IoU of each group of approxs to assign
+        3. assign proposals whose iou with all gts < neg_iou_thr to background
+        4. for each bbox, if the iou with its nearest gt >= pos_iou_thr,
+            assign it to that bbox
+        5. for each gt bbox, assign its nearest proposals (may be more than
+            one) to itself
+
+ Args:
+ approxs (Tensor): Bounding boxes to be assigned,
+ shape(approxs_per_octave*n, 4).
+ squares (Tensor): Base Bounding boxes to be assigned,
+ shape(n, 4).
+ approxs_per_octave (int): number of approxs per octave
+ gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4).
+ gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are
+ labelled as `ignored`, e.g., crowd boxes in COCO.
+ gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ).
+
+ Returns:
+ :obj:`AssignResult`: The assign result.
+ """
+ num_squares = squares.size(0)
+ num_gts = gt_bboxes.size(0)
+
+ if num_squares == 0 or num_gts == 0:
+ # No predictions and/or truth, return empty assignment
+ overlaps = approxs.new(num_gts, num_squares)
+ assign_result = self.assign_wrt_overlaps(overlaps, gt_labels)
+ return assign_result
+
+ # re-organize anchors by approxs_per_octave x num_squares
+ approxs = torch.transpose(
+ approxs.view(num_squares, approxs_per_octave, 4), 0,
+ 1).contiguous().view(-1, 4)
+ assign_on_cpu = True if (self.gpu_assign_thr > 0) and (
+ num_gts > self.gpu_assign_thr) else False
+ # compute overlap and assign gt on CPU when number of GT is large
+ if assign_on_cpu:
+ device = approxs.device
+ approxs = approxs.cpu()
+ gt_bboxes = gt_bboxes.cpu()
+ if gt_bboxes_ignore is not None:
+ gt_bboxes_ignore = gt_bboxes_ignore.cpu()
+ if gt_labels is not None:
+ gt_labels = gt_labels.cpu()
+ all_overlaps = self.iou_calculator(approxs, gt_bboxes)
+
+ overlaps, _ = all_overlaps.view(approxs_per_octave, num_squares,
+ num_gts).max(dim=0)
+ overlaps = torch.transpose(overlaps, 0, 1)
+
+ if (self.ignore_iof_thr > 0 and gt_bboxes_ignore is not None
+ and gt_bboxes_ignore.numel() > 0 and squares.numel() > 0):
+ if self.ignore_wrt_candidates:
+ ignore_overlaps = self.iou_calculator(
+ squares, gt_bboxes_ignore, mode='iof')
+ ignore_max_overlaps, _ = ignore_overlaps.max(dim=1)
+ else:
+ ignore_overlaps = self.iou_calculator(
+ gt_bboxes_ignore, squares, mode='iof')
+ ignore_max_overlaps, _ = ignore_overlaps.max(dim=0)
+ overlaps[:, ignore_max_overlaps > self.ignore_iof_thr] = -1
+
+ assign_result = self.assign_wrt_overlaps(overlaps, gt_labels)
+ if assign_on_cpu:
+ assign_result.gt_inds = assign_result.gt_inds.to(device)
+ assign_result.max_overlaps = assign_result.max_overlaps.to(device)
+ if assign_result.labels is not None:
+ assign_result.labels = assign_result.labels.to(device)
+ return assign_result
diff --git a/annotator/uniformer/mmdet_null/core/bbox/assigners/assign_result.py b/annotator/uniformer/mmdet_null/core/bbox/assigners/assign_result.py
new file mode 100644
index 0000000000000000000000000000000000000000..cb12a571dfe306e5f3055af170d16ff12371ac77
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/core/bbox/assigners/assign_result.py
@@ -0,0 +1,204 @@
+import torch
+
+from annotator.uniformer.mmdet.utils import util_mixins
+
+
+class AssignResult(util_mixins.NiceRepr):
+ """Stores assignments between predicted and truth boxes.
+
+ Attributes:
+ num_gts (int): the number of truth boxes considered when computing this
+ assignment
+
+ gt_inds (LongTensor): for each predicted box indicates the 1-based
+ index of the assigned truth box. 0 means unassigned and -1 means
+ ignore.
+
+ max_overlaps (FloatTensor): the iou between the predicted box and its
+ assigned truth box.
+
+ labels (None | LongTensor): If specified, for each predicted box
+ indicates the category label of the assigned truth box.
+
+ Example:
+ >>> # An assign result between 4 predicted boxes and 9 true boxes
+ >>> # where only two boxes were assigned.
+ >>> num_gts = 9
+        >>> max_overlaps = torch.FloatTensor([0, .5, .9, 0])
+ >>> gt_inds = torch.LongTensor([-1, 1, 2, 0])
+ >>> labels = torch.LongTensor([0, 3, 4, 0])
+ >>> self = AssignResult(num_gts, gt_inds, max_overlaps, labels)
+ >>> print(str(self)) # xdoctest: +IGNORE_WANT
+
+ >>> # Force addition of gt labels (when adding gt as proposals)
+ >>> new_labels = torch.LongTensor([3, 4, 5])
+ >>> self.add_gt_(new_labels)
+ >>> print(str(self)) # xdoctest: +IGNORE_WANT
+
+ """
+
+ def __init__(self, num_gts, gt_inds, max_overlaps, labels=None):
+ self.num_gts = num_gts
+ self.gt_inds = gt_inds
+ self.max_overlaps = max_overlaps
+ self.labels = labels
+ # Interface for possible user-defined properties
+ self._extra_properties = {}
+
+ @property
+ def num_preds(self):
+ """int: the number of predictions in this assignment"""
+ return len(self.gt_inds)
+
+ def set_extra_property(self, key, value):
+ """Set user-defined new property."""
+ assert key not in self.info
+ self._extra_properties[key] = value
+
+ def get_extra_property(self, key):
+ """Get user-defined property."""
+ return self._extra_properties.get(key, None)
+
+ @property
+ def info(self):
+ """dict: a dictionary of info about the object"""
+ basic_info = {
+ 'num_gts': self.num_gts,
+ 'num_preds': self.num_preds,
+ 'gt_inds': self.gt_inds,
+ 'max_overlaps': self.max_overlaps,
+ 'labels': self.labels,
+ }
+ basic_info.update(self._extra_properties)
+ return basic_info
+
+ def __nice__(self):
+ """str: a "nice" summary string describing this assign result"""
+ parts = []
+ parts.append(f'num_gts={self.num_gts!r}')
+ if self.gt_inds is None:
+ parts.append(f'gt_inds={self.gt_inds!r}')
+ else:
+ parts.append(f'gt_inds.shape={tuple(self.gt_inds.shape)!r}')
+ if self.max_overlaps is None:
+ parts.append(f'max_overlaps={self.max_overlaps!r}')
+ else:
+ parts.append('max_overlaps.shape='
+ f'{tuple(self.max_overlaps.shape)!r}')
+ if self.labels is None:
+ parts.append(f'labels={self.labels!r}')
+ else:
+ parts.append(f'labels.shape={tuple(self.labels.shape)!r}')
+ return ', '.join(parts)
+
+ @classmethod
+ def random(cls, **kwargs):
+ """Create random AssignResult for tests or debugging.
+
+ Args:
+ num_preds: number of predicted boxes
+ num_gts: number of true boxes
+            p_ignore (float): probability of a predicted box assigned to an
+ ignored truth
+ p_assigned (float): probability of a predicted box not being
+ assigned
+ p_use_label (float | bool): with labels or not
+ rng (None | int | numpy.random.RandomState): seed or state
+
+ Returns:
+ :obj:`AssignResult`: Randomly generated assign results.
+
+ Example:
+ >>> from mmdet.core.bbox.assigners.assign_result import * # NOQA
+ >>> self = AssignResult.random()
+ >>> print(self.info)
+ """
+ from mmdet.core.bbox import demodata
+ rng = demodata.ensure_rng(kwargs.get('rng', None))
+
+ num_gts = kwargs.get('num_gts', None)
+ num_preds = kwargs.get('num_preds', None)
+ p_ignore = kwargs.get('p_ignore', 0.3)
+ p_assigned = kwargs.get('p_assigned', 0.7)
+ p_use_label = kwargs.get('p_use_label', 0.5)
+        num_classes = kwargs.get('num_classes', 3)
+
+ if num_gts is None:
+ num_gts = rng.randint(0, 8)
+ if num_preds is None:
+ num_preds = rng.randint(0, 16)
+
+ if num_gts == 0:
+ max_overlaps = torch.zeros(num_preds, dtype=torch.float32)
+ gt_inds = torch.zeros(num_preds, dtype=torch.int64)
+ if p_use_label is True or p_use_label < rng.rand():
+ labels = torch.zeros(num_preds, dtype=torch.int64)
+ else:
+ labels = None
+ else:
+ import numpy as np
+ # Create an overlap for each predicted box
+ max_overlaps = torch.from_numpy(rng.rand(num_preds))
+
+ # Construct gt_inds for each predicted box
+ is_assigned = torch.from_numpy(rng.rand(num_preds) < p_assigned)
+ # maximum number of assignments constraints
+ n_assigned = min(num_preds, min(num_gts, is_assigned.sum()))
+
+ assigned_idxs = np.where(is_assigned)[0]
+ rng.shuffle(assigned_idxs)
+ assigned_idxs = assigned_idxs[0:n_assigned]
+ assigned_idxs.sort()
+
+ is_assigned[:] = 0
+ is_assigned[assigned_idxs] = True
+
+ is_ignore = torch.from_numpy(
+ rng.rand(num_preds) < p_ignore) & is_assigned
+
+ gt_inds = torch.zeros(num_preds, dtype=torch.int64)
+
+ true_idxs = np.arange(num_gts)
+ rng.shuffle(true_idxs)
+ true_idxs = torch.from_numpy(true_idxs)
+ gt_inds[is_assigned] = true_idxs[:n_assigned]
+
+ gt_inds = torch.from_numpy(
+ rng.randint(1, num_gts + 1, size=num_preds))
+ gt_inds[is_ignore] = -1
+ gt_inds[~is_assigned] = 0
+ max_overlaps[~is_assigned] = 0
+
+ if p_use_label is True or p_use_label < rng.rand():
+ if num_classes == 0:
+ labels = torch.zeros(num_preds, dtype=torch.int64)
+ else:
+ labels = torch.from_numpy(
+ # remind that we set FG labels to [0, num_class-1]
+ # since mmdet v2.0
+ # BG cat_id: num_class
+ rng.randint(0, num_classes, size=num_preds))
+ labels[~is_assigned] = 0
+ else:
+ labels = None
+
+ self = cls(num_gts, gt_inds, max_overlaps, labels)
+ return self
+
+ def add_gt_(self, gt_labels):
+ """Add ground truth as assigned results.
+
+ Args:
+ gt_labels (torch.Tensor): Labels of gt boxes
+ """
+ self_inds = torch.arange(
+ 1, len(gt_labels) + 1, dtype=torch.long, device=gt_labels.device)
+ self.gt_inds = torch.cat([self_inds, self.gt_inds])
+
+ self.max_overlaps = torch.cat(
+ [self.max_overlaps.new_ones(len(gt_labels)), self.max_overlaps])
+
+ if self.labels is not None:
+ self.labels = torch.cat([gt_labels, self.labels])
diff --git a/annotator/uniformer/mmdet_null/core/bbox/assigners/atss_assigner.py b/annotator/uniformer/mmdet_null/core/bbox/assigners/atss_assigner.py
new file mode 100644
index 0000000000000000000000000000000000000000..d4fe9d0e3c8704bd780d493eff20a5505dbe9580
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/core/bbox/assigners/atss_assigner.py
@@ -0,0 +1,178 @@
+import torch
+
+from ..builder import BBOX_ASSIGNERS
+from ..iou_calculators import build_iou_calculator
+from .assign_result import AssignResult
+from .base_assigner import BaseAssigner
+
+
+@BBOX_ASSIGNERS.register_module()
+class ATSSAssigner(BaseAssigner):
+ """Assign a corresponding gt bbox or background to each bbox.
+
+    Each proposal will be assigned `0` or a positive integer
+ indicating the ground truth index.
+
+ - 0: negative sample, no assigned gt
+ - positive integer: positive sample, index (1-based) of assigned gt
+
+ Args:
+        topk (int): number of bboxes selected in each level
+ """
+
+ def __init__(self,
+ topk,
+ iou_calculator=dict(type='BboxOverlaps2D'),
+ ignore_iof_thr=-1):
+ self.topk = topk
+ self.iou_calculator = build_iou_calculator(iou_calculator)
+ self.ignore_iof_thr = ignore_iof_thr
+
+ # https://github.com/sfzhang15/ATSS/blob/master/atss_core/modeling/rpn/atss/loss.py
+
+ def assign(self,
+ bboxes,
+ num_level_bboxes,
+ gt_bboxes,
+ gt_bboxes_ignore=None,
+ gt_labels=None):
+ """Assign gt to bboxes.
+
+        The assignment is done in the following steps
+
+        1. compute iou between all bboxes (bboxes of all pyramid levels) and gts
+        2. compute the center distance between all bboxes and gts
+        3. on each pyramid level, for each gt, select the k bboxes whose centers
+           are closest to the gt center, so we select k*l bboxes in total as
+           candidates for each gt
+        4. get the corresponding iou for these candidates, and compute the
+           mean and std; set mean + std as the iou threshold
+        5. select the candidates whose iou is greater than or equal to
+           the threshold as positive
+        6. limit the positive samples' centers to lie inside the gt
+
+
+ Args:
+ bboxes (Tensor): Bounding boxes to be assigned, shape(n, 4).
+ num_level_bboxes (List): num of bboxes in each level
+ gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4).
+ gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are
+ labelled as `ignored`, e.g., crowd boxes in COCO.
+ gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ).
+
+ Returns:
+ :obj:`AssignResult`: The assign result.
+ """
+ INF = 100000000
+ bboxes = bboxes[:, :4]
+ num_gt, num_bboxes = gt_bboxes.size(0), bboxes.size(0)
+
+ # compute iou between all bbox and gt
+ overlaps = self.iou_calculator(bboxes, gt_bboxes)
+
+ # assign 0 by default
+ assigned_gt_inds = overlaps.new_full((num_bboxes, ),
+ 0,
+ dtype=torch.long)
+
+ if num_gt == 0 or num_bboxes == 0:
+ # No ground truth or boxes, return empty assignment
+ max_overlaps = overlaps.new_zeros((num_bboxes, ))
+ if num_gt == 0:
+ # No truth, assign everything to background
+ assigned_gt_inds[:] = 0
+ if gt_labels is None:
+ assigned_labels = None
+ else:
+ assigned_labels = overlaps.new_full((num_bboxes, ),
+ -1,
+ dtype=torch.long)
+ return AssignResult(
+ num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels)
+
+ # compute center distance between all bbox and gt
+ gt_cx = (gt_bboxes[:, 0] + gt_bboxes[:, 2]) / 2.0
+ gt_cy = (gt_bboxes[:, 1] + gt_bboxes[:, 3]) / 2.0
+ gt_points = torch.stack((gt_cx, gt_cy), dim=1)
+
+ bboxes_cx = (bboxes[:, 0] + bboxes[:, 2]) / 2.0
+ bboxes_cy = (bboxes[:, 1] + bboxes[:, 3]) / 2.0
+ bboxes_points = torch.stack((bboxes_cx, bboxes_cy), dim=1)
+
+ distances = (bboxes_points[:, None, :] -
+ gt_points[None, :, :]).pow(2).sum(-1).sqrt()
+
+ if (self.ignore_iof_thr > 0 and gt_bboxes_ignore is not None
+ and gt_bboxes_ignore.numel() > 0 and bboxes.numel() > 0):
+ ignore_overlaps = self.iou_calculator(
+ bboxes, gt_bboxes_ignore, mode='iof')
+ ignore_max_overlaps, _ = ignore_overlaps.max(dim=1)
+ ignore_idxs = ignore_max_overlaps > self.ignore_iof_thr
+ distances[ignore_idxs, :] = INF
+ assigned_gt_inds[ignore_idxs] = -1
+
+ # Selecting candidates based on the center distance
+ candidate_idxs = []
+ start_idx = 0
+ for level, bboxes_per_level in enumerate(num_level_bboxes):
+ # on each pyramid level, for each gt,
+ # select k bbox whose center are closest to the gt center
+ end_idx = start_idx + bboxes_per_level
+ distances_per_level = distances[start_idx:end_idx, :]
+ selectable_k = min(self.topk, bboxes_per_level)
+ _, topk_idxs_per_level = distances_per_level.topk(
+ selectable_k, dim=0, largest=False)
+ candidate_idxs.append(topk_idxs_per_level + start_idx)
+ start_idx = end_idx
+ candidate_idxs = torch.cat(candidate_idxs, dim=0)
+
+ # get the corresponding IoU for these candidates, and compute the
+ # mean and std; set mean + std as the IoU threshold
+ candidate_overlaps = overlaps[candidate_idxs, torch.arange(num_gt)]
+ overlaps_mean_per_gt = candidate_overlaps.mean(0)
+ overlaps_std_per_gt = candidate_overlaps.std(0)
+ overlaps_thr_per_gt = overlaps_mean_per_gt + overlaps_std_per_gt
+
+ is_pos = candidate_overlaps >= overlaps_thr_per_gt[None, :]
+
+ # limit the positive sample's center in gt
+ for gt_idx in range(num_gt):
+ candidate_idxs[:, gt_idx] += gt_idx * num_bboxes
+ ep_bboxes_cx = bboxes_cx.view(1, -1).expand(
+ num_gt, num_bboxes).contiguous().view(-1)
+ ep_bboxes_cy = bboxes_cy.view(1, -1).expand(
+ num_gt, num_bboxes).contiguous().view(-1)
+ candidate_idxs = candidate_idxs.view(-1)
+
+ # calculate the left, top, right, bottom distance between positive
+ # bbox center and gt side
+ l_ = ep_bboxes_cx[candidate_idxs].view(-1, num_gt) - gt_bboxes[:, 0]
+ t_ = ep_bboxes_cy[candidate_idxs].view(-1, num_gt) - gt_bboxes[:, 1]
+ r_ = gt_bboxes[:, 2] - ep_bboxes_cx[candidate_idxs].view(-1, num_gt)
+ b_ = gt_bboxes[:, 3] - ep_bboxes_cy[candidate_idxs].view(-1, num_gt)
+ is_in_gts = torch.stack([l_, t_, r_, b_], dim=1).min(dim=1)[0] > 0.01
+ is_pos = is_pos & is_in_gts
+
+ # if an anchor box is assigned to multiple gts,
+ # the one with the highest IoU will be selected.
+ overlaps_inf = torch.full_like(overlaps,
+ -INF).t().contiguous().view(-1)
+ index = candidate_idxs.view(-1)[is_pos.view(-1)]
+ overlaps_inf[index] = overlaps.t().contiguous().view(-1)[index]
+ overlaps_inf = overlaps_inf.view(num_gt, -1).t()
+
+ max_overlaps, argmax_overlaps = overlaps_inf.max(dim=1)
+ assigned_gt_inds[
+ max_overlaps != -INF] = argmax_overlaps[max_overlaps != -INF] + 1
+
+ if gt_labels is not None:
+ assigned_labels = assigned_gt_inds.new_full((num_bboxes, ), -1)
+ pos_inds = torch.nonzero(
+ assigned_gt_inds > 0, as_tuple=False).squeeze()
+ if pos_inds.numel() > 0:
+ assigned_labels[pos_inds] = gt_labels[
+ assigned_gt_inds[pos_inds] - 1]
+ else:
+ assigned_labels = None
+ return AssignResult(
+ num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels)
diff --git a/annotator/uniformer/mmdet_null/core/bbox/assigners/base_assigner.py b/annotator/uniformer/mmdet_null/core/bbox/assigners/base_assigner.py
new file mode 100644
index 0000000000000000000000000000000000000000..1ff0160dbb4bfbf53cb40d1d5cb29bcc3d197a59
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/core/bbox/assigners/base_assigner.py
@@ -0,0 +1,9 @@
+from abc import ABCMeta, abstractmethod
+
+
+class BaseAssigner(metaclass=ABCMeta):
+ """Base assigner that assigns boxes to ground truth boxes."""
+
+ @abstractmethod
+ def assign(self, bboxes, gt_bboxes, gt_bboxes_ignore=None, gt_labels=None):
+ """Assign boxes to either a ground truth boxes or a negative boxes."""
diff --git a/annotator/uniformer/mmdet_null/core/bbox/assigners/center_region_assigner.py b/annotator/uniformer/mmdet_null/core/bbox/assigners/center_region_assigner.py
new file mode 100644
index 0000000000000000000000000000000000000000..488e3b615318787751cab3211e38dd9471c666be
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/core/bbox/assigners/center_region_assigner.py
@@ -0,0 +1,335 @@
+import torch
+
+from ..builder import BBOX_ASSIGNERS
+from ..iou_calculators import build_iou_calculator
+from .assign_result import AssignResult
+from .base_assigner import BaseAssigner
+
+
+def scale_boxes(bboxes, scale):
+ """Expand an array of boxes by a given scale.
+
+ Args:
+ bboxes (Tensor): Shape (m, 4)
+ scale (float): The scale factor of bboxes
+
+ Returns:
+ (Tensor): Shape (m, 4). Scaled bboxes
+ """
+ assert bboxes.size(1) == 4
+ w_half = (bboxes[:, 2] - bboxes[:, 0]) * .5
+ h_half = (bboxes[:, 3] - bboxes[:, 1]) * .5
+ x_c = (bboxes[:, 2] + bboxes[:, 0]) * .5
+ y_c = (bboxes[:, 3] + bboxes[:, 1]) * .5
+
+ w_half *= scale
+ h_half *= scale
+
+ boxes_scaled = torch.zeros_like(bboxes)
+ boxes_scaled[:, 0] = x_c - w_half
+ boxes_scaled[:, 2] = x_c + w_half
+ boxes_scaled[:, 1] = y_c - h_half
+ boxes_scaled[:, 3] = y_c + h_half
+ return boxes_scaled
+
+
+def is_located_in(points, bboxes):
+ """Are points located in bboxes.
+
+ Args:
+ points (Tensor): Points, shape: (m, 2).
+ bboxes (Tensor): Bounding boxes, shape: (n, 4).
+
+ Return:
+ Tensor: Flags indicating if points are located in bboxes, shape: (m, n).
+ """
+ assert points.size(1) == 2
+ assert bboxes.size(1) == 4
+ return (points[:, 0].unsqueeze(1) > bboxes[:, 0].unsqueeze(0)) & \
+ (points[:, 0].unsqueeze(1) < bboxes[:, 2].unsqueeze(0)) & \
+ (points[:, 1].unsqueeze(1) > bboxes[:, 1].unsqueeze(0)) & \
+ (points[:, 1].unsqueeze(1) < bboxes[:, 3].unsqueeze(0))
+
+
+def bboxes_area(bboxes):
+ """Compute the area of an array of bboxes.
+
+ Args:
+ bboxes (Tensor): The coordinates of bboxes. Shape: (m, 4)
+
+ Returns:
+ Tensor: Area of the bboxes. Shape: (m, )
+ """
+ assert bboxes.size(1) == 4
+ w = (bboxes[:, 2] - bboxes[:, 0])
+ h = (bboxes[:, 3] - bboxes[:, 1])
+ areas = w * h
+ return areas
+
+
+@BBOX_ASSIGNERS.register_module()
+class CenterRegionAssigner(BaseAssigner):
+ """Assign pixels at the center region of a bbox as positive.
+
+ Each proposal will be assigned `-1`, `0`, or a positive integer
+ indicating the ground truth index.
+
+ - -1: ignored sample
+ - 0: negative sample, no assigned gt
+ - positive integer: positive sample, index (1-based) of assigned gt
+
+ Args:
+ pos_scale (float): Scale factor that defines the core (center)
+ region of a gt; priors matching this region are labelled as
+ positive.
+ neg_scale (float): Scale factor that defines the shadowed region
+ of a gt; priors inside it but outside the core region are
+ shadowed (ignored).
+ min_pos_iof (float): Minimum iof of a pixel with a gt to be
+ labelled as positive. Default: 1e-2
+ ignore_gt_scale (float): Threshold within which the pixels
+ are ignored when the gt is labelled as shadowed. Default: 0.5
+ foreground_dominate (bool): If True, the bbox will be assigned as
+ positive when a gt's kernel region overlaps with another's shadowed
+ (ignored) region, otherwise it is set as ignored. Default to False.
+ """
+
+ def __init__(self,
+ pos_scale,
+ neg_scale,
+ min_pos_iof=1e-2,
+ ignore_gt_scale=0.5,
+ foreground_dominate=False,
+ iou_calculator=dict(type='BboxOverlaps2D')):
+ self.pos_scale = pos_scale
+ self.neg_scale = neg_scale
+ self.min_pos_iof = min_pos_iof
+ self.ignore_gt_scale = ignore_gt_scale
+ self.foreground_dominate = foreground_dominate
+ self.iou_calculator = build_iou_calculator(iou_calculator)
+
+ def get_gt_priorities(self, gt_bboxes):
+ """Get gt priorities according to their areas.
+
+ Smaller gt has higher priority.
+
+ Args:
+ gt_bboxes (Tensor): Ground truth boxes, shape (k, 4).
+
+ Returns:
+ Tensor: The priority of gts; gts with larger priority are \
+ more likely to be assigned. Shape (k, )
+ """
+ gt_areas = bboxes_area(gt_bboxes)
+ # Rank all gt bbox areas. Smaller objects have higher priority
+ _, sort_idx = gt_areas.sort(descending=True)
+ sort_idx = sort_idx.argsort()
+ return sort_idx
+
+ def assign(self, bboxes, gt_bboxes, gt_bboxes_ignore=None, gt_labels=None):
+ """Assign gt to bboxes.
+
+ This method assigns gts to every bbox (proposal/anchor); each bbox \
+ will be assigned -1, 0, or a positive number. -1 means ignored, \
+ 0 means negative sample (background), and a positive number is \
+ the index (1-based) of the assigned gt.
+
+ Args:
+ bboxes (Tensor): Bounding boxes to be assigned, shape(n, 4).
+ gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4).
+ gt_bboxes_ignore (tensor, optional): Ground truth bboxes that are
+ labelled as `ignored`, e.g., crowd boxes in COCO.
+ gt_labels (tensor, optional): Label of gt_bboxes, shape (num_gts,).
+
+ Returns:
+ :obj:`AssignResult`: The assigned result. Note that \
+ shadowed_labels of shape (N, 2) is also added as an \
+ `assign_result` attribute. `shadowed_labels` is a tensor \
+ composed of N pairs of [anchor_ind, class_label], where N \
+ is the number of anchors that lie in the outer region of a \
+ gt, anchor_ind is the shadowed anchor index and class_label \
+ is the shadowed class label.
+
+ Example:
+ >>> self = CenterRegionAssigner(0.2, 0.2)
+ >>> bboxes = torch.Tensor([[0, 0, 10, 10], [10, 10, 20, 20]])
+ >>> gt_bboxes = torch.Tensor([[0, 0, 10, 10]])
+ >>> assign_result = self.assign(bboxes, gt_bboxes)
+ >>> expected_gt_inds = torch.LongTensor([1, 0])
+ >>> assert torch.all(assign_result.gt_inds == expected_gt_inds)
+ """
+ # There are in total 5 steps in the pixel assignment
+ # 1. Find core (the center region, say inner 0.2)
+ # and shadow (the relatively outer part, say inner 0.2-0.5)
+ # regions of every gt.
+ # 2. Find all prior bboxes that lie in gt_core and gt_shadow regions
+ # 3. Assign prior bboxes in gt_core with a one-hot id of the gt in
+ # the image.
+ # 3.1. For overlapping objects, the prior bboxes in gt_core are
+ # assigned to the object with the smallest area
+ # 4. Assign prior bboxes with class label according to its gt id.
+ # 4.1. Assign -1 to prior bboxes lying in shadowed gts
+ # 4.2. Assign positive prior boxes with the corresponding label
+ # 5. Find pixels lying in the shadow of an object and assign them with
+ # background label, but set the loss weight of its corresponding
+ # gt to zero.
+ assert bboxes.size(1) == 4, 'bboxes must have size of 4'
+ # 1. Find core positive and shadow region of every gt
+ gt_core = scale_boxes(gt_bboxes, self.pos_scale)
+ gt_shadow = scale_boxes(gt_bboxes, self.neg_scale)
+
+ # 2. Find prior bboxes that lie in gt_core and gt_shadow regions
+ bbox_centers = (bboxes[:, 2:4] + bboxes[:, 0:2]) / 2
+ # The center points lie within the gt boxes
+ is_bbox_in_gt = is_located_in(bbox_centers, gt_bboxes)
+ # Only calculate bbox and gt_core IoF. This enables small prior bboxes
+ # to match large gts
+ bbox_and_gt_core_overlaps = self.iou_calculator(
+ bboxes, gt_core, mode='iof')
+ # The center point of effective priors should be within the gt box
+ is_bbox_in_gt_core = is_bbox_in_gt & (
+ bbox_and_gt_core_overlaps > self.min_pos_iof) # shape (n, k)
+
+ is_bbox_in_gt_shadow = (
+ self.iou_calculator(bboxes, gt_shadow, mode='iof') >
+ self.min_pos_iof)
+ # Rule out center effective positive pixels
+ is_bbox_in_gt_shadow &= (~is_bbox_in_gt_core)
+
+ num_gts, num_bboxes = gt_bboxes.size(0), bboxes.size(0)
+ if num_gts == 0 or num_bboxes == 0:
+ # If no gts exist, assign all pixels to negative
+ assigned_gt_ids = \
+ is_bbox_in_gt_core.new_zeros((num_bboxes,),
+ dtype=torch.long)
+ pixels_in_gt_shadow = assigned_gt_ids.new_empty((0, 2))
+ else:
+ # Step 3: assign a one-hot gt id to each pixel, and smaller objects
+ # have high priority to assign the pixel.
+ sort_idx = self.get_gt_priorities(gt_bboxes)
+ assigned_gt_ids, pixels_in_gt_shadow = \
+ self.assign_one_hot_gt_indices(is_bbox_in_gt_core,
+ is_bbox_in_gt_shadow,
+ gt_priority=sort_idx)
+
+ if gt_bboxes_ignore is not None and gt_bboxes_ignore.numel() > 0:
+ # Ignore priors whose centers fall inside the scaled ignored gts
+ gt_bboxes_ignore = scale_boxes(
+ gt_bboxes_ignore, scale=self.ignore_gt_scale)
+ is_bbox_in_ignored_gts = is_located_in(bbox_centers,
+ gt_bboxes_ignore)
+ is_bbox_in_ignored_gts = is_bbox_in_ignored_gts.any(dim=1)
+ assigned_gt_ids[is_bbox_in_ignored_gts] = -1
+
+ # 4. Assign prior bboxes with class label according to its gt id.
+ assigned_labels = None
+ shadowed_pixel_labels = None
+ if gt_labels is not None:
+ # Default assigned label is the background (-1)
+ assigned_labels = assigned_gt_ids.new_full((num_bboxes, ), -1)
+ pos_inds = torch.nonzero(
+ assigned_gt_ids > 0, as_tuple=False).squeeze()
+ if pos_inds.numel() > 0:
+ assigned_labels[pos_inds] = gt_labels[assigned_gt_ids[pos_inds]
+ - 1]
+ # 5. Find pixels lying in the shadow of an object
+ shadowed_pixel_labels = pixels_in_gt_shadow.clone()
+ if pixels_in_gt_shadow.numel() > 0:
+ pixel_idx, gt_idx =\
+ pixels_in_gt_shadow[:, 0], pixels_in_gt_shadow[:, 1]
+ assert (assigned_gt_ids[pixel_idx] != gt_idx).all(), \
+ 'Some pixels are dually assigned to ignore and gt!'
+ shadowed_pixel_labels[:, 1] = gt_labels[gt_idx - 1]
+ override = (
+ assigned_labels[pixel_idx] == shadowed_pixel_labels[:, 1])
+ if self.foreground_dominate:
+ # When a pixel is both positive and shadowed, set it as pos
+ shadowed_pixel_labels = shadowed_pixel_labels[~override]
+ else:
+ # When a pixel is both pos and shadowed, set it as shadowed
+ assigned_labels[pixel_idx[override]] = -1
+ assigned_gt_ids[pixel_idx[override]] = 0
+
+ assign_result = AssignResult(
+ num_gts, assigned_gt_ids, None, labels=assigned_labels)
+ # Add shadowed_labels as assign_result property. Shape: (num_shadow, 2)
+ assign_result.set_extra_property('shadowed_labels',
+ shadowed_pixel_labels)
+ return assign_result
+
+ def assign_one_hot_gt_indices(self,
+ is_bbox_in_gt_core,
+ is_bbox_in_gt_shadow,
+ gt_priority=None):
+ """Assign only one gt index to each prior box.
+
+ Gts with large gt_priority are more likely to be assigned.
+
+ Args:
+ is_bbox_in_gt_core (Tensor): Bool tensor indicating the bbox center
+ is in the core area of a gt (e.g. 0-0.2).
+ Shape: (num_prior, num_gt).
+ is_bbox_in_gt_shadow (Tensor): Bool tensor indicating the bbox
+ center is in the shadowed area of a gt (e.g. 0.2-0.5).
+ Shape: (num_prior, num_gt).
+ gt_priority (Tensor): Priorities of gts. The gt with a higher
+ priority is more likely to be assigned to the bbox when the bbox
+ matches multiple gts. Shape: (num_gt, ).
+
+ Returns:
+ tuple: Returns (assigned_gt_inds, shadowed_gt_inds).
+
+ - assigned_gt_inds: The assigned gt index of each prior bbox \
+ (i.e. index from 1 to num_gts). Shape: (num_prior, ).
+ - shadowed_gt_inds: shadowed gt indices. It is a tensor of \
+ shape (num_ignore, 2) with first column being the \
+ shadowed prior bbox indices and the second column the \
+ shadowed gt indices (1-based).
+ """
+ num_bboxes, num_gts = is_bbox_in_gt_core.shape
+
+ if gt_priority is None:
+ gt_priority = torch.arange(
+ num_gts, device=is_bbox_in_gt_core.device)
+ assert gt_priority.size(0) == num_gts
+ # The larger the gt_priority, the more preferable the gt is to be assigned
+ # The assigned inds are by default 0 (background)
+ assigned_gt_inds = is_bbox_in_gt_core.new_zeros((num_bboxes, ),
+ dtype=torch.long)
+ # Shadowed bboxes are assigned to be background. But the corresponding
+ # label is ignored during loss calculation, which is done through
+ # shadowed_gt_inds
+ shadowed_gt_inds = torch.nonzero(is_bbox_in_gt_shadow, as_tuple=False)
+ if is_bbox_in_gt_core.sum() == 0: # No gt match
+ shadowed_gt_inds[:, 1] += 1 # 1-based, for consistency with assigned_gt_inds
+ return assigned_gt_inds, shadowed_gt_inds
+
+ # The priority of each prior box and gt pair. If one prior box is
+ # matched to multiple gts, only the pair with the highest priority
+ # is saved
+ pair_priority = is_bbox_in_gt_core.new_full((num_bboxes, num_gts),
+ -1,
+ dtype=torch.long)
+
+ # Each bbox could match with multiple gts.
+ # The following codes deal with this situation
+ # Matched bboxes (to any gt). Shape: (num_pos_anchor, )
+ inds_of_match = torch.any(is_bbox_in_gt_core, dim=1)
+ # The matched gt index of each positive bbox. Length >= num_pos_anchor,
+ # since one bbox could match multiple gts
+ matched_bbox_gt_inds = torch.nonzero(
+ is_bbox_in_gt_core, as_tuple=False)[:, 1]
+ # Assign priority to each bbox-gt pair.
+ pair_priority[is_bbox_in_gt_core] = gt_priority[matched_bbox_gt_inds]
+ _, argmax_priority = pair_priority[inds_of_match].max(dim=1)
+ assigned_gt_inds[inds_of_match] = argmax_priority + 1 # 1-based
+ # Zero-out the assigned anchor box to filter the shadowed gt indices
+ is_bbox_in_gt_core[inds_of_match, argmax_priority] = 0
+ # Concat the shadowed indices due to overlapping with gts outside the
+ # effective scale. Shape: (total_num_ignore, 2)
+ shadowed_gt_inds = torch.cat(
+ (shadowed_gt_inds, torch.nonzero(
+ is_bbox_in_gt_core, as_tuple=False)),
+ dim=0)
+ # `is_bbox_in_gt_core` should be changed back to keep arguments intact.
+ is_bbox_in_gt_core[inds_of_match, argmax_priority] = 1
+ # 1-based shadowed gt indices, to be consistent with `assigned_gt_inds`
+ if shadowed_gt_inds.numel() > 0:
+ shadowed_gt_inds[:, 1] += 1
+ return assigned_gt_inds, shadowed_gt_inds
diff --git a/annotator/uniformer/mmdet_null/core/bbox/assigners/grid_assigner.py b/annotator/uniformer/mmdet_null/core/bbox/assigners/grid_assigner.py
new file mode 100644
index 0000000000000000000000000000000000000000..7390ea6370639c939d578c6ebf0f9268499161bc
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/core/bbox/assigners/grid_assigner.py
@@ -0,0 +1,155 @@
+import torch
+
+from ..builder import BBOX_ASSIGNERS
+from ..iou_calculators import build_iou_calculator
+from .assign_result import AssignResult
+from .base_assigner import BaseAssigner
+
+
+@BBOX_ASSIGNERS.register_module()
+class GridAssigner(BaseAssigner):
+ """Assign a corresponding gt bbox or background to each bbox.
+
+ Each proposal will be assigned `-1`, `0`, or a positive integer
+ indicating the ground truth index.
+
+ - -1: don't care
+ - 0: negative sample, no assigned gt
+ - positive integer: positive sample, index (1-based) of assigned gt
+
+ Args:
+ pos_iou_thr (float): IoU threshold for positive bboxes.
+ neg_iou_thr (float or tuple): IoU threshold for negative bboxes.
+ min_pos_iou (float): Minimum iou for a bbox to be considered as a
+ positive bbox. Positive samples can have smaller IoU than
+ pos_iou_thr due to the 4th step (assign max IoU sample to each gt).
+ gt_max_assign_all (bool): Whether to assign all bboxes with the same
+ highest overlap with some gt to that gt.
+ """
+
+ def __init__(self,
+ pos_iou_thr,
+ neg_iou_thr,
+ min_pos_iou=.0,
+ gt_max_assign_all=True,
+ iou_calculator=dict(type='BboxOverlaps2D')):
+ self.pos_iou_thr = pos_iou_thr
+ self.neg_iou_thr = neg_iou_thr
+ self.min_pos_iou = min_pos_iou
+ self.gt_max_assign_all = gt_max_assign_all
+ self.iou_calculator = build_iou_calculator(iou_calculator)
+
+ def assign(self, bboxes, box_responsible_flags, gt_bboxes, gt_labels=None):
+ """Assign gt to bboxes. The process is very much like the max iou
+ assigner, except that positive samples are constrained within the cell
+ that the gt boxes fall in.
+
+ This method assigns a gt bbox to every bbox (proposal/anchor); each bbox
+ will be assigned -1, 0, or a positive number. -1 means don't care,
+ 0 means negative sample, and a positive number is the index (1-based)
+ of the assigned gt.
+ The assignment is done in the following steps; the order matters.
+
+ 1. assign every bbox to -1
+ 2. assign proposals whose iou with all gts <= neg_iou_thr to 0
+ 3. for each bbox within a cell, if the iou with its nearest gt >
+ pos_iou_thr and the center of that gt falls inside the cell,
+ assign that gt to the bbox
+ 4. for each gt bbox, assign it to its nearest proposal within the
+ cell the gt bbox falls in.
+
+ Args:
+ bboxes (Tensor): Bounding boxes to be assigned, shape(n, 4).
+ box_responsible_flags (Tensor): flag to indicate whether box is
+ responsible for prediction, shape(n, )
+ gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4).
+ gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ).
+
+ Returns:
+ :obj:`AssignResult`: The assign result.
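+
+ Example:
+ A minimal illustrative call (the boxes and flags below are
+ arbitrary placeholders; the result is not asserted here):
+
+ >>> self = GridAssigner(pos_iou_thr=0.5, neg_iou_thr=0.5)
+ >>> bboxes = torch.Tensor([[0, 0, 10, 10], [10, 10, 20, 20]])
+ >>> box_responsible_flags = torch.BoolTensor([True, False])
+ >>> gt_bboxes = torch.Tensor([[0, 0, 10, 10]])
+ >>> assign_result = self.assign(bboxes, box_responsible_flags,
+ ... gt_bboxes)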
+ """
+ num_gts, num_bboxes = gt_bboxes.size(0), bboxes.size(0)
+
+ # compute iou between all gt and bboxes
+ overlaps = self.iou_calculator(gt_bboxes, bboxes)
+
+ # 1. assign -1 by default
+ assigned_gt_inds = overlaps.new_full((num_bboxes, ),
+ -1,
+ dtype=torch.long)
+
+ if num_gts == 0 or num_bboxes == 0:
+ # No ground truth or boxes, return empty assignment
+ max_overlaps = overlaps.new_zeros((num_bboxes, ))
+ if num_gts == 0:
+ # No truth, assign everything to background
+ assigned_gt_inds[:] = 0
+ if gt_labels is None:
+ assigned_labels = None
+ else:
+ assigned_labels = overlaps.new_full((num_bboxes, ),
+ -1,
+ dtype=torch.long)
+ return AssignResult(
+ num_gts,
+ assigned_gt_inds,
+ max_overlaps,
+ labels=assigned_labels)
+
+ # 2. assign negative: below
+ # for each anchor, which gt best overlaps with it
+ # for each anchor, the max iou of all gts
+ # shape of max_overlaps == argmax_overlaps == num_bboxes
+ max_overlaps, argmax_overlaps = overlaps.max(dim=0)
+
+ if isinstance(self.neg_iou_thr, float):
+ assigned_gt_inds[(max_overlaps >= 0)
+ & (max_overlaps <= self.neg_iou_thr)] = 0
+ elif isinstance(self.neg_iou_thr, (tuple, list)):
+ assert len(self.neg_iou_thr) == 2
+ assigned_gt_inds[(max_overlaps > self.neg_iou_thr[0])
+ & (max_overlaps <= self.neg_iou_thr[1])] = 0
+
+ # 3. assign positive: falls into responsible cell and above
+ # positive IOU threshold, the order matters.
+ # the prior condition of comparison is to filter out all
+ # unrelated anchors, i.e. not box_responsible_flags
+ overlaps[:, ~box_responsible_flags.type(torch.bool)] = -1.
+
+ # calculate max_overlaps again, but this time we only consider IOUs
+ # for anchors responsible for prediction
+ max_overlaps, argmax_overlaps = overlaps.max(dim=0)
+
+ # for each gt, which anchor best overlaps with it
+ # for each gt, the max iou of all proposals
+ # shape of gt_max_overlaps == gt_argmax_overlaps == num_gts
+ gt_max_overlaps, gt_argmax_overlaps = overlaps.max(dim=1)
+
+ pos_inds = (max_overlaps >
+ self.pos_iou_thr) & box_responsible_flags.type(torch.bool)
+ assigned_gt_inds[pos_inds] = argmax_overlaps[pos_inds] + 1
+
+ # 4. assign positive to max overlapped anchors within responsible cell
+ for i in range(num_gts):
+ if gt_max_overlaps[i] > self.min_pos_iou:
+ if self.gt_max_assign_all:
+ max_iou_inds = (overlaps[i, :] == gt_max_overlaps[i]) & \
+ box_responsible_flags.type(torch.bool)
+ assigned_gt_inds[max_iou_inds] = i + 1
+ elif box_responsible_flags[gt_argmax_overlaps[i]]:
+ assigned_gt_inds[gt_argmax_overlaps[i]] = i + 1
+
+ # assign labels of positive anchors
+ if gt_labels is not None:
+ assigned_labels = assigned_gt_inds.new_full((num_bboxes, ), -1)
+ pos_inds = torch.nonzero(
+ assigned_gt_inds > 0, as_tuple=False).squeeze()
+ if pos_inds.numel() > 0:
+ assigned_labels[pos_inds] = gt_labels[
+ assigned_gt_inds[pos_inds] - 1]
+
+ else:
+ assigned_labels = None
+
+ return AssignResult(
+ num_gts, assigned_gt_inds, max_overlaps, labels=assigned_labels)
diff --git a/annotator/uniformer/mmdet_null/core/bbox/assigners/hungarian_assigner.py b/annotator/uniformer/mmdet_null/core/bbox/assigners/hungarian_assigner.py
new file mode 100644
index 0000000000000000000000000000000000000000..e10cc14afac4ddfcb9395c1a250ece1fbfe3263c
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/core/bbox/assigners/hungarian_assigner.py
@@ -0,0 +1,145 @@
+import torch
+
+from ..builder import BBOX_ASSIGNERS
+from ..match_costs import build_match_cost
+from ..transforms import bbox_cxcywh_to_xyxy
+from .assign_result import AssignResult
+from .base_assigner import BaseAssigner
+
+try:
+ from scipy.optimize import linear_sum_assignment
+except ImportError:
+ linear_sum_assignment = None
+
+
+@BBOX_ASSIGNERS.register_module()
+class HungarianAssigner(BaseAssigner):
+ """Computes one-to-one matching between predictions and ground truth.
+
+ This class computes an assignment between the targets and the predictions
+ based on the costs. The costs are a weighted sum of three components:
+ classification cost, regression L1 cost and regression IoU cost. The
+ targets don't include the no-object class, so generally there are more
+ predictions than targets. After the one-to-one matching, the unmatched
+ predictions are treated as background. Thus each query prediction will
+ be assigned `0` or a positive integer indicating the ground truth index:
+
+ - 0: negative sample, no assigned gt
+ - positive integer: positive sample, index (1-based) of assigned gt
+
+ Args:
+ cls_cost (dict, optional): Config of the classification cost.
+ Default dict(type='ClassificationCost', weight=1.).
+ reg_cost (dict, optional): Config of the regression L1 cost.
+ Default dict(type='BBoxL1Cost', weight=1.0).
+ iou_cost (dict, optional): Config of the regression iou cost.
+ Default dict(type='IoUCost', iou_mode='giou', weight=1.0).
+ """
+
+ def __init__(self,
+ cls_cost=dict(type='ClassificationCost', weight=1.),
+ reg_cost=dict(type='BBoxL1Cost', weight=1.0),
+ iou_cost=dict(type='IoUCost', iou_mode='giou', weight=1.0)):
+ self.cls_cost = build_match_cost(cls_cost)
+ self.reg_cost = build_match_cost(reg_cost)
+ self.iou_cost = build_match_cost(iou_cost)
+
+ def assign(self,
+ bbox_pred,
+ cls_pred,
+ gt_bboxes,
+ gt_labels,
+ img_meta,
+ gt_bboxes_ignore=None,
+ eps=1e-7):
+ """Computes one-to-one matching based on the weighted costs.
+
+ This method assign each query prediction to a ground truth or
+ background. The `assigned_gt_inds` with -1 means don't care,
+ 0 means negative sample, and a positive number is the index (1-based)
+ of the assigned gt.
+ The assignment is done in the following steps; the order matters.
+
+ 1. assign every prediction to -1
+ 2. compute the weighted costs
+ 3. do Hungarian matching on CPU based on the costs
+ 4. assign all to 0 (background) first, then for each matched pair
+ between predictions and gts, treat this prediction as foreground
+ and assign the corresponding gt index (plus 1) to it.
+
+ Args:
+ bbox_pred (Tensor): Predicted boxes with normalized coordinates
+ (cx, cy, w, h), which are all in range [0, 1]. Shape
+ [num_query, 4].
+ cls_pred (Tensor): Predicted classification logits, shape
+ [num_query, num_class].
+ gt_bboxes (Tensor): Ground truth boxes with unnormalized
+ coordinates (x1, y1, x2, y2). Shape [num_gt, 4].
+ gt_labels (Tensor): Label of `gt_bboxes`, shape (num_gt,).
+ img_meta (dict): Meta information for current image.
+ gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are
+ labelled as `ignored`. Default None.
+ eps (int | float, optional): A value added to the denominator for
+ numerical stability. Default 1e-7.
+
+ Returns:
+ :obj:`AssignResult`: The assigned result.
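+
+ Example:
+ A minimal illustrative call (the shapes and values below are
+ arbitrary placeholders, not taken from any config; scipy is
+ required for the matching and the result is not asserted here):
+
+ >>> self = HungarianAssigner()
+ >>> bbox_pred = torch.rand(4, 4) # normalized (cx, cy, w, h)
+ >>> cls_pred = torch.rand(4, 80)
+ >>> gt_bboxes = torch.Tensor([[10., 10., 50., 60.]])
+ >>> gt_labels = torch.LongTensor([3])
+ >>> img_meta = dict(img_shape=(100, 100, 3))
+ >>> assign_result = self.assign(bbox_pred, cls_pred, gt_bboxes,
+ ... gt_labels, img_meta)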
+ """
+ assert gt_bboxes_ignore is None, \
+ 'Only case when gt_bboxes_ignore is None is supported.'
+ num_gts, num_bboxes = gt_bboxes.size(0), bbox_pred.size(0)
+
+ # 1. assign -1 by default
+ assigned_gt_inds = bbox_pred.new_full((num_bboxes, ),
+ -1,
+ dtype=torch.long)
+ assigned_labels = bbox_pred.new_full((num_bboxes, ),
+ -1,
+ dtype=torch.long)
+ if num_gts == 0 or num_bboxes == 0:
+ # No ground truth or boxes, return empty assignment
+ if num_gts == 0:
+ # No ground truth, assign all to background
+ assigned_gt_inds[:] = 0
+ return AssignResult(
+ num_gts, assigned_gt_inds, None, labels=assigned_labels)
+ img_h, img_w, _ = img_meta['img_shape']
+ factor = gt_bboxes.new_tensor([img_w, img_h, img_w,
+ img_h]).unsqueeze(0)
+
+ # 2. compute the weighted costs
+ # classification and bbox cost.
+ cls_cost = self.cls_cost(cls_pred, gt_labels)
+ # regression L1 cost
+ normalize_gt_bboxes = gt_bboxes / factor
+ reg_cost = self.reg_cost(bbox_pred, normalize_gt_bboxes)
+ # regression iou cost; giou is used by default, as in official DETR.
+ bboxes = bbox_cxcywh_to_xyxy(bbox_pred) * factor
+ iou_cost = self.iou_cost(bboxes, gt_bboxes)
+ # weighted sum of above three costs
+ cost = cls_cost + reg_cost + iou_cost
+
+ # 3. do Hungarian matching on CPU using linear_sum_assignment
+ cost = cost.detach().cpu()
+ if linear_sum_assignment is None:
+ raise ImportError('Please run "pip install scipy" '
+ 'to install scipy first.')
+ matched_row_inds, matched_col_inds = linear_sum_assignment(cost)
+ matched_row_inds = torch.from_numpy(matched_row_inds).to(
+ bbox_pred.device)
+ matched_col_inds = torch.from_numpy(matched_col_inds).to(
+ bbox_pred.device)
+
+ # 4. assign backgrounds and foregrounds
+ # assign all indices to backgrounds first
+ assigned_gt_inds[:] = 0
+ # assign foregrounds based on matching results
+ assigned_gt_inds[matched_row_inds] = matched_col_inds + 1
+ assigned_labels[matched_row_inds] = gt_labels[matched_col_inds]
+ return AssignResult(
+ num_gts, assigned_gt_inds, None, labels=assigned_labels)
diff --git a/annotator/uniformer/mmdet_null/core/bbox/assigners/max_iou_assigner.py b/annotator/uniformer/mmdet_null/core/bbox/assigners/max_iou_assigner.py
new file mode 100644
index 0000000000000000000000000000000000000000..5cf4c4b4b450f87dfb99c3d33d8ed83d3e5cfcb3
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/core/bbox/assigners/max_iou_assigner.py
@@ -0,0 +1,212 @@
+import torch
+
+from ..builder import BBOX_ASSIGNERS
+from ..iou_calculators import build_iou_calculator
+from .assign_result import AssignResult
+from .base_assigner import BaseAssigner
+
+
+@BBOX_ASSIGNERS.register_module()
+class MaxIoUAssigner(BaseAssigner):
+ """Assign a corresponding gt bbox or background to each bbox.
+
+ Each proposal will be assigned `-1`, `0`, or a positive integer
+ indicating the ground truth index.
+
+ - -1: don't care (ignored)
+ - 0: negative sample, no assigned gt
+ - positive integer: positive sample, index (1-based) of assigned gt
+
+ Args:
+ pos_iou_thr (float): IoU threshold for positive bboxes.
+ neg_iou_thr (float or tuple): IoU threshold for negative bboxes.
+ min_pos_iou (float): Minimum iou for a bbox to be considered as a
+ positive bbox. Positive samples can have smaller IoU than
+ pos_iou_thr due to the 4th step (assign max IoU sample to each gt).
+ gt_max_assign_all (bool): Whether to assign all bboxes with the same
+ highest overlap with some gt to that gt.
+ ignore_iof_thr (float): IoF threshold for ignoring bboxes (if
+ `gt_bboxes_ignore` is specified). Negative values mean not
+ ignoring any bboxes.
+ ignore_wrt_candidates (bool): Whether to compute the iof between
+ `bboxes` and `gt_bboxes_ignore`, or the contrary.
+ match_low_quality (bool): Whether to allow low quality matches. This is
+ usually allowed for RPN and single stage detectors, but not allowed
+ in the second stage. Details are demonstrated in Step 4.
+ gpu_assign_thr (int): The upper bound of the number of GT for GPU
+ assign. When the number of gt is above this threshold, will assign
+ on CPU device. Negative values mean not assign on CPU.
+ """
+
+ def __init__(self,
+ pos_iou_thr,
+ neg_iou_thr,
+ min_pos_iou=.0,
+ gt_max_assign_all=True,
+ ignore_iof_thr=-1,
+ ignore_wrt_candidates=True,
+ match_low_quality=True,
+ gpu_assign_thr=-1,
+ iou_calculator=dict(type='BboxOverlaps2D')):
+ self.pos_iou_thr = pos_iou_thr
+ self.neg_iou_thr = neg_iou_thr
+ self.min_pos_iou = min_pos_iou
+ self.gt_max_assign_all = gt_max_assign_all
+ self.ignore_iof_thr = ignore_iof_thr
+ self.ignore_wrt_candidates = ignore_wrt_candidates
+ self.gpu_assign_thr = gpu_assign_thr
+ self.match_low_quality = match_low_quality
+ self.iou_calculator = build_iou_calculator(iou_calculator)
+
+ def assign(self, bboxes, gt_bboxes, gt_bboxes_ignore=None, gt_labels=None):
+ """Assign gt to bboxes.
+
+ This method assigns a gt bbox to every bbox (proposal/anchor); each bbox
+ will be assigned -1, 0, or a positive number. -1 means don't care,
+ 0 means negative sample, and a positive number is the index (1-based)
+ of the assigned gt.
+ The assignment is done in the following steps; the order matters.
+
+ 1. assign every bbox to -1 (the default)
+ 2. assign proposals whose iou with all gts < neg_iou_thr to 0
+ 3. for each bbox, if the iou with its nearest gt >= pos_iou_thr,
+ assign that gt to the bbox
+ 4. for each gt bbox, assign its nearest proposals (may be more than
+ one) to itself
+
+ Args:
+ bboxes (Tensor): Bounding boxes to be assigned, shape(n, 4).
+ gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4).
+ gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are
+ labelled as `ignored`, e.g., crowd boxes in COCO.
+ gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ).
+
+ Returns:
+ :obj:`AssignResult`: The assign result.
+
+ Example:
+ >>> self = MaxIoUAssigner(0.5, 0.5)
+ >>> bboxes = torch.Tensor([[0, 0, 10, 10], [10, 10, 20, 20]])
+ >>> gt_bboxes = torch.Tensor([[0, 0, 10, 9]])
+ >>> assign_result = self.assign(bboxes, gt_bboxes)
+ >>> expected_gt_inds = torch.LongTensor([1, 0])
+ >>> assert torch.all(assign_result.gt_inds == expected_gt_inds)
+ """
+ assign_on_cpu = True if (self.gpu_assign_thr > 0) and (
+ gt_bboxes.shape[0] > self.gpu_assign_thr) else False
+ # compute overlap and assign gt on CPU when number of GT is large
+ if assign_on_cpu:
+ device = bboxes.device
+ bboxes = bboxes.cpu()
+ gt_bboxes = gt_bboxes.cpu()
+ if gt_bboxes_ignore is not None:
+ gt_bboxes_ignore = gt_bboxes_ignore.cpu()
+ if gt_labels is not None:
+ gt_labels = gt_labels.cpu()
+
+ overlaps = self.iou_calculator(gt_bboxes, bboxes)
+
+ if (self.ignore_iof_thr > 0 and gt_bboxes_ignore is not None
+ and gt_bboxes_ignore.numel() > 0 and bboxes.numel() > 0):
+ if self.ignore_wrt_candidates:
+ ignore_overlaps = self.iou_calculator(
+ bboxes, gt_bboxes_ignore, mode='iof')
+ ignore_max_overlaps, _ = ignore_overlaps.max(dim=1)
+ else:
+ ignore_overlaps = self.iou_calculator(
+ gt_bboxes_ignore, bboxes, mode='iof')
+ ignore_max_overlaps, _ = ignore_overlaps.max(dim=0)
+ overlaps[:, ignore_max_overlaps > self.ignore_iof_thr] = -1
+
+ assign_result = self.assign_wrt_overlaps(overlaps, gt_labels)
+ if assign_on_cpu:
+ assign_result.gt_inds = assign_result.gt_inds.to(device)
+ assign_result.max_overlaps = assign_result.max_overlaps.to(device)
+ if assign_result.labels is not None:
+ assign_result.labels = assign_result.labels.to(device)
+ return assign_result
+
+ def assign_wrt_overlaps(self, overlaps, gt_labels=None):
+ """Assign w.r.t. the overlaps of bboxes with gts.
+
+ Args:
+ overlaps (Tensor): Overlaps between k gt_bboxes and n bboxes,
+ shape(k, n).
+ gt_labels (Tensor, optional): Labels of k gt_bboxes, shape (k, ).
+
+ Returns:
+ :obj:`AssignResult`: The assign result.
+ """
+ num_gts, num_bboxes = overlaps.size(0), overlaps.size(1)
+
+ # 1. assign -1 by default
+ assigned_gt_inds = overlaps.new_full((num_bboxes, ),
+ -1,
+ dtype=torch.long)
+
+ if num_gts == 0 or num_bboxes == 0:
+ # No ground truth or boxes, return empty assignment
+ max_overlaps = overlaps.new_zeros((num_bboxes, ))
+ if num_gts == 0:
+ # No truth, assign everything to background
+ assigned_gt_inds[:] = 0
+ if gt_labels is None:
+ assigned_labels = None
+ else:
+ assigned_labels = overlaps.new_full((num_bboxes, ),
+ -1,
+ dtype=torch.long)
+ return AssignResult(
+ num_gts,
+ assigned_gt_inds,
+ max_overlaps,
+ labels=assigned_labels)
+
+ # for each anchor, which gt best overlaps with it
+ # for each anchor, the max iou of all gts
+ max_overlaps, argmax_overlaps = overlaps.max(dim=0)
+ # for each gt, which anchor best overlaps with it
+ # for each gt, the max iou of all proposals
+ gt_max_overlaps, gt_argmax_overlaps = overlaps.max(dim=1)
+
+ # 2. assign negative: below
+ # the negative inds are set to be 0
+ if isinstance(self.neg_iou_thr, float):
+ assigned_gt_inds[(max_overlaps >= 0)
+ & (max_overlaps < self.neg_iou_thr)] = 0
+ elif isinstance(self.neg_iou_thr, tuple):
+ assert len(self.neg_iou_thr) == 2
+ assigned_gt_inds[(max_overlaps >= self.neg_iou_thr[0])
+ & (max_overlaps < self.neg_iou_thr[1])] = 0
+
+ # 3. assign positive: above positive IoU threshold
+ pos_inds = max_overlaps >= self.pos_iou_thr
+ assigned_gt_inds[pos_inds] = argmax_overlaps[pos_inds] + 1
+
+ if self.match_low_quality:
+ # Low-quality matching will overwrite the assigned_gt_inds assigned
+ # in Step 3. Thus, the assigned gt might not be the best one for
+ # prediction.
+ # For example, if bbox A has 0.9 and 0.8 iou with GT bbox 1 & 2,
+ # bbox 1 will be assigned as the best target for bbox A in step 3.
+ # However, if GT bbox 2's gt_argmax_overlaps = A, bbox A's
+ # assigned_gt_inds will be overwritten to be bbox B.
+ # This might be the reason that it is not used in ROI Heads.
+ for i in range(num_gts):
+ if gt_max_overlaps[i] >= self.min_pos_iou:
+ if self.gt_max_assign_all:
+ max_iou_inds = overlaps[i, :] == gt_max_overlaps[i]
+ assigned_gt_inds[max_iou_inds] = i + 1
+ else:
+ assigned_gt_inds[gt_argmax_overlaps[i]] = i + 1
+
+ if gt_labels is not None:
+ assigned_labels = assigned_gt_inds.new_full((num_bboxes, ), -1)
+ pos_inds = torch.nonzero(
+ assigned_gt_inds > 0, as_tuple=False).squeeze()
+ if pos_inds.numel() > 0:
+ assigned_labels[pos_inds] = gt_labels[
+ assigned_gt_inds[pos_inds] - 1]
+ else:
+ assigned_labels = None
+
+ return AssignResult(
+ num_gts, assigned_gt_inds, max_overlaps, labels=assigned_labels)
diff --git a/annotator/uniformer/mmdet_null/core/bbox/assigners/point_assigner.py b/annotator/uniformer/mmdet_null/core/bbox/assigners/point_assigner.py
new file mode 100644
index 0000000000000000000000000000000000000000..fb8f5e4edc63f4851e2067034c5e67a3558f31bc
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/core/bbox/assigners/point_assigner.py
@@ -0,0 +1,133 @@
+import torch
+
+from ..builder import BBOX_ASSIGNERS
+from .assign_result import AssignResult
+from .base_assigner import BaseAssigner
+
+
+@BBOX_ASSIGNERS.register_module()
+class PointAssigner(BaseAssigner):
+ """Assign a corresponding gt bbox or background to each point.
+
+ Each proposal will be assigned `0` or a positive integer
+ indicating the ground truth index.
+
+ - 0: negative sample, no assigned gt
+ - positive integer: positive sample, index (1-based) of assigned gt
+ """
+
+ def __init__(self, scale=4, pos_num=3):
+ self.scale = scale
+ self.pos_num = pos_num
+
+ def assign(self, points, gt_bboxes, gt_bboxes_ignore=None, gt_labels=None):
+ """Assign gt to points.
+
+ This method assigns a gt bbox to every point set; each point set
+ will be assigned 0 (background) or a positive number, which is the
+ index (1-based) of the assigned gt.
+ The assignment is done in the following steps; the order matters.
+
+ 1. assign every point to the background (0)
+ 2. a point is assigned to some gt bbox if
+ (i) the point is among the k closest points to the gt center on
+ the gt's pyramid level, and
+ (ii) the (normalized) distance between this point and the gt is
+ smaller than its distance to any other gt
+
+ Args:
+ points (Tensor): points to be assigned, shape(n, 3) while last
+ dimension stands for (x, y, stride).
+ gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4).
+ gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are
+ labelled as `ignored`, e.g., crowd boxes in COCO.
+ NOTE: currently unused.
+ gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ).
+
+ Returns:
+ :obj:`AssignResult`: The assign result.
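+
+ Example:
+ A minimal illustrative call (the point coordinates, stride and
+ pos_num below are arbitrary placeholders; the result is not
+ asserted here):
+
+ >>> self = PointAssigner(scale=4, pos_num=1)
+ >>> points = torch.Tensor([[8., 8., 8.], [24., 24., 8.]])
+ >>> gt_bboxes = torch.Tensor([[0., 0., 16., 16.]])
+ >>> assign_result = self.assign(points, gt_bboxes)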
+ """
+ num_points = points.shape[0]
+ num_gts = gt_bboxes.shape[0]
+
+ if num_gts == 0 or num_points == 0:
+ # If no truth assign everything to the background
+ assigned_gt_inds = points.new_full((num_points, ),
+ 0,
+ dtype=torch.long)
+ if gt_labels is None:
+ assigned_labels = None
+ else:
+ assigned_labels = points.new_full((num_points, ),
+ -1,
+ dtype=torch.long)
+ return AssignResult(
+ num_gts, assigned_gt_inds, None, labels=assigned_labels)
+
+ points_xy = points[:, :2]
+ points_stride = points[:, 2]
+ points_lvl = torch.log2(
+ points_stride).int() # [3...,4...,5...,6...,7...]
+ lvl_min, lvl_max = points_lvl.min(), points_lvl.max()
+
+ # assign gt box
+ gt_bboxes_xy = (gt_bboxes[:, :2] + gt_bboxes[:, 2:]) / 2
+ gt_bboxes_wh = (gt_bboxes[:, 2:] - gt_bboxes[:, :2]).clamp(min=1e-6)
+ scale = self.scale
+ gt_bboxes_lvl = ((torch.log2(gt_bboxes_wh[:, 0] / scale) +
+ torch.log2(gt_bboxes_wh[:, 1] / scale)) / 2).int()
+ gt_bboxes_lvl = torch.clamp(gt_bboxes_lvl, min=lvl_min, max=lvl_max)
+
+ # stores the assigned gt index of each point
+ assigned_gt_inds = points.new_zeros((num_points, ), dtype=torch.long)
+ # stores the assigned gt dist (to this point) of each point
+ assigned_gt_dist = points.new_full((num_points, ), float('inf'))
+ points_range = torch.arange(points.shape[0])
+
+ for idx in range(num_gts):
+ gt_lvl = gt_bboxes_lvl[idx]
+ # get the index of points in this level
+ lvl_idx = gt_lvl == points_lvl
+ points_index = points_range[lvl_idx]
+ # get the points in this level
+ lvl_points = points_xy[lvl_idx, :]
+ # get the center point of gt
+ gt_point = gt_bboxes_xy[[idx], :]
+ # get width and height of gt
+ gt_wh = gt_bboxes_wh[[idx], :]
+ # compute the distance between gt center and
+ # all points in this level
+ points_gt_dist = ((lvl_points - gt_point) / gt_wh).norm(dim=1)
+ # find the nearest k points to gt center in this level
+ min_dist, min_dist_index = torch.topk(
+ points_gt_dist, self.pos_num, largest=False)
+ # the index of nearest k points to gt center in this level
+ min_dist_points_index = points_index[min_dist_index]
+ # The less_than_recorded_index stores the index
+ # of min_dist that is less than the assigned_gt_dist, where
+ # assigned_gt_dist stores the dist from the previously assigned gt
+ # (if it exists) to each point.
+ less_than_recorded_index = min_dist < assigned_gt_dist[
+ min_dist_points_index]
+ # The min_dist_points_index stores the index of points satisfy:
+ # (1) it is k nearest to current gt center in this level.
+ # (2) it is closer to current gt center than other gt center.
+ min_dist_points_index = min_dist_points_index[
+ less_than_recorded_index]
+ # assign the result
+ assigned_gt_inds[min_dist_points_index] = idx + 1
+ assigned_gt_dist[min_dist_points_index] = min_dist[
+ less_than_recorded_index]
+
+ if gt_labels is not None:
+ assigned_labels = assigned_gt_inds.new_full((num_points, ), -1)
+ pos_inds = torch.nonzero(
+ assigned_gt_inds > 0, as_tuple=False).squeeze()
+ if pos_inds.numel() > 0:
+ assigned_labels[pos_inds] = gt_labels[
+ assigned_gt_inds[pos_inds] - 1]
+ else:
+ assigned_labels = None
+
+ return AssignResult(
+ num_gts, assigned_gt_inds, None, labels=assigned_labels)
diff --git a/annotator/uniformer/mmdet_null/core/bbox/assigners/region_assigner.py b/annotator/uniformer/mmdet_null/core/bbox/assigners/region_assigner.py
new file mode 100644
index 0000000000000000000000000000000000000000..dd7d4326b31f0b637018159a31a68c0303afd06b
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/core/bbox/assigners/region_assigner.py
@@ -0,0 +1,221 @@
+import torch
+
+from annotator.uniformer.mmdet.core import anchor_inside_flags
+from ..builder import BBOX_ASSIGNERS
+from .assign_result import AssignResult
+from .base_assigner import BaseAssigner
+
+
+def calc_region(bbox, ratio, stride, featmap_size=None):
+ """Calculate region of the box defined by the ratio, the ratio is from the
+ center of the box to every edge."""
+ # project bbox on the feature
+ f_bbox = bbox / stride
+ x1 = torch.round((1 - ratio) * f_bbox[0] + ratio * f_bbox[2])
+ y1 = torch.round((1 - ratio) * f_bbox[1] + ratio * f_bbox[3])
+ x2 = torch.round(ratio * f_bbox[0] + (1 - ratio) * f_bbox[2])
+ y2 = torch.round(ratio * f_bbox[1] + (1 - ratio) * f_bbox[3])
+ if featmap_size is not None:
+ x1 = x1.clamp(min=0, max=featmap_size[1])
+ y1 = y1.clamp(min=0, max=featmap_size[0])
+ x2 = x2.clamp(min=0, max=featmap_size[1])
+ y2 = y2.clamp(min=0, max=featmap_size[0])
+ return (x1, y1, x2, y2)
+
+
+def anchor_ctr_inside_region_flags(anchors, stride, region):
+ """Get the flag indicate whether anchor centers are inside regions."""
+ x1, y1, x2, y2 = region
+ f_anchors = anchors / stride
+ x = (f_anchors[:, 0] + f_anchors[:, 2]) * 0.5
+ y = (f_anchors[:, 1] + f_anchors[:, 3]) * 0.5
+ flags = (x >= x1) & (x <= x2) & (y >= y1) & (y <= y2)
+ return flags
+
+
+@BBOX_ASSIGNERS.register_module()
+class RegionAssigner(BaseAssigner):
+ """Assign a corresponding gt bbox or background to each bbox.
+
+ Each proposal will be assigned `-1`, `0`, or a positive integer
+ indicating the ground truth index.
+
+ - -1: don't care
+ - 0: negative sample, no assigned gt
+ - positive integer: positive sample, index (1-based) of assigned gt
+
+ Args:
+ center_ratio (float): Ratio of the central region of the bbox used
+ to define positive samples.
+ ignore_ratio (float): Ratio of the region used to define ignored
+ samples.
+ """
+
+ def __init__(self, center_ratio=0.2, ignore_ratio=0.5):
+ self.center_ratio = center_ratio
+ self.ignore_ratio = ignore_ratio
+
+ def assign(self,
+ mlvl_anchors,
+ mlvl_valid_flags,
+ gt_bboxes,
+ img_meta,
+ featmap_sizes,
+ anchor_scale,
+ anchor_strides,
+ gt_bboxes_ignore=None,
+ gt_labels=None,
+ allowed_border=0):
+ """Assign gt to anchors.
+
+ This method assigns a gt bbox to every bbox (proposal/anchor); each bbox
+ will be assigned -1, 0, or a positive number. -1 means don't care,
+ 0 means negative sample, and a positive number is the index (1-based)
+ of the assigned gt.
+ The assignment is done in the following steps; the order matters.
+
+ 1. Assign every anchor to 0 (negative)
+ For each gt_bboxes:
+ 2. Compute ignore flags based on ignore_region then
+ assign -1 to anchors w.r.t. ignore flags
+ 3. Compute pos flags based on center_region then
+ assign gt_bboxes to anchors w.r.t. pos flags
+ 4. Compute ignore flags based on adjacent anchor lvl then
+ assign -1 to anchors w.r.t. ignore flags
+ 5. Assign anchors outside of the image to -1
+
+ Args:
+ mlvl_anchors (list[Tensor]): Multi level anchors.
+ mlvl_valid_flags (list[Tensor]): Multi level valid flags.
+ gt_bboxes (Tensor): Ground truth boxes of the image, shape (k, 4).
+ img_meta (dict): Meta info of the image.
+ featmap_sizes (list[tuple]): Feature map size of each level.
+ anchor_scale (int): Scale of the anchor.
+ anchor_strides (list[int]): Stride of the anchor on each level.
+ gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are
+ labelled as `ignored`, e.g., crowd boxes in COCO.
+ gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ).
+ allowed_border (int, optional): The border to allow the valid
+ anchor. Defaults to 0.
+
+ Returns:
+ :obj:`AssignResult`: The assign result.
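+
+ Example:
+ A minimal single-level illustrative call (the anchors, feature map
+ size, scale and strides below are arbitrary placeholders; the
+ result is not asserted here):
+
+ >>> self = RegionAssigner()
+ >>> mlvl_anchors = [torch.Tensor([[0., 0., 8., 8.],
+ ... [8., 0., 16., 8.],
+ ... [0., 8., 8., 16.],
+ ... [8., 8., 16., 16.]])]
+ >>> mlvl_valid_flags = [torch.BoolTensor([True] * 4)]
+ >>> gt_bboxes = torch.Tensor([[2., 2., 14., 14.]])
+ >>> img_meta = dict(img_shape=(16, 16, 3))
+ >>> featmap_sizes = [(2, 2)]
+ >>> assign_result = self.assign(mlvl_anchors, mlvl_valid_flags,
+ ... gt_bboxes, img_meta, featmap_sizes,
+ ... anchor_scale=4, anchor_strides=[8])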
+ """
+ if gt_bboxes_ignore is not None:
+ raise NotImplementedError
+
+ num_gts = gt_bboxes.shape[0]
+ num_bboxes = sum(x.shape[0] for x in mlvl_anchors)
+
+ if num_gts == 0 or num_bboxes == 0:
+ # No ground truth or boxes, return empty assignment
+ max_overlaps = gt_bboxes.new_zeros((num_bboxes, ))
+ assigned_gt_inds = gt_bboxes.new_zeros((num_bboxes, ),
+ dtype=torch.long)
+ if gt_labels is None:
+ assigned_labels = None
+ else:
+ assigned_labels = gt_bboxes.new_full((num_bboxes, ),
+ -1,
+ dtype=torch.long)
+ return AssignResult(
+ num_gts,
+ assigned_gt_inds,
+ max_overlaps,
+ labels=assigned_labels)
+
+ num_lvls = len(mlvl_anchors)
+ r1 = (1 - self.center_ratio) / 2
+ r2 = (1 - self.ignore_ratio) / 2
+
+ scale = torch.sqrt((gt_bboxes[:, 2] - gt_bboxes[:, 0]) *
+ (gt_bboxes[:, 3] - gt_bboxes[:, 1]))
+ min_anchor_size = scale.new_full(
+ (1, ), float(anchor_scale * anchor_strides[0]))
+ target_lvls = torch.floor(
+ torch.log2(scale) - torch.log2(min_anchor_size) + 0.5)
+ target_lvls = target_lvls.clamp(min=0, max=num_lvls - 1).long()
+
+ # 1. assign 0 (negative) by default
+ mlvl_assigned_gt_inds = []
+ mlvl_ignore_flags = []
+ for lvl in range(num_lvls):
+ h, w = featmap_sizes[lvl]
+ assert h * w == mlvl_anchors[lvl].shape[0]
+ assigned_gt_inds = gt_bboxes.new_full((h * w, ),
+ 0,
+ dtype=torch.long)
+ ignore_flags = torch.zeros_like(assigned_gt_inds)
+ mlvl_assigned_gt_inds.append(assigned_gt_inds)
+ mlvl_ignore_flags.append(ignore_flags)
+
+ for gt_id in range(num_gts):
+ lvl = target_lvls[gt_id].item()
+ featmap_size = featmap_sizes[lvl]
+ stride = anchor_strides[lvl]
+ anchors = mlvl_anchors[lvl]
+ gt_bbox = gt_bboxes[gt_id, :4]
+
+ # Compute regions
+ ignore_region = calc_region(gt_bbox, r2, stride, featmap_size)
+ ctr_region = calc_region(gt_bbox, r1, stride, featmap_size)
+
+ # 2. Assign -1 to ignore flags
+ ignore_flags = anchor_ctr_inside_region_flags(
+ anchors, stride, ignore_region)
+ mlvl_assigned_gt_inds[lvl][ignore_flags] = -1
+
+ # 3. Assign gt_bboxes to pos flags
+ pos_flags = anchor_ctr_inside_region_flags(anchors, stride,
+ ctr_region)
+ mlvl_assigned_gt_inds[lvl][pos_flags] = gt_id + 1
+
+ # 4. Assign -1 to ignore adjacent lvl
+ if lvl > 0:
+ d_lvl = lvl - 1
+ d_anchors = mlvl_anchors[d_lvl]
+ d_featmap_size = featmap_sizes[d_lvl]
+ d_stride = anchor_strides[d_lvl]
+ d_ignore_region = calc_region(gt_bbox, r2, d_stride,
+ d_featmap_size)
+ ignore_flags = anchor_ctr_inside_region_flags(
+ d_anchors, d_stride, d_ignore_region)
+ mlvl_ignore_flags[d_lvl][ignore_flags] = 1
+ if lvl < num_lvls - 1:
+ u_lvl = lvl + 1
+ u_anchors = mlvl_anchors[u_lvl]
+ u_featmap_size = featmap_sizes[u_lvl]
+ u_stride = anchor_strides[u_lvl]
+ u_ignore_region = calc_region(gt_bbox, r2, u_stride,
+ u_featmap_size)
+ ignore_flags = anchor_ctr_inside_region_flags(
+ u_anchors, u_stride, u_ignore_region)
+ mlvl_ignore_flags[u_lvl][ignore_flags] = 1
+
+ # 4. (cont.) Assign -1 to ignore adjacent lvl
+ for lvl in range(num_lvls):
+ ignore_flags = mlvl_ignore_flags[lvl]
+ mlvl_assigned_gt_inds[lvl][ignore_flags] = -1
+
+ # 5. Assign -1 to anchor outside of image
+ flat_assigned_gt_inds = torch.cat(mlvl_assigned_gt_inds)
+ flat_anchors = torch.cat(mlvl_anchors)
+ flat_valid_flags = torch.cat(mlvl_valid_flags)
+ assert (flat_assigned_gt_inds.shape[0] == flat_anchors.shape[0] ==
+ flat_valid_flags.shape[0])
+ inside_flags = anchor_inside_flags(flat_anchors, flat_valid_flags,
+ img_meta['img_shape'],
+ allowed_border)
+ outside_flags = ~inside_flags
+ flat_assigned_gt_inds[outside_flags] = -1
+
+ if gt_labels is not None:
+ assigned_labels = torch.zeros_like(flat_assigned_gt_inds)
+ # The positive mask must be computed on the flattened assignment so
+ # its shape matches `assigned_labels`
+ pos_flags = flat_assigned_gt_inds > 0
+ assigned_labels[pos_flags] = gt_labels[
+ flat_assigned_gt_inds[pos_flags] - 1]
+ else:
+ assigned_labels = None
+
+ return AssignResult(
+ num_gts, flat_assigned_gt_inds, None, labels=assigned_labels)
diff --git a/annotator/uniformer/mmdet_null/core/bbox/builder.py b/annotator/uniformer/mmdet_null/core/bbox/builder.py
new file mode 100644
index 0000000000000000000000000000000000000000..a1fefe863c6959f78dd48a0682b2ae05e7a672cf
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/core/bbox/builder.py
@@ -0,0 +1,20 @@
+from annotator.uniformer.mmcv.utils import Registry, build_from_cfg
+
+BBOX_ASSIGNERS = Registry('bbox_assigner')
+BBOX_SAMPLERS = Registry('bbox_sampler')
+BBOX_CODERS = Registry('bbox_coder')
+
+
+def build_assigner(cfg, **default_args):
+ """Builder of box assigner."""
+ return build_from_cfg(cfg, BBOX_ASSIGNERS, default_args)
+
+
+def build_sampler(cfg, **default_args):
+ """Builder of box sampler."""
+ return build_from_cfg(cfg, BBOX_SAMPLERS, default_args)
+
+
+def build_bbox_coder(cfg, **default_args):
+ """Builder of box coder."""
+ return build_from_cfg(cfg, BBOX_CODERS, default_args)
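+
+
+# Illustrative usage sketch, kept as a comment so nothing runs at import
+# time. The config values below are arbitrary examples (not defaults from
+# any particular model config), and the referenced types must already be
+# registered, i.e. their modules imported, before building:
+#
+#   assigner = build_assigner(
+#       dict(type='MaxIoUAssigner', pos_iou_thr=0.5, neg_iou_thr=0.4))
+#   coder = build_bbox_coder(
+#       dict(type='DeltaXYWHBBoxCoder',
+#            target_means=[0., 0., 0., 0.],
+#            target_stds=[1., 1., 1., 1.]))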
diff --git a/annotator/uniformer/mmdet_null/core/bbox/coder/__init__.py b/annotator/uniformer/mmdet_null/core/bbox/coder/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..ae455ba8fc0e0727e2d581cdc8f20fceededf99a
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/core/bbox/coder/__init__.py
@@ -0,0 +1,13 @@
+from .base_bbox_coder import BaseBBoxCoder
+from .bucketing_bbox_coder import BucketingBBoxCoder
+from .delta_xywh_bbox_coder import DeltaXYWHBBoxCoder
+from .legacy_delta_xywh_bbox_coder import LegacyDeltaXYWHBBoxCoder
+from .pseudo_bbox_coder import PseudoBBoxCoder
+from .tblr_bbox_coder import TBLRBBoxCoder
+from .yolo_bbox_coder import YOLOBBoxCoder
+
+__all__ = [
+ 'BaseBBoxCoder', 'PseudoBBoxCoder', 'DeltaXYWHBBoxCoder',
+ 'LegacyDeltaXYWHBBoxCoder', 'TBLRBBoxCoder', 'YOLOBBoxCoder',
+ 'BucketingBBoxCoder'
+]
diff --git a/annotator/uniformer/mmdet_null/core/bbox/coder/base_bbox_coder.py b/annotator/uniformer/mmdet_null/core/bbox/coder/base_bbox_coder.py
new file mode 100644
index 0000000000000000000000000000000000000000..cf0b34c7cc2fe561718b0c884990beb40a993643
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/core/bbox/coder/base_bbox_coder.py
@@ -0,0 +1,17 @@
+from abc import ABCMeta, abstractmethod
+
+
+class BaseBBoxCoder(metaclass=ABCMeta):
+ """Base bounding box coder."""
+
+ def __init__(self, **kwargs):
+ pass
+
+ @abstractmethod
+ def encode(self, bboxes, gt_bboxes):
+ """Encode deltas between bboxes and ground truth boxes."""
+
+ @abstractmethod
+ def decode(self, bboxes, bboxes_pred):
+ """Decode the predicted bboxes according to prediction and base
+ boxes."""
diff --git a/annotator/uniformer/mmdet_null/core/bbox/coder/bucketing_bbox_coder.py b/annotator/uniformer/mmdet_null/core/bbox/coder/bucketing_bbox_coder.py
new file mode 100644
index 0000000000000000000000000000000000000000..21763f153539eb9567b94f0ca1c088a77db719a5
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/core/bbox/coder/bucketing_bbox_coder.py
@@ -0,0 +1,350 @@
+import annotator.uniformer.mmcv as mmcv
+import numpy as np
+import torch
+import torch.nn.functional as F
+
+from ..builder import BBOX_CODERS
+from ..transforms import bbox_rescale
+from .base_bbox_coder import BaseBBoxCoder
+
+
+@BBOX_CODERS.register_module()
+class BucketingBBoxCoder(BaseBBoxCoder):
+ """Bucketing BBox Coder for Side-Aware Boundary Localization (SABL).
+
+ Boundary Localization with Bucketing and Bucketing Guided Rescoring
+ are implemented here.
+
+ Please refer to https://arxiv.org/abs/1912.04260 for more details.
+
+ Args:
+ num_buckets (int): Number of buckets.
+ scale_factor (float): Scale factor of proposals to generate buckets.
+ offset_topk (int): Topk buckets are used to generate
+ bucket fine regression targets. Defaults to 2.
+ offset_upperbound (float): Offset upperbound to generate
+ bucket fine regression targets.
+ To avoid too large offset displacements. Defaults to 1.0.
+ cls_ignore_neighbor (bool): Whether to ignore the second nearest
+ bucket. Defaults to True.
+ clip_border (bool, optional): Whether to clip the objects outside the
+ border of the image. Defaults to True.
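+
+ Example:
+ A minimal encoding sketch (the proposal and gt values are arbitrary
+ placeholders and the outputs are not asserted here):
+
+ >>> coder = BucketingBBoxCoder(num_buckets=14, scale_factor=1.7)
+ >>> proposals = torch.Tensor([[0., 0., 20., 20.]])
+ >>> gt = torch.Tensor([[2., 2., 18., 18.]])
+ >>> offsets, offset_weights, labels, cls_weights = coder.encode(
+ ... proposals, gt)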
+ """
+
+ def __init__(self,
+ num_buckets,
+ scale_factor,
+ offset_topk=2,
+ offset_upperbound=1.0,
+ cls_ignore_neighbor=True,
+ clip_border=True):
+ super(BucketingBBoxCoder, self).__init__()
+ self.num_buckets = num_buckets
+ self.scale_factor = scale_factor
+ self.offset_topk = offset_topk
+ self.offset_upperbound = offset_upperbound
+ self.cls_ignore_neighbor = cls_ignore_neighbor
+ self.clip_border = clip_border
+
+ def encode(self, bboxes, gt_bboxes):
+ """Get bucketing estimation and fine regression targets during
+ training.
+
+ Args:
+ bboxes (torch.Tensor): source boxes, e.g., object proposals.
+ gt_bboxes (torch.Tensor): target of the transformation, e.g.,
+ ground truth boxes.
+
+ Returns:
+ encoded_bboxes (tuple[Tensor]): Bucketing estimation and fine
+ regression targets and weights.
+ """
+
+ assert bboxes.size(0) == gt_bboxes.size(0)
+ assert bboxes.size(-1) == gt_bboxes.size(-1) == 4
+ encoded_bboxes = bbox2bucket(bboxes, gt_bboxes, self.num_buckets,
+ self.scale_factor, self.offset_topk,
+ self.offset_upperbound,
+ self.cls_ignore_neighbor)
+ return encoded_bboxes
+
+ def decode(self, bboxes, pred_bboxes, max_shape=None):
+ """Apply transformation `pred_bboxes` to `boxes`.
+ Args:
+ boxes (torch.Tensor): Basic boxes.
+ pred_bboxes (torch.Tensor): Predictions for bucketing estimation
+ and fine regression
+ max_shape (tuple[int], optional): Maximum shape of boxes.
+ Defaults to None.
+
+ Returns:
+ torch.Tensor: Decoded boxes.
+ """
+ assert len(pred_bboxes) == 2
+ cls_preds, offset_preds = pred_bboxes
+ assert cls_preds.size(0) == bboxes.size(0) and offset_preds.size(
+ 0) == bboxes.size(0)
+ decoded_bboxes = bucket2bbox(bboxes, cls_preds, offset_preds,
+ self.num_buckets, self.scale_factor,
+ max_shape, self.clip_border)
+
+ return decoded_bboxes
+
+
+@mmcv.jit(coderize=True)
+def generat_buckets(proposals, num_buckets, scale_factor=1.0):
+ """Generate buckets w.r.t bucket number and scale factor of proposals.
+
+ Args:
+ proposals (Tensor): Shape (n, 4)
+ num_buckets (int): Number of buckets.
+ scale_factor (float): Scale factor to rescale proposals.
+
+ Returns:
+ tuple[Tensor]: (bucket_w, bucket_h, l_buckets, r_buckets,
+ t_buckets, d_buckets)
+
+ - bucket_w: Width of buckets on x-axis. Shape (n, ).
+ - bucket_h: Height of buckets on y-axis. Shape (n, ).
+ - l_buckets: Left buckets. Shape (n, ceil(side_num/2)).
+ - r_buckets: Right buckets. Shape (n, ceil(side_num/2)).
+ - t_buckets: Top buckets. Shape (n, ceil(side_num/2)).
+ - d_buckets: Down buckets. Shape (n, ceil(side_num/2)).
+ """
+ proposals = bbox_rescale(proposals, scale_factor)
+
+ # number of buckets in each side
+ side_num = int(np.ceil(num_buckets / 2.0))
+ pw = proposals[..., 2] - proposals[..., 0]
+ ph = proposals[..., 3] - proposals[..., 1]
+ px1 = proposals[..., 0]
+ py1 = proposals[..., 1]
+ px2 = proposals[..., 2]
+ py2 = proposals[..., 3]
+
+ bucket_w = pw / num_buckets
+ bucket_h = ph / num_buckets
+
+ # left buckets
+ l_buckets = px1[:, None] + (0.5 + torch.arange(
+ 0, side_num).to(proposals).float())[None, :] * bucket_w[:, None]
+ # right buckets
+ r_buckets = px2[:, None] - (0.5 + torch.arange(
+ 0, side_num).to(proposals).float())[None, :] * bucket_w[:, None]
+ # top buckets
+ t_buckets = py1[:, None] + (0.5 + torch.arange(
+ 0, side_num).to(proposals).float())[None, :] * bucket_h[:, None]
+ # down buckets
+ d_buckets = py2[:, None] - (0.5 + torch.arange(
+ 0, side_num).to(proposals).float())[None, :] * bucket_h[:, None]
+ return bucket_w, bucket_h, l_buckets, r_buckets, t_buckets, d_buckets
+
+
+@mmcv.jit(coderize=True)
+def bbox2bucket(proposals,
+ gt,
+ num_buckets,
+ scale_factor,
+ offset_topk=2,
+ offset_upperbound=1.0,
+ cls_ignore_neighbor=True):
+ """Generate buckets estimation and fine regression targets.
+
+ Args:
+ proposals (Tensor): Shape (n, 4)
+ gt (Tensor): Shape (n, 4)
+ num_buckets (int): Number of buckets.
+ scale_factor (float): Scale factor to rescale proposals.
+ offset_topk (int): Topk buckets are used to generate
+ bucket fine regression targets. Defaults to 2.
+ offset_upperbound (float): Offset allowance to generate
+ bucket fine regression targets.
+ To avoid too large offset displacements. Defaults to 1.0.
+ cls_ignore_neighbor (bool): Ignore second nearest bucket or Not.
+ Defaults to True.
+
+ Returns:
+ tuple[Tensor]: (offsets, offsets_weights, bucket_labels, cls_weights).
+
+ - offsets: Fine regression targets. \
+ Shape (n, num_buckets*2).
+ - offsets_weights: Fine regression weights. \
+ Shape (n, num_buckets*2).
+ - bucket_labels: Bucketing estimation labels. \
+ Shape (n, num_buckets*2).
+ - cls_weights: Bucketing estimation weights. \
+ Shape (n, num_buckets*2).
+ """
+ assert proposals.size() == gt.size()
+
+ # generate buckets
+ proposals = proposals.float()
+ gt = gt.float()
+ (bucket_w, bucket_h, l_buckets, r_buckets, t_buckets,
+ d_buckets) = generate_buckets(proposals, num_buckets, scale_factor)
+
+ gx1 = gt[..., 0]
+ gy1 = gt[..., 1]
+ gx2 = gt[..., 2]
+ gy2 = gt[..., 3]
+
+ # generate offset targets and weights
+ # offsets from buckets to gts
+ l_offsets = (l_buckets - gx1[:, None]) / bucket_w[:, None]
+ r_offsets = (r_buckets - gx2[:, None]) / bucket_w[:, None]
+ t_offsets = (t_buckets - gy1[:, None]) / bucket_h[:, None]
+ d_offsets = (d_buckets - gy2[:, None]) / bucket_h[:, None]
+
+ # select the top-k nearest buckets
+ l_topk, l_label = l_offsets.abs().topk(
+ offset_topk, dim=1, largest=False, sorted=True)
+ r_topk, r_label = r_offsets.abs().topk(
+ offset_topk, dim=1, largest=False, sorted=True)
+ t_topk, t_label = t_offsets.abs().topk(
+ offset_topk, dim=1, largest=False, sorted=True)
+ d_topk, d_label = d_offsets.abs().topk(
+ offset_topk, dim=1, largest=False, sorted=True)
+
+ offset_l_weights = l_offsets.new_zeros(l_offsets.size())
+ offset_r_weights = r_offsets.new_zeros(r_offsets.size())
+ offset_t_weights = t_offsets.new_zeros(t_offsets.size())
+ offset_d_weights = d_offsets.new_zeros(d_offsets.size())
+ inds = torch.arange(0, proposals.size(0)).to(proposals).long()
+
+ # generate offset weights of the top-k nearest buckets
+ for k in range(offset_topk):
+ if k >= 1:
+ offset_l_weights[inds, l_label[:, k]] = (
+ l_topk[:, k] < offset_upperbound).float()
+ offset_r_weights[inds, r_label[:, k]] = (
+ r_topk[:, k] < offset_upperbound).float()
+ offset_t_weights[inds, t_label[:, k]] = (
+ t_topk[:, k] < offset_upperbound).float()
+ offset_d_weights[inds, d_label[:, k]] = (
+ d_topk[:, k] < offset_upperbound).float()
+ else:
+ offset_l_weights[inds, l_label[:, k]] = 1.0
+ offset_r_weights[inds, r_label[:, k]] = 1.0
+ offset_t_weights[inds, t_label[:, k]] = 1.0
+ offset_d_weights[inds, d_label[:, k]] = 1.0
+
+ offsets = torch.cat([l_offsets, r_offsets, t_offsets, d_offsets], dim=-1)
+ offsets_weights = torch.cat([
+ offset_l_weights, offset_r_weights, offset_t_weights, offset_d_weights
+ ],
+ dim=-1)
+
+ # generate bucket labels and weight
+ side_num = int(np.ceil(num_buckets / 2.0))
+ labels = torch.stack(
+ [l_label[:, 0], r_label[:, 0], t_label[:, 0], d_label[:, 0]], dim=-1)
+
+ batch_size = labels.size(0)
+ bucket_labels = F.one_hot(labels.view(-1), side_num).view(batch_size,
+ -1).float()
+ bucket_cls_l_weights = (l_offsets.abs() < 1).float()
+ bucket_cls_r_weights = (r_offsets.abs() < 1).float()
+ bucket_cls_t_weights = (t_offsets.abs() < 1).float()
+ bucket_cls_d_weights = (d_offsets.abs() < 1).float()
+ bucket_cls_weights = torch.cat([
+ bucket_cls_l_weights, bucket_cls_r_weights, bucket_cls_t_weights,
+ bucket_cls_d_weights
+ ],
+ dim=-1)
+ # ignore second nearest buckets for cls if necessary
+ if cls_ignore_neighbor:
+ bucket_cls_weights = (~((bucket_cls_weights == 1) &
+ (bucket_labels == 0))).float()
+ else:
+ bucket_cls_weights[:] = 1.0
+ return offsets, offsets_weights, bucket_labels, bucket_cls_weights
+
+
+@mmcv.jit(coderize=True)
+def bucket2bbox(proposals,
+ cls_preds,
+ offset_preds,
+ num_buckets,
+ scale_factor=1.0,
+ max_shape=None,
+ clip_border=True):
+ """Apply bucketing estimation (cls preds) and fine regression (offset
+ preds) to generate det bboxes.
+
+ Args:
+ proposals (Tensor): Boxes to be transformed. Shape (n, 4)
+ cls_preds (Tensor): bucketing estimation. Shape (n, num_buckets*2).
+ offset_preds (Tensor): fine regression. Shape (n, num_buckets*2).
+ num_buckets (int): Number of buckets.
+ scale_factor (float): Scale factor to rescale proposals.
+ max_shape (tuple[int, int]): Maximum bounds for boxes, specifies (H, W).
+ clip_border (bool, optional): Whether clip the objects outside the
+ border of the image. Defaults to True.
+
+ Returns:
+ tuple[Tensor]: (bboxes, loc_confidence).
+
+ - bboxes: predicted bboxes. Shape (n, 4)
+ - loc_confidence: localization confidence of predicted bboxes.
+ Shape (n,).
+ """
+
+ side_num = int(np.ceil(num_buckets / 2.0))
+ cls_preds = cls_preds.view(-1, side_num)
+ offset_preds = offset_preds.view(-1, side_num)
+
+ scores = F.softmax(cls_preds, dim=1)
+ score_topk, score_label = scores.topk(2, dim=1, largest=True, sorted=True)
+
+ rescaled_proposals = bbox_rescale(proposals, scale_factor)
+
+ pw = rescaled_proposals[..., 2] - rescaled_proposals[..., 0]
+ ph = rescaled_proposals[..., 3] - rescaled_proposals[..., 1]
+ px1 = rescaled_proposals[..., 0]
+ py1 = rescaled_proposals[..., 1]
+ px2 = rescaled_proposals[..., 2]
+ py2 = rescaled_proposals[..., 3]
+
+ bucket_w = pw / num_buckets
+ bucket_h = ph / num_buckets
+
+ score_inds_l = score_label[0::4, 0]
+ score_inds_r = score_label[1::4, 0]
+ score_inds_t = score_label[2::4, 0]
+ score_inds_d = score_label[3::4, 0]
+ l_buckets = px1 + (0.5 + score_inds_l.float()) * bucket_w
+ r_buckets = px2 - (0.5 + score_inds_r.float()) * bucket_w
+ t_buckets = py1 + (0.5 + score_inds_t.float()) * bucket_h
+ d_buckets = py2 - (0.5 + score_inds_d.float()) * bucket_h
+
+ offsets = offset_preds.view(-1, 4, side_num)
+ inds = torch.arange(proposals.size(0)).to(proposals).long()
+ l_offsets = offsets[:, 0, :][inds, score_inds_l]
+ r_offsets = offsets[:, 1, :][inds, score_inds_r]
+ t_offsets = offsets[:, 2, :][inds, score_inds_t]
+ d_offsets = offsets[:, 3, :][inds, score_inds_d]
+
+ x1 = l_buckets - l_offsets * bucket_w
+ x2 = r_buckets - r_offsets * bucket_w
+ y1 = t_buckets - t_offsets * bucket_h
+ y2 = d_buckets - d_offsets * bucket_h
+
+ if clip_border and max_shape is not None:
+ x1 = x1.clamp(min=0, max=max_shape[1] - 1)
+ y1 = y1.clamp(min=0, max=max_shape[0] - 1)
+ x2 = x2.clamp(min=0, max=max_shape[1] - 1)
+ y2 = y2.clamp(min=0, max=max_shape[0] - 1)
+ bboxes = torch.cat([x1[:, None], y1[:, None], x2[:, None], y2[:, None]],
+ dim=-1)
+
+ # bucketing guided rescoring
+ loc_confidence = score_topk[:, 0]
+ top2_neighbor_inds = (score_label[:, 0] - score_label[:, 1]).abs() == 1
+ loc_confidence += score_topk[:, 1] * top2_neighbor_inds.float()
+ loc_confidence = loc_confidence.view(-1, 4).mean(dim=1)
+
+ return bboxes, loc_confidence
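As a quick sanity check on the bucketing scheme above, the following self-contained sketch (plain PyTorch only, no mmcv/mmdet imports; the variable names are illustrative, not part of the mmdet API) walks one proposal edge through the same encode/decode math used by `bbox2bucket` and `bucket2bbox`:

```python
import torch

# One proposal [x1, y1, x2, y2] and a ground-truth box; illustrate the x-axis only.
proposal = torch.tensor([10.0, 10.0, 50.0, 30.0])
gt = torch.tensor([14.0, 12.0, 46.0, 28.0])

num_buckets = 8                       # total buckets along one axis
side_num = (num_buckets + 1) // 2     # buckets attached to each side (ceil(num_buckets / 2))
pw = proposal[2] - proposal[0]        # proposal width
bucket_w = pw / num_buckets           # width of a single bucket

# Bucket centres attached to the left edge, marching inwards (cf. generate_buckets above).
l_buckets = proposal[0] + (0.5 + torch.arange(side_num).float()) * bucket_w

# Encoding: nearest bucket index plus a fine offset in bucket units (cf. bbox2bucket).
l_offsets = (l_buckets - gt[0]) / bucket_w
label = l_offsets.abs().argmin()
offset = l_offsets[label]

# Decoding: bucket centre minus offset * bucket_w recovers the gt edge (cf. bucket2bbox).
x1_decoded = l_buckets[label] - offset * bucket_w
print(float(x1_decoded), float(gt[0]))   # both 14.0
```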
diff --git a/annotator/uniformer/mmdet_null/core/bbox/coder/delta_xywh_bbox_coder.py b/annotator/uniformer/mmdet_null/core/bbox/coder/delta_xywh_bbox_coder.py
new file mode 100644
index 0000000000000000000000000000000000000000..dc9a41e4464ac6332e26e7c21248f65d06a78af9
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/core/bbox/coder/delta_xywh_bbox_coder.py
@@ -0,0 +1,237 @@
+import annotator.uniformer.mmcv as mmcv
+import numpy as np
+import torch
+
+from ..builder import BBOX_CODERS
+from .base_bbox_coder import BaseBBoxCoder
+
+
+@BBOX_CODERS.register_module()
+class DeltaXYWHBBoxCoder(BaseBBoxCoder):
+ """Delta XYWH BBox coder.
+
+ Following the practice in `R-CNN <https://arxiv.org/abs/1311.2524>`_,
+ this coder encodes bbox (x1, y1, x2, y2) into delta (dx, dy, dw, dh) and
+ decodes delta (dx, dy, dw, dh) back to original bbox (x1, y1, x2, y2).
+
+ Args:
+ target_means (Sequence[float]): Denormalizing means of target for
+ delta coordinates
+ target_stds (Sequence[float]): Denormalizing standard deviation of
+ target for delta coordinates
+ clip_border (bool, optional): Whether clip the objects outside the
+ border of the image. Defaults to True.
+ """
+
+ def __init__(self,
+ target_means=(0., 0., 0., 0.),
+ target_stds=(1., 1., 1., 1.),
+ clip_border=True):
+ super(BaseBBoxCoder, self).__init__()
+ self.means = target_means
+ self.stds = target_stds
+ self.clip_border = clip_border
+
+ def encode(self, bboxes, gt_bboxes):
+ """Get box regression transformation deltas that can be used to
+ transform the ``bboxes`` into the ``gt_bboxes``.
+
+ Args:
+ bboxes (torch.Tensor): Source boxes, e.g., object proposals.
+ gt_bboxes (torch.Tensor): Target of the transformation, e.g.,
+ ground-truth boxes.
+
+ Returns:
+ torch.Tensor: Box transformation deltas
+ """
+
+ assert bboxes.size(0) == gt_bboxes.size(0)
+ assert bboxes.size(-1) == gt_bboxes.size(-1) == 4
+ encoded_bboxes = bbox2delta(bboxes, gt_bboxes, self.means, self.stds)
+ return encoded_bboxes
+
+ def decode(self,
+ bboxes,
+ pred_bboxes,
+ max_shape=None,
+ wh_ratio_clip=16 / 1000):
+ """Apply transformation `pred_bboxes` to `boxes`.
+
+ Args:
+ bboxes (torch.Tensor): Basic boxes. Shape (B, N, 4) or (N, 4)
+ pred_bboxes (Tensor): Encoded offsets with respect to each roi.
+ Has shape (B, N, num_classes * 4) or (B, N, 4) or
+ (N, num_classes * 4) or (N, 4). Note N = num_anchors * W * H
+ when rois is a grid of anchors. Offset encoding follows [1]_.
+ max_shape (Sequence[int] or torch.Tensor or Sequence[
+ Sequence[int]],optional): Maximum bounds for boxes, specifies
+ (H, W, C) or (H, W). If bboxes shape is (B, N, 4), then
+ the max_shape should be a Sequence[Sequence[int]]
+ and the length of max_shape should also be B.
+ wh_ratio_clip (float, optional): The allowed ratio between
+ width and height.
+
+ Returns:
+ torch.Tensor: Decoded boxes.
+ """
+
+ assert pred_bboxes.size(0) == bboxes.size(0)
+ if pred_bboxes.ndim == 3:
+ assert pred_bboxes.size(1) == bboxes.size(1)
+ decoded_bboxes = delta2bbox(bboxes, pred_bboxes, self.means, self.stds,
+ max_shape, wh_ratio_clip, self.clip_border)
+
+ return decoded_bboxes
+
+
+@mmcv.jit(coderize=True)
+def bbox2delta(proposals, gt, means=(0., 0., 0., 0.), stds=(1., 1., 1., 1.)):
+ """Compute deltas of proposals w.r.t. gt.
+
+ We usually compute the deltas of x, y, w, h of proposals w.r.t ground
+ truth bboxes to get regression target.
+ This is the inverse function of :func:`delta2bbox`.
+
+ Args:
+ proposals (Tensor): Boxes to be transformed, shape (N, ..., 4)
+ gt (Tensor): Gt bboxes to be used as base, shape (N, ..., 4)
+ means (Sequence[float]): Denormalizing means for delta coordinates
+ stds (Sequence[float]): Denormalizing standard deviation for delta
+ coordinates
+
+ Returns:
+ Tensor: deltas with shape (N, 4), where columns represent dx, dy,
+ dw, dh.
+ """
+ assert proposals.size() == gt.size()
+
+ proposals = proposals.float()
+ gt = gt.float()
+ px = (proposals[..., 0] + proposals[..., 2]) * 0.5
+ py = (proposals[..., 1] + proposals[..., 3]) * 0.5
+ pw = proposals[..., 2] - proposals[..., 0]
+ ph = proposals[..., 3] - proposals[..., 1]
+
+ gx = (gt[..., 0] + gt[..., 2]) * 0.5
+ gy = (gt[..., 1] + gt[..., 3]) * 0.5
+ gw = gt[..., 2] - gt[..., 0]
+ gh = gt[..., 3] - gt[..., 1]
+
+ dx = (gx - px) / pw
+ dy = (gy - py) / ph
+ dw = torch.log(gw / pw)
+ dh = torch.log(gh / ph)
+ deltas = torch.stack([dx, dy, dw, dh], dim=-1)
+
+ means = deltas.new_tensor(means).unsqueeze(0)
+ stds = deltas.new_tensor(stds).unsqueeze(0)
+ deltas = deltas.sub_(means).div_(stds)
+
+ return deltas
+
+
+@mmcv.jit(coderize=True)
+def delta2bbox(rois,
+ deltas,
+ means=(0., 0., 0., 0.),
+ stds=(1., 1., 1., 1.),
+ max_shape=None,
+ wh_ratio_clip=16 / 1000,
+ clip_border=True):
+ """Apply deltas to shift/scale base boxes.
+
+ Typically the rois are anchor or proposed bounding boxes and the deltas are
+ network outputs used to shift/scale those boxes.
+ This is the inverse function of :func:`bbox2delta`.
+
+ Args:
+ rois (Tensor): Boxes to be transformed. Has shape (N, 4) or (B, N, 4)
+ deltas (Tensor): Encoded offsets with respect to each roi.
+ Has shape (B, N, num_classes * 4) or (B, N, 4) or
+ (N, num_classes * 4) or (N, 4). Note N = num_anchors * W * H
+ when rois is a grid of anchors. Offset encoding follows [1]_.
+ means (Sequence[float]): Denormalizing means for delta coordinates
+ stds (Sequence[float]): Denormalizing standard deviation for delta
+ coordinates
+ max_shape (Sequence[int] or torch.Tensor or Sequence[
+ Sequence[int]],optional): Maximum bounds for boxes, specifies
+ (H, W, C) or (H, W). If rois shape is (B, N, 4), then
+ the max_shape should be a Sequence[Sequence[int]]
+ and the length of max_shape should also be B.
+ wh_ratio_clip (float): Maximum aspect ratio for boxes.
+ clip_border (bool, optional): Whether clip the objects outside the
+ border of the image. Defaults to True.
+
+ Returns:
+ Tensor: Boxes with shape (B, N, num_classes * 4) or (B, N, 4) or
+ (N, num_classes * 4) or (N, 4), where 4 represent
+ tl_x, tl_y, br_x, br_y.
+
+ References:
+ .. [1] https://arxiv.org/abs/1311.2524
+
+ Example:
+ >>> rois = torch.Tensor([[ 0., 0., 1., 1.],
+ >>> [ 0., 0., 1., 1.],
+ >>> [ 0., 0., 1., 1.],
+ >>> [ 5., 5., 5., 5.]])
+ >>> deltas = torch.Tensor([[ 0., 0., 0., 0.],
+ >>> [ 1., 1., 1., 1.],
+ >>> [ 0., 0., 2., -1.],
+ >>> [ 0.7, -1.9, -0.5, 0.3]])
+ >>> delta2bbox(rois, deltas, max_shape=(32, 32, 3))
+ tensor([[0.0000, 0.0000, 1.0000, 1.0000],
+ [0.1409, 0.1409, 2.8591, 2.8591],
+ [0.0000, 0.3161, 4.1945, 0.6839],
+ [5.0000, 5.0000, 5.0000, 5.0000]])
+ """
+ means = deltas.new_tensor(means).view(1, -1).repeat(
+ 1, deltas.size(-1) // 4)
+ stds = deltas.new_tensor(stds).view(1, -1).repeat(1, deltas.size(-1) // 4)
+ denorm_deltas = deltas * stds + means
+ dx = denorm_deltas[..., 0::4]
+ dy = denorm_deltas[..., 1::4]
+ dw = denorm_deltas[..., 2::4]
+ dh = denorm_deltas[..., 3::4]
+ max_ratio = np.abs(np.log(wh_ratio_clip))
+ dw = dw.clamp(min=-max_ratio, max=max_ratio)
+ dh = dh.clamp(min=-max_ratio, max=max_ratio)
+ x1, y1 = rois[..., 0], rois[..., 1]
+ x2, y2 = rois[..., 2], rois[..., 3]
+ # Compute center of each roi
+ px = ((x1 + x2) * 0.5).unsqueeze(-1).expand_as(dx)
+ py = ((y1 + y2) * 0.5).unsqueeze(-1).expand_as(dy)
+ # Compute width/height of each roi
+ pw = (x2 - x1).unsqueeze(-1).expand_as(dw)
+ ph = (y2 - y1).unsqueeze(-1).expand_as(dh)
+ # Use exp(network energy) to enlarge/shrink each roi
+ gw = pw * dw.exp()
+ gh = ph * dh.exp()
+ # Use network energy to shift the center of each roi
+ gx = px + pw * dx
+ gy = py + ph * dy
+ # Convert center-xy/width/height to top-left, bottom-right
+ x1 = gx - gw * 0.5
+ y1 = gy - gh * 0.5
+ x2 = gx + gw * 0.5
+ y2 = gy + gh * 0.5
+
+ bboxes = torch.stack([x1, y1, x2, y2], dim=-1).view(deltas.size())
+
+ if clip_border and max_shape is not None:
+ if not isinstance(max_shape, torch.Tensor):
+ max_shape = x1.new_tensor(max_shape)
+ max_shape = max_shape[..., :2].type_as(x1)
+ if max_shape.ndim == 2:
+ assert bboxes.ndim == 3
+ assert max_shape.size(0) == bboxes.size(0)
+
+ min_xy = x1.new_tensor(0)
+ max_xy = torch.cat(
+ [max_shape] * (deltas.size(-1) // 2),
+ dim=-1).flip(-1).unsqueeze(-2)
+ bboxes = torch.where(bboxes < min_xy, min_xy, bboxes)
+ bboxes = torch.where(bboxes > max_xy, max_xy, bboxes)
+
+ return bboxes
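The delta coder boils down to a centre/size reparameterisation. Below is a minimal, dependency-free round-trip sketch of the same math as `bbox2delta`/`delta2bbox` (without the mean/std normalisation, aspect-ratio clipping, or border clipping); it is an illustration under those simplifying assumptions, not the mmdet API:

```python
import torch

def xywh_encode(proposals, gt):
    # Centre/size of proposals and gt (same math as bbox2delta, minus mean/std).
    px = (proposals[:, 0] + proposals[:, 2]) * 0.5
    py = (proposals[:, 1] + proposals[:, 3]) * 0.5
    pw = proposals[:, 2] - proposals[:, 0]
    ph = proposals[:, 3] - proposals[:, 1]
    gx = (gt[:, 0] + gt[:, 2]) * 0.5
    gy = (gt[:, 1] + gt[:, 3]) * 0.5
    gw = gt[:, 2] - gt[:, 0]
    gh = gt[:, 3] - gt[:, 1]
    return torch.stack([(gx - px) / pw, (gy - py) / ph,
                        torch.log(gw / pw), torch.log(gh / ph)], dim=-1)

def xywh_decode(proposals, deltas):
    # Invert the transform above (same math as delta2bbox, minus clipping).
    px = (proposals[:, 0] + proposals[:, 2]) * 0.5
    py = (proposals[:, 1] + proposals[:, 3]) * 0.5
    pw = proposals[:, 2] - proposals[:, 0]
    ph = proposals[:, 3] - proposals[:, 1]
    gx = px + pw * deltas[:, 0]
    gy = py + ph * deltas[:, 1]
    gw = pw * deltas[:, 2].exp()
    gh = ph * deltas[:, 3].exp()
    return torch.stack([gx - gw * 0.5, gy - gh * 0.5,
                        gx + gw * 0.5, gy + gh * 0.5], dim=-1)

proposals = torch.tensor([[0., 0., 10., 10.], [5., 5., 25., 15.]])
gt = torch.tensor([[1., 2., 9., 12.], [4., 6., 26., 18.]])
deltas = xywh_encode(proposals, gt)
assert torch.allclose(xywh_decode(proposals, deltas), gt, atol=1e-5)
```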
diff --git a/annotator/uniformer/mmdet_null/core/bbox/coder/legacy_delta_xywh_bbox_coder.py b/annotator/uniformer/mmdet_null/core/bbox/coder/legacy_delta_xywh_bbox_coder.py
new file mode 100644
index 0000000000000000000000000000000000000000..fd73d27e47d44f2a351ea05a5b7a8a8102ad463e
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/core/bbox/coder/legacy_delta_xywh_bbox_coder.py
@@ -0,0 +1,215 @@
+import annotator.uniformer.mmcv as mmcv
+import numpy as np
+import torch
+
+from ..builder import BBOX_CODERS
+from .base_bbox_coder import BaseBBoxCoder
+
+
+@BBOX_CODERS.register_module()
+class LegacyDeltaXYWHBBoxCoder(BaseBBoxCoder):
+ """Legacy Delta XYWH BBox coder used in MMDet V1.x.
+
+ Following the practice in R-CNN [1]_, this coder encodes bbox (x1, y1, x2,
+ y2) into delta (dx, dy, dw, dh) and decodes delta (dx, dy, dw, dh)
+ back to original bbox (x1, y1, x2, y2).
+
+ Note:
+ The main difference between :class:`LegacyDeltaXYWHBBoxCoder` and
+ :class:`DeltaXYWHBBoxCoder` is whether ``+ 1`` is used during width and
+ height calculation. We suggest using this coder only when testing with
+ MMDet V1.x models.
+
+ References:
+ .. [1] https://arxiv.org/abs/1311.2524
+
+ Args:
+ target_means (Sequence[float]): denormalizing means of target for
+ delta coordinates
+ target_stds (Sequence[float]): denormalizing standard deviation of
+ target for delta coordinates
+ """
+
+ def __init__(self,
+ target_means=(0., 0., 0., 0.),
+ target_stds=(1., 1., 1., 1.)):
+ super(BaseBBoxCoder, self).__init__()
+ self.means = target_means
+ self.stds = target_stds
+
+ def encode(self, bboxes, gt_bboxes):
+ """Get box regression transformation deltas that can be used to
+ transform the ``bboxes`` into the ``gt_bboxes``.
+
+ Args:
+ bboxes (torch.Tensor): source boxes, e.g., object proposals.
+ gt_bboxes (torch.Tensor): target of the transformation, e.g.,
+ ground-truth boxes.
+
+ Returns:
+ torch.Tensor: Box transformation deltas
+ """
+ assert bboxes.size(0) == gt_bboxes.size(0)
+ assert bboxes.size(-1) == gt_bboxes.size(-1) == 4
+ encoded_bboxes = legacy_bbox2delta(bboxes, gt_bboxes, self.means,
+ self.stds)
+ return encoded_bboxes
+
+ def decode(self,
+ bboxes,
+ pred_bboxes,
+ max_shape=None,
+ wh_ratio_clip=16 / 1000):
+ """Apply transformation `pred_bboxes` to `boxes`.
+
+ Args:
+ bboxes (torch.Tensor): Basic boxes. Shape (N, 4).
+ pred_bboxes (torch.Tensor): Encoded boxes with shape
+ (N, 4 * num_classes) or (N, 4).
+ max_shape (tuple[int], optional): Maximum shape of boxes.
+ Defaults to None.
+ wh_ratio_clip (float, optional): The allowed ratio between
+ width and height.
+
+ Returns:
+ torch.Tensor: Decoded boxes.
+ """
+ assert pred_bboxes.size(0) == bboxes.size(0)
+ decoded_bboxes = legacy_delta2bbox(bboxes, pred_bboxes, self.means,
+ self.stds, max_shape, wh_ratio_clip)
+
+ return decoded_bboxes
+
+
+@mmcv.jit(coderize=True)
+def legacy_bbox2delta(proposals,
+ gt,
+ means=(0., 0., 0., 0.),
+ stds=(1., 1., 1., 1.)):
+ """Compute deltas of proposals w.r.t. gt in the MMDet V1.x manner.
+
+ We usually compute the deltas of x, y, w, h of proposals w.r.t ground
+ truth bboxes to get regression target.
+ This is the inverse function of `legacy_delta2bbox()`.
+
+ Args:
+ proposals (Tensor): Boxes to be transformed, shape (N, ..., 4)
+ gt (Tensor): Gt bboxes to be used as base, shape (N, ..., 4)
+ means (Sequence[float]): Denormalizing means for delta coordinates
+ stds (Sequence[float]): Denormalizing standard deviation for delta
+ coordinates
+
+ Returns:
+ Tensor: deltas with shape (N, 4), where columns represent dx, dy,
+ dw, dh.
+ """
+ assert proposals.size() == gt.size()
+
+ proposals = proposals.float()
+ gt = gt.float()
+ px = (proposals[..., 0] + proposals[..., 2]) * 0.5
+ py = (proposals[..., 1] + proposals[..., 3]) * 0.5
+ pw = proposals[..., 2] - proposals[..., 0] + 1.0
+ ph = proposals[..., 3] - proposals[..., 1] + 1.0
+
+ gx = (gt[..., 0] + gt[..., 2]) * 0.5
+ gy = (gt[..., 1] + gt[..., 3]) * 0.5
+ gw = gt[..., 2] - gt[..., 0] + 1.0
+ gh = gt[..., 3] - gt[..., 1] + 1.0
+
+ dx = (gx - px) / pw
+ dy = (gy - py) / ph
+ dw = torch.log(gw / pw)
+ dh = torch.log(gh / ph)
+ deltas = torch.stack([dx, dy, dw, dh], dim=-1)
+
+ means = deltas.new_tensor(means).unsqueeze(0)
+ stds = deltas.new_tensor(stds).unsqueeze(0)
+ deltas = deltas.sub_(means).div_(stds)
+
+ return deltas
+
+
+@mmcv.jit(coderize=True)
+def legacy_delta2bbox(rois,
+ deltas,
+ means=(0., 0., 0., 0.),
+ stds=(1., 1., 1., 1.),
+ max_shape=None,
+ wh_ratio_clip=16 / 1000):
+ """Apply deltas to shift/scale base boxes in the MMDet V1.x manner.
+
+ Typically the rois are anchor or proposed bounding boxes and the deltas are
+ network outputs used to shift/scale those boxes.
+ This is the inverse function of `legacy_bbox2delta()`.
+
+ Args:
+ rois (Tensor): Boxes to be transformed. Has shape (N, 4)
+ deltas (Tensor): Encoded offsets with respect to each roi.
+ Has shape (N, 4 * num_classes). Note N = num_anchors * W * H when
+ rois is a grid of anchors. Offset encoding follows [1]_.
+ means (Sequence[float]): Denormalizing means for delta coordinates
+ stds (Sequence[float]): Denormalizing standard deviation for delta
+ coordinates
+ max_shape (tuple[int, int]): Maximum bounds for boxes, specifies (H, W).
+ wh_ratio_clip (float): Maximum aspect ratio for boxes.
+
+ Returns:
+ Tensor: Boxes with shape (N, 4), where columns represent
+ tl_x, tl_y, br_x, br_y.
+
+ References:
+ .. [1] https://arxiv.org/abs/1311.2524
+
+ Example:
+ >>> rois = torch.Tensor([[ 0., 0., 1., 1.],
+ >>> [ 0., 0., 1., 1.],
+ >>> [ 0., 0., 1., 1.],
+ >>> [ 5., 5., 5., 5.]])
+ >>> deltas = torch.Tensor([[ 0., 0., 0., 0.],
+ >>> [ 1., 1., 1., 1.],
+ >>> [ 0., 0., 2., -1.],
+ >>> [ 0.7, -1.9, -0.5, 0.3]])
+ >>> legacy_delta2bbox(rois, deltas, max_shape=(32, 32))
+ tensor([[0.0000, 0.0000, 1.5000, 1.5000],
+ [0.0000, 0.0000, 5.2183, 5.2183],
+ [0.0000, 0.1321, 7.8891, 0.8679],
+ [5.3967, 2.4251, 6.0033, 3.7749]])
+ """
+ means = deltas.new_tensor(means).repeat(1, deltas.size(1) // 4)
+ stds = deltas.new_tensor(stds).repeat(1, deltas.size(1) // 4)
+ denorm_deltas = deltas * stds + means
+ dx = denorm_deltas[:, 0::4]
+ dy = denorm_deltas[:, 1::4]
+ dw = denorm_deltas[:, 2::4]
+ dh = denorm_deltas[:, 3::4]
+ max_ratio = np.abs(np.log(wh_ratio_clip))
+ dw = dw.clamp(min=-max_ratio, max=max_ratio)
+ dh = dh.clamp(min=-max_ratio, max=max_ratio)
+ # Compute center of each roi
+ px = ((rois[:, 0] + rois[:, 2]) * 0.5).unsqueeze(1).expand_as(dx)
+ py = ((rois[:, 1] + rois[:, 3]) * 0.5).unsqueeze(1).expand_as(dy)
+ # Compute width/height of each roi
+ pw = (rois[:, 2] - rois[:, 0] + 1.0).unsqueeze(1).expand_as(dw)
+ ph = (rois[:, 3] - rois[:, 1] + 1.0).unsqueeze(1).expand_as(dh)
+ # Use exp(network energy) to enlarge/shrink each roi
+ gw = pw * dw.exp()
+ gh = ph * dh.exp()
+ # Use network energy to shift the center of each roi
+ gx = px + pw * dx
+ gy = py + ph * dy
+ # Convert center-xy/width/height to top-left, bottom-right
+
+ # The true legacy box coder should +- 0.5 here.
+ # However, current implementation improves the performance when testing
+ # the models trained in MMDetection 1.X (~0.5 bbox AP, 0.2 mask AP)
+ x1 = gx - gw * 0.5
+ y1 = gy - gh * 0.5
+ x2 = gx + gw * 0.5
+ y2 = gy + gh * 0.5
+ if max_shape is not None:
+ x1 = x1.clamp(min=0, max=max_shape[1] - 1)
+ y1 = y1.clamp(min=0, max=max_shape[0] - 1)
+ x2 = x2.clamp(min=0, max=max_shape[1] - 1)
+ y2 = y2.clamp(min=0, max=max_shape[0] - 1)
+ bboxes = torch.stack([x1, y1, x2, y2], dim=-1).view_as(deltas)
+ return bboxes
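The only behavioural difference from `DeltaXYWHBBoxCoder` is the legacy `+ 1` width/height convention. A tiny standalone comparison (toy numbers, plain PyTorch) of the resulting `dw` under the two conventions:

```python
import torch

proposal = torch.tensor([0., 0., 9., 9.])     # a 10-pixel-wide box in the V1.x "+1" convention
gt = torch.tensor([0., 0., 19., 19.])

# MMDet 2.x convention: width = x2 - x1
dw_new = torch.log((gt[2] - gt[0]) / (proposal[2] - proposal[0]))
# MMDet 1.x legacy convention: width = x2 - x1 + 1
dw_legacy = torch.log((gt[2] - gt[0] + 1.0) / (proposal[2] - proposal[0] + 1.0))

print(float(dw_new), float(dw_legacy))  # log(19/9) ~ 0.747 vs. log(20/10) ~ 0.693
```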
diff --git a/annotator/uniformer/mmdet_null/core/bbox/coder/pseudo_bbox_coder.py b/annotator/uniformer/mmdet_null/core/bbox/coder/pseudo_bbox_coder.py
new file mode 100644
index 0000000000000000000000000000000000000000..1c8346f4ae2c7db9719a70c7dc0244e088a9965b
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/core/bbox/coder/pseudo_bbox_coder.py
@@ -0,0 +1,18 @@
+from ..builder import BBOX_CODERS
+from .base_bbox_coder import BaseBBoxCoder
+
+
+@BBOX_CODERS.register_module()
+class PseudoBBoxCoder(BaseBBoxCoder):
+ """Pseudo bounding box coder."""
+
+ def __init__(self, **kwargs):
+ super(BaseBBoxCoder, self).__init__(**kwargs)
+
+ def encode(self, bboxes, gt_bboxes):
+ """torch.Tensor: return the given ``bboxes``"""
+ return gt_bboxes
+
+ def decode(self, bboxes, pred_bboxes):
+ """torch.Tensor: return the given ``pred_bboxes``"""
+ return pred_bboxes
diff --git a/annotator/uniformer/mmdet_null/core/bbox/coder/tblr_bbox_coder.py b/annotator/uniformer/mmdet_null/core/bbox/coder/tblr_bbox_coder.py
new file mode 100644
index 0000000000000000000000000000000000000000..3e9075665732b20a366a45a3e04f9c39f43e1ce9
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/core/bbox/coder/tblr_bbox_coder.py
@@ -0,0 +1,198 @@
+import annotator.uniformer.mmcv as mmcv
+import torch
+
+from ..builder import BBOX_CODERS
+from .base_bbox_coder import BaseBBoxCoder
+
+
+@BBOX_CODERS.register_module()
+class TBLRBBoxCoder(BaseBBoxCoder):
+ """TBLR BBox coder.
+
+ Following the practice in FSAF, this coder encodes gt bboxes
+ (x1, y1, x2, y2) into (top, bottom, left, right) and decodes them back
+ to the original format.
+
+ Args:
+ normalizer (list | float): Normalization factor that the encoded
+ coordinates are divided by. If it is a list, it should
+ have length 4, giving the normalization factor for each tblr dim.
+ Otherwise it is a unified float factor for all dims. Default: 4.0
+ clip_border (bool, optional): Whether clip the objects outside the
+ border of the image. Defaults to True.
+ """
+
+ def __init__(self, normalizer=4.0, clip_border=True):
+ super(BaseBBoxCoder, self).__init__()
+ self.normalizer = normalizer
+ self.clip_border = clip_border
+
+ def encode(self, bboxes, gt_bboxes):
+ """Get box regression transformation deltas that can be used to
+ transform the ``bboxes`` into the ``gt_bboxes`` in the (top, bottom,
+ left, right) order.
+
+ Args:
+ bboxes (torch.Tensor): source boxes, e.g., object proposals.
+ gt_bboxes (torch.Tensor): target of the transformation, e.g.,
+ ground truth boxes.
+
+ Returns:
+ torch.Tensor: Box transformation deltas
+ """
+ assert bboxes.size(0) == gt_bboxes.size(0)
+ assert bboxes.size(-1) == gt_bboxes.size(-1) == 4
+ encoded_bboxes = bboxes2tblr(
+ bboxes, gt_bboxes, normalizer=self.normalizer)
+ return encoded_bboxes
+
+ def decode(self, bboxes, pred_bboxes, max_shape=None):
+ """Apply transformation `pred_bboxes` to `boxes`.
+
+ Args:
+ bboxes (torch.Tensor): Basic boxes.Shape (B, N, 4) or (N, 4)
+ pred_bboxes (torch.Tensor): Encoded boxes with shape
+ (B, N, 4) or (N, 4)
+ max_shape (Sequence[int] or torch.Tensor or Sequence[
+ Sequence[int]],optional): Maximum bounds for boxes, specifies
+ (H, W, C) or (H, W). If bboxes shape is (B, N, 4), then
+ the max_shape should be a Sequence[Sequence[int]]
+ and the length of max_shape should also be B.
+
+ Returns:
+ torch.Tensor: Decoded boxes.
+ """
+ decoded_bboxes = tblr2bboxes(
+ bboxes,
+ pred_bboxes,
+ normalizer=self.normalizer,
+ max_shape=max_shape,
+ clip_border=self.clip_border)
+
+ return decoded_bboxes
+
+
+@mmcv.jit(coderize=True)
+def bboxes2tblr(priors, gts, normalizer=4.0, normalize_by_wh=True):
+ """Encode ground truth boxes to tblr coordinate.
+
+ It first converts the gt coordinates to the tblr format
+ (top, bottom, left, right), relative to the prior box centers.
+ The tblr coordinate may be normalized by the side length of prior bboxes
+ if `normalize_by_wh` is specified as True, and it is then normalized by
+ the `normalizer` factor.
+
+ Args:
+ priors (Tensor): Prior boxes in point form
+ Shape: (num_proposals,4).
+ gts (Tensor): Coords of ground truth for each prior in point-form
+ Shape: (num_proposals, 4).
+ normalizer (Sequence[float] | float): normalization parameter of
+ encoded boxes. If it is a list, it has to have length = 4.
+ Default: 4.0
+ normalize_by_wh (bool): Whether to normalize tblr coordinate by the
+ side length (wh) of prior bboxes.
+
+ Return:
+ encoded boxes (Tensor), Shape: (num_proposals, 4)
+ """
+
+ # dist b/t match center and prior's center
+ if not isinstance(normalizer, float):
+ normalizer = torch.tensor(normalizer, device=priors.device)
+ assert len(normalizer) == 4, 'Normalizer must have length = 4'
+ assert priors.size(0) == gts.size(0)
+ prior_centers = (priors[:, 0:2] + priors[:, 2:4]) / 2
+ xmin, ymin, xmax, ymax = gts.split(1, dim=1)
+ top = prior_centers[:, 1].unsqueeze(1) - ymin
+ bottom = ymax - prior_centers[:, 1].unsqueeze(1)
+ left = prior_centers[:, 0].unsqueeze(1) - xmin
+ right = xmax - prior_centers[:, 0].unsqueeze(1)
+ loc = torch.cat((top, bottom, left, right), dim=1)
+ if normalize_by_wh:
+ # Normalize tblr by anchor width and height
+ wh = priors[:, 2:4] - priors[:, 0:2]
+ w, h = torch.split(wh, 1, dim=1)
+ loc[:, :2] /= h # tb is normalized by h
+ loc[:, 2:] /= w # lr is normalized by w
+ # Normalize tblr by the given normalization factor
+ return loc / normalizer
+
+
+@mmcv.jit(coderize=True)
+def tblr2bboxes(priors,
+ tblr,
+ normalizer=4.0,
+ normalize_by_wh=True,
+ max_shape=None,
+ clip_border=True):
+ """Decode tblr outputs to prediction boxes.
+
+ The process includes 3 steps: 1) de-normalize the tblr coordinates by
+ multiplying them by `normalizer`; 2) de-normalize the tblr coordinates by
+ the prior bbox width and height if `normalize_by_wh` is `True`; 3) convert
+ the tblr (top, bottom, left, right) pair relative to the center of priors
+ back to (xmin, ymin, xmax, ymax) coordinates.
+
+ Args:
+ priors (Tensor): Prior boxes in point form (x0, y0, x1, y1)
+ Shape: (N,4) or (B, N, 4).
+ tblr (Tensor): Coords of network output in tblr form
+ Shape: (N, 4) or (B, N, 4).
+ normalizer (Sequence[float] | float): Normalization parameter of
+ encoded boxes. By list, it represents the normalization factors at
+ tblr dims. By float, it is the unified normalization factor at all
+ dims. Default: 4.0
+ normalize_by_wh (bool): Whether the tblr coordinates have been
+ normalized by the side length (wh) of prior bboxes.
+ max_shape (Sequence[int] or torch.Tensor or Sequence[
+ Sequence[int]],optional): Maximum bounds for boxes, specifies
+ (H, W, C) or (H, W). If priors shape is (B, N, 4), then
+ the max_shape should be a Sequence[Sequence[int]]
+ and the length of max_shape should also be B.
+ clip_border (bool, optional): Whether clip the objects outside the
+ border of the image. Defaults to True.
+
+ Return:
+ encoded boxes (Tensor): Boxes with shape (N, 4) or (B, N, 4)
+ """
+ if not isinstance(normalizer, float):
+ normalizer = torch.tensor(normalizer, device=priors.device)
+ assert len(normalizer) == 4, 'Normalizer must have length = 4'
+ assert priors.size(0) == tblr.size(0)
+ if priors.ndim == 3:
+ assert priors.size(1) == tblr.size(1)
+
+ loc_decode = tblr * normalizer
+ prior_centers = (priors[..., 0:2] + priors[..., 2:4]) / 2
+ if normalize_by_wh:
+ wh = priors[..., 2:4] - priors[..., 0:2]
+ w, h = torch.split(wh, 1, dim=-1)
+ # In-place operations on slices would fail when exporting to ONNX
+ th = h * loc_decode[..., :2] # tb
+ tw = w * loc_decode[..., 2:] # lr
+ loc_decode = torch.cat([th, tw], dim=-1)
+ # Cannot be exported to ONNX when using loc_decode.split(1, dim=-1)
+ top, bottom, left, right = loc_decode.split((1, 1, 1, 1), dim=-1)
+ xmin = prior_centers[..., 0].unsqueeze(-1) - left
+ xmax = prior_centers[..., 0].unsqueeze(-1) + right
+ ymin = prior_centers[..., 1].unsqueeze(-1) - top
+ ymax = prior_centers[..., 1].unsqueeze(-1) + bottom
+
+ bboxes = torch.cat((xmin, ymin, xmax, ymax), dim=-1)
+
+ if clip_border and max_shape is not None:
+ if not isinstance(max_shape, torch.Tensor):
+ max_shape = priors.new_tensor(max_shape)
+ max_shape = max_shape[..., :2].type_as(priors)
+ if max_shape.ndim == 2:
+ assert bboxes.ndim == 3
+ assert max_shape.size(0) == bboxes.size(0)
+
+ min_xy = priors.new_tensor(0)
+ max_xy = torch.cat([max_shape, max_shape],
+ dim=-1).flip(-1).unsqueeze(-2)
+ bboxes = torch.where(bboxes < min_xy, min_xy, bboxes)
+ bboxes = torch.where(bboxes > max_xy, max_xy, bboxes)
+
+ return bboxes
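A dependency-free sketch of the TBLR encode/decode round trip for a single prior, mirroring `bboxes2tblr`/`tblr2bboxes` minus batching and border clipping (the variable names are illustrative):

```python
import torch

prior = torch.tensor([10., 10., 30., 50.])     # x1, y1, x2, y2
gt = torch.tensor([12., 8., 28., 56.])
normalizer = 4.0

cx, cy = (prior[0] + prior[2]) / 2, (prior[1] + prior[3]) / 2   # prior centre (20, 30)
w, h = prior[2] - prior[0], prior[3] - prior[1]                 # prior size (20, 40)

# Encode: distances from the prior centre to the gt edges, normalised by h/w and `normalizer`.
top, bottom = (cy - gt[1]) / h, (gt[3] - cy) / h
left, right = (cx - gt[0]) / w, (gt[2] - cx) / w
tblr = torch.stack([top, bottom, left, right]) / normalizer

# Decode: undo the normalisation and add the distances back around the centre.
t, b, l, r = tblr * normalizer
decoded = torch.stack([cx - l * w, cy - t * h, cx + r * w, cy + b * h])
assert torch.allclose(decoded, gt)
```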
diff --git a/annotator/uniformer/mmdet_null/core/bbox/coder/yolo_bbox_coder.py b/annotator/uniformer/mmdet_null/core/bbox/coder/yolo_bbox_coder.py
new file mode 100644
index 0000000000000000000000000000000000000000..2f8a0be133b313b03b1dbadb5c59f29cdcaffa22
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/core/bbox/coder/yolo_bbox_coder.py
@@ -0,0 +1,89 @@
+import annotator.uniformer.mmcv as mmcv
+import torch
+
+from ..builder import BBOX_CODERS
+from .base_bbox_coder import BaseBBoxCoder
+
+
+@BBOX_CODERS.register_module()
+class YOLOBBoxCoder(BaseBBoxCoder):
+ """YOLO BBox coder.
+
+ Following YOLO, this coder divides the
+ image into grids, and encodes bbox (x1, y1, x2, y2) into (cx, cy, dw, dh).
+ cx, cy in [0., 1.] denote the relative center position w.r.t. the center of
+ bboxes. dw, dh are the same as :obj:`DeltaXYWHBBoxCoder`.
+
+ Args:
+ eps (float): Min value of cx, cy when encoding.
+ """
+
+ def __init__(self, eps=1e-6):
+ super(BaseBBoxCoder, self).__init__()
+ self.eps = eps
+
+ @mmcv.jit(coderize=True)
+ def encode(self, bboxes, gt_bboxes, stride):
+ """Get box regression transformation deltas that can be used to
+ transform the ``bboxes`` into the ``gt_bboxes``.
+
+ Args:
+ bboxes (torch.Tensor): Source boxes, e.g., anchors.
+ gt_bboxes (torch.Tensor): Target of the transformation, e.g.,
+ ground-truth boxes.
+ stride (torch.Tensor | int): Stride of bboxes.
+
+ Returns:
+ torch.Tensor: Box transformation deltas
+ """
+
+ assert bboxes.size(0) == gt_bboxes.size(0)
+ assert bboxes.size(-1) == gt_bboxes.size(-1) == 4
+ x_center_gt = (gt_bboxes[..., 0] + gt_bboxes[..., 2]) * 0.5
+ y_center_gt = (gt_bboxes[..., 1] + gt_bboxes[..., 3]) * 0.5
+ w_gt = gt_bboxes[..., 2] - gt_bboxes[..., 0]
+ h_gt = gt_bboxes[..., 3] - gt_bboxes[..., 1]
+ x_center = (bboxes[..., 0] + bboxes[..., 2]) * 0.5
+ y_center = (bboxes[..., 1] + bboxes[..., 3]) * 0.5
+ w = bboxes[..., 2] - bboxes[..., 0]
+ h = bboxes[..., 3] - bboxes[..., 1]
+ w_target = torch.log((w_gt / w).clamp(min=self.eps))
+ h_target = torch.log((h_gt / h).clamp(min=self.eps))
+ x_center_target = ((x_center_gt - x_center) / stride + 0.5).clamp(
+ self.eps, 1 - self.eps)
+ y_center_target = ((y_center_gt - y_center) / stride + 0.5).clamp(
+ self.eps, 1 - self.eps)
+ encoded_bboxes = torch.stack(
+ [x_center_target, y_center_target, w_target, h_target], dim=-1)
+ return encoded_bboxes
+
+ @mmcv.jit(coderize=True)
+ def decode(self, bboxes, pred_bboxes, stride):
+ """Apply transformation `pred_bboxes` to `boxes`.
+
+ Args:
+ bboxes (torch.Tensor): Basic boxes, e.g. anchors.
+ pred_bboxes (torch.Tensor): Encoded boxes with shape (..., 4).
+ stride (torch.Tensor | int): Strides of bboxes.
+
+ Returns:
+ torch.Tensor: Decoded boxes.
+ """
+ assert pred_bboxes.size(0) == bboxes.size(0)
+ assert pred_bboxes.size(-1) == bboxes.size(-1) == 4
+ x_center = (bboxes[..., 0] + bboxes[..., 2]) * 0.5
+ y_center = (bboxes[..., 1] + bboxes[..., 3]) * 0.5
+ w = bboxes[..., 2] - bboxes[..., 0]
+ h = bboxes[..., 3] - bboxes[..., 1]
+ # Get outputs x, y
+ x_center_pred = (pred_bboxes[..., 0] - 0.5) * stride + x_center
+ y_center_pred = (pred_bboxes[..., 1] - 0.5) * stride + y_center
+ w_pred = torch.exp(pred_bboxes[..., 2]) * w
+ h_pred = torch.exp(pred_bboxes[..., 3]) * h
+
+ decoded_bboxes = torch.stack(
+ (x_center_pred - w_pred / 2, y_center_pred - h_pred / 2,
+ x_center_pred + w_pred / 2, y_center_pred + h_pred / 2),
+ dim=-1)
+
+ return decoded_bboxes
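The same round-trip idea for the YOLO coder, as a standalone sketch with one anchor and one ground-truth box. It reproduces the encode/decode formulas above with plain PyTorch and without the mmcv decorators; the concrete numbers are made up:

```python
import torch

anchor = torch.tensor([16., 16., 48., 48.])   # one anchor box, x1 y1 x2 y2
gt = torch.tensor([20., 12., 52., 44.])
stride = 32.0
eps = 1e-6

ax, ay = (anchor[0] + anchor[2]) / 2, (anchor[1] + anchor[3]) / 2
aw, ah = anchor[2] - anchor[0], anchor[3] - anchor[1]
gx, gy = (gt[0] + gt[2]) / 2, (gt[1] + gt[3]) / 2
gw, gh = gt[2] - gt[0], gt[3] - gt[1]

# Encode: centre offset within the grid cell plus log size ratios (cf. YOLOBBoxCoder.encode).
tx = ((gx - ax) / stride + 0.5).clamp(eps, 1 - eps)
ty = ((gy - ay) / stride + 0.5).clamp(eps, 1 - eps)
tw = torch.log((gw / aw).clamp(min=eps))
th = torch.log((gh / ah).clamp(min=eps))

# Decode: invert the transform (cf. YOLOBBoxCoder.decode).
cx = (tx - 0.5) * stride + ax
cy = (ty - 0.5) * stride + ay
w, h = aw * tw.exp(), ah * th.exp()
decoded = torch.stack([cx - w / 2, cy - h / 2, cx + w / 2, cy + h / 2])
assert torch.allclose(decoded, gt, atol=1e-4)
```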
diff --git a/annotator/uniformer/mmdet_null/core/bbox/demodata.py b/annotator/uniformer/mmdet_null/core/bbox/demodata.py
new file mode 100644
index 0000000000000000000000000000000000000000..feecb693745a47d9f2bebd8af9a217ff4f5cc92b
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/core/bbox/demodata.py
@@ -0,0 +1,41 @@
+import numpy as np
+import torch
+
+from mmdet.utils.util_random import ensure_rng
+
+
+def random_boxes(num=1, scale=1, rng=None):
+ """Simple version of ``kwimage.Boxes.random``
+
+ Returns:
+ Tensor: shape (n, 4) in x1, y1, x2, y2 format.
+
+ References:
+ https://gitlab.kitware.com/computer-vision/kwimage/blob/master/kwimage/structs/boxes.py#L1390
+
+ Example:
+ >>> num = 3
+ >>> scale = 512
+ >>> rng = 0
+ >>> boxes = random_boxes(num, scale, rng)
+ >>> print(boxes)
+ tensor([[280.9925, 278.9802, 308.6148, 366.1769],
+ [216.9113, 330.6978, 224.0446, 456.5878],
+ [405.3632, 196.3221, 493.3953, 270.7942]])
+ """
+ rng = ensure_rng(rng)
+
+ tlbr = rng.rand(num, 4).astype(np.float32)
+
+ tl_x = np.minimum(tlbr[:, 0], tlbr[:, 2])
+ tl_y = np.minimum(tlbr[:, 1], tlbr[:, 3])
+ br_x = np.maximum(tlbr[:, 0], tlbr[:, 2])
+ br_y = np.maximum(tlbr[:, 1], tlbr[:, 3])
+
+ tlbr[:, 0] = tl_x * scale
+ tlbr[:, 1] = tl_y * scale
+ tlbr[:, 2] = br_x * scale
+ tlbr[:, 3] = br_y * scale
+
+ boxes = torch.from_numpy(tlbr)
+ return boxes
diff --git a/annotator/uniformer/mmdet_null/core/bbox/iou_calculators/__init__.py b/annotator/uniformer/mmdet_null/core/bbox/iou_calculators/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e71369a58a05fa25e6a754300875fdbb87cb26a5
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/core/bbox/iou_calculators/__init__.py
@@ -0,0 +1,4 @@
+from .builder import build_iou_calculator
+from .iou2d_calculator import BboxOverlaps2D, bbox_overlaps
+
+__all__ = ['build_iou_calculator', 'BboxOverlaps2D', 'bbox_overlaps']
diff --git a/annotator/uniformer/mmdet_null/core/bbox/iou_calculators/builder.py b/annotator/uniformer/mmdet_null/core/bbox/iou_calculators/builder.py
new file mode 100644
index 0000000000000000000000000000000000000000..3220806fbcf70302dd58c5a166c7436692db11d1
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/core/bbox/iou_calculators/builder.py
@@ -0,0 +1,8 @@
+from annotator.uniformer.mmcv.utils import Registry, build_from_cfg
+
+IOU_CALCULATORS = Registry('IoU calculator')
+
+
+def build_iou_calculator(cfg, default_args=None):
+ """Builder of IoU calculator."""
+ return build_from_cfg(cfg, IOU_CALCULATORS, default_args)
diff --git a/annotator/uniformer/mmdet_null/core/bbox/iou_calculators/iou2d_calculator.py b/annotator/uniformer/mmdet_null/core/bbox/iou_calculators/iou2d_calculator.py
new file mode 100644
index 0000000000000000000000000000000000000000..158b702c234f5c10c4f5f03e08e8794ac7b8dcad
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/core/bbox/iou_calculators/iou2d_calculator.py
@@ -0,0 +1,159 @@
+import torch
+
+from .builder import IOU_CALCULATORS
+
+
+@IOU_CALCULATORS.register_module()
+class BboxOverlaps2D(object):
+ """2D Overlaps (e.g. IoUs, GIoUs) Calculator."""
+
+ def __call__(self, bboxes1, bboxes2, mode='iou', is_aligned=False):
+ """Calculate IoU between 2D bboxes.
+
+ Args:
+ bboxes1 (Tensor): bboxes have shape (m, 4) in <x1, y1, x2, y2>
+ format, or shape (m, 5) in <x1, y1, x2, y2, score> format.
+ bboxes2 (Tensor): bboxes have shape (n, 4) in <x1, y1, x2, y2>
+ format, shape (n, 5) in <x1, y1, x2, y2, score> format, or be
+ empty. If ``is_aligned`` is ``True``, then m and n must be
+ equal.
+ mode (str): "iou" (intersection over union), "iof" (intersection
+ over foreground), or "giou" (generalized intersection over
+ union).
+ is_aligned (bool, optional): If True, then m and n must be equal.
+ Default False.
+
+ Returns:
+ Tensor: shape (m, n) if ``is_aligned `` is False else shape (m,)
+ """
+ assert bboxes1.size(-1) in [0, 4, 5]
+ assert bboxes2.size(-1) in [0, 4, 5]
+ if bboxes2.size(-1) == 5:
+ bboxes2 = bboxes2[..., :4]
+ if bboxes1.size(-1) == 5:
+ bboxes1 = bboxes1[..., :4]
+ return bbox_overlaps(bboxes1, bboxes2, mode, is_aligned)
+
+ def __repr__(self):
+ """str: a string describing the module"""
+ repr_str = self.__class__.__name__ + '()'
+ return repr_str
+
+
+def bbox_overlaps(bboxes1, bboxes2, mode='iou', is_aligned=False, eps=1e-6):
+ """Calculate overlap between two set of bboxes.
+
+ If ``is_aligned `` is ``False``, then calculate the overlaps between each
+ bbox of bboxes1 and bboxes2, otherwise the overlaps between each aligned
+ pair of bboxes1 and bboxes2.
+
+ Args:
+ bboxes1 (Tensor): shape (B, m, 4) in <x1, y1, x2, y2> format or empty.
+ bboxes2 (Tensor): shape (B, n, 4) in <x1, y1, x2, y2> format or empty.
+ B indicates the batch dim, in shape (B1, B2, ..., Bn).
+ If ``is_aligned`` is ``True``, then m and n must be equal.
+ mode (str): "iou" (intersection over union), "iof" (intersection over
+ foreground) or "giou" (generalized intersection over union).
+ Default "iou".
+ is_aligned (bool, optional): If True, then m and n must be equal.
+ Default False.
+ eps (float, optional): A value added to the denominator for numerical
+ stability. Default 1e-6.
+
+ Returns:
+ Tensor: shape (m, n) if ``is_aligned `` is False else shape (m,)
+
+ Example:
+ >>> bboxes1 = torch.FloatTensor([
+ >>> [0, 0, 10, 10],
+ >>> [10, 10, 20, 20],
+ >>> [32, 32, 38, 42],
+ >>> ])
+ >>> bboxes2 = torch.FloatTensor([
+ >>> [0, 0, 10, 20],
+ >>> [0, 10, 10, 19],
+ >>> [10, 10, 20, 20],
+ >>> ])
+ >>> overlaps = bbox_overlaps(bboxes1, bboxes2)
+ >>> assert overlaps.shape == (3, 3)
+ >>> overlaps = bbox_overlaps(bboxes1, bboxes2, is_aligned=True)
+ >>> assert overlaps.shape == (3, )
+
+ Example:
+ >>> empty = torch.empty(0, 4)
+ >>> nonempty = torch.FloatTensor([[0, 0, 10, 9]])
+ >>> assert tuple(bbox_overlaps(empty, nonempty).shape) == (0, 1)
+ >>> assert tuple(bbox_overlaps(nonempty, empty).shape) == (1, 0)
+ >>> assert tuple(bbox_overlaps(empty, empty).shape) == (0, 0)
+ """
+
+ assert mode in ['iou', 'iof', 'giou'], f'Unsupported mode {mode}'
+ # Either the boxes are empty or the length of boxes' last dimension is 4
+ assert (bboxes1.size(-1) == 4 or bboxes1.size(0) == 0)
+ assert (bboxes2.size(-1) == 4 or bboxes2.size(0) == 0)
+
+ # Batch dim must be the same
+ # Batch dim: (B1, B2, ... Bn)
+ assert bboxes1.shape[:-2] == bboxes2.shape[:-2]
+ batch_shape = bboxes1.shape[:-2]
+
+ rows = bboxes1.size(-2)
+ cols = bboxes2.size(-2)
+ if is_aligned:
+ assert rows == cols
+
+ if rows * cols == 0:
+ if is_aligned:
+ return bboxes1.new(batch_shape + (rows, ))
+ else:
+ return bboxes1.new(batch_shape + (rows, cols))
+
+ area1 = (bboxes1[..., 2] - bboxes1[..., 0]) * (
+ bboxes1[..., 3] - bboxes1[..., 1])
+ area2 = (bboxes2[..., 2] - bboxes2[..., 0]) * (
+ bboxes2[..., 3] - bboxes2[..., 1])
+
+ if is_aligned:
+ lt = torch.max(bboxes1[..., :2], bboxes2[..., :2]) # [B, rows, 2]
+ rb = torch.min(bboxes1[..., 2:], bboxes2[..., 2:]) # [B, rows, 2]
+
+ wh = (rb - lt).clamp(min=0) # [B, rows, 2]
+ overlap = wh[..., 0] * wh[..., 1]
+
+ if mode in ['iou', 'giou']:
+ union = area1 + area2 - overlap
+ else:
+ union = area1
+ if mode == 'giou':
+ enclosed_lt = torch.min(bboxes1[..., :2], bboxes2[..., :2])
+ enclosed_rb = torch.max(bboxes1[..., 2:], bboxes2[..., 2:])
+ else:
+ lt = torch.max(bboxes1[..., :, None, :2],
+ bboxes2[..., None, :, :2]) # [B, rows, cols, 2]
+ rb = torch.min(bboxes1[..., :, None, 2:],
+ bboxes2[..., None, :, 2:]) # [B, rows, cols, 2]
+
+ wh = (rb - lt).clamp(min=0) # [B, rows, cols, 2]
+ overlap = wh[..., 0] * wh[..., 1]
+
+ if mode in ['iou', 'giou']:
+ union = area1[..., None] + area2[..., None, :] - overlap
+ else:
+ union = area1[..., None]
+ if mode == 'giou':
+ enclosed_lt = torch.min(bboxes1[..., :, None, :2],
+ bboxes2[..., None, :, :2])
+ enclosed_rb = torch.max(bboxes1[..., :, None, 2:],
+ bboxes2[..., None, :, 2:])
+
+ eps = union.new_tensor([eps])
+ union = torch.max(union, eps)
+ ious = overlap / union
+ if mode in ['iou', 'iof']:
+ return ious
+ # calculate gious
+ enclose_wh = (enclosed_rb - enclosed_lt).clamp(min=0)
+ enclose_area = enclose_wh[..., 0] * enclose_wh[..., 1]
+ enclose_area = torch.max(enclose_area, eps)
+ gious = ious - (enclose_area - union) / enclose_area
+ return gious
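For reference, the IoU/GIoU arithmetic in `bbox_overlaps` reduces to the following for a single pair of boxes. This is a standalone, plain-PyTorch sketch with toy coordinates, not a call into the registry class:

```python
import torch

b1 = torch.tensor([0., 0., 10., 10.])
b2 = torch.tensor([5., 5., 15., 15.])

# Intersection, union and the smallest enclosing box, as in bbox_overlaps above.
lt = torch.max(b1[:2], b2[:2])
rb = torch.min(b1[2:], b2[2:])
wh = (rb - lt).clamp(min=0)
inter = wh[0] * wh[1]                                   # 25
area1 = (b1[2] - b1[0]) * (b1[3] - b1[1])               # 100
area2 = (b2[2] - b2[0]) * (b2[3] - b2[1])               # 100
union = area1 + area2 - inter                           # 175

iou = inter / union                                     # ~0.1429
enclose_wh = torch.max(b1[2:], b2[2:]) - torch.min(b1[:2], b2[:2])
enclose_area = enclose_wh[0] * enclose_wh[1]            # 15 * 15 = 225
giou = iou - (enclose_area - union) / enclose_area      # ~-0.0794
print(float(iou), float(giou))
```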
diff --git a/annotator/uniformer/mmdet_null/core/bbox/match_costs/__init__.py b/annotator/uniformer/mmdet_null/core/bbox/match_costs/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..add5e0d394034d89b2d47c314ff1938294deb6ea
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/core/bbox/match_costs/__init__.py
@@ -0,0 +1,7 @@
+from .builder import build_match_cost
+from .match_cost import BBoxL1Cost, ClassificationCost, FocalLossCost, IoUCost
+
+__all__ = [
+ 'build_match_cost', 'ClassificationCost', 'BBoxL1Cost', 'IoUCost',
+ 'FocalLossCost'
+]
diff --git a/annotator/uniformer/mmdet_null/core/bbox/match_costs/builder.py b/annotator/uniformer/mmdet_null/core/bbox/match_costs/builder.py
new file mode 100644
index 0000000000000000000000000000000000000000..92f0869ed4993167c504175d14315f7e9e8411f1
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/core/bbox/match_costs/builder.py
@@ -0,0 +1,8 @@
+from annotator.uniformer.mmcv.utils import Registry, build_from_cfg
+
+MATCH_COST = Registry('Match Cost')
+
+
+def build_match_cost(cfg, default_args=None):
+ """Builder of IoU calculator."""
+ return build_from_cfg(cfg, MATCH_COST, default_args)
diff --git a/annotator/uniformer/mmdet_null/core/bbox/match_costs/match_cost.py b/annotator/uniformer/mmdet_null/core/bbox/match_costs/match_cost.py
new file mode 100644
index 0000000000000000000000000000000000000000..09599084a96f6add1e22612dcebd38967e5f318e
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/core/bbox/match_costs/match_cost.py
@@ -0,0 +1,184 @@
+import torch
+
+from annotator.uniformer.mmdet.core.bbox.iou_calculators import bbox_overlaps
+from annotator.uniformer.mmdet.core.bbox.transforms import bbox_cxcywh_to_xyxy, bbox_xyxy_to_cxcywh
+from .builder import MATCH_COST
+
+
+@MATCH_COST.register_module()
+class BBoxL1Cost(object):
+ """BBoxL1Cost.
+
+ Args:
+ weight (int | float, optional): loss_weight
+ box_format (str, optional): 'xyxy' for DETR, 'xywh' for Sparse_RCNN
+
+ Examples:
+ >>> from mmdet.core.bbox.match_costs.match_cost import BBoxL1Cost
+ >>> import torch
+ >>> self = BBoxL1Cost()
+ >>> bbox_pred = torch.rand(1, 4)
+ >>> gt_bboxes= torch.FloatTensor([[0, 0, 2, 4], [1, 2, 3, 4]])
+ >>> factor = torch.tensor([10, 8, 10, 8])
+ >>> self(bbox_pred, gt_bboxes, factor)
+ tensor([[1.6172, 1.6422]])
+ """
+
+ def __init__(self, weight=1., box_format='xyxy'):
+ self.weight = weight
+ assert box_format in ['xyxy', 'xywh']
+ self.box_format = box_format
+
+ def __call__(self, bbox_pred, gt_bboxes):
+ """
+ Args:
+ bbox_pred (Tensor): Predicted boxes with normalized coordinates
+ (cx, cy, w, h), which are all in range [0, 1]. Shape
+ [num_query, 4].
+ gt_bboxes (Tensor): Ground truth boxes with normalized
+ coordinates (x1, y1, x2, y2). Shape [num_gt, 4].
+
+ Returns:
+ torch.Tensor: bbox_cost value with weight
+ """
+ if self.box_format == 'xywh':
+ gt_bboxes = bbox_xyxy_to_cxcywh(gt_bboxes)
+ elif self.box_format == 'xyxy':
+ bbox_pred = bbox_cxcywh_to_xyxy(bbox_pred)
+ bbox_cost = torch.cdist(bbox_pred, gt_bboxes, p=1)
+ return bbox_cost * self.weight
+
+
+@MATCH_COST.register_module()
+class FocalLossCost(object):
+ """FocalLossCost.
+
+ Args:
+ weight (int | float, optional): loss_weight
+ alpha (int | float, optional): focal_loss alpha
+ gamma (int | float, optional): focal_loss gamma
+ eps (float, optional): default 1e-12
+
+ Examples:
+ >>> from mmdet.core.bbox.match_costs.match_cost import FocalLossCost
+ >>> import torch
+ >>> self = FocalLossCost()
+ >>> cls_pred = torch.rand(4, 3)
+ >>> gt_labels = torch.tensor([0, 1, 2])
+ >>> factor = torch.tensor([10, 8, 10, 8])
+ >>> self(cls_pred, gt_labels)
+ tensor([[-0.3236, -0.3364, -0.2699],
+ [-0.3439, -0.3209, -0.4807],
+ [-0.4099, -0.3795, -0.2929],
+ [-0.1950, -0.1207, -0.2626]])
+ """
+
+ def __init__(self, weight=1., alpha=0.25, gamma=2, eps=1e-12):
+ self.weight = weight
+ self.alpha = alpha
+ self.gamma = gamma
+ self.eps = eps
+
+ def __call__(self, cls_pred, gt_labels):
+ """
+ Args:
+ cls_pred (Tensor): Predicted classification logits, shape
+ [num_query, num_class].
+ gt_labels (Tensor): Label of `gt_bboxes`, shape (num_gt,).
+
+ Returns:
+ torch.Tensor: cls_cost value with weight
+ """
+ cls_pred = cls_pred.sigmoid()
+ neg_cost = -(1 - cls_pred + self.eps).log() * (
+ 1 - self.alpha) * cls_pred.pow(self.gamma)
+ pos_cost = -(cls_pred + self.eps).log() * self.alpha * (
+ 1 - cls_pred).pow(self.gamma)
+ cls_cost = pos_cost[:, gt_labels] - neg_cost[:, gt_labels]
+ return cls_cost * self.weight
+
+
+@MATCH_COST.register_module()
+class ClassificationCost(object):
+ """ClsSoftmaxCost.
+
+ Args:
+ weight (int | float, optional): loss_weight
+
+ Examples:
+ >>> from mmdet.core.bbox.match_costs.match_cost import \
+ ... ClassificationCost
+ >>> import torch
+ >>> self = ClassificationCost()
+ >>> cls_pred = torch.rand(4, 3)
+ >>> gt_labels = torch.tensor([0, 1, 2])
+ >>> factor = torch.tensor([10, 8, 10, 8])
+ >>> self(cls_pred, gt_labels)
+ tensor([[-0.3430, -0.3525, -0.3045],
+ [-0.3077, -0.2931, -0.3992],
+ [-0.3664, -0.3455, -0.2881],
+ [-0.3343, -0.2701, -0.3956]])
+ """
+
+ def __init__(self, weight=1.):
+ self.weight = weight
+
+ def __call__(self, cls_pred, gt_labels):
+ """
+ Args:
+ cls_pred (Tensor): Predicted classification logits, shape
+ [num_query, num_class].
+ gt_labels (Tensor): Label of `gt_bboxes`, shape (num_gt,).
+
+ Returns:
+ torch.Tensor: cls_cost value with weight
+ """
+ # Following the official DETR repo: in contrast to the loss, where
+ # NLL is used, we approximate it by 1 - cls_score[gt_label].
+ # The 1 is a constant that doesn't change the matching,
+ # so it can be omitted.
+ cls_score = cls_pred.softmax(-1)
+ cls_cost = -cls_score[:, gt_labels]
+ return cls_cost * self.weight
+
+
+@MATCH_COST.register_module()
+class IoUCost(object):
+ """IoUCost.
+
+ Args:
+ iou_mode (str, optional): iou mode such as 'iou' | 'giou'
+ weight (int | float, optional): loss weight
+
+ Examples:
+ >>> from mmdet.core.bbox.match_costs.match_cost import IoUCost
+ >>> import torch
+ >>> self = IoUCost()
+ >>> bboxes = torch.FloatTensor([[1,1, 2, 2], [2, 2, 3, 4]])
+ >>> gt_bboxes = torch.FloatTensor([[0, 0, 2, 4], [1, 2, 3, 4]])
+ >>> self(bboxes, gt_bboxes)
+ tensor([[-0.1250, 0.1667],
+ [ 0.1667, -0.5000]])
+ """
+
+ def __init__(self, iou_mode='giou', weight=1.):
+ self.weight = weight
+ self.iou_mode = iou_mode
+
+ def __call__(self, bboxes, gt_bboxes):
+ """
+ Args:
+ bboxes (Tensor): Predicted boxes with unnormalized coordinates
+ (x1, y1, x2, y2). Shape [num_query, 4].
+ gt_bboxes (Tensor): Ground truth boxes with unnormalized
+ coordinates (x1, y1, x2, y2). Shape [num_gt, 4].
+
+ Returns:
+ torch.Tensor: iou_cost value with weight
+ """
+ # overlaps: [num_bboxes, num_gt]
+ overlaps = bbox_overlaps(
+ bboxes, gt_bboxes, mode=self.iou_mode, is_aligned=False)
+ # The 1 is a constant that doesn't change the matching, so omitted.
+ iou_cost = -overlaps
+ return iou_cost * self.weight
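These cost classes are typically summed into a single matrix and handed to a Hungarian solver by a DETR-style assigner. The sketch below shows that flow under a few assumptions: SciPy is available, the weights 1.0 and 5.0 are purely illustrative, and the classification and L1 terms are re-derived inline rather than built from the registry:

```python
import torch
from scipy.optimize import linear_sum_assignment

num_query, num_gt, num_class = 5, 2, 3

cls_pred = torch.rand(num_query, num_class)
bbox_pred = torch.rand(num_query, 4)          # normalised cx, cy, w, h
gt_labels = torch.tensor([0, 2])
gt_bboxes = torch.rand(num_gt, 4)             # kept in cx, cy, w, h here for simplicity

# Classification term: minus the softmax score of the gt class (cf. ClassificationCost).
cls_cost = -cls_pred.softmax(-1)[:, gt_labels]
# L1 box term (cf. BBoxL1Cost, with both sides in the same box format).
reg_cost = torch.cdist(bbox_pred, gt_bboxes, p=1)

# Weighted sum and one-to-one assignment over the (num_query, num_gt) cost matrix.
cost = 1.0 * cls_cost + 5.0 * reg_cost
row_ind, col_ind = linear_sum_assignment(cost.numpy())
print(list(zip(row_ind.tolist(), col_ind.tolist())))   # query index -> gt index pairs
```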
diff --git a/annotator/uniformer/mmdet_null/core/bbox/samplers/__init__.py b/annotator/uniformer/mmdet_null/core/bbox/samplers/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..0b06303fe1000e11c5486c40c70606a34a5208e3
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/core/bbox/samplers/__init__.py
@@ -0,0 +1,15 @@
+from .base_sampler import BaseSampler
+from .combined_sampler import CombinedSampler
+from .instance_balanced_pos_sampler import InstanceBalancedPosSampler
+from .iou_balanced_neg_sampler import IoUBalancedNegSampler
+from .ohem_sampler import OHEMSampler
+from .pseudo_sampler import PseudoSampler
+from .random_sampler import RandomSampler
+from .sampling_result import SamplingResult
+from .score_hlr_sampler import ScoreHLRSampler
+
+__all__ = [
+ 'BaseSampler', 'PseudoSampler', 'RandomSampler',
+ 'InstanceBalancedPosSampler', 'IoUBalancedNegSampler', 'CombinedSampler',
+ 'OHEMSampler', 'SamplingResult', 'ScoreHLRSampler'
+]
diff --git a/annotator/uniformer/mmdet_null/core/bbox/samplers/base_sampler.py b/annotator/uniformer/mmdet_null/core/bbox/samplers/base_sampler.py
new file mode 100644
index 0000000000000000000000000000000000000000..9ea35def115b49dfdad8a1f7c040ef3cd983b0d1
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/core/bbox/samplers/base_sampler.py
@@ -0,0 +1,101 @@
+from abc import ABCMeta, abstractmethod
+
+import torch
+
+from .sampling_result import SamplingResult
+
+
+class BaseSampler(metaclass=ABCMeta):
+ """Base class of samplers."""
+
+ def __init__(self,
+ num,
+ pos_fraction,
+ neg_pos_ub=-1,
+ add_gt_as_proposals=True,
+ **kwargs):
+ self.num = num
+ self.pos_fraction = pos_fraction
+ self.neg_pos_ub = neg_pos_ub
+ self.add_gt_as_proposals = add_gt_as_proposals
+ self.pos_sampler = self
+ self.neg_sampler = self
+
+ @abstractmethod
+ def _sample_pos(self, assign_result, num_expected, **kwargs):
+ """Sample positive samples."""
+ pass
+
+ @abstractmethod
+ def _sample_neg(self, assign_result, num_expected, **kwargs):
+ """Sample negative samples."""
+ pass
+
+ def sample(self,
+ assign_result,
+ bboxes,
+ gt_bboxes,
+ gt_labels=None,
+ **kwargs):
+ """Sample positive and negative bboxes.
+
+ This is a simple implementation of bbox sampling given candidates,
+ assigning results and ground truth bboxes.
+
+ Args:
+ assign_result (:obj:`AssignResult`): Bbox assigning results.
+ bboxes (Tensor): Boxes to be sampled from.
+ gt_bboxes (Tensor): Ground truth bboxes.
+ gt_labels (Tensor, optional): Class labels of ground truth bboxes.
+
+ Returns:
+ :obj:`SamplingResult`: Sampling result.
+
+ Example:
+ >>> from mmdet.core.bbox import RandomSampler
+ >>> from mmdet.core.bbox import AssignResult
+ >>> from mmdet.core.bbox.demodata import ensure_rng, random_boxes
+ >>> rng = ensure_rng(None)
+ >>> assign_result = AssignResult.random(rng=rng)
+ >>> bboxes = random_boxes(assign_result.num_preds, rng=rng)
+ >>> gt_bboxes = random_boxes(assign_result.num_gts, rng=rng)
+ >>> gt_labels = None
+ >>> self = RandomSampler(num=32, pos_fraction=0.5, neg_pos_ub=-1,
+ >>> add_gt_as_proposals=False)
+ >>> self = self.sample(assign_result, bboxes, gt_bboxes, gt_labels)
+ """
+ if len(bboxes.shape) < 2:
+ bboxes = bboxes[None, :]
+
+ bboxes = bboxes[:, :4]
+
+ gt_flags = bboxes.new_zeros((bboxes.shape[0], ), dtype=torch.uint8)
+ if self.add_gt_as_proposals and len(gt_bboxes) > 0:
+ if gt_labels is None:
+ raise ValueError(
+ 'gt_labels must be given when add_gt_as_proposals is True')
+ bboxes = torch.cat([gt_bboxes, bboxes], dim=0)
+ assign_result.add_gt_(gt_labels)
+ gt_ones = bboxes.new_ones(gt_bboxes.shape[0], dtype=torch.uint8)
+ gt_flags = torch.cat([gt_ones, gt_flags])
+
+ num_expected_pos = int(self.num * self.pos_fraction)
+ pos_inds = self.pos_sampler._sample_pos(
+ assign_result, num_expected_pos, bboxes=bboxes, **kwargs)
+ # We found that sampled indices have duplicated items occasionally.
+ # (may be a bug of PyTorch)
+ pos_inds = pos_inds.unique()
+ num_sampled_pos = pos_inds.numel()
+ num_expected_neg = self.num - num_sampled_pos
+ if self.neg_pos_ub >= 0:
+ _pos = max(1, num_sampled_pos)
+ neg_upper_bound = int(self.neg_pos_ub * _pos)
+ if num_expected_neg > neg_upper_bound:
+ num_expected_neg = neg_upper_bound
+ neg_inds = self.neg_sampler._sample_neg(
+ assign_result, num_expected_neg, bboxes=bboxes, **kwargs)
+ neg_inds = neg_inds.unique()
+
+ sampling_result = SamplingResult(pos_inds, neg_inds, bboxes, gt_bboxes,
+ assign_result, gt_flags)
+ return sampling_result
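The pos/neg quota logic in `sample()` is easiest to follow with concrete numbers. A small standalone sketch (the values are made up, not taken from any config) of how `num`, `pos_fraction` and `neg_pos_ub` interact:

```python
# Standalone illustration of the quota logic in BaseSampler.sample.
num = 512            # total RoIs to sample per image
pos_fraction = 0.25  # at most a quarter of them positive
neg_pos_ub = 3       # at most 3 negatives per sampled positive (-1 would disable this cap)

num_expected_pos = int(num * pos_fraction)       # 128 positives requested
num_sampled_pos = 20                             # suppose only 20 positives actually exist
num_expected_neg = num - num_sampled_pos         # 492 negatives would fill the batch

if neg_pos_ub >= 0:
    neg_upper_bound = neg_pos_ub * max(1, num_sampled_pos)   # 60
    num_expected_neg = min(num_expected_neg, neg_upper_bound)

print(num_expected_pos, num_sampled_pos, num_expected_neg)   # 128 20 60
```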
diff --git a/annotator/uniformer/mmdet_null/core/bbox/samplers/combined_sampler.py b/annotator/uniformer/mmdet_null/core/bbox/samplers/combined_sampler.py
new file mode 100644
index 0000000000000000000000000000000000000000..564729f0895b1863d94c479a67202438af45f996
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/core/bbox/samplers/combined_sampler.py
@@ -0,0 +1,20 @@
+from ..builder import BBOX_SAMPLERS, build_sampler
+from .base_sampler import BaseSampler
+
+
+@BBOX_SAMPLERS.register_module()
+class CombinedSampler(BaseSampler):
+ """A sampler that combines positive sampler and negative sampler."""
+
+ def __init__(self, pos_sampler, neg_sampler, **kwargs):
+ super(CombinedSampler, self).__init__(**kwargs)
+ self.pos_sampler = build_sampler(pos_sampler, **kwargs)
+ self.neg_sampler = build_sampler(neg_sampler, **kwargs)
+
+ def _sample_pos(self, **kwargs):
+ """Sample positive samples."""
+ raise NotImplementedError
+
+ def _sample_neg(self, **kwargs):
+ """Sample negative samples."""
+ raise NotImplementedError
diff --git a/annotator/uniformer/mmdet_null/core/bbox/samplers/instance_balanced_pos_sampler.py b/annotator/uniformer/mmdet_null/core/bbox/samplers/instance_balanced_pos_sampler.py
new file mode 100644
index 0000000000000000000000000000000000000000..c735298487e14e4a0ec42913f25673cccb98a8a0
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/core/bbox/samplers/instance_balanced_pos_sampler.py
@@ -0,0 +1,55 @@
+import numpy as np
+import torch
+
+from ..builder import BBOX_SAMPLERS
+from .random_sampler import RandomSampler
+
+
+@BBOX_SAMPLERS.register_module()
+class InstanceBalancedPosSampler(RandomSampler):
+ """Instance balanced sampler that samples equal number of positive samples
+ for each instance."""
+
+ def _sample_pos(self, assign_result, num_expected, **kwargs):
+ """Sample positive boxes.
+
+ Args:
+ assign_result (:obj:`AssignResult`): The assigned results of boxes.
+ num_expected (int): The number of expected positive samples
+
+ Returns:
+ Tensor or ndarray: sampled indices.
+ """
+ pos_inds = torch.nonzero(assign_result.gt_inds > 0, as_tuple=False)
+ if pos_inds.numel() != 0:
+ pos_inds = pos_inds.squeeze(1)
+ if pos_inds.numel() <= num_expected:
+ return pos_inds
+ else:
+ unique_gt_inds = assign_result.gt_inds[pos_inds].unique()
+ num_gts = len(unique_gt_inds)
+ num_per_gt = int(round(num_expected / float(num_gts)) + 1)
+ sampled_inds = []
+ for i in unique_gt_inds:
+ inds = torch.nonzero(
+ assign_result.gt_inds == i.item(), as_tuple=False)
+ if inds.numel() != 0:
+ inds = inds.squeeze(1)
+ else:
+ continue
+ if len(inds) > num_per_gt:
+ inds = self.random_choice(inds, num_per_gt)
+ sampled_inds.append(inds)
+ sampled_inds = torch.cat(sampled_inds)
+ if len(sampled_inds) < num_expected:
+ num_extra = num_expected - len(sampled_inds)
+ extra_inds = np.array(
+ list(set(pos_inds.cpu()) - set(sampled_inds.cpu())))
+ if len(extra_inds) > num_extra:
+ extra_inds = self.random_choice(extra_inds, num_extra)
+ extra_inds = torch.from_numpy(extra_inds).to(
+ assign_result.gt_inds.device).long()
+ sampled_inds = torch.cat([sampled_inds, extra_inds])
+ elif len(sampled_inds) > num_expected:
+ sampled_inds = self.random_choice(sampled_inds, num_expected)
+ return sampled_inds
diff --git a/annotator/uniformer/mmdet_null/core/bbox/samplers/iou_balanced_neg_sampler.py b/annotator/uniformer/mmdet_null/core/bbox/samplers/iou_balanced_neg_sampler.py
new file mode 100644
index 0000000000000000000000000000000000000000..f275e430d1b57c4d9df57387b8f3ae6f0ff68cf1
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/core/bbox/samplers/iou_balanced_neg_sampler.py
@@ -0,0 +1,157 @@
+import numpy as np
+import torch
+
+from ..builder import BBOX_SAMPLERS
+from .random_sampler import RandomSampler
+
+
+@BBOX_SAMPLERS.register_module()
+class IoUBalancedNegSampler(RandomSampler):
+ """IoU Balanced Sampling.
+
+ arXiv: https://arxiv.org/pdf/1904.02701.pdf (CVPR 2019)
+
+    Sampling proposals according to their IoU. `floor_fraction` of the required
+    RoIs are sampled randomly from proposals whose IoU with the ground truth is
+    lower than `floor_thr`. The rest are sampled from proposals whose IoU is
+    higher than `floor_thr`; these proposals are drawn evenly from `num_bins`
+    bins that split the IoU range above `floor_thr` into equal intervals.
+
+ Args:
+ num (int): number of proposals.
+ pos_fraction (float): fraction of positive proposals.
+        floor_thr (float): threshold (minimum) IoU for IoU balanced sampling;
+            set to -1 to apply IoU balanced sampling to all negative samples.
+ floor_fraction (float): sampling fraction of proposals under floor_thr.
+ num_bins (int): number of bins in IoU balanced sampling.
+ """
+
+ def __init__(self,
+ num,
+ pos_fraction,
+ floor_thr=-1,
+ floor_fraction=0,
+ num_bins=3,
+ **kwargs):
+ super(IoUBalancedNegSampler, self).__init__(num, pos_fraction,
+ **kwargs)
+ assert floor_thr >= 0 or floor_thr == -1
+ assert 0 <= floor_fraction <= 1
+ assert num_bins >= 1
+
+ self.floor_thr = floor_thr
+ self.floor_fraction = floor_fraction
+ self.num_bins = num_bins
+
+ def sample_via_interval(self, max_overlaps, full_set, num_expected):
+ """Sample according to the iou interval.
+
+ Args:
+ max_overlaps (torch.Tensor): IoU between bounding boxes and ground
+ truth boxes.
+            full_set (set(int)): A full set of indices of boxes.
+            num_expected (int): Number of expected samples.
+
+ Returns:
+ np.ndarray: Indices of samples
+ """
+ max_iou = max_overlaps.max()
+ iou_interval = (max_iou - self.floor_thr) / self.num_bins
+ per_num_expected = int(num_expected / self.num_bins)
+
+ sampled_inds = []
+ for i in range(self.num_bins):
+ start_iou = self.floor_thr + i * iou_interval
+ end_iou = self.floor_thr + (i + 1) * iou_interval
+ tmp_set = set(
+ np.where(
+ np.logical_and(max_overlaps >= start_iou,
+ max_overlaps < end_iou))[0])
+ tmp_inds = list(tmp_set & full_set)
+ if len(tmp_inds) > per_num_expected:
+ tmp_sampled_set = self.random_choice(tmp_inds,
+ per_num_expected)
+ else:
+                tmp_sampled_set = np.array(tmp_inds, dtype=int)  # np.int was removed in NumPy >= 1.24
+ sampled_inds.append(tmp_sampled_set)
+
+ sampled_inds = np.concatenate(sampled_inds)
+ if len(sampled_inds) < num_expected:
+ num_extra = num_expected - len(sampled_inds)
+ extra_inds = np.array(list(full_set - set(sampled_inds)))
+ if len(extra_inds) > num_extra:
+ extra_inds = self.random_choice(extra_inds, num_extra)
+ sampled_inds = np.concatenate([sampled_inds, extra_inds])
+
+ return sampled_inds
+
+ def _sample_neg(self, assign_result, num_expected, **kwargs):
+ """Sample negative boxes.
+
+ Args:
+ assign_result (:obj:`AssignResult`): The assigned results of boxes.
+ num_expected (int): The number of expected negative samples
+
+ Returns:
+ Tensor or ndarray: sampled indices.
+ """
+ neg_inds = torch.nonzero(assign_result.gt_inds == 0, as_tuple=False)
+ if neg_inds.numel() != 0:
+ neg_inds = neg_inds.squeeze(1)
+ if len(neg_inds) <= num_expected:
+ return neg_inds
+ else:
+ max_overlaps = assign_result.max_overlaps.cpu().numpy()
+ # balance sampling for negative samples
+ neg_set = set(neg_inds.cpu().numpy())
+
+ if self.floor_thr > 0:
+ floor_set = set(
+ np.where(
+ np.logical_and(max_overlaps >= 0,
+ max_overlaps < self.floor_thr))[0])
+ iou_sampling_set = set(
+ np.where(max_overlaps >= self.floor_thr)[0])
+ elif self.floor_thr == 0:
+ floor_set = set(np.where(max_overlaps == 0)[0])
+ iou_sampling_set = set(
+ np.where(max_overlaps > self.floor_thr)[0])
+ else:
+ floor_set = set()
+ iou_sampling_set = set(
+ np.where(max_overlaps > self.floor_thr)[0])
+ # for sampling interval calculation
+ self.floor_thr = 0
+
+ floor_neg_inds = list(floor_set & neg_set)
+ iou_sampling_neg_inds = list(iou_sampling_set & neg_set)
+ num_expected_iou_sampling = int(num_expected *
+ (1 - self.floor_fraction))
+ if len(iou_sampling_neg_inds) > num_expected_iou_sampling:
+ if self.num_bins >= 2:
+ iou_sampled_inds = self.sample_via_interval(
+ max_overlaps, set(iou_sampling_neg_inds),
+ num_expected_iou_sampling)
+ else:
+ iou_sampled_inds = self.random_choice(
+ iou_sampling_neg_inds, num_expected_iou_sampling)
+ else:
+ iou_sampled_inds = np.array(
+                    iou_sampling_neg_inds, dtype=int)
+ num_expected_floor = num_expected - len(iou_sampled_inds)
+ if len(floor_neg_inds) > num_expected_floor:
+ sampled_floor_inds = self.random_choice(
+ floor_neg_inds, num_expected_floor)
+ else:
+                sampled_floor_inds = np.array(floor_neg_inds, dtype=int)
+ sampled_inds = np.concatenate(
+ (sampled_floor_inds, iou_sampled_inds))
+ if len(sampled_inds) < num_expected:
+ num_extra = num_expected - len(sampled_inds)
+ extra_inds = np.array(list(neg_set - set(sampled_inds)))
+ if len(extra_inds) > num_extra:
+ extra_inds = self.random_choice(extra_inds, num_extra)
+ sampled_inds = np.concatenate((sampled_inds, extra_inds))
+ sampled_inds = torch.from_numpy(sampled_inds).long().to(
+ assign_result.gt_inds.device)
+ return sampled_inds
diff --git a/annotator/uniformer/mmdet_null/core/bbox/samplers/ohem_sampler.py b/annotator/uniformer/mmdet_null/core/bbox/samplers/ohem_sampler.py
new file mode 100644
index 0000000000000000000000000000000000000000..8b99f60ef0176f1b7a56665fb0f59272f65b84cd
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/core/bbox/samplers/ohem_sampler.py
@@ -0,0 +1,107 @@
+import torch
+
+from ..builder import BBOX_SAMPLERS
+from ..transforms import bbox2roi
+from .base_sampler import BaseSampler
+
+
+@BBOX_SAMPLERS.register_module()
+class OHEMSampler(BaseSampler):
+ r"""Online Hard Example Mining Sampler described in `Training Region-based
+ Object Detectors with Online Hard Example Mining
+    <https://arxiv.org/abs/1604.03540>`_.
+ """
+
+ def __init__(self,
+ num,
+ pos_fraction,
+ context,
+ neg_pos_ub=-1,
+ add_gt_as_proposals=True,
+ **kwargs):
+ super(OHEMSampler, self).__init__(num, pos_fraction, neg_pos_ub,
+ add_gt_as_proposals)
+ self.context = context
+ if not hasattr(self.context, 'num_stages'):
+ self.bbox_head = self.context.bbox_head
+ else:
+ self.bbox_head = self.context.bbox_head[self.context.current_stage]
+
+ def hard_mining(self, inds, num_expected, bboxes, labels, feats):
+ with torch.no_grad():
+ rois = bbox2roi([bboxes])
+ if not hasattr(self.context, 'num_stages'):
+ bbox_results = self.context._bbox_forward(feats, rois)
+ else:
+ bbox_results = self.context._bbox_forward(
+ self.context.current_stage, feats, rois)
+ cls_score = bbox_results['cls_score']
+ loss = self.bbox_head.loss(
+ cls_score=cls_score,
+ bbox_pred=None,
+ rois=rois,
+ labels=labels,
+ label_weights=cls_score.new_ones(cls_score.size(0)),
+ bbox_targets=None,
+ bbox_weights=None,
+ reduction_override='none')['loss_cls']
+ _, topk_loss_inds = loss.topk(num_expected)
+ return inds[topk_loss_inds]
+
+ def _sample_pos(self,
+ assign_result,
+ num_expected,
+ bboxes=None,
+ feats=None,
+ **kwargs):
+ """Sample positive boxes.
+
+ Args:
+ assign_result (:obj:`AssignResult`): Assigned results
+ num_expected (int): Number of expected positive samples
+ bboxes (torch.Tensor, optional): Boxes. Defaults to None.
+ feats (list[torch.Tensor], optional): Multi-level features.
+ Defaults to None.
+
+ Returns:
+ torch.Tensor: Indices of positive samples
+ """
+ # Sample some hard positive samples
+ pos_inds = torch.nonzero(assign_result.gt_inds > 0, as_tuple=False)
+ if pos_inds.numel() != 0:
+ pos_inds = pos_inds.squeeze(1)
+ if pos_inds.numel() <= num_expected:
+ return pos_inds
+ else:
+ return self.hard_mining(pos_inds, num_expected, bboxes[pos_inds],
+ assign_result.labels[pos_inds], feats)
+
+ def _sample_neg(self,
+ assign_result,
+ num_expected,
+ bboxes=None,
+ feats=None,
+ **kwargs):
+ """Sample negative boxes.
+
+ Args:
+ assign_result (:obj:`AssignResult`): Assigned results
+ num_expected (int): Number of expected negative samples
+ bboxes (torch.Tensor, optional): Boxes. Defaults to None.
+ feats (list[torch.Tensor], optional): Multi-level features.
+ Defaults to None.
+
+ Returns:
+ torch.Tensor: Indices of negative samples
+ """
+ # Sample some hard negative samples
+ neg_inds = torch.nonzero(assign_result.gt_inds == 0, as_tuple=False)
+ if neg_inds.numel() != 0:
+ neg_inds = neg_inds.squeeze(1)
+ if len(neg_inds) <= num_expected:
+ return neg_inds
+ else:
+ neg_labels = assign_result.labels.new_empty(
+ neg_inds.size(0)).fill_(self.bbox_head.num_classes)
+ return self.hard_mining(neg_inds, num_expected, bboxes[neg_inds],
+ neg_labels, feats)
diff --git a/annotator/uniformer/mmdet_null/core/bbox/samplers/pseudo_sampler.py b/annotator/uniformer/mmdet_null/core/bbox/samplers/pseudo_sampler.py
new file mode 100644
index 0000000000000000000000000000000000000000..2bd81abcdc62debc14772659d7a171f20bf33364
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/core/bbox/samplers/pseudo_sampler.py
@@ -0,0 +1,41 @@
+import torch
+
+from ..builder import BBOX_SAMPLERS
+from .base_sampler import BaseSampler
+from .sampling_result import SamplingResult
+
+
+@BBOX_SAMPLERS.register_module()
+class PseudoSampler(BaseSampler):
+ """A pseudo sampler that does not do sampling actually."""
+
+ def __init__(self, **kwargs):
+ pass
+
+ def _sample_pos(self, **kwargs):
+ """Sample positive samples."""
+ raise NotImplementedError
+
+ def _sample_neg(self, **kwargs):
+ """Sample negative samples."""
+ raise NotImplementedError
+
+ def sample(self, assign_result, bboxes, gt_bboxes, **kwargs):
+ """Directly returns the positive and negative indices of samples.
+
+ Args:
+ assign_result (:obj:`AssignResult`): Assigned results
+ bboxes (torch.Tensor): Bounding boxes
+ gt_bboxes (torch.Tensor): Ground truth boxes
+
+ Returns:
+ :obj:`SamplingResult`: sampler results
+ """
+ pos_inds = torch.nonzero(
+ assign_result.gt_inds > 0, as_tuple=False).squeeze(-1).unique()
+ neg_inds = torch.nonzero(
+ assign_result.gt_inds == 0, as_tuple=False).squeeze(-1).unique()
+ gt_flags = bboxes.new_zeros(bboxes.shape[0], dtype=torch.uint8)
+ sampling_result = SamplingResult(pos_inds, neg_inds, bboxes, gt_bboxes,
+ assign_result, gt_flags)
+ return sampling_result
diff --git a/annotator/uniformer/mmdet_null/core/bbox/samplers/random_sampler.py b/annotator/uniformer/mmdet_null/core/bbox/samplers/random_sampler.py
new file mode 100644
index 0000000000000000000000000000000000000000..f34b006e8bb0b55c74aa1c3b792f3664ada93162
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/core/bbox/samplers/random_sampler.py
@@ -0,0 +1,78 @@
+import torch
+
+from ..builder import BBOX_SAMPLERS
+from .base_sampler import BaseSampler
+
+
+@BBOX_SAMPLERS.register_module()
+class RandomSampler(BaseSampler):
+ """Random sampler.
+
+ Args:
+ num (int): Number of samples
+ pos_fraction (float): Fraction of positive samples
+        neg_pos_ub (int, optional): Upper bound number of negative and
+ positive samples. Defaults to -1.
+ add_gt_as_proposals (bool, optional): Whether to add ground truth
+ boxes as proposals. Defaults to True.
+ """
+
+ def __init__(self,
+ num,
+ pos_fraction,
+ neg_pos_ub=-1,
+ add_gt_as_proposals=True,
+ **kwargs):
+ from mmdet.core.bbox import demodata
+ super(RandomSampler, self).__init__(num, pos_fraction, neg_pos_ub,
+ add_gt_as_proposals)
+ self.rng = demodata.ensure_rng(kwargs.get('rng', None))
+
+ def random_choice(self, gallery, num):
+ """Random select some elements from the gallery.
+
+ If `gallery` is a Tensor, the returned indices will be a Tensor;
+ If `gallery` is a ndarray or list, the returned indices will be a
+ ndarray.
+
+ Args:
+ gallery (Tensor | ndarray | list): indices pool.
+ num (int): expected sample num.
+
+ Returns:
+ Tensor or ndarray: sampled indices.
+ """
+ assert len(gallery) >= num
+
+ is_tensor = isinstance(gallery, torch.Tensor)
+ if not is_tensor:
+ if torch.cuda.is_available():
+ device = torch.cuda.current_device()
+ else:
+ device = 'cpu'
+ gallery = torch.tensor(gallery, dtype=torch.long, device=device)
+ perm = torch.randperm(gallery.numel(), device=gallery.device)[:num]
+ rand_inds = gallery[perm]
+ if not is_tensor:
+ rand_inds = rand_inds.cpu().numpy()
+ return rand_inds
+
+ def _sample_pos(self, assign_result, num_expected, **kwargs):
+ """Randomly sample some positive samples."""
+ pos_inds = torch.nonzero(assign_result.gt_inds > 0, as_tuple=False)
+ if pos_inds.numel() != 0:
+ pos_inds = pos_inds.squeeze(1)
+ if pos_inds.numel() <= num_expected:
+ return pos_inds
+ else:
+ return self.random_choice(pos_inds, num_expected)
+
+ def _sample_neg(self, assign_result, num_expected, **kwargs):
+ """Randomly sample some negative samples."""
+ neg_inds = torch.nonzero(assign_result.gt_inds == 0, as_tuple=False)
+ if neg_inds.numel() != 0:
+ neg_inds = neg_inds.squeeze(1)
+ if len(neg_inds) <= num_expected:
+ return neg_inds
+ else:
+ return self.random_choice(neg_inds, num_expected)
diff --git a/annotator/uniformer/mmdet_null/core/bbox/samplers/sampling_result.py b/annotator/uniformer/mmdet_null/core/bbox/samplers/sampling_result.py
new file mode 100644
index 0000000000000000000000000000000000000000..8b2dde44fdae62efc07da75f54463b41cadc3473
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/core/bbox/samplers/sampling_result.py
@@ -0,0 +1,152 @@
+import torch
+
+from annotator.uniformer.mmdet.utils import util_mixins
+
+
+class SamplingResult(util_mixins.NiceRepr):
+ """Bbox sampling result.
+
+ Example:
+ >>> # xdoctest: +IGNORE_WANT
+ >>> from mmdet.core.bbox.samplers.sampling_result import * # NOQA
+ >>> self = SamplingResult.random(rng=10)
+ >>> print(f'self = {self}')
+        self = <SamplingResult({...})>
+ """
+
+ def __init__(self, pos_inds, neg_inds, bboxes, gt_bboxes, assign_result,
+ gt_flags):
+ self.pos_inds = pos_inds
+ self.neg_inds = neg_inds
+ self.pos_bboxes = bboxes[pos_inds]
+ self.neg_bboxes = bboxes[neg_inds]
+ self.pos_is_gt = gt_flags[pos_inds]
+
+ self.num_gts = gt_bboxes.shape[0]
+ self.pos_assigned_gt_inds = assign_result.gt_inds[pos_inds] - 1
+
+ if gt_bboxes.numel() == 0:
+ # hack for index error case
+ assert self.pos_assigned_gt_inds.numel() == 0
+ self.pos_gt_bboxes = torch.empty_like(gt_bboxes).view(-1, 4)
+ else:
+ if len(gt_bboxes.shape) < 2:
+ gt_bboxes = gt_bboxes.view(-1, 4)
+
+ self.pos_gt_bboxes = gt_bboxes[self.pos_assigned_gt_inds, :]
+
+ if assign_result.labels is not None:
+ self.pos_gt_labels = assign_result.labels[pos_inds]
+ else:
+ self.pos_gt_labels = None
+
+ @property
+ def bboxes(self):
+ """torch.Tensor: concatenated positive and negative boxes"""
+ return torch.cat([self.pos_bboxes, self.neg_bboxes])
+
+ def to(self, device):
+ """Change the device of the data inplace.
+
+ Example:
+ >>> self = SamplingResult.random()
+ >>> print(f'self = {self.to(None)}')
+ >>> # xdoctest: +REQUIRES(--gpu)
+ >>> print(f'self = {self.to(0)}')
+ """
+ _dict = self.__dict__
+ for key, value in _dict.items():
+ if isinstance(value, torch.Tensor):
+ _dict[key] = value.to(device)
+ return self
+
+ def __nice__(self):
+ data = self.info.copy()
+ data['pos_bboxes'] = data.pop('pos_bboxes').shape
+ data['neg_bboxes'] = data.pop('neg_bboxes').shape
+ parts = [f"'{k}': {v!r}" for k, v in sorted(data.items())]
+ body = ' ' + ',\n '.join(parts)
+ return '{\n' + body + '\n}'
+
+ @property
+ def info(self):
+ """Returns a dictionary of info about the object."""
+ return {
+ 'pos_inds': self.pos_inds,
+ 'neg_inds': self.neg_inds,
+ 'pos_bboxes': self.pos_bboxes,
+ 'neg_bboxes': self.neg_bboxes,
+ 'pos_is_gt': self.pos_is_gt,
+ 'num_gts': self.num_gts,
+ 'pos_assigned_gt_inds': self.pos_assigned_gt_inds,
+ }
+
+ @classmethod
+ def random(cls, rng=None, **kwargs):
+ """
+ Args:
+ rng (None | int | numpy.random.RandomState): seed or state.
+ kwargs (keyword arguments):
+ - num_preds: number of predicted boxes
+ - num_gts: number of true boxes
+              - p_ignore (float): probability of a predicted box assigned to \
+ an ignored truth.
+ - p_assigned (float): probability of a predicted box not being \
+ assigned.
+ - p_use_label (float | bool): with labels or not.
+
+ Returns:
+ :obj:`SamplingResult`: Randomly generated sampling result.
+
+ Example:
+ >>> from mmdet.core.bbox.samplers.sampling_result import * # NOQA
+ >>> self = SamplingResult.random()
+ >>> print(self.__dict__)
+ """
+ from mmdet.core.bbox.samplers.random_sampler import RandomSampler
+ from mmdet.core.bbox.assigners.assign_result import AssignResult
+ from mmdet.core.bbox import demodata
+ rng = demodata.ensure_rng(rng)
+
+        # make probabilistic?
+ num = 32
+ pos_fraction = 0.5
+ neg_pos_ub = -1
+
+ assign_result = AssignResult.random(rng=rng, **kwargs)
+
+ # Note we could just compute an assignment
+ bboxes = demodata.random_boxes(assign_result.num_preds, rng=rng)
+ gt_bboxes = demodata.random_boxes(assign_result.num_gts, rng=rng)
+
+ if rng.rand() > 0.2:
+ # sometimes algorithms squeeze their data, be robust to that
+ gt_bboxes = gt_bboxes.squeeze()
+ bboxes = bboxes.squeeze()
+
+ if assign_result.labels is None:
+ gt_labels = None
+ else:
+ gt_labels = None # todo
+
+ if gt_labels is None:
+ add_gt_as_proposals = False
+ else:
+            add_gt_as_proposals = True  # make probabilistic?
+
+ sampler = RandomSampler(
+ num,
+ pos_fraction,
+ neg_pos_ub=neg_pos_ub,
+ add_gt_as_proposals=add_gt_as_proposals,
+ rng=rng)
+ self = sampler.sample(assign_result, bboxes, gt_bboxes, gt_labels)
+ return self
diff --git a/annotator/uniformer/mmdet_null/core/bbox/samplers/score_hlr_sampler.py b/annotator/uniformer/mmdet_null/core/bbox/samplers/score_hlr_sampler.py
new file mode 100644
index 0000000000000000000000000000000000000000..e7fd71a482e64bf3d8a9767adf78947bc98b1e36
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/core/bbox/samplers/score_hlr_sampler.py
@@ -0,0 +1,264 @@
+import torch
+from annotator.uniformer.mmcv.ops import nms_match
+
+from ..builder import BBOX_SAMPLERS
+from ..transforms import bbox2roi
+from .base_sampler import BaseSampler
+from .sampling_result import SamplingResult
+
+
+@BBOX_SAMPLERS.register_module()
+class ScoreHLRSampler(BaseSampler):
+ r"""Importance-based Sample Reweighting (ISR_N), described in `Prime Sample
+    Attention in Object Detection <https://arxiv.org/abs/1904.04821>`_.
+
+ Score hierarchical local rank (HLR) differentiates with RandomSampler in
+ negative part. It firstly computes Score-HLR in a two-step way,
+ then linearly maps score hlr to the loss weights.
+
+ Args:
+ num (int): Total number of sampled RoIs.
+ pos_fraction (float): Fraction of positive samples.
+ context (:class:`BaseRoIHead`): RoI head that the sampler belongs to.
+ neg_pos_ub (int): Upper bound of the ratio of num negative to num
+ positive, -1 means no upper bound.
+ add_gt_as_proposals (bool): Whether to add ground truth as proposals.
+ k (float): Power of the non-linear mapping.
+ bias (float): Shift of the non-linear mapping.
+ score_thr (float): Minimum score that a negative sample is to be
+ considered as valid bbox.
+ """
+
+ def __init__(self,
+ num,
+ pos_fraction,
+ context,
+ neg_pos_ub=-1,
+ add_gt_as_proposals=True,
+ k=0.5,
+ bias=0,
+ score_thr=0.05,
+ iou_thr=0.5,
+ **kwargs):
+ super().__init__(num, pos_fraction, neg_pos_ub, add_gt_as_proposals)
+ self.k = k
+ self.bias = bias
+ self.score_thr = score_thr
+ self.iou_thr = iou_thr
+ self.context = context
+ # context of cascade detectors is a list, so distinguish them here.
+ if not hasattr(context, 'num_stages'):
+ self.bbox_roi_extractor = context.bbox_roi_extractor
+ self.bbox_head = context.bbox_head
+ self.with_shared_head = context.with_shared_head
+ if self.with_shared_head:
+ self.shared_head = context.shared_head
+ else:
+ self.bbox_roi_extractor = context.bbox_roi_extractor[
+ context.current_stage]
+ self.bbox_head = context.bbox_head[context.current_stage]
+
+ @staticmethod
+ def random_choice(gallery, num):
+ """Randomly select some elements from the gallery.
+
+ If `gallery` is a Tensor, the returned indices will be a Tensor;
+ If `gallery` is a ndarray or list, the returned indices will be a
+ ndarray.
+
+ Args:
+ gallery (Tensor | ndarray | list): indices pool.
+ num (int): expected sample num.
+
+ Returns:
+ Tensor or ndarray: sampled indices.
+ """
+ assert len(gallery) >= num
+
+ is_tensor = isinstance(gallery, torch.Tensor)
+ if not is_tensor:
+ if torch.cuda.is_available():
+ device = torch.cuda.current_device()
+ else:
+ device = 'cpu'
+ gallery = torch.tensor(gallery, dtype=torch.long, device=device)
+ perm = torch.randperm(gallery.numel(), device=gallery.device)[:num]
+ rand_inds = gallery[perm]
+ if not is_tensor:
+ rand_inds = rand_inds.cpu().numpy()
+ return rand_inds
+
+ def _sample_pos(self, assign_result, num_expected, **kwargs):
+ """Randomly sample some positive samples."""
+ pos_inds = torch.nonzero(assign_result.gt_inds > 0).flatten()
+ if pos_inds.numel() <= num_expected:
+ return pos_inds
+ else:
+ return self.random_choice(pos_inds, num_expected)
+
+ def _sample_neg(self,
+ assign_result,
+ num_expected,
+ bboxes,
+ feats=None,
+ img_meta=None,
+ **kwargs):
+ """Sample negative samples.
+
+ Score-HLR sampler is done in the following steps:
+        1. Take the maximum positive score prediction of each negative sample
+           as s_i.
+        2. Filter out negative samples whose s_i <= score_thr; the remaining
+           samples are called valid samples.
+        3. Use NMS-Match to divide valid samples into different groups;
+           samples in the same group greatly overlap with each other.
+        4. Rank the matched samples in two steps to get Score-HLR.
+ (1) In the same group, rank samples with their scores.
+ (2) In the same score rank across different groups,
+ rank samples with their scores again.
+ 5. Linearly map Score-HLR to the final label weights.
+
+ Args:
+ assign_result (:obj:`AssignResult`): result of assigner.
+ num_expected (int): Expected number of samples.
+ bboxes (Tensor): bbox to be sampled.
+ feats (Tensor): Features come from FPN.
+ img_meta (dict): Meta information dictionary.
+ """
+ neg_inds = torch.nonzero(assign_result.gt_inds == 0).flatten()
+ num_neg = neg_inds.size(0)
+ if num_neg == 0:
+ return neg_inds, None
+ with torch.no_grad():
+ neg_bboxes = bboxes[neg_inds]
+ neg_rois = bbox2roi([neg_bboxes])
+ bbox_result = self.context._bbox_forward(feats, neg_rois)
+ cls_score, bbox_pred = bbox_result['cls_score'], bbox_result[
+ 'bbox_pred']
+
+ ori_loss = self.bbox_head.loss(
+ cls_score=cls_score,
+ bbox_pred=None,
+ rois=None,
+ labels=neg_inds.new_full((num_neg, ),
+ self.bbox_head.num_classes),
+ label_weights=cls_score.new_ones(num_neg),
+ bbox_targets=None,
+ bbox_weights=None,
+ reduction_override='none')['loss_cls']
+
+ # filter out samples with the max score lower than score_thr
+ max_score, argmax_score = cls_score.softmax(-1)[:, :-1].max(-1)
+ valid_inds = (max_score > self.score_thr).nonzero().view(-1)
+ invalid_inds = (max_score <= self.score_thr).nonzero().view(-1)
+ num_valid = valid_inds.size(0)
+ num_invalid = invalid_inds.size(0)
+
+ num_expected = min(num_neg, num_expected)
+ num_hlr = min(num_valid, num_expected)
+ num_rand = num_expected - num_hlr
+ if num_valid > 0:
+ valid_rois = neg_rois[valid_inds]
+ valid_max_score = max_score[valid_inds]
+ valid_argmax_score = argmax_score[valid_inds]
+ valid_bbox_pred = bbox_pred[valid_inds]
+
+ # valid_bbox_pred shape: [num_valid, #num_classes, 4]
+ valid_bbox_pred = valid_bbox_pred.view(
+ valid_bbox_pred.size(0), -1, 4)
+ selected_bbox_pred = valid_bbox_pred[range(num_valid),
+ valid_argmax_score]
+ pred_bboxes = self.bbox_head.bbox_coder.decode(
+ valid_rois[:, 1:], selected_bbox_pred)
+ pred_bboxes_with_score = torch.cat(
+ [pred_bboxes, valid_max_score[:, None]], -1)
+ group = nms_match(pred_bboxes_with_score, self.iou_thr)
+
+ # imp: importance
+ imp = cls_score.new_zeros(num_valid)
+ for g in group:
+ g_score = valid_max_score[g]
+                    # g_score has already been sorted
+ rank = g_score.new_tensor(range(g_score.size(0)))
+ imp[g] = num_valid - rank + g_score
+ _, imp_rank_inds = imp.sort(descending=True)
+ _, imp_rank = imp_rank_inds.sort()
+ hlr_inds = imp_rank_inds[:num_expected]
+
+ if num_rand > 0:
+ rand_inds = torch.randperm(num_invalid)[:num_rand]
+ select_inds = torch.cat(
+ [valid_inds[hlr_inds], invalid_inds[rand_inds]])
+ else:
+ select_inds = valid_inds[hlr_inds]
+
+ neg_label_weights = cls_score.new_ones(num_expected)
+
+ up_bound = max(num_expected, num_valid)
+ imp_weights = (up_bound -
+ imp_rank[hlr_inds].float()) / up_bound
+ neg_label_weights[:num_hlr] = imp_weights
+ neg_label_weights[num_hlr:] = imp_weights.min()
+ neg_label_weights = (self.bias +
+ (1 - self.bias) * neg_label_weights).pow(
+ self.k)
+ ori_selected_loss = ori_loss[select_inds]
+ new_loss = ori_selected_loss * neg_label_weights
+ norm_ratio = ori_selected_loss.sum() / new_loss.sum()
+ neg_label_weights *= norm_ratio
+ else:
+ neg_label_weights = cls_score.new_ones(num_expected)
+ select_inds = torch.randperm(num_neg)[:num_expected]
+
+ return neg_inds[select_inds], neg_label_weights
+
+ def sample(self,
+ assign_result,
+ bboxes,
+ gt_bboxes,
+ gt_labels=None,
+ img_meta=None,
+ **kwargs):
+ """Sample positive and negative bboxes.
+
+ This is a simple implementation of bbox sampling given candidates,
+ assigning results and ground truth bboxes.
+
+ Args:
+ assign_result (:obj:`AssignResult`): Bbox assigning results.
+ bboxes (Tensor): Boxes to be sampled from.
+ gt_bboxes (Tensor): Ground truth bboxes.
+ gt_labels (Tensor, optional): Class labels of ground truth bboxes.
+
+ Returns:
+            tuple[:obj:`SamplingResult`, Tensor]: Sampling result and negative
+ label weights.
+ """
+ bboxes = bboxes[:, :4]
+
+ gt_flags = bboxes.new_zeros((bboxes.shape[0], ), dtype=torch.uint8)
+ if self.add_gt_as_proposals:
+ bboxes = torch.cat([gt_bboxes, bboxes], dim=0)
+ assign_result.add_gt_(gt_labels)
+ gt_ones = bboxes.new_ones(gt_bboxes.shape[0], dtype=torch.uint8)
+ gt_flags = torch.cat([gt_ones, gt_flags])
+
+ num_expected_pos = int(self.num * self.pos_fraction)
+ pos_inds = self.pos_sampler._sample_pos(
+ assign_result, num_expected_pos, bboxes=bboxes, **kwargs)
+ num_sampled_pos = pos_inds.numel()
+ num_expected_neg = self.num - num_sampled_pos
+ if self.neg_pos_ub >= 0:
+ _pos = max(1, num_sampled_pos)
+ neg_upper_bound = int(self.neg_pos_ub * _pos)
+ if num_expected_neg > neg_upper_bound:
+ num_expected_neg = neg_upper_bound
+ neg_inds, neg_label_weights = self.neg_sampler._sample_neg(
+ assign_result,
+ num_expected_neg,
+ bboxes,
+ img_meta=img_meta,
+ **kwargs)
+
+ return SamplingResult(pos_inds, neg_inds, bboxes, gt_bboxes,
+ assign_result, gt_flags), neg_label_weights
diff --git a/annotator/uniformer/mmdet_null/core/bbox/transforms.py b/annotator/uniformer/mmdet_null/core/bbox/transforms.py
new file mode 100644
index 0000000000000000000000000000000000000000..df55b0a496516bf7373fe96cf746c561dd713c3b
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/core/bbox/transforms.py
@@ -0,0 +1,240 @@
+import numpy as np
+import torch
+
+
+def bbox_flip(bboxes, img_shape, direction='horizontal'):
+ """Flip bboxes horizontally or vertically.
+
+ Args:
+ bboxes (Tensor): Shape (..., 4*k)
+ img_shape (tuple): Image shape.
+ direction (str): Flip direction, options are "horizontal", "vertical",
+ "diagonal". Default: "horizontal"
+
+ Returns:
+ Tensor: Flipped bboxes.
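+
+    Example:
+        >>> # a minimal illustration with assumed values; img_shape is (H, W)
+        >>> bboxes = torch.tensor([[10., 20., 30., 40.]])
+        >>> bbox_flip(bboxes, img_shape=(100, 200)).tolist()
+        [[170.0, 20.0, 190.0, 40.0]]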
+ """
+ assert bboxes.shape[-1] % 4 == 0
+ assert direction in ['horizontal', 'vertical', 'diagonal']
+ flipped = bboxes.clone()
+ if direction == 'horizontal':
+ flipped[..., 0::4] = img_shape[1] - bboxes[..., 2::4]
+ flipped[..., 2::4] = img_shape[1] - bboxes[..., 0::4]
+ elif direction == 'vertical':
+ flipped[..., 1::4] = img_shape[0] - bboxes[..., 3::4]
+ flipped[..., 3::4] = img_shape[0] - bboxes[..., 1::4]
+ else:
+ flipped[..., 0::4] = img_shape[1] - bboxes[..., 2::4]
+ flipped[..., 1::4] = img_shape[0] - bboxes[..., 3::4]
+ flipped[..., 2::4] = img_shape[1] - bboxes[..., 0::4]
+ flipped[..., 3::4] = img_shape[0] - bboxes[..., 1::4]
+ return flipped
+
+
+def bbox_mapping(bboxes,
+ img_shape,
+ scale_factor,
+ flip,
+ flip_direction='horizontal'):
+ """Map bboxes from the original image scale to testing scale."""
+ new_bboxes = bboxes * bboxes.new_tensor(scale_factor)
+ if flip:
+ new_bboxes = bbox_flip(new_bboxes, img_shape, flip_direction)
+ return new_bboxes
+
+
+def bbox_mapping_back(bboxes,
+ img_shape,
+ scale_factor,
+ flip,
+ flip_direction='horizontal'):
+ """Map bboxes from testing scale to original image scale."""
+ new_bboxes = bbox_flip(bboxes, img_shape,
+ flip_direction) if flip else bboxes
+ new_bboxes = new_bboxes.view(-1, 4) / new_bboxes.new_tensor(scale_factor)
+ return new_bboxes.view(bboxes.shape)
+
+
+def bbox2roi(bbox_list):
+ """Convert a list of bboxes to roi format.
+
+ Args:
+ bbox_list (list[Tensor]): a list of bboxes corresponding to a batch
+ of images.
+
+ Returns:
+ Tensor: shape (n, 5), [batch_ind, x1, y1, x2, y2]
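+
+    Example:
+        >>> # illustrative sketch: boxes for a batch of two images
+        >>> bbox_list = [torch.tensor([[0., 0., 10., 10.]]),
+        ...              torch.tensor([[5., 5., 20., 20.], [1., 2., 3., 4.]])]
+        >>> bbox2roi(bbox_list).shape
+        torch.Size([3, 5])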
+ """
+ rois_list = []
+ for img_id, bboxes in enumerate(bbox_list):
+ if bboxes.size(0) > 0:
+ img_inds = bboxes.new_full((bboxes.size(0), 1), img_id)
+ rois = torch.cat([img_inds, bboxes[:, :4]], dim=-1)
+ else:
+ rois = bboxes.new_zeros((0, 5))
+ rois_list.append(rois)
+ rois = torch.cat(rois_list, 0)
+ return rois
+
+
+def roi2bbox(rois):
+ """Convert rois to bounding box format.
+
+ Args:
+ rois (torch.Tensor): RoIs with the shape (n, 5) where the first
+ column indicates batch id of each RoI.
+
+ Returns:
+ list[torch.Tensor]: Converted boxes of corresponding rois.
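+
+    Example:
+        >>> # illustrative values: two RoIs from two different images
+        >>> rois = torch.tensor([[0., 1., 1., 5., 5.],
+        ...                      [1., 2., 2., 6., 6.]])
+        >>> [b.tolist() for b in roi2bbox(rois)]
+        [[[1.0, 1.0, 5.0, 5.0]], [[2.0, 2.0, 6.0, 6.0]]]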
+ """
+ bbox_list = []
+ img_ids = torch.unique(rois[:, 0].cpu(), sorted=True)
+ for img_id in img_ids:
+ inds = (rois[:, 0] == img_id.item())
+ bbox = rois[inds, 1:]
+ bbox_list.append(bbox)
+ return bbox_list
+
+
+def bbox2result(bboxes, labels, num_classes):
+ """Convert detection results to a list of numpy arrays.
+
+ Args:
+ bboxes (torch.Tensor | np.ndarray): shape (n, 5)
+ labels (torch.Tensor | np.ndarray): shape (n, )
+ num_classes (int): class number, including background class
+
+ Returns:
+ list(ndarray): bbox results of each class
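+
+    Example:
+        >>> # illustrative edge case: no detections with 3 classes
+        >>> bboxes = torch.zeros((0, 5))
+        >>> labels = torch.zeros((0, ), dtype=torch.long)
+        >>> [r.shape for r in bbox2result(bboxes, labels, num_classes=3)]
+        [(0, 5), (0, 5), (0, 5)]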
+ """
+ if bboxes.shape[0] == 0:
+ return [np.zeros((0, 5), dtype=np.float32) for i in range(num_classes)]
+ else:
+ if isinstance(bboxes, torch.Tensor):
+ bboxes = bboxes.detach().cpu().numpy()
+ labels = labels.detach().cpu().numpy()
+ return [bboxes[labels == i, :] for i in range(num_classes)]
+
+
+def distance2bbox(points, distance, max_shape=None):
+ """Decode distance prediction to bounding box.
+
+ Args:
+ points (Tensor): Shape (B, N, 2) or (N, 2).
+ distance (Tensor): Distance from the given point to 4
+ boundaries (left, top, right, bottom). Shape (B, N, 4) or (N, 4)
+ max_shape (Sequence[int] or torch.Tensor or Sequence[
+            Sequence[int]], optional): Maximum bounds for boxes, specifies
+ (H, W, C) or (H, W). If priors shape is (B, N, 4), then
+ the max_shape should be a Sequence[Sequence[int]]
+ and the length of max_shape should also be B.
+
+ Returns:
+ Tensor: Boxes with shape (N, 4) or (B, N, 4)
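+
+    Example:
+        >>> # illustrative values: one point and its (left, top, right, bottom)
+        >>> points = torch.tensor([[5., 5.]])
+        >>> distance = torch.tensor([[3., 2., 4., 6.]])
+        >>> distance2bbox(points, distance).tolist()
+        [[2.0, 3.0, 9.0, 11.0]]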
+ """
+ x1 = points[..., 0] - distance[..., 0]
+ y1 = points[..., 1] - distance[..., 1]
+ x2 = points[..., 0] + distance[..., 2]
+ y2 = points[..., 1] + distance[..., 3]
+
+ bboxes = torch.stack([x1, y1, x2, y2], -1)
+
+ if max_shape is not None:
+ if not isinstance(max_shape, torch.Tensor):
+ max_shape = x1.new_tensor(max_shape)
+ max_shape = max_shape[..., :2].type_as(x1)
+ if max_shape.ndim == 2:
+ assert bboxes.ndim == 3
+ assert max_shape.size(0) == bboxes.size(0)
+
+ min_xy = x1.new_tensor(0)
+ max_xy = torch.cat([max_shape, max_shape],
+ dim=-1).flip(-1).unsqueeze(-2)
+ bboxes = torch.where(bboxes < min_xy, min_xy, bboxes)
+ bboxes = torch.where(bboxes > max_xy, max_xy, bboxes)
+
+ return bboxes
+
+
+def bbox2distance(points, bbox, max_dis=None, eps=0.1):
+ """Decode bounding box based on distances.
+
+ Args:
+ points (Tensor): Shape (n, 2), [x, y].
+ bbox (Tensor): Shape (n, 4), "xyxy" format
+ max_dis (float): Upper bound of the distance.
+        eps (float): a small value to ensure target < max_dis, instead of <=.
+
+ Returns:
+        Tensor: Encoded distances (left, top, right, bottom).
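+
+    Example:
+        >>> # illustrative inverse of the distance2bbox example above
+        >>> points = torch.tensor([[5., 5.]])
+        >>> bbox = torch.tensor([[2., 3., 9., 11.]])
+        >>> bbox2distance(points, bbox).tolist()
+        [[3.0, 2.0, 4.0, 6.0]]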
+ """
+ left = points[:, 0] - bbox[:, 0]
+ top = points[:, 1] - bbox[:, 1]
+ right = bbox[:, 2] - points[:, 0]
+ bottom = bbox[:, 3] - points[:, 1]
+ if max_dis is not None:
+ left = left.clamp(min=0, max=max_dis - eps)
+ top = top.clamp(min=0, max=max_dis - eps)
+ right = right.clamp(min=0, max=max_dis - eps)
+ bottom = bottom.clamp(min=0, max=max_dis - eps)
+ return torch.stack([left, top, right, bottom], -1)
+
+
+def bbox_rescale(bboxes, scale_factor=1.0):
+ """Rescale bounding box w.r.t. scale_factor.
+
+ Args:
+ bboxes (Tensor): Shape (n, 4) for bboxes or (n, 5) for rois
+ scale_factor (float): rescale factor
+
+ Returns:
+ Tensor: Rescaled bboxes.
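+
+    Example:
+        >>> # illustrative values; the center is kept fixed while w/h scale
+        >>> bboxes = torch.tensor([[0., 0., 10., 10.]])
+        >>> bbox_rescale(bboxes, scale_factor=2.0).tolist()
+        [[-5.0, -5.0, 15.0, 15.0]]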
+ """
+ if bboxes.size(1) == 5:
+ bboxes_ = bboxes[:, 1:]
+ inds_ = bboxes[:, 0]
+ else:
+ bboxes_ = bboxes
+ cx = (bboxes_[:, 0] + bboxes_[:, 2]) * 0.5
+ cy = (bboxes_[:, 1] + bboxes_[:, 3]) * 0.5
+ w = bboxes_[:, 2] - bboxes_[:, 0]
+ h = bboxes_[:, 3] - bboxes_[:, 1]
+ w = w * scale_factor
+ h = h * scale_factor
+ x1 = cx - 0.5 * w
+ x2 = cx + 0.5 * w
+ y1 = cy - 0.5 * h
+ y2 = cy + 0.5 * h
+ if bboxes.size(1) == 5:
+ rescaled_bboxes = torch.stack([inds_, x1, y1, x2, y2], dim=-1)
+ else:
+ rescaled_bboxes = torch.stack([x1, y1, x2, y2], dim=-1)
+ return rescaled_bboxes
+
+
+def bbox_cxcywh_to_xyxy(bbox):
+ """Convert bbox coordinates from (cx, cy, w, h) to (x1, y1, x2, y2).
+
+ Args:
+ bbox (Tensor): Shape (n, 4) for bboxes.
+
+ Returns:
+ Tensor: Converted bboxes.
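+
+    Example:
+        >>> # illustrative values
+        >>> bbox_cxcywh_to_xyxy(torch.tensor([[10., 10., 4., 6.]])).tolist()
+        [[8.0, 7.0, 12.0, 13.0]]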
+ """
+ cx, cy, w, h = bbox.split((1, 1, 1, 1), dim=-1)
+ bbox_new = [(cx - 0.5 * w), (cy - 0.5 * h), (cx + 0.5 * w), (cy + 0.5 * h)]
+ return torch.cat(bbox_new, dim=-1)
+
+
+def bbox_xyxy_to_cxcywh(bbox):
+ """Convert bbox coordinates from (x1, y1, x2, y2) to (cx, cy, w, h).
+
+ Args:
+ bbox (Tensor): Shape (n, 4) for bboxes.
+
+ Returns:
+ Tensor: Converted bboxes.
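+
+    Example:
+        >>> # illustrative values (inverse of the example above)
+        >>> bbox_xyxy_to_cxcywh(torch.tensor([[8., 7., 12., 13.]])).tolist()
+        [[10.0, 10.0, 4.0, 6.0]]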
+ """
+ x1, y1, x2, y2 = bbox.split((1, 1, 1, 1), dim=-1)
+ bbox_new = [(x1 + x2) / 2, (y1 + y2) / 2, (x2 - x1), (y2 - y1)]
+ return torch.cat(bbox_new, dim=-1)
diff --git a/annotator/uniformer/mmdet_null/core/evaluation/__init__.py b/annotator/uniformer/mmdet_null/core/evaluation/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..b1fb4062967f7e225a75c697a990153d4f5b5481
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/core/evaluation/__init__.py
@@ -0,0 +1,15 @@
+from .class_names import (cityscapes_classes, coco_classes, dataset_aliases,
+ get_classes, imagenet_det_classes,
+ imagenet_vid_classes, voc_classes, get_palette)
+from .eval_hooks import DistEvalHook, EvalHook
+from .mean_ap import average_precision, eval_map, print_map_summary
+from .recall import (eval_recalls, plot_iou_recall, plot_num_recall,
+ print_recall_summary)
+
+__all__ = [
+ 'voc_classes', 'imagenet_det_classes', 'imagenet_vid_classes',
+ 'coco_classes', 'cityscapes_classes', 'dataset_aliases', 'get_classes', 'get_palette',
+ 'DistEvalHook', 'EvalHook', 'average_precision', 'eval_map',
+ 'print_map_summary', 'eval_recalls', 'print_recall_summary',
+ 'plot_num_recall', 'plot_iou_recall'
+]
diff --git a/annotator/uniformer/mmdet_null/core/evaluation/bbox_overlaps.py b/annotator/uniformer/mmdet_null/core/evaluation/bbox_overlaps.py
new file mode 100644
index 0000000000000000000000000000000000000000..93559ea0f25369d552a5365312fa32b9ffec9226
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/core/evaluation/bbox_overlaps.py
@@ -0,0 +1,48 @@
+import numpy as np
+
+
+def bbox_overlaps(bboxes1, bboxes2, mode='iou', eps=1e-6):
+ """Calculate the ious between each bbox of bboxes1 and bboxes2.
+
+ Args:
+ bboxes1(ndarray): shape (n, 4)
+ bboxes2(ndarray): shape (k, 4)
+ mode(str): iou (intersection over union) or iof (intersection
+ over foreground)
+
+ Returns:
+ ious(ndarray): shape (n, k)
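+
+    Example:
+        >>> # illustrative values: identical boxes give IoU 1.0, a box twice
+        >>> # as wide gives IoU 0.5
+        >>> bboxes1 = np.array([[0., 0., 10., 10.]])
+        >>> bboxes2 = np.array([[0., 0., 10., 10.], [0., 0., 20., 10.]])
+        >>> bbox_overlaps(bboxes1, bboxes2).tolist()
+        [[1.0, 0.5]]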
+ """
+
+ assert mode in ['iou', 'iof']
+
+ bboxes1 = bboxes1.astype(np.float32)
+ bboxes2 = bboxes2.astype(np.float32)
+ rows = bboxes1.shape[0]
+ cols = bboxes2.shape[0]
+ ious = np.zeros((rows, cols), dtype=np.float32)
+ if rows * cols == 0:
+ return ious
+ exchange = False
+ if bboxes1.shape[0] > bboxes2.shape[0]:
+ bboxes1, bboxes2 = bboxes2, bboxes1
+ ious = np.zeros((cols, rows), dtype=np.float32)
+ exchange = True
+ area1 = (bboxes1[:, 2] - bboxes1[:, 0]) * (bboxes1[:, 3] - bboxes1[:, 1])
+ area2 = (bboxes2[:, 2] - bboxes2[:, 0]) * (bboxes2[:, 3] - bboxes2[:, 1])
+ for i in range(bboxes1.shape[0]):
+ x_start = np.maximum(bboxes1[i, 0], bboxes2[:, 0])
+ y_start = np.maximum(bboxes1[i, 1], bboxes2[:, 1])
+ x_end = np.minimum(bboxes1[i, 2], bboxes2[:, 2])
+ y_end = np.minimum(bboxes1[i, 3], bboxes2[:, 3])
+ overlap = np.maximum(x_end - x_start, 0) * np.maximum(
+ y_end - y_start, 0)
+ if mode == 'iou':
+ union = area1[i] + area2 - overlap
+ else:
+ union = area1[i] if not exchange else area2
+ union = np.maximum(union, eps)
+ ious[i, :] = overlap / union
+ if exchange:
+ ious = ious.T
+ return ious
diff --git a/annotator/uniformer/mmdet_null/core/evaluation/class_names.py b/annotator/uniformer/mmdet_null/core/evaluation/class_names.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69d602ee3a6e197e21a806d5aab4b020be2fe6c
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/core/evaluation/class_names.py
@@ -0,0 +1,132 @@
+import annotator.uniformer.mmcv as mmcv
+
+
+def wider_face_classes():
+ return ['face']
+
+
+def voc_classes():
+ return [
+ 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat',
+ 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person',
+ 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor'
+ ]
+
+
+def imagenet_det_classes():
+ return [
+ 'accordion', 'airplane', 'ant', 'antelope', 'apple', 'armadillo',
+ 'artichoke', 'axe', 'baby_bed', 'backpack', 'bagel', 'balance_beam',
+ 'banana', 'band_aid', 'banjo', 'baseball', 'basketball', 'bathing_cap',
+ 'beaker', 'bear', 'bee', 'bell_pepper', 'bench', 'bicycle', 'binder',
+ 'bird', 'bookshelf', 'bow_tie', 'bow', 'bowl', 'brassiere', 'burrito',
+ 'bus', 'butterfly', 'camel', 'can_opener', 'car', 'cart', 'cattle',
+ 'cello', 'centipede', 'chain_saw', 'chair', 'chime', 'cocktail_shaker',
+ 'coffee_maker', 'computer_keyboard', 'computer_mouse', 'corkscrew',
+ 'cream', 'croquet_ball', 'crutch', 'cucumber', 'cup_or_mug', 'diaper',
+ 'digital_clock', 'dishwasher', 'dog', 'domestic_cat', 'dragonfly',
+ 'drum', 'dumbbell', 'electric_fan', 'elephant', 'face_powder', 'fig',
+ 'filing_cabinet', 'flower_pot', 'flute', 'fox', 'french_horn', 'frog',
+ 'frying_pan', 'giant_panda', 'goldfish', 'golf_ball', 'golfcart',
+ 'guacamole', 'guitar', 'hair_dryer', 'hair_spray', 'hamburger',
+ 'hammer', 'hamster', 'harmonica', 'harp', 'hat_with_a_wide_brim',
+ 'head_cabbage', 'helmet', 'hippopotamus', 'horizontal_bar', 'horse',
+ 'hotdog', 'iPod', 'isopod', 'jellyfish', 'koala_bear', 'ladle',
+ 'ladybug', 'lamp', 'laptop', 'lemon', 'lion', 'lipstick', 'lizard',
+ 'lobster', 'maillot', 'maraca', 'microphone', 'microwave', 'milk_can',
+ 'miniskirt', 'monkey', 'motorcycle', 'mushroom', 'nail', 'neck_brace',
+ 'oboe', 'orange', 'otter', 'pencil_box', 'pencil_sharpener', 'perfume',
+ 'person', 'piano', 'pineapple', 'ping-pong_ball', 'pitcher', 'pizza',
+ 'plastic_bag', 'plate_rack', 'pomegranate', 'popsicle', 'porcupine',
+ 'power_drill', 'pretzel', 'printer', 'puck', 'punching_bag', 'purse',
+ 'rabbit', 'racket', 'ray', 'red_panda', 'refrigerator',
+ 'remote_control', 'rubber_eraser', 'rugby_ball', 'ruler',
+ 'salt_or_pepper_shaker', 'saxophone', 'scorpion', 'screwdriver',
+ 'seal', 'sheep', 'ski', 'skunk', 'snail', 'snake', 'snowmobile',
+ 'snowplow', 'soap_dispenser', 'soccer_ball', 'sofa', 'spatula',
+ 'squirrel', 'starfish', 'stethoscope', 'stove', 'strainer',
+ 'strawberry', 'stretcher', 'sunglasses', 'swimming_trunks', 'swine',
+ 'syringe', 'table', 'tape_player', 'tennis_ball', 'tick', 'tie',
+ 'tiger', 'toaster', 'traffic_light', 'train', 'trombone', 'trumpet',
+ 'turtle', 'tv_or_monitor', 'unicycle', 'vacuum', 'violin',
+ 'volleyball', 'waffle_iron', 'washer', 'water_bottle', 'watercraft',
+ 'whale', 'wine_bottle', 'zebra'
+ ]
+
+
+def imagenet_vid_classes():
+ return [
+ 'airplane', 'antelope', 'bear', 'bicycle', 'bird', 'bus', 'car',
+ 'cattle', 'dog', 'domestic_cat', 'elephant', 'fox', 'giant_panda',
+ 'hamster', 'horse', 'lion', 'lizard', 'monkey', 'motorcycle', 'rabbit',
+ 'red_panda', 'sheep', 'snake', 'squirrel', 'tiger', 'train', 'turtle',
+ 'watercraft', 'whale', 'zebra'
+ ]
+
+
+def coco_classes():
+ return [
+ 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
+ 'truck', 'boat', 'traffic_light', 'fire_hydrant', 'stop_sign',
+ 'parking_meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep',
+ 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella',
+ 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard',
+ 'sports_ball', 'kite', 'baseball_bat', 'baseball_glove', 'skateboard',
+ 'surfboard', 'tennis_racket', 'bottle', 'wine_glass', 'cup', 'fork',
+ 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange',
+ 'broccoli', 'carrot', 'hot_dog', 'pizza', 'donut', 'cake', 'chair',
+ 'couch', 'potted_plant', 'bed', 'dining_table', 'toilet', 'tv',
+ 'laptop', 'mouse', 'remote', 'keyboard', 'cell_phone', 'microwave',
+ 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',
+ 'scissors', 'teddy_bear', 'hair_drier', 'toothbrush'
+ ]
+
+
+def cityscapes_classes():
+ return [
+ 'person', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle',
+ 'bicycle'
+ ]
+
+
+dataset_aliases = {
+ 'voc': ['voc', 'pascal_voc', 'voc07', 'voc12'],
+ 'imagenet_det': ['det', 'imagenet_det', 'ilsvrc_det'],
+ 'imagenet_vid': ['vid', 'imagenet_vid', 'ilsvrc_vid'],
+ 'coco': ['coco', 'mscoco', 'ms_coco'],
+ 'wider_face': ['WIDERFaceDataset', 'wider_face', 'WIDERFace'],
+ 'cityscapes': ['cityscapes']
+}
+
+
+def get_classes(dataset):
+ """Get class names of a dataset."""
+ alias2name = {}
+ for name, aliases in dataset_aliases.items():
+ for alias in aliases:
+ alias2name[alias] = name
+
+ if mmcv.is_str(dataset):
+ if dataset in alias2name:
+ labels = eval(alias2name[dataset] + '_classes()')
+ else:
+ raise ValueError(f'Unrecognized dataset: {dataset}')
+ else:
+        raise TypeError(f'dataset must be a str, but got {type(dataset)}')
+ return labels
+
+def get_palette(dataset):
+ """Get class palette (RGB) of a dataset."""
+ alias2name = {}
+ for name, aliases in dataset_aliases.items():
+ for alias in aliases:
+ alias2name[alias] = name
+
+ if mmcv.is_str(dataset):
+ if dataset in alias2name:
+ labels = eval(alias2name[dataset] + '_palette()')
+ else:
+ raise ValueError(f'Unrecognized dataset: {dataset}')
+ else:
+        raise TypeError(f'dataset must be a str, but got {type(dataset)}')
+ return labels
\ No newline at end of file
diff --git a/annotator/uniformer/mmdet_null/core/evaluation/eval_hooks.py b/annotator/uniformer/mmdet_null/core/evaluation/eval_hooks.py
new file mode 100644
index 0000000000000000000000000000000000000000..699ca0463aa83c2d3cfb2e12e438cecc8f3c38ed
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/core/evaluation/eval_hooks.py
@@ -0,0 +1,303 @@
+import os.path as osp
+import warnings
+from math import inf
+
+import annotator.uniformer.mmcv as mmcv
+import torch.distributed as dist
+from annotator.uniformer.mmcv.runner import Hook
+from torch.nn.modules.batchnorm import _BatchNorm
+from torch.utils.data import DataLoader
+
+from annotator.uniformer.mmdet.utils import get_root_logger
+
+
+class EvalHook(Hook):
+ """Evaluation hook.
+
+ Notes:
+ If new arguments are added for EvalHook, tools/test.py,
+        tools/analysis_tools/eval_metric.py may be affected.
+
+ Attributes:
+ dataloader (DataLoader): A PyTorch dataloader.
+ start (int, optional): Evaluation starting epoch. It enables evaluation
+ before the training starts if ``start`` <= the resuming epoch.
+ If None, whether to evaluate is merely decided by ``interval``.
+ Default: None.
+ interval (int): Evaluation interval (by epochs). Default: 1.
+        save_best (str, optional): If a metric is specified, it would measure
+            the best checkpoint during evaluation. The information about the
+            best checkpoint would be saved in best.json.
+            Options are the evaluation metrics applied to the test dataset,
+            e.g., ``bbox_mAP``, ``segm_mAP`` for bbox detection and instance
+            segmentation, ``AR@100`` for proposal recall. If ``save_best`` is
+            ``auto``, the first key will be used. The interval of
+            ``CheckpointHook`` should divide that of ``EvalHook``. Default: None.
+ rule (str, optional): Comparison rule for best score. If set to None,
+            it will infer a reasonable rule. Keys such as 'mAP' or 'AR' will
+            be inferred by the 'greater' rule. Keys containing 'loss' will be
+            inferred by the 'less' rule. Options are 'greater', 'less'.
+            Default: None.
+ **eval_kwargs: Evaluation arguments fed into the evaluate function of
+ the dataset.
+ """
+
+ rule_map = {'greater': lambda x, y: x > y, 'less': lambda x, y: x < y}
+ init_value_map = {'greater': -inf, 'less': inf}
+ greater_keys = ['mAP', 'AR']
+ less_keys = ['loss']
+
+ def __init__(self,
+ dataloader,
+ start=None,
+ interval=1,
+ by_epoch=True,
+ save_best=None,
+ rule=None,
+ **eval_kwargs):
+ if not isinstance(dataloader, DataLoader):
+ raise TypeError('dataloader must be a pytorch DataLoader, but got'
+ f' {type(dataloader)}')
+ if not interval > 0:
+ raise ValueError(f'interval must be positive, but got {interval}')
+ if start is not None and start < 0:
+ warnings.warn(
+ f'The evaluation start epoch {start} is smaller than 0, '
+ f'use 0 instead', UserWarning)
+ start = 0
+ self.dataloader = dataloader
+ self.interval = interval
+ self.by_epoch = by_epoch
+ self.start = start
+ assert isinstance(save_best, str) or save_best is None
+ self.save_best = save_best
+ self.eval_kwargs = eval_kwargs
+ self.initial_epoch_flag = True
+
+ self.logger = get_root_logger()
+
+ if self.save_best is not None:
+ self._init_rule(rule, self.save_best)
+
+ def _init_rule(self, rule, key_indicator):
+ """Initialize rule, key_indicator, comparison_func, and best score.
+
+ Args:
+ rule (str | None): Comparison rule for best score.
+ key_indicator (str | None): Key indicator to determine the
+ comparison rule.
+ """
+ if rule not in self.rule_map and rule is not None:
+ raise KeyError(f'rule must be greater, less or None, '
+ f'but got {rule}.')
+
+ if rule is None:
+ if key_indicator != 'auto':
+ if any(key in key_indicator for key in self.greater_keys):
+ rule = 'greater'
+ elif any(key in key_indicator for key in self.less_keys):
+ rule = 'less'
+ else:
+ raise ValueError(f'Cannot infer the rule for key '
+ f'{key_indicator}, thus a specific rule '
+ f'must be specified.')
+ self.rule = rule
+ self.key_indicator = key_indicator
+ if self.rule is not None:
+ self.compare_func = self.rule_map[self.rule]
+
+ def before_run(self, runner):
+ if self.save_best is not None:
+ if runner.meta is None:
+                warnings.warn('runner.meta is None. Creating an empty one.')
+ runner.meta = dict()
+ runner.meta.setdefault('hook_msgs', dict())
+
+ def before_train_epoch(self, runner):
+ """Evaluate the model only at the start of training."""
+ if not self.initial_epoch_flag:
+ return
+ if self.start is not None and runner.epoch >= self.start:
+ self.after_train_epoch(runner)
+ self.initial_epoch_flag = False
+
+ def evaluation_flag(self, runner):
+ """Judge whether to perform_evaluation after this epoch.
+
+ Returns:
+ bool: The flag indicating whether to perform evaluation.
+ """
+ if self.start is None:
+ if not self.every_n_epochs(runner, self.interval):
+ # No evaluation during the interval epochs.
+ return False
+ elif (runner.epoch + 1) < self.start:
+ # No evaluation if start is larger than the current epoch.
+ return False
+ else:
+ # Evaluation only at epochs 3, 5, 7... if start==3 and interval==2
+ if (runner.epoch + 1 - self.start) % self.interval:
+ return False
+ return True
+
+ def after_train_epoch(self, runner):
+ if not self.by_epoch or not self.evaluation_flag(runner):
+ return
+ from mmdet.apis import single_gpu_test
+ results = single_gpu_test(runner.model, self.dataloader, show=False)
+ key_score = self.evaluate(runner, results)
+ if self.save_best:
+ self.save_best_checkpoint(runner, key_score)
+
+ def after_train_iter(self, runner):
+ if self.by_epoch or not self.every_n_iters(runner, self.interval):
+ return
+ from mmdet.apis import single_gpu_test
+ results = single_gpu_test(runner.model, self.dataloader, show=False)
+ key_score = self.evaluate(runner, results)
+ if self.save_best:
+ self.save_best_checkpoint(runner, key_score)
+
+ def save_best_checkpoint(self, runner, key_score):
+ best_score = runner.meta['hook_msgs'].get(
+ 'best_score', self.init_value_map[self.rule])
+ if self.compare_func(key_score, best_score):
+ best_score = key_score
+ runner.meta['hook_msgs']['best_score'] = best_score
+ last_ckpt = runner.meta['hook_msgs']['last_ckpt']
+ runner.meta['hook_msgs']['best_ckpt'] = last_ckpt
+ mmcv.symlink(
+ last_ckpt,
+ osp.join(runner.work_dir, f'best_{self.key_indicator}.pth'))
+ time_stamp = runner.epoch + 1 if self.by_epoch else runner.iter + 1
+        self.logger.info(f'Now best checkpoint is epoch_{time_stamp}.pth. '
+ f'Best {self.key_indicator} is {best_score:0.4f}')
+
+ def evaluate(self, runner, results):
+ eval_res = self.dataloader.dataset.evaluate(
+ results, logger=runner.logger, **self.eval_kwargs)
+ for name, val in eval_res.items():
+ runner.log_buffer.output[name] = val
+ runner.log_buffer.ready = True
+ if self.save_best is not None:
+ if self.key_indicator == 'auto':
+ # infer from eval_results
+ self._init_rule(self.rule, list(eval_res.keys())[0])
+ return eval_res[self.key_indicator]
+ else:
+ return None
+
+
+class DistEvalHook(EvalHook):
+ """Distributed evaluation hook.
+
+ Notes:
+        If new arguments are added, tools/test.py may be affected.
+
+ Attributes:
+ dataloader (DataLoader): A PyTorch dataloader.
+ start (int, optional): Evaluation starting epoch. It enables evaluation
+ before the training starts if ``start`` <= the resuming epoch.
+ If None, whether to evaluate is merely decided by ``interval``.
+ Default: None.
+ interval (int): Evaluation interval (by epochs). Default: 1.
+ tmpdir (str | None): Temporary directory to save the results of all
+ processes. Default: None.
+ gpu_collect (bool): Whether to use gpu or cpu to collect results.
+ Default: False.
+        save_best (str, optional): If a metric is specified, it would measure
+            the best checkpoint during evaluation. The information about the
+            best checkpoint would be saved in best.json.
+            Options are the evaluation metrics applied to the test dataset,
+            e.g., ``bbox_mAP``, ``segm_mAP`` for bbox detection and instance
+            segmentation, ``AR@100`` for proposal recall. If ``save_best`` is
+            ``auto``, the first key will be used. The interval of
+            ``CheckpointHook`` should divide that of ``EvalHook``. Default: None.
+ rule (str | None): Comparison rule for best score. If set to None,
+            it will infer a reasonable rule. Default: None.
+ broadcast_bn_buffer (bool): Whether to broadcast the
+            buffer (running_mean and running_var) of rank 0 to other ranks
+            before evaluation. Default: True.
+ **eval_kwargs: Evaluation arguments fed into the evaluate function of
+ the dataset.
+ """
+
+ def __init__(self,
+ dataloader,
+ start=None,
+ interval=1,
+ by_epoch=True,
+ tmpdir=None,
+ gpu_collect=False,
+ save_best=None,
+ rule=None,
+ broadcast_bn_buffer=True,
+ **eval_kwargs):
+ super().__init__(
+ dataloader,
+ start=start,
+ interval=interval,
+ by_epoch=by_epoch,
+ save_best=save_best,
+ rule=rule,
+ **eval_kwargs)
+ self.broadcast_bn_buffer = broadcast_bn_buffer
+ self.tmpdir = tmpdir
+ self.gpu_collect = gpu_collect
+
+ def _broadcast_bn_buffer(self, runner):
+ # Synchronization of BatchNorm's buffer (running_mean
+ # and running_var) is not supported in the DDP of pytorch,
+ # which may cause the inconsistent performance of models in
+ # different ranks, so we broadcast BatchNorm's buffers
+ # of rank 0 to other ranks to avoid this.
+ if self.broadcast_bn_buffer:
+ model = runner.model
+ for name, module in model.named_modules():
+ if isinstance(module,
+ _BatchNorm) and module.track_running_stats:
+ dist.broadcast(module.running_var, 0)
+ dist.broadcast(module.running_mean, 0)
+
+ def after_train_epoch(self, runner):
+ if not self.by_epoch or not self.evaluation_flag(runner):
+ return
+
+ if self.broadcast_bn_buffer:
+ self._broadcast_bn_buffer(runner)
+
+ from mmdet.apis import multi_gpu_test
+ tmpdir = self.tmpdir
+ if tmpdir is None:
+ tmpdir = osp.join(runner.work_dir, '.eval_hook')
+ results = multi_gpu_test(
+ runner.model,
+ self.dataloader,
+ tmpdir=tmpdir,
+ gpu_collect=self.gpu_collect)
+ if runner.rank == 0:
+ print('\n')
+ key_score = self.evaluate(runner, results)
+ if self.save_best:
+ self.save_best_checkpoint(runner, key_score)
+
+ def after_train_iter(self, runner):
+ if self.by_epoch or not self.every_n_iters(runner, self.interval):
+ return
+
+ if self.broadcast_bn_buffer:
+ self._broadcast_bn_buffer(runner)
+
+ from mmdet.apis import multi_gpu_test
+ tmpdir = self.tmpdir
+ if tmpdir is None:
+ tmpdir = osp.join(runner.work_dir, '.eval_hook')
+ results = multi_gpu_test(
+ runner.model,
+ self.dataloader,
+ tmpdir=tmpdir,
+ gpu_collect=self.gpu_collect)
+ if runner.rank == 0:
+ print('\n')
+ key_score = self.evaluate(runner, results)
+ if self.save_best:
+ self.save_best_checkpoint(runner, key_score)
diff --git a/annotator/uniformer/mmdet_null/core/evaluation/mean_ap.py b/annotator/uniformer/mmdet_null/core/evaluation/mean_ap.py
new file mode 100644
index 0000000000000000000000000000000000000000..e3226c71cf8457dce65652553132ad1ddbf214f7
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/core/evaluation/mean_ap.py
@@ -0,0 +1,469 @@
+from multiprocessing import Pool
+
+import annotator.uniformer.mmcv as mmcv
+import numpy as np
+from annotator.uniformer.mmcv.utils import print_log
+from terminaltables import AsciiTable
+
+from .bbox_overlaps import bbox_overlaps
+from .class_names import get_classes
+
+
+def average_precision(recalls, precisions, mode='area'):
+ """Calculate average precision (for single or multiple scales).
+
+ Args:
+ recalls (ndarray): shape (num_scales, num_dets) or (num_dets, )
+ precisions (ndarray): shape (num_scales, num_dets) or (num_dets, )
+ mode (str): 'area' or '11points', 'area' means calculating the area
+ under precision-recall curve, '11points' means calculating
+ the average precision of recalls at [0, 0.1, ..., 1]
+
+ Returns:
+ float or ndarray: calculated average precision
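+
+    Example:
+        >>> # a small worked example with assumed values: the interpolated
+        >>> # precision is 1.0 up to recall 0.5 and 0.5 up to recall 1.0,
+        >>> # so the area under the curve is 0.5 * 1.0 + 0.5 * 0.5 = 0.75
+        >>> recalls = np.array([0.5, 1.0])
+        >>> precisions = np.array([1.0, 0.5])
+        >>> float(average_precision(recalls, precisions, mode='area'))
+        0.75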
+ """
+ no_scale = False
+ if recalls.ndim == 1:
+ no_scale = True
+ recalls = recalls[np.newaxis, :]
+ precisions = precisions[np.newaxis, :]
+ assert recalls.shape == precisions.shape and recalls.ndim == 2
+ num_scales = recalls.shape[0]
+ ap = np.zeros(num_scales, dtype=np.float32)
+ if mode == 'area':
+ zeros = np.zeros((num_scales, 1), dtype=recalls.dtype)
+ ones = np.ones((num_scales, 1), dtype=recalls.dtype)
+ mrec = np.hstack((zeros, recalls, ones))
+ mpre = np.hstack((zeros, precisions, zeros))
+ for i in range(mpre.shape[1] - 1, 0, -1):
+ mpre[:, i - 1] = np.maximum(mpre[:, i - 1], mpre[:, i])
+ for i in range(num_scales):
+ ind = np.where(mrec[i, 1:] != mrec[i, :-1])[0]
+ ap[i] = np.sum(
+ (mrec[i, ind + 1] - mrec[i, ind]) * mpre[i, ind + 1])
+ elif mode == '11points':
+ for i in range(num_scales):
+ for thr in np.arange(0, 1 + 1e-3, 0.1):
+ precs = precisions[i, recalls[i, :] >= thr]
+ prec = precs.max() if precs.size > 0 else 0
+ ap[i] += prec
+ ap /= 11
+ else:
+ raise ValueError(
+ 'Unrecognized mode, only "area" and "11points" are supported')
+ if no_scale:
+ ap = ap[0]
+ return ap
+
+
+def tpfp_imagenet(det_bboxes,
+ gt_bboxes,
+ gt_bboxes_ignore=None,
+ default_iou_thr=0.5,
+ area_ranges=None):
+ """Check if detected bboxes are true positive or false positive.
+
+ Args:
+ det_bboxes (ndarray): Detected bboxes of this image, of shape (m, 5).
+ gt_bboxes (ndarray): GT bboxes of this image, of shape (n, 4).
+ gt_bboxes_ignore (ndarray): Ignored gt bboxes of this image,
+ of shape (k, 4). Default: None
+ default_iou_thr (float): IoU threshold to be considered as matched for
+ medium and large bboxes (small ones have special rules).
+ Default: 0.5.
+ area_ranges (list[tuple] | None): Range of bbox areas to be evaluated,
+ in the format [(min1, max1), (min2, max2), ...]. Default: None.
+
+ Returns:
+ tuple[np.ndarray]: (tp, fp) whose elements are 0 and 1. The shape of
+ each array is (num_scales, m).
+ """
+ # an indicator of ignored gts
+ gt_ignore_inds = np.concatenate(
+ (np.zeros(gt_bboxes.shape[0], dtype=bool),
+ np.ones(gt_bboxes_ignore.shape[0], dtype=bool)))
+ # stack gt_bboxes and gt_bboxes_ignore for convenience
+ gt_bboxes = np.vstack((gt_bboxes, gt_bboxes_ignore))
+
+ num_dets = det_bboxes.shape[0]
+ num_gts = gt_bboxes.shape[0]
+ if area_ranges is None:
+ area_ranges = [(None, None)]
+ num_scales = len(area_ranges)
+ # tp and fp are of shape (num_scales, num_dets), each row is tp or fp
+ # of a certain scale.
+ tp = np.zeros((num_scales, num_dets), dtype=np.float32)
+ fp = np.zeros((num_scales, num_dets), dtype=np.float32)
+ if gt_bboxes.shape[0] == 0:
+ if area_ranges == [(None, None)]:
+ fp[...] = 1
+ else:
+ det_areas = (det_bboxes[:, 2] - det_bboxes[:, 0]) * (
+ det_bboxes[:, 3] - det_bboxes[:, 1])
+ for i, (min_area, max_area) in enumerate(area_ranges):
+ fp[i, (det_areas >= min_area) & (det_areas < max_area)] = 1
+ return tp, fp
+ ious = bbox_overlaps(det_bboxes, gt_bboxes - 1)
+ gt_w = gt_bboxes[:, 2] - gt_bboxes[:, 0]
+ gt_h = gt_bboxes[:, 3] - gt_bboxes[:, 1]
+ iou_thrs = np.minimum((gt_w * gt_h) / ((gt_w + 10.0) * (gt_h + 10.0)),
+ default_iou_thr)
+ # sort all detections by scores in descending order
+ sort_inds = np.argsort(-det_bboxes[:, -1])
+ for k, (min_area, max_area) in enumerate(area_ranges):
+ gt_covered = np.zeros(num_gts, dtype=bool)
+ # if no area range is specified, gt_area_ignore is all False
+ if min_area is None:
+ gt_area_ignore = np.zeros_like(gt_ignore_inds, dtype=bool)
+ else:
+ gt_areas = gt_w * gt_h
+ gt_area_ignore = (gt_areas < min_area) | (gt_areas >= max_area)
+ for i in sort_inds:
+ max_iou = -1
+ matched_gt = -1
+ # find best overlapped available gt
+ for j in range(num_gts):
+ # different from PASCAL VOC: allow finding other gts if the
+ # best overlapped ones are already matched by other det bboxes
+ if gt_covered[j]:
+ continue
+ elif ious[i, j] >= iou_thrs[j] and ious[i, j] > max_iou:
+ max_iou = ious[i, j]
+ matched_gt = j
+ # there are 4 cases for a det bbox:
+ # 1. it matches a gt, tp = 1, fp = 0
+ # 2. it matches an ignored gt, tp = 0, fp = 0
+ # 3. it matches no gt and within area range, tp = 0, fp = 1
+ # 4. it matches no gt but is beyond area range, tp = 0, fp = 0
+ if matched_gt >= 0:
+ gt_covered[matched_gt] = 1
+ if not (gt_ignore_inds[matched_gt]
+ or gt_area_ignore[matched_gt]):
+ tp[k, i] = 1
+ elif min_area is None:
+ fp[k, i] = 1
+ else:
+ bbox = det_bboxes[i, :4]
+ area = (bbox[2] - bbox[0]) * (bbox[3] - bbox[1])
+ if area >= min_area and area < max_area:
+ fp[k, i] = 1
+ return tp, fp
+
+
+def tpfp_default(det_bboxes,
+ gt_bboxes,
+ gt_bboxes_ignore=None,
+ iou_thr=0.5,
+ area_ranges=None):
+ """Check if detected bboxes are true positive or false positive.
+
+ Args:
+ det_bboxes (ndarray): Detected bboxes of this image, of shape (m, 5).
+ gt_bboxes (ndarray): GT bboxes of this image, of shape (n, 4).
+ gt_bboxes_ignore (ndarray): Ignored gt bboxes of this image,
+ of shape (k, 4). Default: None
+ iou_thr (float): IoU threshold to be considered as matched.
+ Default: 0.5.
+ area_ranges (list[tuple] | None): Range of bbox areas to be evaluated,
+ in the format [(min1, max1), (min2, max2), ...]. Default: None.
+
+ Returns:
+ tuple[np.ndarray]: (tp, fp) whose elements are 0 and 1. The shape of
+ each array is (num_scales, m).
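+
+ Example:
+ A toy illustration: the first detection matches the single gt box
+ exactly (true positive) while the second does not overlap it at all
+ (false positive).
+
+ >>> det_bboxes = np.array([[0., 0., 10., 10., 0.9],
+ ... [20., 20., 30., 30., 0.8]])
+ >>> gt_bboxes = np.array([[0., 0., 10., 10.]])
+ >>> tp, fp = tpfp_default(det_bboxes, gt_bboxes, np.empty((0, 4)))
+ >>> assert tp.tolist() == [[1.0, 0.0]] and fp.tolist() == [[0.0, 1.0]]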
+ """
+ # an indicator of ignored gts
+ gt_ignore_inds = np.concatenate(
+ (np.zeros(gt_bboxes.shape[0], dtype=bool),
+ np.ones(gt_bboxes_ignore.shape[0], dtype=bool)))
+ # stack gt_bboxes and gt_bboxes_ignore for convenience
+ gt_bboxes = np.vstack((gt_bboxes, gt_bboxes_ignore))
+
+ num_dets = det_bboxes.shape[0]
+ num_gts = gt_bboxes.shape[0]
+ if area_ranges is None:
+ area_ranges = [(None, None)]
+ num_scales = len(area_ranges)
+ # tp and fp are of shape (num_scales, num_dets), each row is tp or fp of
+ # a certain scale
+ tp = np.zeros((num_scales, num_dets), dtype=np.float32)
+ fp = np.zeros((num_scales, num_dets), dtype=np.float32)
+
+ # if there is no gt bboxes in this image, then all det bboxes
+ # within area range are false positives
+ if gt_bboxes.shape[0] == 0:
+ if area_ranges == [(None, None)]:
+ fp[...] = 1
+ else:
+ det_areas = (det_bboxes[:, 2] - det_bboxes[:, 0]) * (
+ det_bboxes[:, 3] - det_bboxes[:, 1])
+ for i, (min_area, max_area) in enumerate(area_ranges):
+ fp[i, (det_areas >= min_area) & (det_areas < max_area)] = 1
+ return tp, fp
+
+ ious = bbox_overlaps(det_bboxes, gt_bboxes)
+ # for each det, the max iou with all gts
+ ious_max = ious.max(axis=1)
+ # for each det, which gt overlaps most with it
+ ious_argmax = ious.argmax(axis=1)
+ # sort all dets in descending order by scores
+ sort_inds = np.argsort(-det_bboxes[:, -1])
+ for k, (min_area, max_area) in enumerate(area_ranges):
+ gt_covered = np.zeros(num_gts, dtype=bool)
+ # if no area range is specified, gt_area_ignore is all False
+ if min_area is None:
+ gt_area_ignore = np.zeros_like(gt_ignore_inds, dtype=bool)
+ else:
+ gt_areas = (gt_bboxes[:, 2] - gt_bboxes[:, 0]) * (
+ gt_bboxes[:, 3] - gt_bboxes[:, 1])
+ gt_area_ignore = (gt_areas < min_area) | (gt_areas >= max_area)
+ for i in sort_inds:
+ if ious_max[i] >= iou_thr:
+ matched_gt = ious_argmax[i]
+ if not (gt_ignore_inds[matched_gt]
+ or gt_area_ignore[matched_gt]):
+ if not gt_covered[matched_gt]:
+ gt_covered[matched_gt] = True
+ tp[k, i] = 1
+ else:
+ fp[k, i] = 1
+ # otherwise ignore this detected bbox, tp = 0, fp = 0
+ elif min_area is None:
+ fp[k, i] = 1
+ else:
+ bbox = det_bboxes[i, :4]
+ area = (bbox[2] - bbox[0]) * (bbox[3] - bbox[1])
+ if area >= min_area and area < max_area:
+ fp[k, i] = 1
+ return tp, fp
+
+
+def get_cls_results(det_results, annotations, class_id):
+ """Get det results and gt information of a certain class.
+
+ Args:
+ det_results (list[list]): Same as `eval_map()`.
+ annotations (list[dict]): Same as `eval_map()`.
+ class_id (int): ID of a specific class.
+
+ Returns:
+ tuple[list[np.ndarray]]: detected bboxes, gt bboxes, ignored gt bboxes
+ """
+ cls_dets = [img_res[class_id] for img_res in det_results]
+ cls_gts = []
+ cls_gts_ignore = []
+ for ann in annotations:
+ gt_inds = ann['labels'] == class_id
+ cls_gts.append(ann['bboxes'][gt_inds, :])
+
+ if ann.get('labels_ignore', None) is not None:
+ ignore_inds = ann['labels_ignore'] == class_id
+ cls_gts_ignore.append(ann['bboxes_ignore'][ignore_inds, :])
+ else:
+ cls_gts_ignore.append(np.empty((0, 4), dtype=np.float32))
+
+ return cls_dets, cls_gts, cls_gts_ignore
+
+
+def eval_map(det_results,
+ annotations,
+ scale_ranges=None,
+ iou_thr=0.5,
+ dataset=None,
+ logger=None,
+ tpfp_fn=None,
+ nproc=4):
+ """Evaluate mAP of a dataset.
+
+ Args:
+ det_results (list[list]): [[cls1_det, cls2_det, ...], ...].
+ The outer list indicates images, and the inner list indicates
+ per-class detected bboxes.
+ annotations (list[dict]): Ground truth annotations where each item of
+ the list indicates an image. Keys of annotations are:
+
+ - `bboxes`: numpy array of shape (n, 4)
+ - `labels`: numpy array of shape (n, )
+ - `bboxes_ignore` (optional): numpy array of shape (k, 4)
+ - `labels_ignore` (optional): numpy array of shape (k, )
+ scale_ranges (list[tuple] | None): Range of scales to be evaluated,
+ in the format [(min1, max1), (min2, max2), ...]. A range of
+ (32, 64) means the area range between (32**2, 64**2).
+ Default: None.
+ iou_thr (float): IoU threshold to be considered as matched.
+ Default: 0.5.
+ dataset (list[str] | str | None): Dataset name or dataset classes,
+ there are minor differences in metrics for different datasets, e.g.
+ "voc07", "imagenet_det", etc. Default: None.
+ logger (logging.Logger | str | None): The way to print the mAP
+ summary. See `mmcv.utils.print_log()` for details. Default: None.
+ tpfp_fn (callable | None): The function used to determine true/
+ false positives. If None, :func:`tpfp_default` is used as default
+ unless dataset is 'det' or 'vid' (:func:`tpfp_imagenet` in this
+ case). If it is given as a function, then this function is used
+ to evaluate tp & fp. Default None.
+ nproc (int): Processes used for computing TP and FP.
+ Default: 4.
+
+ Returns:
+ tuple: (mAP, [dict, dict, ...])
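+
+ Example:
+ A minimal single-image, single-class illustration with toy data; the
+ only detection matches the only gt box, so the mAP is 1.0. Passing
+ ``logger='silent'`` suppresses the printed summary table.
+
+ >>> det_results = [[np.array([[0., 0., 10., 10., 0.9]])]]
+ >>> annotations = [dict(bboxes=np.array([[0., 0., 10., 10.]]),
+ ... labels=np.array([0]))]
+ >>> mean_ap, cls_results = eval_map(
+ ... det_results, annotations, logger='silent', nproc=1)
+ >>> assert abs(mean_ap - 1.0) < 1e-6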
+ """
+ assert len(det_results) == len(annotations)
+
+ num_imgs = len(det_results)
+ num_scales = len(scale_ranges) if scale_ranges is not None else 1
+ num_classes = len(det_results[0]) # positive class num
+ area_ranges = ([(rg[0]**2, rg[1]**2) for rg in scale_ranges]
+ if scale_ranges is not None else None)
+
+ pool = Pool(nproc)
+ eval_results = []
+ for i in range(num_classes):
+ # get gt and det bboxes of this class
+ cls_dets, cls_gts, cls_gts_ignore = get_cls_results(
+ det_results, annotations, i)
+ # choose proper function according to datasets to compute tp and fp
+ if tpfp_fn is None:
+ if dataset in ['det', 'vid']:
+ tpfp_fn = tpfp_imagenet
+ else:
+ tpfp_fn = tpfp_default
+ if not callable(tpfp_fn):
+ raise ValueError(
+ f'tpfp_fn has to be a function or None, but got {tpfp_fn}')
+
+ # compute tp and fp for each image with multiple processes
+ tpfp = pool.starmap(
+ tpfp_fn,
+ zip(cls_dets, cls_gts, cls_gts_ignore,
+ [iou_thr for _ in range(num_imgs)],
+ [area_ranges for _ in range(num_imgs)]))
+ tp, fp = tuple(zip(*tpfp))
+ # calculate gt number of each scale
+ # ignored gts or gts beyond the specific scale are not counted
+ num_gts = np.zeros(num_scales, dtype=int)
+ for j, bbox in enumerate(cls_gts):
+ if area_ranges is None:
+ num_gts[0] += bbox.shape[0]
+ else:
+ gt_areas = (bbox[:, 2] - bbox[:, 0]) * (
+ bbox[:, 3] - bbox[:, 1])
+ for k, (min_area, max_area) in enumerate(area_ranges):
+ num_gts[k] += np.sum((gt_areas >= min_area)
+ & (gt_areas < max_area))
+ # sort all det bboxes by score, also sort tp and fp
+ cls_dets = np.vstack(cls_dets)
+ num_dets = cls_dets.shape[0]
+ sort_inds = np.argsort(-cls_dets[:, -1])
+ tp = np.hstack(tp)[:, sort_inds]
+ fp = np.hstack(fp)[:, sort_inds]
+ # calculate recall and precision with tp and fp
+ tp = np.cumsum(tp, axis=1)
+ fp = np.cumsum(fp, axis=1)
+ eps = np.finfo(np.float32).eps
+ recalls = tp / np.maximum(num_gts[:, np.newaxis], eps)
+ precisions = tp / np.maximum((tp + fp), eps)
+ # calculate AP
+ if scale_ranges is None:
+ recalls = recalls[0, :]
+ precisions = precisions[0, :]
+ num_gts = num_gts.item()
+ mode = 'area' if dataset != 'voc07' else '11points'
+ ap = average_precision(recalls, precisions, mode)
+ eval_results.append({
+ 'num_gts': num_gts,
+ 'num_dets': num_dets,
+ 'recall': recalls,
+ 'precision': precisions,
+ 'ap': ap
+ })
+ pool.close()
+ if scale_ranges is not None:
+ # shape (num_classes, num_scales)
+ all_ap = np.vstack([cls_result['ap'] for cls_result in eval_results])
+ all_num_gts = np.vstack(
+ [cls_result['num_gts'] for cls_result in eval_results])
+ mean_ap = []
+ for i in range(num_scales):
+ if np.any(all_num_gts[:, i] > 0):
+ mean_ap.append(all_ap[all_num_gts[:, i] > 0, i].mean())
+ else:
+ mean_ap.append(0.0)
+ else:
+ aps = []
+ for cls_result in eval_results:
+ if cls_result['num_gts'] > 0:
+ aps.append(cls_result['ap'])
+ mean_ap = np.array(aps).mean().item() if aps else 0.0
+
+ print_map_summary(
+ mean_ap, eval_results, dataset, area_ranges, logger=logger)
+
+ return mean_ap, eval_results
+
+
+def print_map_summary(mean_ap,
+ results,
+ dataset=None,
+ scale_ranges=None,
+ logger=None):
+ """Print mAP and results of each class.
+
+ A table will be printed to show the gts/dets/recall/AP of each class and
+ the mAP.
+
+ Args:
+ mean_ap (float): Calculated from `eval_map()`.
+ results (list[dict]): Calculated from `eval_map()`.
+ dataset (list[str] | str | None): Dataset name or dataset classes.
+ scale_ranges (list[tuple] | None): Range of scales to be evaluated.
+ logger (logging.Logger | str | None): The way to print the mAP
+ summary. See `mmcv.utils.print_log()` for details. Default: None.
+ """
+
+ if logger == 'silent':
+ return
+
+ if isinstance(results[0]['ap'], np.ndarray):
+ num_scales = len(results[0]['ap'])
+ else:
+ num_scales = 1
+
+ if scale_ranges is not None:
+ assert len(scale_ranges) == num_scales
+
+ num_classes = len(results)
+
+ recalls = np.zeros((num_scales, num_classes), dtype=np.float32)
+ aps = np.zeros((num_scales, num_classes), dtype=np.float32)
+ num_gts = np.zeros((num_scales, num_classes), dtype=int)
+ for i, cls_result in enumerate(results):
+ if cls_result['recall'].size > 0:
+ recalls[:, i] = np.array(cls_result['recall'], ndmin=2)[:, -1]
+ aps[:, i] = cls_result['ap']
+ num_gts[:, i] = cls_result['num_gts']
+
+ if dataset is None:
+ label_names = [str(i) for i in range(num_classes)]
+ elif mmcv.is_str(dataset):
+ label_names = get_classes(dataset)
+ else:
+ label_names = dataset
+
+ if not isinstance(mean_ap, list):
+ mean_ap = [mean_ap]
+
+ header = ['class', 'gts', 'dets', 'recall', 'ap']
+ for i in range(num_scales):
+ if scale_ranges is not None:
+ print_log(f'Scale range {scale_ranges[i]}', logger=logger)
+ table_data = [header]
+ for j in range(num_classes):
+ row_data = [
+ label_names[j], num_gts[i, j], results[j]['num_dets'],
+ f'{recalls[i, j]:.3f}', f'{aps[i, j]:.3f}'
+ ]
+ table_data.append(row_data)
+ table_data.append(['mAP', '', '', '', f'{mean_ap[i]:.3f}'])
+ table = AsciiTable(table_data)
+ table.inner_footing_row_border = True
+ print_log('\n' + table.table, logger=logger)
diff --git a/annotator/uniformer/mmdet_null/core/evaluation/recall.py b/annotator/uniformer/mmdet_null/core/evaluation/recall.py
new file mode 100644
index 0000000000000000000000000000000000000000..d840b4f4f100f65158b35ba49eb00214655667f5
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/core/evaluation/recall.py
@@ -0,0 +1,189 @@
+from collections.abc import Sequence
+
+import numpy as np
+from annotator.uniformer.mmcv.utils import print_log
+from terminaltables import AsciiTable
+
+from .bbox_overlaps import bbox_overlaps
+
+
+def _recalls(all_ious, proposal_nums, thrs):
+
+ img_num = all_ious.shape[0]
+ total_gt_num = sum([ious.shape[0] for ious in all_ious])
+
+ _ious = np.zeros((proposal_nums.size, total_gt_num), dtype=np.float32)
+ for k, proposal_num in enumerate(proposal_nums):
+ tmp_ious = np.zeros(0)
+ for i in range(img_num):
+ ious = all_ious[i][:, :proposal_num].copy()
+ gt_ious = np.zeros((ious.shape[0]))
+ if ious.size == 0:
+ tmp_ious = np.hstack((tmp_ious, gt_ious))
+ continue
+ for j in range(ious.shape[0]):
+ gt_max_overlaps = ious.argmax(axis=1)
+ max_ious = ious[np.arange(0, ious.shape[0]), gt_max_overlaps]
+ gt_idx = max_ious.argmax()
+ gt_ious[j] = max_ious[gt_idx]
+ box_idx = gt_max_overlaps[gt_idx]
+ ious[gt_idx, :] = -1
+ ious[:, box_idx] = -1
+ tmp_ious = np.hstack((tmp_ious, gt_ious))
+ _ious[k, :] = tmp_ious
+
+ _ious = np.fliplr(np.sort(_ious, axis=1))
+ recalls = np.zeros((proposal_nums.size, thrs.size))
+ for i, thr in enumerate(thrs):
+ recalls[:, i] = (_ious >= thr).sum(axis=1) / float(total_gt_num)
+
+ return recalls
+
+
+def set_recall_param(proposal_nums, iou_thrs):
+ """Check proposal_nums and iou_thrs and set correct format."""
+ if isinstance(proposal_nums, Sequence):
+ _proposal_nums = np.array(proposal_nums)
+ elif isinstance(proposal_nums, int):
+ _proposal_nums = np.array([proposal_nums])
+ else:
+ _proposal_nums = proposal_nums
+
+ if iou_thrs is None:
+ _iou_thrs = np.array([0.5])
+ elif isinstance(iou_thrs, Sequence):
+ _iou_thrs = np.array(iou_thrs)
+ elif isinstance(iou_thrs, float):
+ _iou_thrs = np.array([iou_thrs])
+ else:
+ _iou_thrs = iou_thrs
+
+ return _proposal_nums, _iou_thrs
+
+
+def eval_recalls(gts,
+ proposals,
+ proposal_nums=None,
+ iou_thrs=0.5,
+ logger=None):
+ """Calculate recalls.
+
+ Args:
+ gts (list[ndarray]): a list of arrays of shape (n, 4)
+ proposals (list[ndarray]): a list of arrays of shape (k, 4) or (k, 5)
+ proposal_nums (int | Sequence[int]): Top N proposals to be evaluated.
+ iou_thrs (float | Sequence[float]): IoU thresholds. Default: 0.5.
+ logger (logging.Logger | str | None): The way to print the recall
+ summary. See `mmcv.utils.print_log()` for details. Default: None.
+
+ Returns:
+ ndarray: recalls of different ious and proposal nums
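+
+ Example:
+ A toy illustration: a single image whose only proposal coincides with
+ the only gt box, giving a recall of 1.0 at IoU 0.5.
+
+ >>> gts = [np.array([[0., 0., 10., 10.]])]
+ >>> proposals = [np.array([[0., 0., 10., 10., 1.0]])]
+ >>> recalls = eval_recalls(gts, proposals, proposal_nums=[1],
+ ... iou_thrs=0.5, logger='silent')
+ >>> assert recalls.shape == (1, 1) and recalls[0, 0] == 1.0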
+ """
+
+ img_num = len(gts)
+ assert img_num == len(proposals)
+
+ proposal_nums, iou_thrs = set_recall_param(proposal_nums, iou_thrs)
+
+ all_ious = []
+ for i in range(img_num):
+ if proposals[i].ndim == 2 and proposals[i].shape[1] == 5:
+ scores = proposals[i][:, 4]
+ sort_idx = np.argsort(scores)[::-1]
+ img_proposal = proposals[i][sort_idx, :]
+ else:
+ img_proposal = proposals[i]
+ prop_num = min(img_proposal.shape[0], proposal_nums[-1])
+ if gts[i] is None or gts[i].shape[0] == 0:
+ ious = np.zeros((0, img_proposal.shape[0]), dtype=np.float32)
+ else:
+ ious = bbox_overlaps(gts[i], img_proposal[:prop_num, :4])
+ all_ious.append(ious)
+ all_ious = np.array(all_ious)
+ recalls = _recalls(all_ious, proposal_nums, iou_thrs)
+
+ print_recall_summary(recalls, proposal_nums, iou_thrs, logger=logger)
+ return recalls
+
+
+def print_recall_summary(recalls,
+ proposal_nums,
+ iou_thrs,
+ row_idxs=None,
+ col_idxs=None,
+ logger=None):
+ """Print recalls in a table.
+
+ Args:
+ recalls (ndarray): calculated from `bbox_recalls`
+ proposal_nums (ndarray or list): top N proposals
+ iou_thrs (ndarray or list): iou thresholds
+ row_idxs (ndarray): which rows (proposal nums) to print
+ col_idxs (ndarray): which cols (iou thresholds) to print
+ logger (logging.Logger | str | None): The way to print the recall
+ summary. See `mmcv.utils.print_log()` for details. Default: None.
+ """
+ proposal_nums = np.array(proposal_nums, dtype=np.int32)
+ iou_thrs = np.array(iou_thrs)
+ if row_idxs is None:
+ row_idxs = np.arange(proposal_nums.size)
+ if col_idxs is None:
+ col_idxs = np.arange(iou_thrs.size)
+ row_header = [''] + iou_thrs[col_idxs].tolist()
+ table_data = [row_header]
+ for i, num in enumerate(proposal_nums[row_idxs]):
+ row = [f'{val:.3f}' for val in recalls[row_idxs[i], col_idxs].tolist()]
+ row.insert(0, num)
+ table_data.append(row)
+ table = AsciiTable(table_data)
+ print_log('\n' + table.table, logger=logger)
+
+
+def plot_num_recall(recalls, proposal_nums):
+ """Plot Proposal_num-Recalls curve.
+
+ Args:
+ recalls(ndarray or list): shape (k,)
+ proposal_nums(ndarray or list): same shape as `recalls`
+ """
+ if isinstance(proposal_nums, np.ndarray):
+ _proposal_nums = proposal_nums.tolist()
+ else:
+ _proposal_nums = proposal_nums
+ if isinstance(recalls, np.ndarray):
+ _recalls = recalls.tolist()
+ else:
+ _recalls = recalls
+
+ import matplotlib.pyplot as plt
+ f = plt.figure()
+ plt.plot([0] + _proposal_nums, [0] + _recalls)
+ plt.xlabel('Proposal num')
+ plt.ylabel('Recall')
+ plt.axis([0, proposal_nums.max(), 0, 1])
+ f.show()
+
+
+def plot_iou_recall(recalls, iou_thrs):
+ """Plot IoU-Recalls curve.
+
+ Args:
+ recalls(ndarray or list): shape (k,)
+ iou_thrs(ndarray or list): same shape as `recalls`
+ """
+ if isinstance(iou_thrs, np.ndarray):
+ _iou_thrs = iou_thrs.tolist()
+ else:
+ _iou_thrs = iou_thrs
+ if isinstance(recalls, np.ndarray):
+ _recalls = recalls.tolist()
+ else:
+ _recalls = recalls
+
+ import matplotlib.pyplot as plt
+ f = plt.figure()
+ plt.plot(_iou_thrs + [1.0], _recalls + [0.])
+ plt.xlabel('IoU')
+ plt.ylabel('Recall')
+ plt.axis([iou_thrs.min(), 1, 0, 1])
+ f.show()
diff --git a/annotator/uniformer/mmdet_null/core/export/__init__.py b/annotator/uniformer/mmdet_null/core/export/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..76589b1f279a71a59a5515d1b78cea0865f83131
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/core/export/__init__.py
@@ -0,0 +1,8 @@
+from .pytorch2onnx import (build_model_from_cfg,
+ generate_inputs_and_wrap_model,
+ preprocess_example_input)
+
+__all__ = [
+ 'build_model_from_cfg', 'generate_inputs_and_wrap_model',
+ 'preprocess_example_input'
+]
diff --git a/annotator/uniformer/mmdet_null/core/export/pytorch2onnx.py b/annotator/uniformer/mmdet_null/core/export/pytorch2onnx.py
new file mode 100644
index 0000000000000000000000000000000000000000..809a817e67446b3c0c7894dcefb3c4bbc29afb7e
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/core/export/pytorch2onnx.py
@@ -0,0 +1,154 @@
+from functools import partial
+
+import mmcv
+import numpy as np
+import torch
+from mmcv.runner import load_checkpoint
+
+
+def generate_inputs_and_wrap_model(config_path,
+ checkpoint_path,
+ input_config,
+ cfg_options=None):
+ """Prepare sample input and wrap model for ONNX export.
+
+ The ONNX export API only accepts positional args, and all inputs should be
+ torch.Tensor or corresponding types (such as tuple of tensor).
+ So we should call this function before exporting. This function will:
+
+ 1. generate corresponding inputs which are used to execute the model.
+ 2. Wrap the model's forward function.
+
+ For example, the forward function of MMDet models has a parameter
+ ``return_loss: bool``. We want to set it to False, but the export API
+ supports neither bool inputs nor kwargs, so we have to replace the forward
+ function like: ``model.forward = partial(model.forward, return_loss=False)``
+
+ Args:
+ config_path (str): the OpenMMLab config for the model we want to
+ export to ONNX
+ checkpoint_path (str): Path to the corresponding checkpoint
+ input_config (dict): the exact data in this dict depends on the
+ framework. For MMSeg, we can just declare the input shape
+ and generate the dummy data accordingly. However, for MMDet,
+ we may need to pass a real image path, otherwise NMS will
+ return None as there is no valid bbox.
+
+ Returns:
+ tuple: (model, tensor_data) wrapped model which can be called by \
+ model(*tensor_data) and a list of inputs which are used to execute \
+ the model while exporting.
+ """
+
+ model = build_model_from_cfg(
+ config_path, checkpoint_path, cfg_options=cfg_options)
+ one_img, one_meta = preprocess_example_input(input_config)
+ tensor_data = [one_img]
+ model.forward = partial(
+ model.forward, img_metas=[[one_meta]], return_loss=False)
+
+ # PyTorch 1.3 has some bugs in ONNX export, which we work around
+ # by registering replacement symbolic ops
+ opset_version = 11
+ # keep the import inside the function so it does not cause an import
+ # error when this function is not used
+ try:
+ from mmcv.onnx.symbolic import register_extra_symbolics
+ except ModuleNotFoundError:
+ raise NotImplementedError('please update mmcv to version>=v1.0.4')
+ register_extra_symbolics(opset_version)
+
+ return model, tensor_data
+
+
+def build_model_from_cfg(config_path, checkpoint_path, cfg_options=None):
+ """Build a model from config and load the given checkpoint.
+
+ Args:
+ config_path (str): the OpenMMLab config for the model we want to
+ export to ONNX
+ checkpoint_path (str): Path to the corresponding checkpoint
+
+ Returns:
+ torch.nn.Module: the built model
+ """
+ from mmdet.models import build_detector
+
+ cfg = mmcv.Config.fromfile(config_path)
+ if cfg_options is not None:
+ cfg.merge_from_dict(cfg_options)
+ # import modules from string list.
+ if cfg.get('custom_imports', None):
+ from mmcv.utils import import_modules_from_strings
+ import_modules_from_strings(**cfg['custom_imports'])
+ # set cudnn_benchmark
+ if cfg.get('cudnn_benchmark', False):
+ torch.backends.cudnn.benchmark = True
+ cfg.model.pretrained = None
+ cfg.data.test.test_mode = True
+
+ # build the model
+ cfg.model.train_cfg = None
+ model = build_detector(cfg.model, test_cfg=cfg.get('test_cfg'))
+ load_checkpoint(model, checkpoint_path, map_location='cpu')
+ model.cpu().eval()
+ return model
+
+
+def preprocess_example_input(input_config):
+ """Prepare an example input image for ``generate_inputs_and_wrap_model``.
+
+ Args:
+ input_config (dict): customized config describing the example input.
+
+ Returns:
+ tuple: (one_img, one_meta), tensor of the example input image and \
+ meta information for the example input image.
+
+ Examples:
+ >>> from mmdet.core.export import preprocess_example_input
+ >>> input_config = {
+ >>> 'input_shape': (1,3,224,224),
+ >>> 'input_path': 'demo/demo.jpg',
+ >>> 'normalize_cfg': {
+ >>> 'mean': (123.675, 116.28, 103.53),
+ >>> 'std': (58.395, 57.12, 57.375)
+ >>> }
+ >>> }
+ >>> one_img, one_meta = preprocess_example_input(input_config)
+ >>> print(one_img.shape)
+ torch.Size([1, 3, 224, 224])
+ >>> print(one_meta)
+ {'img_shape': (224, 224, 3),
+ 'ori_shape': (224, 224, 3),
+ 'pad_shape': (224, 224, 3),
+ 'filename': '.png',
+ 'scale_factor': 1.0,
+ 'flip': False}
+ """
+ input_path = input_config['input_path']
+ input_shape = input_config['input_shape']
+ one_img = mmcv.imread(input_path)
+ one_img = mmcv.imresize(one_img, input_shape[2:][::-1])
+ show_img = one_img.copy()
+ if 'normalize_cfg' in input_config.keys():
+ normalize_cfg = input_config['normalize_cfg']
+ mean = np.array(normalize_cfg['mean'], dtype=np.float32)
+ std = np.array(normalize_cfg['std'], dtype=np.float32)
+ to_rgb = normalize_cfg.get('to_rgb', True)
+ one_img = mmcv.imnormalize(one_img, mean, std, to_rgb=to_rgb)
+ one_img = one_img.transpose(2, 0, 1)
+ one_img = torch.from_numpy(one_img).unsqueeze(0).float().requires_grad_(
+ True)
+ (_, C, H, W) = input_shape
+ one_meta = {
+ 'img_shape': (H, W, C),
+ 'ori_shape': (H, W, C),
+ 'pad_shape': (H, W, C),
+ 'filename': '.png',
+ 'scale_factor': 1.0,
+ 'flip': False,
+ 'show_img': show_img,
+ }
+
+ return one_img, one_meta
diff --git a/annotator/uniformer/mmdet_null/core/mask/__init__.py b/annotator/uniformer/mmdet_null/core/mask/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..ab1e88bc686d5c2fe72b3114cb2b3e372e73a0f8
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/core/mask/__init__.py
@@ -0,0 +1,8 @@
+from .mask_target import mask_target
+from .structures import BaseInstanceMasks, BitmapMasks, PolygonMasks
+from .utils import encode_mask_results, split_combined_polys
+
+__all__ = [
+ 'split_combined_polys', 'mask_target', 'BaseInstanceMasks', 'BitmapMasks',
+ 'PolygonMasks', 'encode_mask_results'
+]
diff --git a/annotator/uniformer/mmdet_null/core/mask/mask_target.py b/annotator/uniformer/mmdet_null/core/mask/mask_target.py
new file mode 100644
index 0000000000000000000000000000000000000000..15d26a88bbf3710bd92813335918407db8c4e053
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/core/mask/mask_target.py
@@ -0,0 +1,122 @@
+import numpy as np
+import torch
+from torch.nn.modules.utils import _pair
+
+
+def mask_target(pos_proposals_list, pos_assigned_gt_inds_list, gt_masks_list,
+ cfg):
+ """Compute mask target for positive proposals in multiple images.
+
+ Args:
+ pos_proposals_list (list[Tensor]): Positive proposals in multiple
+ images.
+ pos_assigned_gt_inds_list (list[Tensor]): Assigned GT indices for each
+ positive proposal.
+ gt_masks_list (list[:obj:`BaseInstanceMasks`]): Ground truth masks of
+ each image.
+ cfg (dict): Config dict that specifies the mask size.
+
+ Returns:
+ list[Tensor]: Mask target of each image.
+
+ Example:
+ >>> import mmcv
+ >>> import mmdet
+ >>> from mmdet.core.mask import BitmapMasks
+ >>> from mmdet.core.mask.mask_target import *
+ >>> H, W = 17, 18
+ >>> cfg = mmcv.Config({'mask_size': (13, 14)})
+ >>> rng = np.random.RandomState(0)
+ >>> # Positive proposals (tl_x, tl_y, br_x, br_y) for each image
+ >>> pos_proposals_list = [
+ >>> torch.Tensor([
+ >>> [ 7.2425, 5.5929, 13.9414, 14.9541],
+ >>> [ 7.3241, 3.6170, 16.3850, 15.3102],
+ >>> ]),
+ >>> torch.Tensor([
+ >>> [ 4.8448, 6.4010, 7.0314, 9.7681],
+ >>> [ 5.9790, 2.6989, 7.4416, 4.8580],
+ >>> [ 0.0000, 0.0000, 0.1398, 9.8232],
+ >>> ]),
+ >>> ]
+ >>> # Corresponding class index for each proposal for each image
+ >>> pos_assigned_gt_inds_list = [
+ >>> torch.LongTensor([7, 0]),
+ >>> torch.LongTensor([5, 4, 1]),
+ >>> ]
+ >>> # Ground truth mask for each true object for each image
+ >>> gt_masks_list = [
+ >>> BitmapMasks(rng.rand(8, H, W), height=H, width=W),
+ >>> BitmapMasks(rng.rand(6, H, W), height=H, width=W),
+ >>> ]
+ >>> mask_targets = mask_target(
+ >>> pos_proposals_list, pos_assigned_gt_inds_list,
+ >>> gt_masks_list, cfg)
+ >>> assert mask_targets.shape == (5,) + cfg['mask_size']
+ """
+ cfg_list = [cfg for _ in range(len(pos_proposals_list))]
+ mask_targets = map(mask_target_single, pos_proposals_list,
+ pos_assigned_gt_inds_list, gt_masks_list, cfg_list)
+ mask_targets = list(mask_targets)
+ if len(mask_targets) > 0:
+ mask_targets = torch.cat(mask_targets)
+ return mask_targets
+
+
+def mask_target_single(pos_proposals, pos_assigned_gt_inds, gt_masks, cfg):
+ """Compute mask target for each positive proposal in the image.
+
+ Args:
+ pos_proposals (Tensor): Positive proposals.
+ pos_assigned_gt_inds (Tensor): Assigned GT inds of positive proposals.
+ gt_masks (:obj:`BaseInstanceMasks`): GT masks in the format of Bitmap
+ or Polygon.
+ cfg (dict): Config dict that indicates the mask size.
+
+ Returns:
+ Tensor: Mask target of each positive proposal in the image.
+
+ Example:
+ >>> import mmcv
+ >>> import mmdet
+ >>> from mmdet.core.mask import BitmapMasks
+ >>> from mmdet.core.mask.mask_target import * # NOQA
+ >>> H, W = 32, 32
+ >>> cfg = mmcv.Config({'mask_size': (7, 11)})
+ >>> rng = np.random.RandomState(0)
+ >>> # Masks for each ground truth box (relative to the image)
+ >>> gt_masks_data = rng.rand(3, H, W)
+ >>> gt_masks = BitmapMasks(gt_masks_data, height=H, width=W)
+ >>> # Predicted positive boxes in one image
+ >>> pos_proposals = torch.FloatTensor([
+ >>> [ 16.2, 5.5, 19.9, 20.9],
+ >>> [ 17.3, 13.6, 19.3, 19.3],
+ >>> [ 14.8, 16.4, 17.0, 23.7],
+ >>> [ 0.0, 0.0, 16.0, 16.0],
+ >>> [ 4.0, 0.0, 20.0, 16.0],
+ >>> ])
+ >>> # For each predicted proposal, its assignment to a gt mask
+ >>> pos_assigned_gt_inds = torch.LongTensor([0, 1, 2, 1, 1])
+ >>> mask_targets = mask_target_single(
+ >>> pos_proposals, pos_assigned_gt_inds, gt_masks, cfg)
+ >>> assert mask_targets.shape == (5,) + cfg['mask_size']
+ """
+ device = pos_proposals.device
+ mask_size = _pair(cfg.mask_size)
+ num_pos = pos_proposals.size(0)
+ if num_pos > 0:
+ proposals_np = pos_proposals.cpu().numpy()
+ maxh, maxw = gt_masks.height, gt_masks.width
+ proposals_np[:, [0, 2]] = np.clip(proposals_np[:, [0, 2]], 0, maxw)
+ proposals_np[:, [1, 3]] = np.clip(proposals_np[:, [1, 3]], 0, maxh)
+ pos_assigned_gt_inds = pos_assigned_gt_inds.cpu().numpy()
+
+ mask_targets = gt_masks.crop_and_resize(
+ proposals_np, mask_size, device=device,
+ inds=pos_assigned_gt_inds).to_ndarray()
+
+ mask_targets = torch.from_numpy(mask_targets).float().to(device)
+ else:
+ mask_targets = pos_proposals.new_zeros((0, ) + mask_size)
+
+ return mask_targets
diff --git a/annotator/uniformer/mmdet_null/core/mask/structures.py b/annotator/uniformer/mmdet_null/core/mask/structures.py
new file mode 100644
index 0000000000000000000000000000000000000000..d9ec5775f281ab8b76cb873e71a4edd9969ab905
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/core/mask/structures.py
@@ -0,0 +1,1024 @@
+from abc import ABCMeta, abstractmethod
+
+import cv2
+import mmcv
+import numpy as np
+import pycocotools.mask as maskUtils
+import torch
+from mmcv.ops.roi_align import roi_align
+
+
+class BaseInstanceMasks(metaclass=ABCMeta):
+ """Base class for instance masks."""
+
+ @abstractmethod
+ def rescale(self, scale, interpolation='nearest'):
+ """Rescale masks as large as possible while keeping the aspect ratio.
+ For details, refer to `mmcv.imrescale`.
+
+ Args:
+ scale (tuple[int]): The maximum size (h, w) of rescaled mask.
+ interpolation (str): Same as :func:`mmcv.imrescale`.
+
+ Returns:
+ BaseInstanceMasks: The rescaled masks.
+ """
+
+ @abstractmethod
+ def resize(self, out_shape, interpolation='nearest'):
+ """Resize masks to the given out_shape.
+
+ Args:
+ out_shape: Target (h, w) of resized mask.
+ interpolation (str): See :func:`mmcv.imresize`.
+
+ Returns:
+ BaseInstanceMasks: The resized masks.
+ """
+
+ @abstractmethod
+ def flip(self, flip_direction='horizontal'):
+ """Flip masks alone the given direction.
+
+ Args:
+ flip_direction (str): Either 'horizontal' or 'vertical'.
+
+ Returns:
+ BaseInstanceMasks: The flipped masks.
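+
+ Example:
+ A toy illustration using the :class:`BitmapMasks` subclass defined
+ below: a horizontal flip moves a pixel from the first column to the
+ last one.
+
+ >>> masks = np.zeros((1, 2, 2), dtype=np.uint8)
+ >>> masks[0, 0, 0] = 1
+ >>> flipped = BitmapMasks(masks, height=2, width=2).flip('horizontal')
+ >>> assert flipped.masks[0, 0, 1] == 1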
+ """
+
+ @abstractmethod
+ def pad(self, out_shape, pad_val):
+ """Pad masks to the given size of (h, w).
+
+ Args:
+ out_shape (tuple[int]): Target (h, w) of padded mask.
+ pad_val (int): The padded value.
+
+ Returns:
+ BaseInstanceMasks: The padded masks.
+ """
+
+ @abstractmethod
+ def crop(self, bbox):
+ """Crop each mask by the given bbox.
+
+ Args:
+ bbox (ndarray): Bbox in format [x1, y1, x2, y2], shape (4, ).
+
+ Return:
+ BaseInstanceMasks: The cropped masks.
+ """
+
+ @abstractmethod
+ def crop_and_resize(self,
+ bboxes,
+ out_shape,
+ inds,
+ device,
+ interpolation='bilinear'):
+ """Crop and resize masks by the given bboxes.
+
+ This function is mainly used in mask target computation.
+ It first aligns masks to bboxes by `inds`, then crops each mask by its
+ assigned bbox and resizes it to the size of (mask_h, mask_w).
+
+ Args:
+ bboxes (Tensor): Bboxes in format [x1, y1, x2, y2], shape (N, 4)
+ out_shape (tuple[int]): Target (h, w) of resized mask
+ inds (ndarray): Indexes to assign masks to each bbox,
+ shape (N,) and values should be between [0, num_masks - 1].
+ device (str): Device of bboxes
+ interpolation (str): See `mmcv.imresize`
+
+ Return:
+ BaseInstanceMasks: the cropped and resized masks.
+ """
+
+ @abstractmethod
+ def expand(self, expanded_h, expanded_w, top, left):
+ """see :class:`Expand`."""
+
+ @property
+ @abstractmethod
+ def areas(self):
+ """ndarray: areas of each instance."""
+
+ @abstractmethod
+ def to_ndarray(self):
+ """Convert masks to the format of ndarray.
+
+ Return:
+ ndarray: Converted masks in the format of ndarray.
+ """
+
+ @abstractmethod
+ def to_tensor(self, dtype, device):
+ """Convert masks to the format of Tensor.
+
+ Args:
+ dtype (str): Dtype of converted mask.
+ device (torch.device): Device of converted masks.
+
+ Returns:
+ Tensor: Converted masks in the format of Tensor.
+ """
+
+ @abstractmethod
+ def translate(self,
+ out_shape,
+ offset,
+ direction='horizontal',
+ fill_val=0,
+ interpolation='bilinear'):
+ """Translate the masks.
+
+ Args:
+ out_shape (tuple[int]): Shape for output mask, format (h, w).
+ offset (int | float): The offset for translate.
+ direction (str): The translate direction, either "horizontal"
+ or "vertical".
+ fill_val (int | float): Border value. Default 0.
+ interpolation (str): Same as :func:`mmcv.imtranslate`.
+
+ Returns:
+ Translated masks.
+ """
+
+ def shear(self,
+ out_shape,
+ magnitude,
+ direction='horizontal',
+ border_value=0,
+ interpolation='bilinear'):
+ """Shear the masks.
+
+ Args:
+ out_shape (tuple[int]): Shape for output mask, format (h, w).
+ magnitude (int | float): The magnitude used for shear.
+ direction (str): The shear direction, either "horizontal"
+ or "vertical".
+ border_value (int | tuple[int]): Value used in case of a
+ constant border. Default 0.
+ interpolation (str): Same as in :func:`mmcv.imshear`.
+
+ Returns:
+ ndarray: Sheared masks.
+ """
+
+ @abstractmethod
+ def rotate(self, out_shape, angle, center=None, scale=1.0, fill_val=0):
+ """Rotate the masks.
+
+ Args:
+ out_shape (tuple[int]): Shape for output mask, format (h, w).
+ angle (int | float): Rotation angle in degrees. Positive values
+ mean counter-clockwise rotation.
+ center (tuple[float], optional): Center point (w, h) of the
+ rotation in source image. If not specified, the center of
+ the image will be used.
+ scale (int | float): Isotropic scale factor.
+ fill_val (int | float): Border value. Default 0 for masks.
+
+ Returns:
+ Rotated masks.
+ """
+
+
+class BitmapMasks(BaseInstanceMasks):
+ """This class represents masks in the form of bitmaps.
+
+ Args:
+ masks (ndarray): ndarray of masks in shape (N, H, W), where N is
+ the number of objects.
+ height (int): height of masks
+ width (int): width of masks
+
+ Example:
+ >>> from mmdet.core.mask.structures import * # NOQA
+ >>> num_masks, H, W = 3, 32, 32
+ >>> rng = np.random.RandomState(0)
+ >>> masks = (rng.rand(num_masks, H, W) > 0.1).astype(int)
+ >>> self = BitmapMasks(masks, height=H, width=W)
+
+ >>> # demo crop_and_resize
+ >>> num_boxes = 5
+ >>> bboxes = np.array([[0, 0, 30, 10.0]] * num_boxes)
+ >>> out_shape = (14, 14)
+ >>> inds = torch.randint(0, len(self), size=(num_boxes,))
+ >>> device = 'cpu'
+ >>> interpolation = 'bilinear'
+ >>> new = self.crop_and_resize(
+ ... bboxes, out_shape, inds, device, interpolation)
+ >>> assert len(new) == num_boxes
+ >>> assert (new.height, new.width) == out_shape
+ """
+
+ def __init__(self, masks, height, width):
+ self.height = height
+ self.width = width
+ if len(masks) == 0:
+ self.masks = np.empty((0, self.height, self.width), dtype=np.uint8)
+ else:
+ assert isinstance(masks, (list, np.ndarray))
+ if isinstance(masks, list):
+ assert isinstance(masks[0], np.ndarray)
+ assert masks[0].ndim == 2 # (H, W)
+ else:
+ assert masks.ndim == 3 # (N, H, W)
+
+ self.masks = np.stack(masks).reshape(-1, height, width)
+ assert self.masks.shape[1] == self.height
+ assert self.masks.shape[2] == self.width
+
+ def __getitem__(self, index):
+ """Index the BitmapMask.
+
+ Args:
+ index (int | ndarray): Indices in the format of integer or ndarray.
+
+ Returns:
+ :obj:`BitmapMasks`: Indexed bitmap masks.
+ """
+ masks = self.masks[index].reshape(-1, self.height, self.width)
+ return BitmapMasks(masks, self.height, self.width)
+
+ def __iter__(self):
+ return iter(self.masks)
+
+ def __repr__(self):
+ s = self.__class__.__name__ + '('
+ s += f'num_masks={len(self.masks)}, '
+ s += f'height={self.height}, '
+ s += f'width={self.width})'
+ return s
+
+ def __len__(self):
+ """Number of masks."""
+ return len(self.masks)
+
+ def rescale(self, scale, interpolation='nearest'):
+ """See :func:`BaseInstanceMasks.rescale`."""
+ if len(self.masks) == 0:
+ new_w, new_h = mmcv.rescale_size((self.width, self.height), scale)
+ rescaled_masks = np.empty((0, new_h, new_w), dtype=np.uint8)
+ else:
+ rescaled_masks = np.stack([
+ mmcv.imrescale(mask, scale, interpolation=interpolation)
+ for mask in self.masks
+ ])
+ height, width = rescaled_masks.shape[1:]
+ return BitmapMasks(rescaled_masks, height, width)
+
+ def resize(self, out_shape, interpolation='nearest'):
+ """See :func:`BaseInstanceMasks.resize`."""
+ if len(self.masks) == 0:
+ resized_masks = np.empty((0, *out_shape), dtype=np.uint8)
+ else:
+ resized_masks = np.stack([
+ mmcv.imresize(
+ mask, out_shape[::-1], interpolation=interpolation)
+ for mask in self.masks
+ ])
+ return BitmapMasks(resized_masks, *out_shape)
+
+ def flip(self, flip_direction='horizontal'):
+ """See :func:`BaseInstanceMasks.flip`."""
+ assert flip_direction in ('horizontal', 'vertical', 'diagonal')
+
+ if len(self.masks) == 0:
+ flipped_masks = self.masks
+ else:
+ flipped_masks = np.stack([
+ mmcv.imflip(mask, direction=flip_direction)
+ for mask in self.masks
+ ])
+ return BitmapMasks(flipped_masks, self.height, self.width)
+
+ def pad(self, out_shape, pad_val=0):
+ """See :func:`BaseInstanceMasks.pad`."""
+ if len(self.masks) == 0:
+ padded_masks = np.empty((0, *out_shape), dtype=np.uint8)
+ else:
+ padded_masks = np.stack([
+ mmcv.impad(mask, shape=out_shape, pad_val=pad_val)
+ for mask in self.masks
+ ])
+ return BitmapMasks(padded_masks, *out_shape)
+
+ def crop(self, bbox):
+ """See :func:`BaseInstanceMasks.crop`."""
+ assert isinstance(bbox, np.ndarray)
+ assert bbox.ndim == 1
+
+ # clip the boundary
+ bbox = bbox.copy()
+ bbox[0::2] = np.clip(bbox[0::2], 0, self.width)
+ bbox[1::2] = np.clip(bbox[1::2], 0, self.height)
+ x1, y1, x2, y2 = bbox
+ w = np.maximum(x2 - x1, 1)
+ h = np.maximum(y2 - y1, 1)
+
+ if len(self.masks) == 0:
+ cropped_masks = np.empty((0, h, w), dtype=np.uint8)
+ else:
+ cropped_masks = self.masks[:, y1:y1 + h, x1:x1 + w]
+ return BitmapMasks(cropped_masks, h, w)
+
+ def crop_and_resize(self,
+ bboxes,
+ out_shape,
+ inds,
+ device='cpu',
+ interpolation='bilinear'):
+ """See :func:`BaseInstanceMasks.crop_and_resize`."""
+ if len(self.masks) == 0:
+ empty_masks = np.empty((0, *out_shape), dtype=np.uint8)
+ return BitmapMasks(empty_masks, *out_shape)
+
+ # convert bboxes to tensor
+ if isinstance(bboxes, np.ndarray):
+ bboxes = torch.from_numpy(bboxes).to(device=device)
+ if isinstance(inds, np.ndarray):
+ inds = torch.from_numpy(inds).to(device=device)
+
+ num_bbox = bboxes.shape[0]
+ fake_inds = torch.arange(
+ num_bbox, device=device).to(dtype=bboxes.dtype)[:, None]
+ rois = torch.cat([fake_inds, bboxes], dim=1) # Nx5
+ rois = rois.to(device=device)
+ if num_bbox > 0:
+ gt_masks_th = torch.from_numpy(self.masks).to(device).index_select(
+ 0, inds).to(dtype=rois.dtype)
+ targets = roi_align(gt_masks_th[:, None, :, :], rois, out_shape,
+ 1.0, 0, 'avg', True).squeeze(1)
+ resized_masks = (targets >= 0.5).cpu().numpy()
+ else:
+ resized_masks = []
+ return BitmapMasks(resized_masks, *out_shape)
+
+ def expand(self, expanded_h, expanded_w, top, left):
+ """See :func:`BaseInstanceMasks.expand`."""
+ if len(self.masks) == 0:
+ expanded_mask = np.empty((0, expanded_h, expanded_w),
+ dtype=np.uint8)
+ else:
+ expanded_mask = np.zeros((len(self), expanded_h, expanded_w),
+ dtype=np.uint8)
+ expanded_mask[:, top:top + self.height,
+ left:left + self.width] = self.masks
+ return BitmapMasks(expanded_mask, expanded_h, expanded_w)
+
+ def translate(self,
+ out_shape,
+ offset,
+ direction='horizontal',
+ fill_val=0,
+ interpolation='bilinear'):
+ """Translate the BitmapMasks.
+
+ Args:
+ out_shape (tuple[int]): Shape for output mask, format (h, w).
+ offset (int | float): The offset for translate.
+ direction (str): The translate direction, either "horizontal"
+ or "vertical".
+ fill_val (int | float): Border value. Default 0 for masks.
+ interpolation (str): Same as :func:`mmcv.imtranslate`.
+
+ Returns:
+ BitmapMasks: Translated BitmapMasks.
+
+ Example:
+ >>> from mmdet.core.mask.structures import BitmapMasks
+ >>> self = BitmapMasks.random(dtype=np.uint8)
+ >>> out_shape = (32, 32)
+ >>> offset = 4
+ >>> direction = 'horizontal'
+ >>> fill_val = 0
+ >>> interpolation = 'bilinear'
+ >>> # Note: there seem to be issues when:
+ >>> # * out_shape is different than self's shape
+ >>> # * the mask dtype is not supported by cv2.AffineWarp
+ >>> new = self.translate(out_shape, offset, direction, fill_val,
+ >>> interpolation)
+ >>> assert len(new) == len(self)
+ >>> assert (new.height, new.width) == out_shape
+ """
+ if len(self.masks) == 0:
+ translated_masks = np.empty((0, *out_shape), dtype=np.uint8)
+ else:
+ translated_masks = mmcv.imtranslate(
+ self.masks.transpose((1, 2, 0)),
+ offset,
+ direction,
+ border_value=fill_val,
+ interpolation=interpolation)
+ if translated_masks.ndim == 2:
+ translated_masks = translated_masks[:, :, None]
+ translated_masks = translated_masks.transpose(
+ (2, 0, 1)).astype(self.masks.dtype)
+ return BitmapMasks(translated_masks, *out_shape)
+
+ def shear(self,
+ out_shape,
+ magnitude,
+ direction='horizontal',
+ border_value=0,
+ interpolation='bilinear'):
+ """Shear the BitmapMasks.
+
+ Args:
+ out_shape (tuple[int]): Shape for output mask, format (h, w).
+ magnitude (int | float): The magnitude used for shear.
+ direction (str): The shear direction, either "horizontal"
+ or "vertical".
+ border_value (int | tuple[int]): Value used in case of a
+ constant border.
+ interpolation (str): Same as in :func:`mmcv.imshear`.
+
+ Returns:
+ BitmapMasks: The sheared masks.
+ """
+ if len(self.masks) == 0:
+ sheared_masks = np.empty((0, *out_shape), dtype=np.uint8)
+ else:
+ sheared_masks = mmcv.imshear(
+ self.masks.transpose((1, 2, 0)),
+ magnitude,
+ direction,
+ border_value=border_value,
+ interpolation=interpolation)
+ if sheared_masks.ndim == 2:
+ sheared_masks = sheared_masks[:, :, None]
+ sheared_masks = sheared_masks.transpose(
+ (2, 0, 1)).astype(self.masks.dtype)
+ return BitmapMasks(sheared_masks, *out_shape)
+
+ def rotate(self, out_shape, angle, center=None, scale=1.0, fill_val=0):
+ """Rotate the BitmapMasks.
+
+ Args:
+ out_shape (tuple[int]): Shape for output mask, format (h, w).
+ angle (int | float): Rotation angle in degrees. Positive values
+ mean counter-clockwise rotation.
+ center (tuple[float], optional): Center point (w, h) of the
+ rotation in source image. If not specified, the center of
+ the image will be used.
+ scale (int | float): Isotropic scale factor.
+ fill_val (int | float): Border value. Default 0 for masks.
+
+ Returns:
+ BitmapMasks: Rotated BitmapMasks.
+ """
+ if len(self.masks) == 0:
+ rotated_masks = np.empty((0, *out_shape), dtype=self.masks.dtype)
+ else:
+ rotated_masks = mmcv.imrotate(
+ self.masks.transpose((1, 2, 0)),
+ angle,
+ center=center,
+ scale=scale,
+ border_value=fill_val)
+ if rotated_masks.ndim == 2:
+ # case when only one mask, (h, w)
+ rotated_masks = rotated_masks[:, :, None] # (h, w, 1)
+ rotated_masks = rotated_masks.transpose(
+ (2, 0, 1)).astype(self.masks.dtype)
+ return BitmapMasks(rotated_masks, *out_shape)
+
+ @property
+ def areas(self):
+ """See :py:attr:`BaseInstanceMasks.areas`."""
+ return self.masks.sum((1, 2))
+
+ def to_ndarray(self):
+ """See :func:`BaseInstanceMasks.to_ndarray`."""
+ return self.masks
+
+ def to_tensor(self, dtype, device):
+ """See :func:`BaseInstanceMasks.to_tensor`."""
+ return torch.tensor(self.masks, dtype=dtype, device=device)
+
+ @classmethod
+ def random(cls,
+ num_masks=3,
+ height=32,
+ width=32,
+ dtype=np.uint8,
+ rng=None):
+ """Generate random bitmap masks for demo / testing purposes.
+
+ Example:
+ >>> from mmdet.core.mask.structures import BitmapMasks
+ >>> self = BitmapMasks.random()
+ >>> print('self = {}'.format(self))
+ self = BitmapMasks(num_masks=3, height=32, width=32)
+ """
+ from mmdet.utils.util_random import ensure_rng
+ rng = ensure_rng(rng)
+ masks = (rng.rand(num_masks, height, width) > 0.1).astype(dtype)
+ self = cls(masks, height=height, width=width)
+ return self
+
+
+class PolygonMasks(BaseInstanceMasks):
+ """This class represents masks in the form of polygons.
+
+ Polygons are represented as a list with three levels: the first level
+ corresponds to objects, the second to the polygons that compose each
+ object, and the third to the polygon coordinates.
+
+ Args:
+ masks (list[list[ndarray]]): The first level of the list
+ corresponds to objects, the second level to the polys that
+ compose the object, the third level to the poly coordinates
+ height (int): height of masks
+ width (int): width of masks
+
+ Example:
+ >>> from mmdet.core.mask.structures import * # NOQA
+ >>> masks = [
+ >>> [ np.array([0, 0, 10, 0, 10, 10., 0, 10, 0, 0]) ]
+ >>> ]
+ >>> height, width = 16, 16
+ >>> self = PolygonMasks(masks, height, width)
+
+ >>> # demo translate
+ >>> new = self.translate((16, 16), 4., direction='horizontal')
+ >>> assert np.all(new.masks[0][0][1::2] == masks[0][0][1::2])
+ >>> assert np.all(new.masks[0][0][0::2] == masks[0][0][0::2] + 4)
+
+ >>> # demo crop_and_resize
+ >>> num_boxes = 3
+ >>> bboxes = np.array([[0, 0, 30, 10.0]] * num_boxes)
+ >>> out_shape = (16, 16)
+ >>> inds = torch.randint(0, len(self), size=(num_boxes,))
+ >>> device = 'cpu'
+ >>> interpolation = 'bilinear'
+ >>> new = self.crop_and_resize(
+ ... bboxes, out_shape, inds, device, interpolation)
+ >>> assert len(new) == num_boxes
+ >>> assert (new.height, new.width) == out_shape
+ """
+
+ def __init__(self, masks, height, width):
+ assert isinstance(masks, list)
+ if len(masks) > 0:
+ assert isinstance(masks[0], list)
+ assert isinstance(masks[0][0], np.ndarray)
+
+ self.height = height
+ self.width = width
+ self.masks = masks
+
+ def __getitem__(self, index):
+ """Index the polygon masks.
+
+ Args:
+ index (ndarray | List): The indices.
+
+ Returns:
+ :obj:`PolygonMasks`: The indexed polygon masks.
+ """
+ if isinstance(index, np.ndarray):
+ index = index.tolist()
+ if isinstance(index, list):
+ masks = [self.masks[i] for i in index]
+ else:
+ try:
+ masks = self.masks[index]
+ except Exception:
+ raise ValueError(
+ f'Unsupported input of type {type(index)} for indexing!')
+ if len(masks) and isinstance(masks[0], np.ndarray):
+ masks = [masks] # ensure a list of three levels
+ return PolygonMasks(masks, self.height, self.width)
+
+ def __iter__(self):
+ return iter(self.masks)
+
+ def __repr__(self):
+ s = self.__class__.__name__ + '('
+ s += f'num_masks={len(self.masks)}, '
+ s += f'height={self.height}, '
+ s += f'width={self.width})'
+ return s
+
+ def __len__(self):
+ """Number of masks."""
+ return len(self.masks)
+
+ def rescale(self, scale, interpolation=None):
+ """see :func:`BaseInstanceMasks.rescale`"""
+ new_w, new_h = mmcv.rescale_size((self.width, self.height), scale)
+ if len(self.masks) == 0:
+ rescaled_masks = PolygonMasks([], new_h, new_w)
+ else:
+ rescaled_masks = self.resize((new_h, new_w))
+ return rescaled_masks
+
+ def resize(self, out_shape, interpolation=None):
+ """see :func:`BaseInstanceMasks.resize`"""
+ if len(self.masks) == 0:
+ resized_masks = PolygonMasks([], *out_shape)
+ else:
+ h_scale = out_shape[0] / self.height
+ w_scale = out_shape[1] / self.width
+ resized_masks = []
+ for poly_per_obj in self.masks:
+ resized_poly = []
+ for p in poly_per_obj:
+ p = p.copy()
+ p[0::2] *= w_scale
+ p[1::2] *= h_scale
+ resized_poly.append(p)
+ resized_masks.append(resized_poly)
+ resized_masks = PolygonMasks(resized_masks, *out_shape)
+ return resized_masks
+
+ def flip(self, flip_direction='horizontal'):
+ """see :func:`BaseInstanceMasks.flip`"""
+ assert flip_direction in ('horizontal', 'vertical', 'diagonal')
+ if len(self.masks) == 0:
+ flipped_masks = PolygonMasks([], self.height, self.width)
+ else:
+ flipped_masks = []
+ for poly_per_obj in self.masks:
+ flipped_poly_per_obj = []
+ for p in poly_per_obj:
+ p = p.copy()
+ if flip_direction == 'horizontal':
+ p[0::2] = self.width - p[0::2]
+ elif flip_direction == 'vertical':
+ p[1::2] = self.height - p[1::2]
+ else:
+ p[0::2] = self.width - p[0::2]
+ p[1::2] = self.height - p[1::2]
+ flipped_poly_per_obj.append(p)
+ flipped_masks.append(flipped_poly_per_obj)
+ flipped_masks = PolygonMasks(flipped_masks, self.height,
+ self.width)
+ return flipped_masks
+
+ def crop(self, bbox):
+ """see :func:`BaseInstanceMasks.crop`"""
+ assert isinstance(bbox, np.ndarray)
+ assert bbox.ndim == 1
+
+ # clip the boundary
+ bbox = bbox.copy()
+ bbox[0::2] = np.clip(bbox[0::2], 0, self.width)
+ bbox[1::2] = np.clip(bbox[1::2], 0, self.height)
+ x1, y1, x2, y2 = bbox
+ w = np.maximum(x2 - x1, 1)
+ h = np.maximum(y2 - y1, 1)
+
+ if len(self.masks) == 0:
+ cropped_masks = PolygonMasks([], h, w)
+ else:
+ cropped_masks = []
+ for poly_per_obj in self.masks:
+ cropped_poly_per_obj = []
+ for p in poly_per_obj:
+ # pycocotools will clip the boundary
+ p = p.copy()
+ p[0::2] -= bbox[0]
+ p[1::2] -= bbox[1]
+ cropped_poly_per_obj.append(p)
+ cropped_masks.append(cropped_poly_per_obj)
+ cropped_masks = PolygonMasks(cropped_masks, h, w)
+ return cropped_masks
+
+ def pad(self, out_shape, pad_val=0):
+ """padding has no effect on polygons`"""
+ return PolygonMasks(self.masks, *out_shape)
+
+ def expand(self, *args, **kwargs):
+ """TODO: Add expand for polygon"""
+ raise NotImplementedError
+
+ def crop_and_resize(self,
+ bboxes,
+ out_shape,
+ inds,
+ device='cpu',
+ interpolation='bilinear'):
+ """see :func:`BaseInstanceMasks.crop_and_resize`"""
+ out_h, out_w = out_shape
+ if len(self.masks) == 0:
+ return PolygonMasks([], out_h, out_w)
+
+ resized_masks = []
+ for i in range(len(bboxes)):
+ mask = self.masks[inds[i]]
+ bbox = bboxes[i, :]
+ x1, y1, x2, y2 = bbox
+ w = np.maximum(x2 - x1, 1)
+ h = np.maximum(y2 - y1, 1)
+ h_scale = out_h / max(h, 0.1) # avoid too large scale
+ w_scale = out_w / max(w, 0.1)
+
+ resized_mask = []
+ for p in mask:
+ p = p.copy()
+ # crop
+ # pycocotools will clip the boundary
+ p[0::2] -= bbox[0]
+ p[1::2] -= bbox[1]
+
+ # resize
+ p[0::2] *= w_scale
+ p[1::2] *= h_scale
+ resized_mask.append(p)
+ resized_masks.append(resized_mask)
+ return PolygonMasks(resized_masks, *out_shape)
+
+ def translate(self,
+ out_shape,
+ offset,
+ direction='horizontal',
+ fill_val=None,
+ interpolation=None):
+ """Translate the PolygonMasks.
+
+ Example:
+ >>> self = PolygonMasks.random(dtype=int)
+ >>> out_shape = (self.height, self.width)
+ >>> new = self.translate(out_shape, 4., direction='horizontal')
+ >>> assert np.all(new.masks[0][0][1::2] == self.masks[0][0][1::2])
+ >>> assert np.all(new.masks[0][0][0::2] == self.masks[0][0][0::2] + 4) # noqa: E501
+ """
+ assert fill_val is None or fill_val == 0, 'Here fill_val is not '\
+ f'used, and by default should be None or 0, got {fill_val}.'
+ if len(self.masks) == 0:
+ translated_masks = PolygonMasks([], *out_shape)
+ else:
+ translated_masks = []
+ for poly_per_obj in self.masks:
+ translated_poly_per_obj = []
+ for p in poly_per_obj:
+ p = p.copy()
+ if direction == 'horizontal':
+ p[0::2] = np.clip(p[0::2] + offset, 0, out_shape[1])
+ elif direction == 'vertical':
+ p[1::2] = np.clip(p[1::2] + offset, 0, out_shape[0])
+ translated_poly_per_obj.append(p)
+ translated_masks.append(translated_poly_per_obj)
+ translated_masks = PolygonMasks(translated_masks, *out_shape)
+ return translated_masks
+
+ def shear(self,
+ out_shape,
+ magnitude,
+ direction='horizontal',
+ border_value=0,
+ interpolation='bilinear'):
+ """See :func:`BaseInstanceMasks.shear`."""
+ if len(self.masks) == 0:
+ sheared_masks = PolygonMasks([], *out_shape)
+ else:
+ sheared_masks = []
+ if direction == 'horizontal':
+ shear_matrix = np.stack([[1, magnitude],
+ [0, 1]]).astype(np.float32)
+ elif direction == 'vertical':
+ shear_matrix = np.stack([[1, 0], [magnitude,
+ 1]]).astype(np.float32)
+ for poly_per_obj in self.masks:
+ sheared_poly = []
+ for p in poly_per_obj:
+ p = np.stack([p[0::2], p[1::2]], axis=0) # [2, n]
+ new_coords = np.matmul(shear_matrix, p) # [2, n]
+ new_coords[0, :] = np.clip(new_coords[0, :], 0,
+ out_shape[1])
+ new_coords[1, :] = np.clip(new_coords[1, :], 0,
+ out_shape[0])
+ sheared_poly.append(
+ new_coords.transpose((1, 0)).reshape(-1))
+ sheared_masks.append(sheared_poly)
+ sheared_masks = PolygonMasks(sheared_masks, *out_shape)
+ return sheared_masks
+
+ def rotate(self, out_shape, angle, center=None, scale=1.0, fill_val=0):
+ """See :func:`BaseInstanceMasks.rotate`."""
+ if len(self.masks) == 0:
+ rotated_masks = PolygonMasks([], *out_shape)
+ else:
+ rotated_masks = []
+ rotate_matrix = cv2.getRotationMatrix2D(center, -angle, scale)
+ for poly_per_obj in self.masks:
+ rotated_poly = []
+ for p in poly_per_obj:
+ p = p.copy()
+ coords = np.stack([p[0::2], p[1::2]], axis=1) # [n, 2]
+ # pad 1 to convert from format [x, y] to homogeneous
+ # coordinates format [x, y, 1]
+ coords = np.concatenate(
+ (coords, np.ones((coords.shape[0], 1), coords.dtype)),
+ axis=1) # [n, 3]
+ rotated_coords = np.matmul(
+ rotate_matrix[None, :, :],
+ coords[:, :, None])[..., 0] # [n, 2, 1] -> [n, 2]
+ rotated_coords[:, 0] = np.clip(rotated_coords[:, 0], 0,
+ out_shape[1])
+ rotated_coords[:, 1] = np.clip(rotated_coords[:, 1], 0,
+ out_shape[0])
+ rotated_poly.append(rotated_coords.reshape(-1))
+ rotated_masks.append(rotated_poly)
+ rotated_masks = PolygonMasks(rotated_masks, *out_shape)
+ return rotated_masks
+
+ def to_bitmap(self):
+ """convert polygon masks to bitmap masks."""
+ bitmap_masks = self.to_ndarray()
+ return BitmapMasks(bitmap_masks, self.height, self.width)
+
+ @property
+ def areas(self):
+ """Compute areas of masks.
+
+        This func is modified from `detectron2
+        <https://github.com/facebookresearch/detectron2/>`_.
+        The function only works with polygons, using the shoelace formula.
+
+ Return:
+ ndarray: areas of each instance
+ """ # noqa: W501
+ area = []
+ for polygons_per_obj in self.masks:
+ area_per_obj = 0
+ for p in polygons_per_obj:
+ area_per_obj += self._polygon_area(p[0::2], p[1::2])
+ area.append(area_per_obj)
+ return np.asarray(area)
+
+ def _polygon_area(self, x, y):
+ """Compute the area of a component of a polygon.
+
+ Using the shoelace formula:
+ https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates
+
+ Args:
+ x (ndarray): x coordinates of the component
+ y (ndarray): y coordinates of the component
+
+ Return:
+            float: the area of the component
+ """ # noqa: 501
+ return 0.5 * np.abs(
+ np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))
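+    # Illustrative check (not part of the original code): for the unit square
+    # with x = [0, 1, 1, 0] and y = [0, 0, 1, 1], dot(x, roll(y, 1)) = 0 and
+    # dot(y, roll(x, 1)) = 2, so the formula gives 0.5 * |0 - 2| = 1.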
+
+ def to_ndarray(self):
+ """Convert masks to the format of ndarray."""
+ if len(self.masks) == 0:
+ return np.empty((0, self.height, self.width), dtype=np.uint8)
+ bitmap_masks = []
+ for poly_per_obj in self.masks:
+ bitmap_masks.append(
+ polygon_to_bitmap(poly_per_obj, self.height, self.width))
+ return np.stack(bitmap_masks)
+
+ def to_tensor(self, dtype, device):
+ """See :func:`BaseInstanceMasks.to_tensor`."""
+ if len(self.masks) == 0:
+ return torch.empty((0, self.height, self.width),
+ dtype=dtype,
+ device=device)
+ ndarray_masks = self.to_ndarray()
+ return torch.tensor(ndarray_masks, dtype=dtype, device=device)
+
+ @classmethod
+ def random(cls,
+ num_masks=3,
+ height=32,
+ width=32,
+ n_verts=5,
+ dtype=np.float32,
+ rng=None):
+ """Generate random polygon masks for demo / testing purposes.
+
+ Adapted from [1]_
+
+ References:
+ .. [1] https://gitlab.kitware.com/computer-vision/kwimage/-/blob/928cae35ca8/kwimage/structs/polygon.py#L379 # noqa: E501
+
+ Example:
+ >>> from mmdet.core.mask.structures import PolygonMasks
+ >>> self = PolygonMasks.random()
+ >>> print('self = {}'.format(self))
+ """
+ from mmdet.utils.util_random import ensure_rng
+ rng = ensure_rng(rng)
+
+ def _gen_polygon(n, irregularity, spikeyness):
+ """Creates the polygon by sampling points on a circle around the
+ centre. Random noise is added by varying the angular spacing
+ between sequential points, and by varying the radial distance of
+ each point from the centre.
+
+ Based on original code by Mike Ounsworth
+
+ Args:
+ n (int): number of vertices
+ irregularity (float): [0,1] indicating how much variance there
+ is in the angular spacing of vertices. [0,1] will map to
+ [0, 2pi/numberOfVerts]
+ spikeyness (float): [0,1] indicating how much variance there is
+ in each vertex from the circle of radius aveRadius. [0,1]
+ will map to [0, aveRadius]
+
+ Returns:
+ a list of vertices, in CCW order.
+ """
+ from scipy.stats import truncnorm
+ # Generate around the unit circle
+ cx, cy = (0.0, 0.0)
+ radius = 1
+
+ tau = np.pi * 2
+
+ irregularity = np.clip(irregularity, 0, 1) * 2 * np.pi / n
+ spikeyness = np.clip(spikeyness, 1e-9, 1)
+
+ # generate n angle steps
+ lower = (tau / n) - irregularity
+ upper = (tau / n) + irregularity
+ angle_steps = rng.uniform(lower, upper, n)
+
+ # normalize the steps so that point 0 and point n+1 are the same
+ k = angle_steps.sum() / (2 * np.pi)
+ angles = (angle_steps / k).cumsum() + rng.uniform(0, tau)
+
+ # Convert high and low values to be wrt the standard normal range
+ # https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.truncnorm.html
+ low = 0
+ high = 2 * radius
+ mean = radius
+ std = spikeyness
+ a = (low - mean) / std
+ b = (high - mean) / std
+ tnorm = truncnorm(a=a, b=b, loc=mean, scale=std)
+
+ # now generate the points
+ radii = tnorm.rvs(n, random_state=rng)
+ x_pts = cx + radii * np.cos(angles)
+ y_pts = cy + radii * np.sin(angles)
+
+ points = np.hstack([x_pts[:, None], y_pts[:, None]])
+
+ # Scale to 0-1 space
+ points = points - points.min(axis=0)
+ points = points / points.max(axis=0)
+
+ # Randomly place within 0-1 space
+ points = points * (rng.rand() * .8 + .2)
+ min_pt = points.min(axis=0)
+ max_pt = points.max(axis=0)
+
+ high = (1 - max_pt)
+ low = (0 - min_pt)
+ offset = (rng.rand(2) * (high - low)) + low
+ points = points + offset
+ return points
+
+ def _order_vertices(verts):
+ """
+ References:
+ https://stackoverflow.com/questions/1709283/how-can-i-sort-a-coordinate-list-for-a-rectangle-counterclockwise
+ """
+ mlat = verts.T[0].sum() / len(verts)
+ mlng = verts.T[1].sum() / len(verts)
+
+ tau = np.pi * 2
+ angle = (np.arctan2(mlat - verts.T[0], verts.T[1] - mlng) +
+ tau) % tau
+ sortx = angle.argsort()
+ verts = verts.take(sortx, axis=0)
+ return verts
+
+ # Generate a random exterior for each requested mask
+ masks = []
+ for _ in range(num_masks):
+ exterior = _order_vertices(_gen_polygon(n_verts, 0.9, 0.9))
+ exterior = (exterior * [(width, height)]).astype(dtype)
+ masks.append([exterior.ravel()])
+
+ self = cls(masks, height, width)
+ return self
+
+
+def polygon_to_bitmap(polygons, height, width):
+ """Convert masks from the form of polygons to bitmaps.
+
+ Args:
+ polygons (list[ndarray]): masks in polygon representation
+ height (int): mask height
+ width (int): mask width
+
+ Return:
+ ndarray: the converted masks in bitmap representation
+ """
+ rles = maskUtils.frPyObjects(polygons, height, width)
+ rle = maskUtils.merge(rles)
+    bitmap_mask = maskUtils.decode(rle).astype(bool)
+ return bitmap_mask
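+
+
+# Usage sketch (illustrative, not part of the original file): a triangle given
+# as a flat [x0, y0, x1, y1, x2, y2] array decodes to an (h, w) boolean mask:
+#   mask = polygon_to_bitmap([np.array([0., 0., 4., 0., 0., 4.])], 8, 8)
+#   assert mask.shape == (8, 8) and mask.dtype == bool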
diff --git a/annotator/uniformer/mmdet_null/core/mask/utils.py b/annotator/uniformer/mmdet_null/core/mask/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..c88208291ab2a605bee9fe6c1a28a443b74c6372
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/core/mask/utils.py
@@ -0,0 +1,63 @@
+import mmcv
+import numpy as np
+import pycocotools.mask as mask_util
+
+
+def split_combined_polys(polys, poly_lens, polys_per_mask):
+ """Split the combined 1-D polys into masks.
+
+ A mask is represented as a list of polys, and a poly is represented as
+ a 1-D array. In dataset, all masks are concatenated into a single 1-D
+ tensor. Here we need to split the tensor into original representations.
+
+ Args:
+ polys (list): a list (length = image num) of 1-D tensors
+ poly_lens (list): a list (length = image num) of poly length
+ polys_per_mask (list): a list (length = image num) of poly number
+ of each mask
+
+ Returns:
+ list: a list (length = image num) of list (length = mask num) of \
+ list (length = poly num) of numpy array.
+ """
+ mask_polys_list = []
+ for img_id in range(len(polys)):
+ polys_single = polys[img_id]
+ polys_lens_single = poly_lens[img_id].tolist()
+ polys_per_mask_single = polys_per_mask[img_id].tolist()
+
+ split_polys = mmcv.slice_list(polys_single, polys_lens_single)
+ mask_polys = mmcv.slice_list(split_polys, polys_per_mask_single)
+ mask_polys_list.append(mask_polys)
+ return mask_polys_list
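+# Shape sketch (illustrative): for one image with two masks, where mask 0 has a
+# single poly of length 6 and mask 1 has two polys of lengths 8 and 6,
+# poly_lens[0] is [6, 8, 6] and polys_per_mask[0] is [1, 2]; the output entry
+# for that image is [[p0], [p1, p2]].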
+
+
+# TODO: move this function to more proper place
+def encode_mask_results(mask_results):
+ """Encode bitmap mask to RLE code.
+
+ Args:
+ mask_results (list | tuple[list]): bitmap mask results.
+ In mask scoring rcnn, mask_results is a tuple of (segm_results,
+ segm_cls_score).
+
+ Returns:
+ list | tuple: RLE encoded mask.
+ """
+ if isinstance(mask_results, tuple): # mask scoring
+ cls_segms, cls_mask_scores = mask_results
+ else:
+ cls_segms = mask_results
+ num_classes = len(cls_segms)
+ encoded_mask_results = [[] for _ in range(num_classes)]
+ for i in range(len(cls_segms)):
+ for cls_segm in cls_segms[i]:
+ encoded_mask_results[i].append(
+ mask_util.encode(
+ np.array(
+ cls_segm[:, :, np.newaxis], order='F',
+ dtype='uint8'))[0]) # encoded with RLE
+ if isinstance(mask_results, tuple):
+ return encoded_mask_results, cls_mask_scores
+ else:
+ return encoded_mask_results
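+
+
+# Note (illustrative, not part of the original file): each entry appended above
+# is a pycocotools RLE dict; the original (h, w) binary mask can be recovered
+# with mask_util.decode(rle).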
diff --git a/annotator/uniformer/mmdet_null/core/post_processing/__init__.py b/annotator/uniformer/mmdet_null/core/post_processing/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..880b3f06609b050aae163b2e38088c1ee4aa0998
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/core/post_processing/__init__.py
@@ -0,0 +1,8 @@
+from .bbox_nms import fast_nms, multiclass_nms
+from .merge_augs import (merge_aug_bboxes, merge_aug_masks,
+ merge_aug_proposals, merge_aug_scores)
+
+__all__ = [
+ 'multiclass_nms', 'merge_aug_proposals', 'merge_aug_bboxes',
+ 'merge_aug_scores', 'merge_aug_masks', 'fast_nms'
+]
diff --git a/annotator/uniformer/mmdet_null/core/post_processing/bbox_nms.py b/annotator/uniformer/mmdet_null/core/post_processing/bbox_nms.py
new file mode 100644
index 0000000000000000000000000000000000000000..966d3a6ac86637a6be90edc3aab9b6863fb87764
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/core/post_processing/bbox_nms.py
@@ -0,0 +1,168 @@
+import torch
+from mmcv.ops.nms import batched_nms
+
+from mmdet.core.bbox.iou_calculators import bbox_overlaps
+
+
+def multiclass_nms(multi_bboxes,
+ multi_scores,
+ score_thr,
+ nms_cfg,
+ max_num=-1,
+ score_factors=None,
+ return_inds=False):
+ """NMS for multi-class bboxes.
+
+ Args:
+ multi_bboxes (Tensor): shape (n, #class*4) or (n, 4)
+ multi_scores (Tensor): shape (n, #class), where the last column
+ contains scores of the background class, but this will be ignored.
+ score_thr (float): bbox threshold, bboxes with scores lower than it
+ will not be considered.
+        nms_cfg (dict): Config of NMS, which is passed to ``batched_nms``.
+ max_num (int, optional): if there are more than max_num bboxes after
+ NMS, only top max_num will be kept. Default to -1.
+ score_factors (Tensor, optional): The factors multiplied to scores
+ before applying NMS. Default to None.
+ return_inds (bool, optional): Whether return the indices of kept
+ bboxes. Default to False.
+
+ Returns:
+ tuple: (bboxes, labels, indices (optional)), tensors of shape (k, 5),
+ (k), and (k). Labels are 0-based.
+ """
+ num_classes = multi_scores.size(1) - 1
+ # exclude background category
+ if multi_bboxes.shape[1] > 4:
+ bboxes = multi_bboxes.view(multi_scores.size(0), -1, 4)
+ else:
+ bboxes = multi_bboxes[:, None].expand(
+ multi_scores.size(0), num_classes, 4)
+
+ scores = multi_scores[:, :-1]
+
+ labels = torch.arange(num_classes, dtype=torch.long)
+ labels = labels.view(1, -1).expand_as(scores)
+
+ bboxes = bboxes.reshape(-1, 4)
+ scores = scores.reshape(-1)
+ labels = labels.reshape(-1)
+
+ if not torch.onnx.is_in_onnx_export():
+ # NonZero not supported in TensorRT
+ # remove low scoring boxes
+ valid_mask = scores > score_thr
+ # multiply score_factor after threshold to preserve more bboxes, improve
+ # mAP by 1% for YOLOv3
+ if score_factors is not None:
+ # expand the shape to match original shape of score
+ score_factors = score_factors.view(-1, 1).expand(
+ multi_scores.size(0), num_classes)
+ score_factors = score_factors.reshape(-1)
+ scores = scores * score_factors
+
+ if not torch.onnx.is_in_onnx_export():
+ # NonZero not supported in TensorRT
+ inds = valid_mask.nonzero(as_tuple=False).squeeze(1)
+ bboxes, scores, labels = bboxes[inds], scores[inds], labels[inds]
+ else:
+ # TensorRT NMS plugin has invalid output filled with -1
+ # add dummy data to make detection output correct.
+ bboxes = torch.cat([bboxes, bboxes.new_zeros(1, 4)], dim=0)
+ scores = torch.cat([scores, scores.new_zeros(1)], dim=0)
+ labels = torch.cat([labels, labels.new_zeros(1)], dim=0)
+
+ if bboxes.numel() == 0:
+ if torch.onnx.is_in_onnx_export():
+ raise RuntimeError('[ONNX Error] Can not record NMS '
+ 'as it has not been executed this time')
+ if return_inds:
+ return bboxes, labels, inds
+ else:
+ return bboxes, labels
+
+ dets, keep = batched_nms(bboxes, scores, labels, nms_cfg)
+
+ if max_num > 0:
+ dets = dets[:max_num]
+ keep = keep[:max_num]
+
+ if return_inds:
+ return dets, labels[keep], keep
+ else:
+ return dets, labels[keep]
+
+
+def fast_nms(multi_bboxes,
+ multi_scores,
+ multi_coeffs,
+ score_thr,
+ iou_thr,
+ top_k,
+ max_num=-1):
+ """Fast NMS in `YOLACT `_.
+
+ Fast NMS allows already-removed detections to suppress other detections so
+ that every instance can be decided to be kept or discarded in parallel,
+ which is not possible in traditional NMS. This relaxation allows us to
+ implement Fast NMS entirely in standard GPU-accelerated matrix operations.
+
+ Args:
+ multi_bboxes (Tensor): shape (n, #class*4) or (n, 4)
+ multi_scores (Tensor): shape (n, #class+1), where the last column
+ contains scores of the background class, but this will be ignored.
+ multi_coeffs (Tensor): shape (n, #class*coeffs_dim).
+ score_thr (float): bbox threshold, bboxes with scores lower than it
+ will not be considered.
+ iou_thr (float): IoU threshold to be considered as conflicted.
+ top_k (int): if there are more than top_k bboxes before NMS,
+ only top top_k will be kept.
+ max_num (int): if there are more than max_num bboxes after NMS,
+ only top max_num will be kept. If -1, keep all the bboxes.
+ Default: -1.
+
+ Returns:
+        tuple: (bboxes, labels, coefficients), tensors of shape (k, 5), (k,),
+ and (k, coeffs_dim). Labels are 0-based.
+ """
+
+ scores = multi_scores[:, :-1].t() # [#class, n]
+ scores, idx = scores.sort(1, descending=True)
+
+ idx = idx[:, :top_k].contiguous()
+ scores = scores[:, :top_k] # [#class, topk]
+ num_classes, num_dets = idx.size()
+ boxes = multi_bboxes[idx.view(-1), :].view(num_classes, num_dets, 4)
+ coeffs = multi_coeffs[idx.view(-1), :].view(num_classes, num_dets, -1)
+
+ iou = bbox_overlaps(boxes, boxes) # [#class, topk, topk]
+ iou.triu_(diagonal=1)
+ iou_max, _ = iou.max(dim=1)
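+    # For each box, iou_max is its largest IoU with any higher-scoring box of
+    # the same class (triu_ keeps only pairs where the other box ranks higher),
+    # so thresholding iou_max below realizes the parallel suppression described
+    # in the docstring.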
+
+ # Now just filter out the ones higher than the threshold
+ keep = iou_max <= iou_thr
+
+ # Second thresholding introduces 0.2 mAP gain at negligible time cost
+ keep *= scores > score_thr
+
+ # Assign each kept detection to its corresponding class
+ classes = torch.arange(
+ num_classes, device=boxes.device)[:, None].expand_as(keep)
+ classes = classes[keep]
+
+ boxes = boxes[keep]
+ coeffs = coeffs[keep]
+ scores = scores[keep]
+
+ # Only keep the top max_num highest scores across all classes
+ scores, idx = scores.sort(0, descending=True)
+ if max_num > 0:
+ idx = idx[:max_num]
+ scores = scores[:max_num]
+
+ classes = classes[idx]
+ boxes = boxes[idx]
+ coeffs = coeffs[idx]
+
+ cls_dets = torch.cat([boxes, scores[:, None]], dim=1)
+ return cls_dets, classes, coeffs
diff --git a/annotator/uniformer/mmdet_null/core/post_processing/merge_augs.py b/annotator/uniformer/mmdet_null/core/post_processing/merge_augs.py
new file mode 100644
index 0000000000000000000000000000000000000000..dbcf79d1ac20ddc32cb1605e06d253803250c855
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/core/post_processing/merge_augs.py
@@ -0,0 +1,150 @@
+import copy
+import warnings
+
+import numpy as np
+import torch
+from mmcv import ConfigDict
+from mmcv.ops import nms
+
+from ..bbox import bbox_mapping_back
+
+
+def merge_aug_proposals(aug_proposals, img_metas, cfg):
+ """Merge augmented proposals (multiscale, flip, etc.)
+
+ Args:
+ aug_proposals (list[Tensor]): proposals from different testing
+ schemes, shape (n, 5). Note that they are not rescaled to the
+ original image size.
+
+ img_metas (list[dict]): list of image info dict where each dict has:
+ 'img_shape', 'scale_factor', 'flip', and may also contain
+ 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
+ For details on the values of these keys see
+ `mmdet/datasets/pipelines/formatting.py:Collect`.
+
+ cfg (dict): rpn test config.
+
+ Returns:
+ Tensor: shape (n, 4), proposals corresponding to original image scale.
+ """
+
+ cfg = copy.deepcopy(cfg)
+
+ # deprecate arguments warning
+ if 'nms' not in cfg or 'max_num' in cfg or 'nms_thr' in cfg:
+ warnings.warn(
+ 'In rpn_proposal or test_cfg, '
+ 'nms_thr has been moved to a dict named nms as '
+ 'iou_threshold, max_num has been renamed as max_per_img, '
+ 'name of original arguments and the way to specify '
+ 'iou_threshold of NMS will be deprecated.')
+ if 'nms' not in cfg:
+ cfg.nms = ConfigDict(dict(type='nms', iou_threshold=cfg.nms_thr))
+ if 'max_num' in cfg:
+ if 'max_per_img' in cfg:
+ assert cfg.max_num == cfg.max_per_img, f'You set max_num and ' \
+ f'max_per_img at the same time, but get {cfg.max_num} ' \
+                f'and {cfg.max_per_img} respectively. ' \
+ f'Please delete max_num which will be deprecated.'
+ else:
+ cfg.max_per_img = cfg.max_num
+ if 'nms_thr' in cfg:
+ assert cfg.nms.iou_threshold == cfg.nms_thr, f'You set ' \
+ f'iou_threshold in nms and ' \
+ f'nms_thr at the same time, but get ' \
+ f'{cfg.nms.iou_threshold} and {cfg.nms_thr}' \
+ f' respectively. Please delete the nms_thr ' \
+ f'which will be deprecated.'
+
+ recovered_proposals = []
+ for proposals, img_info in zip(aug_proposals, img_metas):
+ img_shape = img_info['img_shape']
+ scale_factor = img_info['scale_factor']
+ flip = img_info['flip']
+ flip_direction = img_info['flip_direction']
+ _proposals = proposals.clone()
+ _proposals[:, :4] = bbox_mapping_back(_proposals[:, :4], img_shape,
+ scale_factor, flip,
+ flip_direction)
+ recovered_proposals.append(_proposals)
+ aug_proposals = torch.cat(recovered_proposals, dim=0)
+ merged_proposals, _ = nms(aug_proposals[:, :4].contiguous(),
+ aug_proposals[:, -1].contiguous(),
+ cfg.nms.iou_threshold)
+ scores = merged_proposals[:, 4]
+ _, order = scores.sort(0, descending=True)
+ num = min(cfg.max_per_img, merged_proposals.shape[0])
+ order = order[:num]
+ merged_proposals = merged_proposals[order, :]
+ return merged_proposals
+
+
+def merge_aug_bboxes(aug_bboxes, aug_scores, img_metas, rcnn_test_cfg):
+ """Merge augmented detection bboxes and scores.
+
+ Args:
+ aug_bboxes (list[Tensor]): shape (n, 4*#class)
+ aug_scores (list[Tensor] or None): shape (n, #class)
+        img_metas (list[list[dict]]): image info for each augmentation,
+            containing 'img_shape', 'scale_factor', 'flip' and
+            'flip_direction'.
+ rcnn_test_cfg (dict): rcnn test config.
+
+ Returns:
+ tuple: (bboxes, scores)
+ """
+ recovered_bboxes = []
+ for bboxes, img_info in zip(aug_bboxes, img_metas):
+ img_shape = img_info[0]['img_shape']
+ scale_factor = img_info[0]['scale_factor']
+ flip = img_info[0]['flip']
+ flip_direction = img_info[0]['flip_direction']
+ bboxes = bbox_mapping_back(bboxes, img_shape, scale_factor, flip,
+ flip_direction)
+ recovered_bboxes.append(bboxes)
+ bboxes = torch.stack(recovered_bboxes).mean(dim=0)
+ if aug_scores is None:
+ return bboxes
+ else:
+ scores = torch.stack(aug_scores).mean(dim=0)
+ return bboxes, scores
+
+
+def merge_aug_scores(aug_scores):
+ """Merge augmented bbox scores."""
+ if isinstance(aug_scores[0], torch.Tensor):
+ return torch.mean(torch.stack(aug_scores), dim=0)
+ else:
+ return np.mean(aug_scores, axis=0)
+
+
+def merge_aug_masks(aug_masks, img_metas, rcnn_test_cfg, weights=None):
+ """Merge augmented mask prediction.
+
+ Args:
+ aug_masks (list[ndarray]): shape (n, #class, h, w)
+        img_metas (list[list[dict]]): image info for each augmentation,
+            containing 'flip' and 'flip_direction'.
+ rcnn_test_cfg (dict): rcnn test config.
+
+ Returns:
+        ndarray: Merged masks.
+ """
+ recovered_masks = []
+ for mask, img_info in zip(aug_masks, img_metas):
+ flip = img_info[0]['flip']
+ flip_direction = img_info[0]['flip_direction']
+ if flip:
+ if flip_direction == 'horizontal':
+ mask = mask[:, :, :, ::-1]
+ elif flip_direction == 'vertical':
+ mask = mask[:, :, ::-1, :]
+ else:
+ raise ValueError(
+ f"Invalid flipping direction '{flip_direction}'")
+ recovered_masks.append(mask)
+
+ if weights is None:
+ merged_masks = np.mean(recovered_masks, axis=0)
+ else:
+ merged_masks = np.average(
+ np.array(recovered_masks), axis=0, weights=np.array(weights))
+ return merged_masks
diff --git a/annotator/uniformer/mmdet_null/core/utils/__init__.py b/annotator/uniformer/mmdet_null/core/utils/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..5c51dac6d648f41d5c5f46dbf703f19469a7bb6c
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/core/utils/__init__.py
@@ -0,0 +1,7 @@
+from .dist_utils import DistOptimizerHook, allreduce_grads, reduce_mean
+from .misc import mask2ndarray, multi_apply, unmap
+
+__all__ = [
+ 'allreduce_grads', 'DistOptimizerHook', 'reduce_mean', 'multi_apply',
+ 'unmap', 'mask2ndarray'
+]
diff --git a/annotator/uniformer/mmdet_null/core/utils/dist_utils.py b/annotator/uniformer/mmdet_null/core/utils/dist_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..5fe77753313783f95bd7111038ef8b58ee4e4bc5
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/core/utils/dist_utils.py
@@ -0,0 +1,69 @@
+import warnings
+from collections import OrderedDict
+
+import torch.distributed as dist
+from mmcv.runner import OptimizerHook
+from torch._utils import (_flatten_dense_tensors, _take_tensors,
+ _unflatten_dense_tensors)
+
+
+def _allreduce_coalesced(tensors, world_size, bucket_size_mb=-1):
+ if bucket_size_mb > 0:
+ bucket_size_bytes = bucket_size_mb * 1024 * 1024
+ buckets = _take_tensors(tensors, bucket_size_bytes)
+ else:
+ buckets = OrderedDict()
+ for tensor in tensors:
+ tp = tensor.type()
+ if tp not in buckets:
+ buckets[tp] = []
+ buckets[tp].append(tensor)
+ buckets = buckets.values()
+
+ for bucket in buckets:
+ flat_tensors = _flatten_dense_tensors(bucket)
+ dist.all_reduce(flat_tensors)
+ flat_tensors.div_(world_size)
+ for tensor, synced in zip(
+ bucket, _unflatten_dense_tensors(flat_tensors, bucket)):
+ tensor.copy_(synced)
+
+
+def allreduce_grads(params, coalesce=True, bucket_size_mb=-1):
+ """Allreduce gradients.
+
+ Args:
+        params (list[torch.nn.Parameter]): List of parameters of a model.
+        coalesce (bool, optional): Whether to allreduce the gradients in a
+            coalesced manner. Defaults to True.
+ bucket_size_mb (int, optional): Size of bucket, the unit is MB.
+ Defaults to -1.
+ """
+ grads = [
+ param.grad.data for param in params
+ if param.requires_grad and param.grad is not None
+ ]
+ world_size = dist.get_world_size()
+ if coalesce:
+ _allreduce_coalesced(grads, world_size, bucket_size_mb)
+ else:
+ for tensor in grads:
+ dist.all_reduce(tensor.div_(world_size))
+
+
+class DistOptimizerHook(OptimizerHook):
+ """Deprecated optimizer hook for distributed training."""
+
+ def __init__(self, *args, **kwargs):
+ warnings.warn('"DistOptimizerHook" is deprecated, please switch to'
+ '"mmcv.runner.OptimizerHook".')
+ super().__init__(*args, **kwargs)
+
+
+def reduce_mean(tensor):
+ """"Obtain the mean of tensor on different GPUs."""
+ if not (dist.is_available() and dist.is_initialized()):
+ return tensor
+ tensor = tensor.clone()
+ dist.all_reduce(tensor.div_(dist.get_world_size()), op=dist.ReduceOp.SUM)
+ return tensor
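+
+
+# Typical usage (illustrative; `num_pos` and `device` are hypothetical names):
+# average a scalar statistic across GPUs, e.g.
+#   avg_num_pos = reduce_mean(torch.tensor(float(num_pos), device=device))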
diff --git a/annotator/uniformer/mmdet_null/core/utils/misc.py b/annotator/uniformer/mmdet_null/core/utils/misc.py
new file mode 100644
index 0000000000000000000000000000000000000000..3e22c7b9085317b61a25c67d361f7e70df65bed1
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/core/utils/misc.py
@@ -0,0 +1,61 @@
+from functools import partial
+
+import numpy as np
+import torch
+from six.moves import map, zip
+
+from ..mask.structures import BitmapMasks, PolygonMasks
+
+
+def multi_apply(func, *args, **kwargs):
+ """Apply function to a list of arguments.
+
+ Note:
+        This function applies the ``func`` to multiple inputs and
+        maps the multiple outputs of the ``func`` into different
+        lists. Each list contains the same type of outputs corresponding
+        to different inputs.
+
+ Args:
+ func (Function): A function that will be applied to a list of
+ arguments
+
+ Returns:
+        tuple(list): A tuple containing multiple lists, where each list \
+            contains one kind of the results returned by the function
+ """
+ pfunc = partial(func, **kwargs) if kwargs else func
+ map_results = map(pfunc, *args)
+ return tuple(map(list, zip(*map_results)))
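+# Example (illustrative, not part of the original file):
+#   multi_apply(lambda a, b: (a + b, a * b), [1, 2], [3, 4])
+# returns ([4, 6], [3, 8]) -- one list per output of the wrapped function.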
+
+
+def unmap(data, count, inds, fill=0):
+ """Unmap a subset of item (data) back to the original set of items (of size
+ count)"""
+ if data.dim() == 1:
+ ret = data.new_full((count, ), fill)
+ ret[inds.type(torch.bool)] = data
+ else:
+ new_size = (count, ) + data.size()[1:]
+ ret = data.new_full(new_size, fill)
+ ret[inds.type(torch.bool), :] = data
+ return ret
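+# Example (illustrative): with data = torch.tensor([7, 9]), count = 4 and
+# inds = torch.tensor([1, 0, 1, 0]), unmap returns tensor([7, 0, 9, 0]).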
+
+
+def mask2ndarray(mask):
+ """Convert Mask to ndarray..
+
+ Args:
+ mask (:obj:`BitmapMasks` or :obj:`PolygonMasks` or
+ torch.Tensor or np.ndarray): The mask to be converted.
+
+ Returns:
+ np.ndarray: Ndarray mask of shape (n, h, w) that has been converted
+ """
+ if isinstance(mask, (BitmapMasks, PolygonMasks)):
+ mask = mask.to_ndarray()
+ elif isinstance(mask, torch.Tensor):
+ mask = mask.detach().cpu().numpy()
+ elif not isinstance(mask, np.ndarray):
+ raise TypeError(f'Unsupported {type(mask)} data type')
+ return mask
diff --git a/annotator/uniformer/mmdet_null/core/visualization/__init__.py b/annotator/uniformer/mmdet_null/core/visualization/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..4ff995c0861490941f8cfc19ebbd41a2ee7e2d65
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/core/visualization/__init__.py
@@ -0,0 +1,4 @@
+from .image import (color_val_matplotlib, imshow_det_bboxes,
+ imshow_gt_det_bboxes)
+
+__all__ = ['imshow_det_bboxes', 'imshow_gt_det_bboxes', 'color_val_matplotlib']
diff --git a/annotator/uniformer/mmdet_null/core/visualization/image.py b/annotator/uniformer/mmdet_null/core/visualization/image.py
new file mode 100644
index 0000000000000000000000000000000000000000..5a148384d7a77c4d9849c54570e85740eaff8235
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/core/visualization/image.py
@@ -0,0 +1,303 @@
+import matplotlib.pyplot as plt
+import mmcv
+import numpy as np
+import pycocotools.mask as mask_util
+from matplotlib.collections import PatchCollection
+from matplotlib.patches import Polygon
+
+from ..utils import mask2ndarray
+
+EPS = 1e-2
+
+
+def color_val_matplotlib(color):
+ """Convert various input in BGR order to normalized RGB matplotlib color
+    tuples.
+
+ Args:
+ color (:obj:`Color`/str/tuple/int/ndarray): Color inputs
+
+ Returns:
+ tuple[float]: A tuple of 3 normalized floats indicating RGB channels.
+ """
+ color = mmcv.color_val(color)
+ color = [color / 255 for color in color[::-1]]
+ return tuple(color)
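+# Example (illustrative, assuming mmcv.color_val passes a valid BGR tuple
+# through unchanged): the BGR tuple (255, 0, 0), i.e. pure blue, maps to the
+# normalized RGB tuple (0.0, 0.0, 1.0) expected by matplotlib.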
+
+
+def imshow_det_bboxes(img,
+ bboxes,
+ labels,
+ segms=None,
+ class_names=None,
+ score_thr=0,
+ bbox_color='green',
+ text_color='green',
+ mask_color=None,
+ thickness=2,
+ font_size=13,
+ win_name='',
+ show=True,
+ wait_time=0,
+ out_file=None):
+ """Draw bboxes and class labels (with scores) on an image.
+
+ Args:
+ img (str or ndarray): The image to be displayed.
+ bboxes (ndarray): Bounding boxes (with scores), shaped (n, 4) or
+ (n, 5).
+ labels (ndarray): Labels of bboxes.
+ segms (ndarray or None): Masks, shaped (n,h,w) or None
+ class_names (list[str]): Names of each classes.
+ score_thr (float): Minimum score of bboxes to be shown. Default: 0
+ bbox_color (str or tuple(int) or :obj:`Color`):Color of bbox lines.
+ The tuple of color should be in BGR order. Default: 'green'
+ text_color (str or tuple(int) or :obj:`Color`):Color of texts.
+ The tuple of color should be in BGR order. Default: 'green'
+ mask_color (str or tuple(int) or :obj:`Color`, optional):
+ Color of masks. The tuple of color should be in BGR order.
+ Default: None
+ thickness (int): Thickness of lines. Default: 2
+ font_size (int): Font size of texts. Default: 13
+ show (bool): Whether to show the image. Default: True
+ win_name (str): The window name. Default: ''
+ wait_time (float): Value of waitKey param. Default: 0.
+ out_file (str, optional): The filename to write the image.
+ Default: None
+
+ Returns:
+ ndarray: The image with bboxes drawn on it.
+ """
+    assert bboxes.ndim == 2, \
+        f'bboxes ndim should be 2, but its ndim is {bboxes.ndim}.'
+    assert labels.ndim == 1, \
+        f'labels ndim should be 1, but its ndim is {labels.ndim}.'
+    assert bboxes.shape[0] == labels.shape[0], \
+        'bboxes.shape[0] and labels.shape[0] should be equal.'
+    assert bboxes.shape[1] == 4 or bboxes.shape[1] == 5, \
+        f'bboxes.shape[1] should be 4 or 5, but it is {bboxes.shape[1]}.'
+ img = mmcv.imread(img).astype(np.uint8)
+
+ if score_thr > 0:
+ assert bboxes.shape[1] == 5
+ scores = bboxes[:, -1]
+ inds = scores > score_thr
+ bboxes = bboxes[inds, :]
+ labels = labels[inds]
+ if segms is not None:
+ segms = segms[inds, ...]
+
+ mask_colors = []
+ if labels.shape[0] > 0:
+ if mask_color is None:
+ # random color
+ np.random.seed(42)
+ mask_colors = [
+ np.random.randint(0, 256, (1, 3), dtype=np.uint8)
+ for _ in range(max(labels) + 1)
+ ]
+ else:
+ # specify color
+ mask_colors = [
+ np.array(mmcv.color_val(mask_color)[::-1], dtype=np.uint8)
+ ] * (
+ max(labels) + 1)
+
+ bbox_color = color_val_matplotlib(bbox_color)
+ text_color = color_val_matplotlib(text_color)
+
+ img = mmcv.bgr2rgb(img)
+ width, height = img.shape[1], img.shape[0]
+ img = np.ascontiguousarray(img)
+
+ fig = plt.figure(win_name, frameon=False)
+ plt.title(win_name)
+ canvas = fig.canvas
+ dpi = fig.get_dpi()
+ # add a small EPS to avoid precision lost due to matplotlib's truncation
+ # (https://github.com/matplotlib/matplotlib/issues/15363)
+ fig.set_size_inches((width + EPS) / dpi, (height + EPS) / dpi)
+
+ # remove white edges by set subplot margin
+ plt.subplots_adjust(left=0, right=1, bottom=0, top=1)
+ ax = plt.gca()
+ ax.axis('off')
+
+ polygons = []
+ color = []
+ for i, (bbox, label) in enumerate(zip(bboxes, labels)):
+ bbox_int = bbox.astype(np.int32)
+ poly = [[bbox_int[0], bbox_int[1]], [bbox_int[0], bbox_int[3]],
+ [bbox_int[2], bbox_int[3]], [bbox_int[2], bbox_int[1]]]
+ np_poly = np.array(poly).reshape((4, 2))
+ polygons.append(Polygon(np_poly))
+ color.append(bbox_color)
+ label_text = class_names[
+ label] if class_names is not None else f'class {label}'
+ if len(bbox) > 4:
+ label_text += f'|{bbox[-1]:.02f}'
+ ax.text(
+ bbox_int[0],
+ bbox_int[1],
+ f'{label_text}',
+ bbox={
+ 'facecolor': 'black',
+ 'alpha': 0.8,
+ 'pad': 0.7,
+ 'edgecolor': 'none'
+ },
+ color=text_color,
+ fontsize=font_size,
+ verticalalignment='top',
+ horizontalalignment='left')
+ if segms is not None:
+ color_mask = mask_colors[labels[i]]
+ mask = segms[i].astype(bool)
+ img[mask] = img[mask] * 0.5 + color_mask * 0.5
+
+ plt.imshow(img)
+
+ p = PatchCollection(
+ polygons, facecolor='none', edgecolors=color, linewidths=thickness)
+ ax.add_collection(p)
+
+ stream, _ = canvas.print_to_buffer()
+ buffer = np.frombuffer(stream, dtype='uint8')
+ img_rgba = buffer.reshape(height, width, 4)
+ rgb, alpha = np.split(img_rgba, [3], axis=2)
+ img = rgb.astype('uint8')
+ img = mmcv.rgb2bgr(img)
+
+ if show:
+ # We do not use cv2 for display because in some cases, opencv will
+ # conflict with Qt, it will output a warning: Current thread
+ # is not the object's thread. You can refer to
+ # https://github.com/opencv/opencv-python/issues/46 for details
+ if wait_time == 0:
+ plt.show()
+ else:
+ plt.show(block=False)
+ plt.pause(wait_time)
+ if out_file is not None:
+ mmcv.imwrite(img, out_file)
+
+ plt.close()
+
+ return img
+
+
+def imshow_gt_det_bboxes(img,
+ annotation,
+ result,
+ class_names=None,
+ score_thr=0,
+ gt_bbox_color=(255, 102, 61),
+ gt_text_color=(255, 102, 61),
+ gt_mask_color=(255, 102, 61),
+ det_bbox_color=(72, 101, 241),
+ det_text_color=(72, 101, 241),
+ det_mask_color=(72, 101, 241),
+ thickness=2,
+ font_size=13,
+ win_name='',
+ show=True,
+ wait_time=0,
+ out_file=None):
+ """General visualization GT and result function.
+
+ Args:
+        img (str or ndarray): The image to be displayed.
+        annotation (dict): Ground truth annotations, which must contain the
+            keys 'gt_bboxes' and 'gt_labels', and may contain 'gt_masks'.
+ result (tuple[list] or list): The detection result, can be either
+ (bbox, segm) or just bbox.
+ class_names (list[str]): Names of each classes.
+ score_thr (float): Minimum score of bboxes to be shown. Default: 0
+ gt_bbox_color (str or tuple(int) or :obj:`Color`):Color of bbox lines.
+ The tuple of color should be in BGR order. Default: (255, 102, 61)
+ gt_text_color (str or tuple(int) or :obj:`Color`):Color of texts.
+ The tuple of color should be in BGR order. Default: (255, 102, 61)
+ gt_mask_color (str or tuple(int) or :obj:`Color`, optional):
+ Color of masks. The tuple of color should be in BGR order.
+ Default: (255, 102, 61)
+ det_bbox_color (str or tuple(int) or :obj:`Color`):Color of bbox lines.
+ The tuple of color should be in BGR order. Default: (72, 101, 241)
+ det_text_color (str or tuple(int) or :obj:`Color`):Color of texts.
+ The tuple of color should be in BGR order. Default: (72, 101, 241)
+ det_mask_color (str or tuple(int) or :obj:`Color`, optional):
+ Color of masks. The tuple of color should be in BGR order.
+ Default: (72, 101, 241)
+ thickness (int): Thickness of lines. Default: 2
+ font_size (int): Font size of texts. Default: 13
+ win_name (str): The window name. Default: ''
+ show (bool): Whether to show the image. Default: True
+ wait_time (float): Value of waitKey param. Default: 0.
+ out_file (str, optional): The filename to write the image.
+ Default: None
+
+ Returns:
+ ndarray: The image with bboxes or masks drawn on it.
+ """
+ assert 'gt_bboxes' in annotation
+ assert 'gt_labels' in annotation
+ assert isinstance(
+ result,
+ (tuple, list)), f'Expected tuple or list, but get {type(result)}'
+
+ gt_masks = annotation.get('gt_masks', None)
+ if gt_masks is not None:
+ gt_masks = mask2ndarray(gt_masks)
+
+ img = mmcv.imread(img)
+
+ img = imshow_det_bboxes(
+ img,
+ annotation['gt_bboxes'],
+ annotation['gt_labels'],
+ gt_masks,
+ class_names=class_names,
+ bbox_color=gt_bbox_color,
+ text_color=gt_text_color,
+ mask_color=gt_mask_color,
+ thickness=thickness,
+ font_size=font_size,
+ win_name=win_name,
+ show=False)
+
+ if isinstance(result, tuple):
+ bbox_result, segm_result = result
+ if isinstance(segm_result, tuple):
+ segm_result = segm_result[0] # ms rcnn
+ else:
+ bbox_result, segm_result = result, None
+
+ bboxes = np.vstack(bbox_result)
+ labels = [
+ np.full(bbox.shape[0], i, dtype=np.int32)
+ for i, bbox in enumerate(bbox_result)
+ ]
+ labels = np.concatenate(labels)
+
+ segms = None
+ if segm_result is not None and len(labels) > 0: # non empty
+ segms = mmcv.concat_list(segm_result)
+ segms = mask_util.decode(segms)
+ segms = segms.transpose(2, 0, 1)
+
+ img = imshow_det_bboxes(
+ img,
+ bboxes,
+ labels,
+ segms=segms,
+ class_names=class_names,
+ score_thr=score_thr,
+ bbox_color=det_bbox_color,
+ text_color=det_text_color,
+ mask_color=det_mask_color,
+ thickness=thickness,
+ font_size=font_size,
+ win_name=win_name,
+ show=show,
+ wait_time=wait_time,
+ out_file=out_file)
+ return img
diff --git a/annotator/uniformer/mmdet_null/datasets/__init__.py b/annotator/uniformer/mmdet_null/datasets/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..be4ea28a86e3c165cc2556f860079305f316294e
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/datasets/__init__.py
@@ -0,0 +1,26 @@
+# from .builder import DATASETS, PIPELINES, build_dataloader, build_dataset
+# from .cityscapes import CityscapesDataset
+# from .coco import CocoDataset
+# from .custom import CustomDataset
+# from .dataset_wrappers import (ClassBalancedDataset, ConcatDataset,
+# RepeatDataset)
+# from .deepfashion import DeepFashionDataset
+# from .lvis import LVISDataset, LVISV1Dataset, LVISV05Dataset
+# from .samplers import DistributedGroupSampler, DistributedSampler, GroupSampler
+# from .utils import (NumClassCheckHook, get_loading_pipeline,
+# replace_ImageToTensor)
+# from .voc import VOCDataset
+# from .wider_face import WIDERFaceDataset
+# from .xml_style import XMLDataset
+
+# __all__ = [
+# 'CustomDataset', 'XMLDataset', 'CocoDataset', 'DeepFashionDataset',
+# 'VOCDataset', 'CityscapesDataset', 'LVISDataset', 'LVISV05Dataset',
+# 'LVISV1Dataset', 'GroupSampler', 'DistributedGroupSampler',
+# 'DistributedSampler', 'build_dataloader', 'ConcatDataset', 'RepeatDataset',
+# 'ClassBalancedDataset', 'WIDERFaceDataset', 'DATASETS', 'PIPELINES',
+# 'build_dataset', 'replace_ImageToTensor', 'get_loading_pipeline',
+# 'NumClassCheckHook'
+# ]
+from .utils import replace_ImageToTensor
+__all__ = ['replace_ImageToTensor']
\ No newline at end of file
diff --git a/annotator/uniformer/mmdet_null/datasets/builder.py b/annotator/uniformer/mmdet_null/datasets/builder.py
new file mode 100644
index 0000000000000000000000000000000000000000..d6bf37d8c8f2dc9bd0e1b7383f446112c4f95cbd
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/datasets/builder.py
@@ -0,0 +1,143 @@
+import copy
+import platform
+import random
+from functools import partial
+
+import numpy as np
+from annotator.uniformer.mmcv.parallel import collate
+from annotator.uniformer.mmcv.runner import get_dist_info
+from annotator.uniformer.mmcv.utils import Registry, build_from_cfg
+from torch.utils.data import DataLoader
+
+from .samplers import DistributedGroupSampler, DistributedSampler, GroupSampler
+
+if platform.system() != 'Windows':
+ # https://github.com/pytorch/pytorch/issues/973
+ import resource
+ rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
+ hard_limit = rlimit[1]
+ soft_limit = min(4096, hard_limit)
+ resource.setrlimit(resource.RLIMIT_NOFILE, (soft_limit, hard_limit))
+
+DATASETS = Registry('dataset')
+PIPELINES = Registry('pipeline')
+
+
+def _concat_dataset(cfg, default_args=None):
+ from .dataset_wrappers import ConcatDataset
+ ann_files = cfg['ann_file']
+ img_prefixes = cfg.get('img_prefix', None)
+ seg_prefixes = cfg.get('seg_prefix', None)
+ proposal_files = cfg.get('proposal_file', None)
+ separate_eval = cfg.get('separate_eval', True)
+
+ datasets = []
+ num_dset = len(ann_files)
+ for i in range(num_dset):
+ data_cfg = copy.deepcopy(cfg)
+ # pop 'separate_eval' since it is not a valid key for common datasets.
+ if 'separate_eval' in data_cfg:
+ data_cfg.pop('separate_eval')
+ data_cfg['ann_file'] = ann_files[i]
+ if isinstance(img_prefixes, (list, tuple)):
+ data_cfg['img_prefix'] = img_prefixes[i]
+ if isinstance(seg_prefixes, (list, tuple)):
+ data_cfg['seg_prefix'] = seg_prefixes[i]
+ if isinstance(proposal_files, (list, tuple)):
+ data_cfg['proposal_file'] = proposal_files[i]
+ datasets.append(build_dataset(data_cfg, default_args))
+
+ return ConcatDataset(datasets, separate_eval)
+
+
+def build_dataset(cfg, default_args=None):
+ from .dataset_wrappers import (ConcatDataset, RepeatDataset,
+ ClassBalancedDataset)
+ if isinstance(cfg, (list, tuple)):
+ dataset = ConcatDataset([build_dataset(c, default_args) for c in cfg])
+ elif cfg['type'] == 'ConcatDataset':
+ dataset = ConcatDataset(
+ [build_dataset(c, default_args) for c in cfg['datasets']],
+ cfg.get('separate_eval', True))
+ elif cfg['type'] == 'RepeatDataset':
+ dataset = RepeatDataset(
+ build_dataset(cfg['dataset'], default_args), cfg['times'])
+ elif cfg['type'] == 'ClassBalancedDataset':
+ dataset = ClassBalancedDataset(
+ build_dataset(cfg['dataset'], default_args), cfg['oversample_thr'])
+ elif isinstance(cfg.get('ann_file'), (list, tuple)):
+ dataset = _concat_dataset(cfg, default_args)
+ else:
+ dataset = build_from_cfg(cfg, DATASETS, default_args)
+
+ return dataset
+
+
+def build_dataloader(dataset,
+ samples_per_gpu,
+ workers_per_gpu,
+ num_gpus=1,
+ dist=True,
+ shuffle=True,
+ seed=None,
+ **kwargs):
+ """Build PyTorch DataLoader.
+
+ In distributed training, each GPU/process has a dataloader.
+ In non-distributed training, there is only one dataloader for all GPUs.
+
+ Args:
+ dataset (Dataset): A PyTorch dataset.
+ samples_per_gpu (int): Number of training samples on each GPU, i.e.,
+ batch size of each GPU.
+ workers_per_gpu (int): How many subprocesses to use for data loading
+ for each GPU.
+ num_gpus (int): Number of GPUs. Only used in non-distributed training.
+ dist (bool): Distributed training/test or not. Default: True.
+ shuffle (bool): Whether to shuffle the data at every epoch.
+ Default: True.
+ kwargs: any keyword argument to be used to initialize DataLoader
+
+ Returns:
+ DataLoader: A PyTorch dataloader.
+ """
+ rank, world_size = get_dist_info()
+ if dist:
+        # DistributedGroupSampler shuffles the data while ensuring that the
+        # images on each GPU belong to the same group
+ if shuffle:
+ sampler = DistributedGroupSampler(
+ dataset, samples_per_gpu, world_size, rank, seed=seed)
+ else:
+ sampler = DistributedSampler(
+ dataset, world_size, rank, shuffle=False, seed=seed)
+ batch_size = samples_per_gpu
+ num_workers = workers_per_gpu
+ else:
+ sampler = GroupSampler(dataset, samples_per_gpu) if shuffle else None
+ batch_size = num_gpus * samples_per_gpu
+ num_workers = num_gpus * workers_per_gpu
+
+ init_fn = partial(
+ worker_init_fn, num_workers=num_workers, rank=rank,
+ seed=seed) if seed is not None else None
+
+ data_loader = DataLoader(
+ dataset,
+ batch_size=batch_size,
+ sampler=sampler,
+ num_workers=num_workers,
+ collate_fn=partial(collate, samples_per_gpu=samples_per_gpu),
+ pin_memory=False,
+ worker_init_fn=init_fn,
+ **kwargs)
+
+ return data_loader
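+# Usage sketch (illustrative; `dataset` is assumed to come from build_dataset):
+# in non-distributed mode this yields batches of num_gpus * samples_per_gpu
+# samples, e.g.
+#   loader = build_dataloader(dataset, samples_per_gpu=2, workers_per_gpu=2,
+#                             num_gpus=1, dist=False)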
+
+
+def worker_init_fn(worker_id, num_workers, rank, seed):
+    # The seed of each worker equals
+    # num_workers * rank + worker_id + user_seed
+ worker_seed = num_workers * rank + worker_id + seed
+ np.random.seed(worker_seed)
+ random.seed(worker_seed)
diff --git a/annotator/uniformer/mmdet_null/datasets/cityscapes.py b/annotator/uniformer/mmdet_null/datasets/cityscapes.py
new file mode 100644
index 0000000000000000000000000000000000000000..71eead87e7f4e511c0cb59e69c3a599832ada0e4
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/datasets/cityscapes.py
@@ -0,0 +1,334 @@
+# Modified from https://github.com/facebookresearch/detectron2/blob/master/detectron2/data/datasets/cityscapes.py # noqa
+# and https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/evaluation/evalInstanceLevelSemanticLabeling.py # noqa
+
+import glob
+import os
+import os.path as osp
+import tempfile
+from collections import OrderedDict
+
+import mmcv
+import numpy as np
+import pycocotools.mask as maskUtils
+from mmcv.utils import print_log
+
+from .builder import DATASETS
+from .coco import CocoDataset
+
+
+@DATASETS.register_module()
+class CityscapesDataset(CocoDataset):
+
+ CLASSES = ('person', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle',
+ 'bicycle')
+
+ def _filter_imgs(self, min_size=32):
+ """Filter images too small or without ground truths."""
+ valid_inds = []
+ # obtain images that contain annotation
+ ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values())
+ # obtain images that contain annotations of the required categories
+ ids_in_cat = set()
+ for i, class_id in enumerate(self.cat_ids):
+ ids_in_cat |= set(self.coco.cat_img_map[class_id])
+ # merge the image id sets of the two conditions and use the merged set
+ # to filter out images if self.filter_empty_gt=True
+ ids_in_cat &= ids_with_ann
+
+ valid_img_ids = []
+ for i, img_info in enumerate(self.data_infos):
+ img_id = img_info['id']
+ ann_ids = self.coco.getAnnIds(imgIds=[img_id])
+ ann_info = self.coco.loadAnns(ann_ids)
+ all_iscrowd = all([_['iscrowd'] for _ in ann_info])
+ if self.filter_empty_gt and (self.img_ids[i] not in ids_in_cat
+ or all_iscrowd):
+ continue
+ if min(img_info['width'], img_info['height']) >= min_size:
+ valid_inds.append(i)
+ valid_img_ids.append(img_id)
+ self.img_ids = valid_img_ids
+ return valid_inds
+
+ def _parse_ann_info(self, img_info, ann_info):
+ """Parse bbox and mask annotation.
+
+ Args:
+ img_info (dict): Image info of an image.
+ ann_info (list[dict]): Annotation info of an image.
+
+ Returns:
+ dict: A dict containing the following keys: bboxes, \
+ bboxes_ignore, labels, masks, seg_map. \
+ "masks" are already decoded into binary masks.
+ """
+ gt_bboxes = []
+ gt_labels = []
+ gt_bboxes_ignore = []
+ gt_masks_ann = []
+
+ for i, ann in enumerate(ann_info):
+ if ann.get('ignore', False):
+ continue
+ x1, y1, w, h = ann['bbox']
+ if ann['area'] <= 0 or w < 1 or h < 1:
+ continue
+ if ann['category_id'] not in self.cat_ids:
+ continue
+ bbox = [x1, y1, x1 + w, y1 + h]
+ if ann.get('iscrowd', False):
+ gt_bboxes_ignore.append(bbox)
+ else:
+ gt_bboxes.append(bbox)
+ gt_labels.append(self.cat2label[ann['category_id']])
+ gt_masks_ann.append(ann['segmentation'])
+
+ if gt_bboxes:
+ gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
+ gt_labels = np.array(gt_labels, dtype=np.int64)
+ else:
+ gt_bboxes = np.zeros((0, 4), dtype=np.float32)
+ gt_labels = np.array([], dtype=np.int64)
+
+ if gt_bboxes_ignore:
+ gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)
+ else:
+ gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)
+
+ ann = dict(
+ bboxes=gt_bboxes,
+ labels=gt_labels,
+ bboxes_ignore=gt_bboxes_ignore,
+ masks=gt_masks_ann,
+ seg_map=img_info['segm_file'])
+
+ return ann
+
+ def results2txt(self, results, outfile_prefix):
+ """Dump the detection results to a txt file.
+
+ Args:
+ results (list[list | tuple]): Testing results of the
+ dataset.
+            outfile_prefix (str): The filename prefix of the txt files.
+ If the prefix is "somepath/xxx",
+ the txt files will be named "somepath/xxx.txt".
+
+ Returns:
+ list[str]: Result txt files which contains corresponding \
+ instance segmentation images.
+ """
+ try:
+ import cityscapesscripts.helpers.labels as CSLabels
+ except ImportError:
+            raise ImportError('Please run "pip install cityscapesscripts" to '
+ 'install cityscapesscripts first.')
+ result_files = []
+ os.makedirs(outfile_prefix, exist_ok=True)
+ prog_bar = mmcv.ProgressBar(len(self))
+ for idx in range(len(self)):
+ result = results[idx]
+ filename = self.data_infos[idx]['filename']
+ basename = osp.splitext(osp.basename(filename))[0]
+ pred_txt = osp.join(outfile_prefix, basename + '_pred.txt')
+
+ bbox_result, segm_result = result
+ bboxes = np.vstack(bbox_result)
+ # segm results
+ if isinstance(segm_result, tuple):
+ # Some detectors use different scores for bbox and mask,
+ # like Mask Scoring R-CNN. Score of segm will be used instead
+ # of bbox score.
+ segms = mmcv.concat_list(segm_result[0])
+ mask_score = segm_result[1]
+ else:
+ # use bbox score for mask score
+ segms = mmcv.concat_list(segm_result)
+ mask_score = [bbox[-1] for bbox in bboxes]
+ labels = [
+ np.full(bbox.shape[0], i, dtype=np.int32)
+ for i, bbox in enumerate(bbox_result)
+ ]
+ labels = np.concatenate(labels)
+
+ assert len(bboxes) == len(segms) == len(labels)
+ num_instances = len(bboxes)
+ prog_bar.update()
+ with open(pred_txt, 'w') as fout:
+ for i in range(num_instances):
+ pred_class = labels[i]
+ classes = self.CLASSES[pred_class]
+ class_id = CSLabels.name2label[classes].id
+ score = mask_score[i]
+ mask = maskUtils.decode(segms[i]).astype(np.uint8)
+ png_filename = osp.join(outfile_prefix,
+ basename + f'_{i}_{classes}.png')
+ mmcv.imwrite(mask, png_filename)
+ fout.write(f'{osp.basename(png_filename)} {class_id} '
+ f'{score}\n')
+ result_files.append(pred_txt)
+
+ return result_files
+
+ def format_results(self, results, txtfile_prefix=None):
+ """Format the results to txt (standard format for Cityscapes
+ evaluation).
+
+ Args:
+ results (list): Testing results of the dataset.
+ txtfile_prefix (str | None): The prefix of txt files. It includes
+ the file path and the prefix of filename, e.g., "a/b/prefix".
+ If not specified, a temp file will be created. Default: None.
+
+ Returns:
+            tuple: (result_files, tmp_dir), result_files is a list containing \
+                the txt filepaths, tmp_dir is the temporary directory created \
+                for saving txt/png files when txtfile_prefix is not specified.
+ """
+ assert isinstance(results, list), 'results must be a list'
+ assert len(results) == len(self), (
+ 'The length of results is not equal to the dataset len: {} != {}'.
+ format(len(results), len(self)))
+
+ if txtfile_prefix is None:
+ tmp_dir = tempfile.TemporaryDirectory()
+ txtfile_prefix = osp.join(tmp_dir.name, 'results')
+ else:
+ tmp_dir = None
+ result_files = self.results2txt(results, txtfile_prefix)
+
+ return result_files, tmp_dir
+
+ def evaluate(self,
+ results,
+ metric='bbox',
+ logger=None,
+ outfile_prefix=None,
+ classwise=False,
+ proposal_nums=(100, 300, 1000),
+ iou_thrs=np.arange(0.5, 0.96, 0.05)):
+ """Evaluation in Cityscapes/COCO protocol.
+
+ Args:
+ results (list[list | tuple]): Testing results of the dataset.
+ metric (str | list[str]): Metrics to be evaluated. Options are
+ 'bbox', 'segm', 'proposal', 'proposal_fast'.
+ logger (logging.Logger | str | None): Logger used for printing
+ related information during evaluation. Default: None.
+ outfile_prefix (str | None): The prefix of output file. It includes
+ the file path and the prefix of filename, e.g., "a/b/prefix".
+ If results are evaluated with COCO protocol, it would be the
+ prefix of output json file. For example, the metric is 'bbox'
+ and 'segm', then json files would be "a/b/prefix.bbox.json" and
+ "a/b/prefix.segm.json".
+ If results are evaluated with cityscapes protocol, it would be
+ the prefix of output txt/png files. The output files would be
+ png images under folder "a/b/prefix/xxx/" and the file name of
+ images would be written into a txt file
+ "a/b/prefix/xxx_pred.txt", where "xxx" is the video name of
+ cityscapes. If not specified, a temp file will be created.
+ Default: None.
+            classwise (bool): Whether to evaluate the AP for each class.
+ proposal_nums (Sequence[int]): Proposal number used for evaluating
+ recalls, such as recall@100, recall@1000.
+ Default: (100, 300, 1000).
+ iou_thrs (Sequence[float]): IoU threshold used for evaluating
+ recalls. If set to a list, the average recall of all IoUs will
+                also be computed. Default: np.arange(0.5, 0.96, 0.05).
+
+ Returns:
+ dict[str, float]: COCO style evaluation metric or cityscapes mAP \
+ and AP@50.
+ """
+ eval_results = dict()
+
+ metrics = metric.copy() if isinstance(metric, list) else [metric]
+
+ if 'cityscapes' in metrics:
+ eval_results.update(
+ self._evaluate_cityscapes(results, outfile_prefix, logger))
+ metrics.remove('cityscapes')
+
+ # left metrics are all coco metric
+ if len(metrics) > 0:
+ # create CocoDataset with CityscapesDataset annotation
+ self_coco = CocoDataset(self.ann_file, self.pipeline.transforms,
+ None, self.data_root, self.img_prefix,
+ self.seg_prefix, self.proposal_file,
+ self.test_mode, self.filter_empty_gt)
+ # TODO: remove this in the future
+ # reload annotations of correct class
+ self_coco.CLASSES = self.CLASSES
+ self_coco.data_infos = self_coco.load_annotations(self.ann_file)
+ eval_results.update(
+ self_coco.evaluate(results, metrics, logger, outfile_prefix,
+ classwise, proposal_nums, iou_thrs))
+
+ return eval_results
+
+ def _evaluate_cityscapes(self, results, txtfile_prefix, logger):
+ """Evaluation in Cityscapes protocol.
+
+ Args:
+ results (list): Testing results of the dataset.
+ txtfile_prefix (str | None): The prefix of output txt file
+ logger (logging.Logger | str | None): Logger used for printing
+ related information during evaluation. Default: None.
+
+ Returns:
+            dict[str, float]: Cityscapes evaluation results, contains 'mAP' \
+ and 'AP@50'.
+ """
+
+ try:
+ import cityscapesscripts.evaluation.evalInstanceLevelSemanticLabeling as CSEval # noqa
+ except ImportError:
+            raise ImportError('Please run "pip install cityscapesscripts" to '
+ 'install cityscapesscripts first.')
+ msg = 'Evaluating in Cityscapes style'
+ if logger is None:
+ msg = '\n' + msg
+ print_log(msg, logger=logger)
+
+ result_files, tmp_dir = self.format_results(results, txtfile_prefix)
+
+ if tmp_dir is None:
+ result_dir = osp.join(txtfile_prefix, 'results')
+ else:
+ result_dir = osp.join(tmp_dir.name, 'results')
+
+ eval_results = OrderedDict()
+ print_log(f'Evaluating results under {result_dir} ...', logger=logger)
+
+ # set global states in cityscapes evaluation API
+ CSEval.args.cityscapesPath = os.path.join(self.img_prefix, '../..')
+ CSEval.args.predictionPath = os.path.abspath(result_dir)
+ CSEval.args.predictionWalk = None
+ CSEval.args.JSONOutput = False
+ CSEval.args.colorized = False
+ CSEval.args.gtInstancesFile = os.path.join(result_dir,
+ 'gtInstances.json')
+ CSEval.args.groundTruthSearch = os.path.join(
+ self.img_prefix.replace('leftImg8bit', 'gtFine'),
+ '*/*_gtFine_instanceIds.png')
+
+ groundTruthImgList = glob.glob(CSEval.args.groundTruthSearch)
+ assert len(groundTruthImgList), 'Cannot find ground truth images' \
+ f' in {CSEval.args.groundTruthSearch}.'
+ predictionImgList = []
+ for gt in groundTruthImgList:
+ predictionImgList.append(CSEval.getPrediction(gt, CSEval.args))
+ CSEval_results = CSEval.evaluateImgLists(predictionImgList,
+ groundTruthImgList,
+ CSEval.args)['averages']
+
+ eval_results['mAP'] = CSEval_results['allAp']
+ eval_results['AP@50'] = CSEval_results['allAp50%']
+ if tmp_dir is not None:
+ tmp_dir.cleanup()
+ return eval_results
diff --git a/annotator/uniformer/mmdet_null/datasets/coco.py b/annotator/uniformer/mmdet_null/datasets/coco.py
new file mode 100644
index 0000000000000000000000000000000000000000..65802369de9f82b70e4dcee96c22d6a886120aa1
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/datasets/coco.py
@@ -0,0 +1,546 @@
+import itertools
+import logging
+import os.path as osp
+import tempfile
+from collections import OrderedDict
+
+import annotator.uniformer.mmcv as mmcv
+import numpy as np
+import pycocotools
+from annotator.uniformer.mmcv.utils import print_log
+from pycocotools.coco import COCO
+from pycocotools.cocoeval import COCOeval
+from terminaltables import AsciiTable
+
+from annotator.uniformer.mmdet.core import eval_recalls
+from .builder import DATASETS
+from .custom import CustomDataset
+
+
+@DATASETS.register_module()
+class CocoDataset(CustomDataset):
+
+ CLASSES = ('person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
+ 'train', 'truck', 'boat', 'traffic light', 'fire hydrant',
+ 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog',
+ 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe',
+ 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
+ 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat',
+ 'baseball glove', 'skateboard', 'surfboard', 'tennis racket',
+ 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl',
+ 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
+ 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
+ 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop',
+ 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave',
+ 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock',
+ 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush')
+
+ def load_annotations(self, ann_file):
+ """Load annotation from COCO style annotation file.
+
+ Args:
+ ann_file (str): Path of annotation file.
+
+ Returns:
+ list[dict]: Annotation info from COCO api.
+ """
+ if not getattr(pycocotools, '__version__', '0') >= '12.0.2':
+ raise AssertionError(
+ 'Incompatible version of pycocotools is installed. '
+ 'Run pip uninstall pycocotools first. Then run pip '
+ 'install mmpycocotools to install open-mmlab forked '
+ 'pycocotools.')
+
+ self.coco = COCO(ann_file)
+ self.cat_ids = self.coco.get_cat_ids(cat_names=self.CLASSES)
+ self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}
+ self.img_ids = self.coco.get_img_ids()
+ data_infos = []
+ total_ann_ids = []
+ for i in self.img_ids:
+ info = self.coco.load_imgs([i])[0]
+ info['filename'] = info['file_name']
+ data_infos.append(info)
+ ann_ids = self.coco.get_ann_ids(img_ids=[i])
+ total_ann_ids.extend(ann_ids)
+ assert len(set(total_ann_ids)) == len(
+ total_ann_ids), f"Annotation ids in '{ann_file}' are not unique!"
+ return data_infos
+
+ def get_ann_info(self, idx):
+ """Get COCO annotation by index.
+
+ Args:
+ idx (int): Index of data.
+
+ Returns:
+ dict: Annotation info of specified index.
+ """
+
+ img_id = self.data_infos[idx]['id']
+ ann_ids = self.coco.get_ann_ids(img_ids=[img_id])
+ ann_info = self.coco.load_anns(ann_ids)
+ return self._parse_ann_info(self.data_infos[idx], ann_info)
+
+ def get_cat_ids(self, idx):
+ """Get COCO category ids by index.
+
+ Args:
+ idx (int): Index of data.
+
+ Returns:
+ list[int]: All categories in the image of specified index.
+ """
+
+ img_id = self.data_infos[idx]['id']
+ ann_ids = self.coco.get_ann_ids(img_ids=[img_id])
+ ann_info = self.coco.load_anns(ann_ids)
+ return [ann['category_id'] for ann in ann_info]
+
+ def _filter_imgs(self, min_size=32):
+ """Filter images too small or without ground truths."""
+ valid_inds = []
+ # obtain images that contain annotation
+ ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values())
+ # obtain images that contain annotations of the required categories
+ ids_in_cat = set()
+ for i, class_id in enumerate(self.cat_ids):
+ ids_in_cat |= set(self.coco.cat_img_map[class_id])
+ # merge the image id sets of the two conditions and use the merged set
+ # to filter out images if self.filter_empty_gt=True
+ ids_in_cat &= ids_with_ann
+
+ valid_img_ids = []
+ for i, img_info in enumerate(self.data_infos):
+ img_id = self.img_ids[i]
+ if self.filter_empty_gt and img_id not in ids_in_cat:
+ continue
+ if min(img_info['width'], img_info['height']) >= min_size:
+ valid_inds.append(i)
+ valid_img_ids.append(img_id)
+ self.img_ids = valid_img_ids
+ return valid_inds
+
+ def _parse_ann_info(self, img_info, ann_info):
+ """Parse bbox and mask annotation.
+
+ Args:
+            img_info (dict): Information of the image, e.g. filename,
+                width and height.
+            ann_info (list[dict]): Annotation info of an image.
+
+ Returns:
+ dict: A dict containing the following keys: bboxes, bboxes_ignore,\
+ labels, masks, seg_map. "masks" are raw annotations and not \
+ decoded into binary masks.
+ """
+ gt_bboxes = []
+ gt_labels = []
+ gt_bboxes_ignore = []
+ gt_masks_ann = []
+ for i, ann in enumerate(ann_info):
+ if ann.get('ignore', False):
+ continue
+ x1, y1, w, h = ann['bbox']
+ inter_w = max(0, min(x1 + w, img_info['width']) - max(x1, 0))
+ inter_h = max(0, min(y1 + h, img_info['height']) - max(y1, 0))
+ if inter_w * inter_h == 0:
+ continue
+ if ann['area'] <= 0 or w < 1 or h < 1:
+ continue
+ if ann['category_id'] not in self.cat_ids:
+ continue
+ bbox = [x1, y1, x1 + w, y1 + h]
+ if ann.get('iscrowd', False):
+ gt_bboxes_ignore.append(bbox)
+ else:
+ gt_bboxes.append(bbox)
+ gt_labels.append(self.cat2label[ann['category_id']])
+ gt_masks_ann.append(ann.get('segmentation', None))
+
+ if gt_bboxes:
+ gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
+ gt_labels = np.array(gt_labels, dtype=np.int64)
+ else:
+ gt_bboxes = np.zeros((0, 4), dtype=np.float32)
+ gt_labels = np.array([], dtype=np.int64)
+
+ if gt_bboxes_ignore:
+ gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)
+ else:
+ gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)
+
+ seg_map = img_info['filename'].replace('jpg', 'png')
+
+ ann = dict(
+ bboxes=gt_bboxes,
+ labels=gt_labels,
+ bboxes_ignore=gt_bboxes_ignore,
+ masks=gt_masks_ann,
+ seg_map=seg_map)
+
+ return ann
+
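+    # Hedged worked example of the clipping logic in _parse_ann_info
+    # (numbers are illustrative): for an image of width 100, an annotation
+    # with bbox = [95, 10, 20, 30] in xywh gives
+    # inter_w = min(95 + 20, 100) - max(95, 0) = 5 > 0, so the box survives
+    # the overlap check, while a box lying entirely outside the image yields
+    # inter_w * inter_h == 0 and is skipped.
+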
+ def xyxy2xywh(self, bbox):
+ """Convert ``xyxy`` style bounding boxes to ``xywh`` style for COCO
+ evaluation.
+
+ Args:
+ bbox (numpy.ndarray): The bounding boxes, shape (4, ), in
+ ``xyxy`` order.
+
+ Returns:
+ list[float]: The converted bounding boxes, in ``xywh`` order.
+ """
+
+ _bbox = bbox.tolist()
+ return [
+ _bbox[0],
+ _bbox[1],
+ _bbox[2] - _bbox[0],
+ _bbox[3] - _bbox[1],
+ ]
+
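+    # Illustrative sketch (not from the original code): xyxy2xywh maps
+    # np.array([10., 20., 50., 80.]) to [10.0, 20.0, 40.0, 60.0], i.e.
+    # top-left corner plus width/height, the layout COCO's loadRes and
+    # COCOeval expect.
+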
+ def _proposal2json(self, results):
+ """Convert proposal results to COCO json style."""
+ json_results = []
+ for idx in range(len(self)):
+ img_id = self.img_ids[idx]
+ bboxes = results[idx]
+ for i in range(bboxes.shape[0]):
+ data = dict()
+ data['image_id'] = img_id
+ data['bbox'] = self.xyxy2xywh(bboxes[i])
+ data['score'] = float(bboxes[i][4])
+ data['category_id'] = 1
+ json_results.append(data)
+ return json_results
+
+ def _det2json(self, results):
+ """Convert detection results to COCO json style."""
+ json_results = []
+ for idx in range(len(self)):
+ img_id = self.img_ids[idx]
+ result = results[idx]
+ for label in range(len(result)):
+ bboxes = result[label]
+ for i in range(bboxes.shape[0]):
+ data = dict()
+ data['image_id'] = img_id
+ data['bbox'] = self.xyxy2xywh(bboxes[i])
+ data['score'] = float(bboxes[i][4])
+ data['category_id'] = self.cat_ids[label]
+ json_results.append(data)
+ return json_results
+
+ def _segm2json(self, results):
+ """Convert instance segmentation results to COCO json style."""
+ bbox_json_results = []
+ segm_json_results = []
+ for idx in range(len(self)):
+ img_id = self.img_ids[idx]
+ det, seg = results[idx]
+ for label in range(len(det)):
+ # bbox results
+ bboxes = det[label]
+ for i in range(bboxes.shape[0]):
+ data = dict()
+ data['image_id'] = img_id
+ data['bbox'] = self.xyxy2xywh(bboxes[i])
+ data['score'] = float(bboxes[i][4])
+ data['category_id'] = self.cat_ids[label]
+ bbox_json_results.append(data)
+
+ # segm results
+ # some detectors use different scores for bbox and mask
+ if isinstance(seg, tuple):
+ segms = seg[0][label]
+ mask_score = seg[1][label]
+ else:
+ segms = seg[label]
+ mask_score = [bbox[4] for bbox in bboxes]
+ for i in range(bboxes.shape[0]):
+ data = dict()
+ data['image_id'] = img_id
+ data['bbox'] = self.xyxy2xywh(bboxes[i])
+ data['score'] = float(mask_score[i])
+ data['category_id'] = self.cat_ids[label]
+ if isinstance(segms[i]['counts'], bytes):
+ segms[i]['counts'] = segms[i]['counts'].decode()
+ data['segmentation'] = segms[i]
+ segm_json_results.append(data)
+ return bbox_json_results, segm_json_results
+
+ def results2json(self, results, outfile_prefix):
+ """Dump the detection results to a COCO style json file.
+
+ There are 3 types of results: proposals, bbox predictions, mask
+ predictions, and they have different data types. This method will
+ automatically recognize the type, and dump them to json files.
+
+ Args:
+ results (list[list | tuple | ndarray]): Testing results of the
+ dataset.
+ outfile_prefix (str): The filename prefix of the json files. If the
+ prefix is "somepath/xxx", the json files will be named
+ "somepath/xxx.bbox.json", "somepath/xxx.segm.json",
+ "somepath/xxx.proposal.json".
+
+ Returns:
+ dict[str: str]: Possible keys are "bbox", "segm", "proposal", and \
+ values are corresponding filenames.
+ """
+ result_files = dict()
+ if isinstance(results[0], list):
+ json_results = self._det2json(results)
+ result_files['bbox'] = f'{outfile_prefix}.bbox.json'
+ result_files['proposal'] = f'{outfile_prefix}.bbox.json'
+ mmcv.dump(json_results, result_files['bbox'])
+ elif isinstance(results[0], tuple):
+ json_results = self._segm2json(results)
+ result_files['bbox'] = f'{outfile_prefix}.bbox.json'
+ result_files['proposal'] = f'{outfile_prefix}.bbox.json'
+ result_files['segm'] = f'{outfile_prefix}.segm.json'
+ mmcv.dump(json_results[0], result_files['bbox'])
+ mmcv.dump(json_results[1], result_files['segm'])
+ elif isinstance(results[0], np.ndarray):
+ json_results = self._proposal2json(results)
+ result_files['proposal'] = f'{outfile_prefix}.proposal.json'
+ mmcv.dump(json_results, result_files['proposal'])
+ else:
+ raise TypeError('invalid type of results')
+ return result_files
+
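+    # Hedged usage sketch (both `dataset` and `results` are hypothetical):
+    #
+    #   files = dataset.results2json(results, outfile_prefix='work_dirs/val')
+    #   # -> {'bbox': 'work_dirs/val.bbox.json',
+    #   #     'proposal': 'work_dirs/val.bbox.json'}
+    #
+    # since list-typed results are treated as per-class detections above.
+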
+ def fast_eval_recall(self, results, proposal_nums, iou_thrs, logger=None):
+ gt_bboxes = []
+ for i in range(len(self.img_ids)):
+ ann_ids = self.coco.get_ann_ids(img_ids=self.img_ids[i])
+ ann_info = self.coco.load_anns(ann_ids)
+ if len(ann_info) == 0:
+ gt_bboxes.append(np.zeros((0, 4)))
+ continue
+ bboxes = []
+ for ann in ann_info:
+ if ann.get('ignore', False) or ann['iscrowd']:
+ continue
+ x1, y1, w, h = ann['bbox']
+ bboxes.append([x1, y1, x1 + w, y1 + h])
+ bboxes = np.array(bboxes, dtype=np.float32)
+ if bboxes.shape[0] == 0:
+ bboxes = np.zeros((0, 4))
+ gt_bboxes.append(bboxes)
+
+ recalls = eval_recalls(
+ gt_bboxes, results, proposal_nums, iou_thrs, logger=logger)
+ ar = recalls.mean(axis=1)
+ return ar
+
+ def format_results(self, results, jsonfile_prefix=None, **kwargs):
+ """Format the results to json (standard format for COCO evaluation).
+
+ Args:
+ results (list[tuple | numpy.ndarray]): Testing results of the
+ dataset.
+ jsonfile_prefix (str | None): The prefix of json files. It includes
+ the file path and the prefix of filename, e.g., "a/b/prefix".
+ If not specified, a temp file will be created. Default: None.
+
+ Returns:
+ tuple: (result_files, tmp_dir), result_files is a dict containing \
+                the json filepaths, tmp_dir is the temporary directory created \
+ for saving json files when jsonfile_prefix is not specified.
+ """
+ assert isinstance(results, list), 'results must be a list'
+ assert len(results) == len(self), (
+ 'The length of results is not equal to the dataset len: {} != {}'.
+ format(len(results), len(self)))
+
+ if jsonfile_prefix is None:
+ tmp_dir = tempfile.TemporaryDirectory()
+ jsonfile_prefix = osp.join(tmp_dir.name, 'results')
+ else:
+ tmp_dir = None
+ result_files = self.results2json(results, jsonfile_prefix)
+ return result_files, tmp_dir
+
+ def evaluate(self,
+ results,
+ metric='bbox',
+ logger=None,
+ jsonfile_prefix=None,
+ classwise=False,
+ proposal_nums=(100, 300, 1000),
+ iou_thrs=None,
+ metric_items=None):
+ """Evaluation in COCO protocol.
+
+ Args:
+ results (list[list | tuple]): Testing results of the dataset.
+ metric (str | list[str]): Metrics to be evaluated. Options are
+ 'bbox', 'segm', 'proposal', 'proposal_fast'.
+ logger (logging.Logger | str | None): Logger used for printing
+ related information during evaluation. Default: None.
+ jsonfile_prefix (str | None): The prefix of json files. It includes
+ the file path and the prefix of filename, e.g., "a/b/prefix".
+ If not specified, a temp file will be created. Default: None.
+            classwise (bool): Whether to evaluate the AP for each class.
+ proposal_nums (Sequence[int]): Proposal number used for evaluating
+ recalls, such as recall@100, recall@1000.
+ Default: (100, 300, 1000).
+ iou_thrs (Sequence[float], optional): IoU threshold used for
+ evaluating recalls/mAPs. If set to a list, the average of all
+ IoUs will also be computed. If not specified, [0.50, 0.55,
+ 0.60, 0.65, 0.70, 0.75, 0.80, 0.85, 0.90, 0.95] will be used.
+ Default: None.
+ metric_items (list[str] | str, optional): Metric items that will
+ be returned. If not specified, ``['AR@100', 'AR@300',
+ 'AR@1000', 'AR_s@1000', 'AR_m@1000', 'AR_l@1000' ]`` will be
+ used when ``metric=='proposal'``, ``['mAP', 'mAP_50', 'mAP_75',
+ 'mAP_s', 'mAP_m', 'mAP_l']`` will be used when
+ ``metric=='bbox' or metric=='segm'``.
+
+ Returns:
+ dict[str, float]: COCO style evaluation metric.
+ """
+
+ metrics = metric if isinstance(metric, list) else [metric]
+ allowed_metrics = ['bbox', 'segm', 'proposal', 'proposal_fast']
+ for metric in metrics:
+ if metric not in allowed_metrics:
+ raise KeyError(f'metric {metric} is not supported')
+ if iou_thrs is None:
+ iou_thrs = np.linspace(
+ .5, 0.95, int(np.round((0.95 - .5) / .05)) + 1, endpoint=True)
+ if metric_items is not None:
+ if not isinstance(metric_items, list):
+ metric_items = [metric_items]
+
+ result_files, tmp_dir = self.format_results(results, jsonfile_prefix)
+
+ eval_results = OrderedDict()
+ cocoGt = self.coco
+ for metric in metrics:
+ msg = f'Evaluating {metric}...'
+ if logger is None:
+ msg = '\n' + msg
+ print_log(msg, logger=logger)
+
+ if metric == 'proposal_fast':
+ ar = self.fast_eval_recall(
+ results, proposal_nums, iou_thrs, logger='silent')
+ log_msg = []
+ for i, num in enumerate(proposal_nums):
+ eval_results[f'AR@{num}'] = ar[i]
+ log_msg.append(f'\nAR@{num}\t{ar[i]:.4f}')
+ log_msg = ''.join(log_msg)
+ print_log(log_msg, logger=logger)
+ continue
+
+ if metric not in result_files:
+ raise KeyError(f'{metric} is not in results')
+ try:
+ cocoDt = cocoGt.loadRes(result_files[metric])
+ except IndexError:
+ print_log(
+ 'The testing results of the whole dataset is empty.',
+ logger=logger,
+ level=logging.ERROR)
+ break
+
+ iou_type = 'bbox' if metric == 'proposal' else metric
+ cocoEval = COCOeval(cocoGt, cocoDt, iou_type)
+ cocoEval.params.catIds = self.cat_ids
+ cocoEval.params.imgIds = self.img_ids
+ cocoEval.params.maxDets = list(proposal_nums)
+ cocoEval.params.iouThrs = iou_thrs
+ # mapping of cocoEval.stats
+ coco_metric_names = {
+ 'mAP': 0,
+ 'mAP_50': 1,
+ 'mAP_75': 2,
+ 'mAP_s': 3,
+ 'mAP_m': 4,
+ 'mAP_l': 5,
+ 'AR@100': 6,
+ 'AR@300': 7,
+ 'AR@1000': 8,
+ 'AR_s@1000': 9,
+ 'AR_m@1000': 10,
+ 'AR_l@1000': 11
+ }
+ if metric_items is not None:
+ for metric_item in metric_items:
+ if metric_item not in coco_metric_names:
+ raise KeyError(
+ f'metric item {metric_item} is not supported')
+
+ if metric == 'proposal':
+ cocoEval.params.useCats = 0
+ cocoEval.evaluate()
+ cocoEval.accumulate()
+ cocoEval.summarize()
+ if metric_items is None:
+ metric_items = [
+ 'AR@100', 'AR@300', 'AR@1000', 'AR_s@1000',
+ 'AR_m@1000', 'AR_l@1000'
+ ]
+
+ for item in metric_items:
+ val = float(
+ f'{cocoEval.stats[coco_metric_names[item]]:.3f}')
+ eval_results[item] = val
+ else:
+ cocoEval.evaluate()
+ cocoEval.accumulate()
+ cocoEval.summarize()
+ if classwise: # Compute per-category AP
+ # Compute per-category AP
+ # from https://github.com/facebookresearch/detectron2/
+ precisions = cocoEval.eval['precision']
+ # precision: (iou, recall, cls, area range, max dets)
+ assert len(self.cat_ids) == precisions.shape[2]
+
+ results_per_category = []
+ for idx, catId in enumerate(self.cat_ids):
+ # area range index 0: all area ranges
+ # max dets index -1: typically 100 per image
+ nm = self.coco.loadCats(catId)[0]
+ precision = precisions[:, :, idx, 0, -1]
+ precision = precision[precision > -1]
+ if precision.size:
+ ap = np.mean(precision)
+ else:
+ ap = float('nan')
+ results_per_category.append(
+ (f'{nm["name"]}', f'{float(ap):0.3f}'))
+
+ num_columns = min(6, len(results_per_category) * 2)
+ results_flatten = list(
+ itertools.chain(*results_per_category))
+ headers = ['category', 'AP'] * (num_columns // 2)
+ results_2d = itertools.zip_longest(*[
+ results_flatten[i::num_columns]
+ for i in range(num_columns)
+ ])
+ table_data = [headers]
+ table_data += [result for result in results_2d]
+ table = AsciiTable(table_data)
+ print_log('\n' + table.table, logger=logger)
+
+ if metric_items is None:
+ metric_items = [
+ 'mAP', 'mAP_50', 'mAP_75', 'mAP_s', 'mAP_m', 'mAP_l'
+ ]
+
+ for metric_item in metric_items:
+ key = f'{metric}_{metric_item}'
+ val = float(
+ f'{cocoEval.stats[coco_metric_names[metric_item]]:.3f}'
+ )
+ eval_results[key] = val
+ ap = cocoEval.stats[:6]
+ eval_results[f'{metric}_mAP_copypaste'] = (
+ f'{ap[0]:.3f} {ap[1]:.3f} {ap[2]:.3f} {ap[3]:.3f} '
+ f'{ap[4]:.3f} {ap[5]:.3f}')
+ if tmp_dir is not None:
+ tmp_dir.cleanup()
+ return eval_results
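+
+
+# Hedged usage sketch, appended for illustration only; the annotation path,
+# pipeline and `results` list below are hypothetical:
+#
+#   dataset = CocoDataset(ann_file='annotations/instances_val2017.json',
+#                         pipeline=[], test_mode=True)
+#   metrics = dataset.evaluate(results, metric=['bbox', 'segm'],
+#                              classwise=True)
+#
+# `metrics` then holds keys such as 'bbox_mAP', 'bbox_mAP_50', 'segm_mAP'
+# and a 'bbox_mAP_copypaste' summary string, as built in evaluate() above.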
diff --git a/annotator/uniformer/mmdet_null/datasets/custom.py b/annotator/uniformer/mmdet_null/datasets/custom.py
new file mode 100644
index 0000000000000000000000000000000000000000..1a2351c217f43d32178053dfc682a2b241f9a3f1
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/datasets/custom.py
@@ -0,0 +1,323 @@
+import os.path as osp
+import warnings
+from collections import OrderedDict
+
+import mmcv
+import numpy as np
+from mmcv.utils import print_log
+from torch.utils.data import Dataset
+
+from mmdet.core import eval_map, eval_recalls
+from .builder import DATASETS
+from .pipelines import Compose
+
+
+@DATASETS.register_module()
+class CustomDataset(Dataset):
+ """Custom dataset for detection.
+
+ The annotation format is shown as follows. The `ann` field is optional for
+ testing.
+
+ .. code-block:: none
+
+ [
+ {
+ 'filename': 'a.jpg',
+ 'width': 1280,
+ 'height': 720,
+ 'ann': {
+ 'bboxes': (n, 4) in (x1, y1, x2, y2) order.
+ 'labels': (n, ),
+ 'bboxes_ignore': (k, 4), (optional field)
+                'labels_ignore': (k, ) (optional field)
+ }
+ },
+ ...
+ ]
+
+ Args:
+ ann_file (str): Annotation file path.
+ pipeline (list[dict]): Processing pipeline.
+ classes (str | Sequence[str], optional): Specify classes to load.
+ If is None, ``cls.CLASSES`` will be used. Default: None.
+ data_root (str, optional): Data root for ``ann_file``,
+ ``img_prefix``, ``seg_prefix``, ``proposal_file`` if specified.
+ test_mode (bool, optional): If set True, annotation will not be loaded.
+ filter_empty_gt (bool, optional): If set true, images without bounding
+ boxes of the dataset's classes will be filtered out. This option
+ only works when `test_mode=False`, i.e., we never filter images
+ during tests.
+ """
+
+ CLASSES = None
+
+ def __init__(self,
+ ann_file,
+ pipeline,
+ classes=None,
+ data_root=None,
+ img_prefix='',
+ seg_prefix=None,
+ proposal_file=None,
+ test_mode=False,
+ filter_empty_gt=True):
+ self.ann_file = ann_file
+ self.data_root = data_root
+ self.img_prefix = img_prefix
+ self.seg_prefix = seg_prefix
+ self.proposal_file = proposal_file
+ self.test_mode = test_mode
+ self.filter_empty_gt = filter_empty_gt
+ self.CLASSES = self.get_classes(classes)
+
+ # join paths if data_root is specified
+ if self.data_root is not None:
+ if not osp.isabs(self.ann_file):
+ self.ann_file = osp.join(self.data_root, self.ann_file)
+ if not (self.img_prefix is None or osp.isabs(self.img_prefix)):
+ self.img_prefix = osp.join(self.data_root, self.img_prefix)
+ if not (self.seg_prefix is None or osp.isabs(self.seg_prefix)):
+ self.seg_prefix = osp.join(self.data_root, self.seg_prefix)
+ if not (self.proposal_file is None
+ or osp.isabs(self.proposal_file)):
+ self.proposal_file = osp.join(self.data_root,
+ self.proposal_file)
+ # load annotations (and proposals)
+ self.data_infos = self.load_annotations(self.ann_file)
+
+ if self.proposal_file is not None:
+ self.proposals = self.load_proposals(self.proposal_file)
+ else:
+ self.proposals = None
+
+ # filter images too small and containing no annotations
+ if not test_mode:
+ valid_inds = self._filter_imgs()
+ self.data_infos = [self.data_infos[i] for i in valid_inds]
+ if self.proposals is not None:
+ self.proposals = [self.proposals[i] for i in valid_inds]
+ # set group flag for the sampler
+ self._set_group_flag()
+
+ # processing pipeline
+ self.pipeline = Compose(pipeline)
+
+ def __len__(self):
+ """Total number of samples of data."""
+ return len(self.data_infos)
+
+ def load_annotations(self, ann_file):
+ """Load annotation from annotation file."""
+ return mmcv.load(ann_file)
+
+ def load_proposals(self, proposal_file):
+ """Load proposal from proposal file."""
+ return mmcv.load(proposal_file)
+
+ def get_ann_info(self, idx):
+ """Get annotation by index.
+
+ Args:
+ idx (int): Index of data.
+
+ Returns:
+ dict: Annotation info of specified index.
+ """
+
+ return self.data_infos[idx]['ann']
+
+ def get_cat_ids(self, idx):
+ """Get category ids by index.
+
+ Args:
+ idx (int): Index of data.
+
+ Returns:
+ list[int]: All categories in the image of specified index.
+ """
+
+        return self.data_infos[idx]['ann']['labels'].astype(np.int64).tolist()
+
+ def pre_pipeline(self, results):
+ """Prepare results dict for pipeline."""
+ results['img_prefix'] = self.img_prefix
+ results['seg_prefix'] = self.seg_prefix
+ results['proposal_file'] = self.proposal_file
+ results['bbox_fields'] = []
+ results['mask_fields'] = []
+ results['seg_fields'] = []
+
+ def _filter_imgs(self, min_size=32):
+ """Filter images too small."""
+ if self.filter_empty_gt:
+ warnings.warn(
+ 'CustomDataset does not support filtering empty gt images.')
+ valid_inds = []
+ for i, img_info in enumerate(self.data_infos):
+ if min(img_info['width'], img_info['height']) >= min_size:
+ valid_inds.append(i)
+ return valid_inds
+
+ def _set_group_flag(self):
+ """Set flag according to image aspect ratio.
+
+ Images with aspect ratio greater than 1 will be set as group 1,
+ otherwise group 0.
+ """
+ self.flag = np.zeros(len(self), dtype=np.uint8)
+ for i in range(len(self)):
+ img_info = self.data_infos[i]
+ if img_info['width'] / img_info['height'] > 1:
+ self.flag[i] = 1
+
+ def _rand_another(self, idx):
+ """Get another random index from the same group as the given index."""
+ pool = np.where(self.flag == self.flag[idx])[0]
+ return np.random.choice(pool)
+
+ def __getitem__(self, idx):
+ """Get training/test data after pipeline.
+
+ Args:
+ idx (int): Index of data.
+
+ Returns:
+            dict: Training/test data (with annotation if `test_mode` is set \
+                False).
+ """
+
+ if self.test_mode:
+ return self.prepare_test_img(idx)
+ while True:
+ data = self.prepare_train_img(idx)
+ if data is None:
+ idx = self._rand_another(idx)
+ continue
+ return data
+
+ def prepare_train_img(self, idx):
+ """Get training data and annotations after pipeline.
+
+ Args:
+ idx (int): Index of data.
+
+ Returns:
+ dict: Training data and annotation after pipeline with new keys \
+ introduced by pipeline.
+ """
+
+ img_info = self.data_infos[idx]
+ ann_info = self.get_ann_info(idx)
+ results = dict(img_info=img_info, ann_info=ann_info)
+ if self.proposals is not None:
+ results['proposals'] = self.proposals[idx]
+ self.pre_pipeline(results)
+ return self.pipeline(results)
+
+ def prepare_test_img(self, idx):
+ """Get testing data after pipeline.
+
+ Args:
+ idx (int): Index of data.
+
+ Returns:
+ dict: Testing data after pipeline with new keys introduced by \
+ pipeline.
+ """
+
+ img_info = self.data_infos[idx]
+ results = dict(img_info=img_info)
+ if self.proposals is not None:
+ results['proposals'] = self.proposals[idx]
+ self.pre_pipeline(results)
+ return self.pipeline(results)
+
+ @classmethod
+ def get_classes(cls, classes=None):
+ """Get class names of current dataset.
+
+ Args:
+ classes (Sequence[str] | str | None): If classes is None, use
+ default CLASSES defined by builtin dataset. If classes is a
+ string, take it as a file name. The file contains the name of
+ classes where each line contains one class name. If classes is
+ a tuple or list, override the CLASSES defined by the dataset.
+
+ Returns:
+ tuple[str] or list[str]: Names of categories of the dataset.
+ """
+ if classes is None:
+ return cls.CLASSES
+
+ if isinstance(classes, str):
+ # take it as a file path
+ class_names = mmcv.list_from_file(classes)
+ elif isinstance(classes, (tuple, list)):
+ class_names = classes
+ else:
+ raise ValueError(f'Unsupported type {type(classes)} of classes.')
+
+ return class_names
+
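+    # Hedged examples of the accepted `classes` argument (names are
+    # illustrative only):
+    #
+    #   CustomDataset.get_classes(('cat', 'dog'))  # -> ('cat', 'dog')
+    #   CustomDataset.get_classes('classes.txt')   # one class name per line
+    #   CustomDataset.get_classes(None)            # falls back to cls.CLASSES
+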
+ def format_results(self, results, **kwargs):
+ """Place holder to format result to dataset specific output."""
+
+ def evaluate(self,
+ results,
+ metric='mAP',
+ logger=None,
+ proposal_nums=(100, 300, 1000),
+ iou_thr=0.5,
+ scale_ranges=None):
+ """Evaluate the dataset.
+
+ Args:
+ results (list): Testing results of the dataset.
+ metric (str | list[str]): Metrics to be evaluated.
+ logger (logging.Logger | None | str): Logger used for printing
+ related information during evaluation. Default: None.
+ proposal_nums (Sequence[int]): Proposal number used for evaluating
+ recalls, such as recall@100, recall@1000.
+ Default: (100, 300, 1000).
+ iou_thr (float | list[float]): IoU threshold. Default: 0.5.
+ scale_ranges (list[tuple] | None): Scale ranges for evaluating mAP.
+ Default: None.
+ """
+
+ if not isinstance(metric, str):
+ assert len(metric) == 1
+ metric = metric[0]
+ allowed_metrics = ['mAP', 'recall']
+ if metric not in allowed_metrics:
+ raise KeyError(f'metric {metric} is not supported')
+ annotations = [self.get_ann_info(i) for i in range(len(self))]
+ eval_results = OrderedDict()
+ iou_thrs = [iou_thr] if isinstance(iou_thr, float) else iou_thr
+ if metric == 'mAP':
+ assert isinstance(iou_thrs, list)
+ mean_aps = []
+ for iou_thr in iou_thrs:
+ print_log(f'\n{"-" * 15}iou_thr: {iou_thr}{"-" * 15}')
+ mean_ap, _ = eval_map(
+ results,
+ annotations,
+ scale_ranges=scale_ranges,
+ iou_thr=iou_thr,
+ dataset=self.CLASSES,
+ logger=logger)
+ mean_aps.append(mean_ap)
+ eval_results[f'AP{int(iou_thr * 100):02d}'] = round(mean_ap, 3)
+ eval_results['mAP'] = sum(mean_aps) / len(mean_aps)
+ elif metric == 'recall':
+ gt_bboxes = [ann['bboxes'] for ann in annotations]
+ recalls = eval_recalls(
+ gt_bboxes, results, proposal_nums, iou_thr, logger=logger)
+ for i, num in enumerate(proposal_nums):
+ for j, iou in enumerate(iou_thrs):
+ eval_results[f'recall@{num}@{iou}'] = recalls[i, j]
+ if recalls.shape[1] > 1:
+ ar = recalls.mean(axis=1)
+ for i, num in enumerate(proposal_nums):
+ eval_results[f'AR@{num}'] = ar[i]
+ return eval_results
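+
+
+# Minimal sketch of the annotation layout the default `load_annotations`
+# (an mmcv.load of a serialized list) is expected to return; all values are
+# illustrative, not taken from this repo:
+#
+#   data_infos = [dict(
+#       filename='a.jpg', width=1280, height=720,
+#       ann=dict(bboxes=np.array([[10., 20., 50., 80.]], dtype=np.float32),
+#                labels=np.array([0], dtype=np.int64)))]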
diff --git a/annotator/uniformer/mmdet_null/datasets/dataset_wrappers.py b/annotator/uniformer/mmdet_null/datasets/dataset_wrappers.py
new file mode 100644
index 0000000000000000000000000000000000000000..55ad5cb60e581a96bdbd1fbbeebc2f46f8c4e899
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/datasets/dataset_wrappers.py
@@ -0,0 +1,282 @@
+import bisect
+import math
+from collections import defaultdict
+
+import numpy as np
+from mmcv.utils import print_log
+from torch.utils.data.dataset import ConcatDataset as _ConcatDataset
+
+from .builder import DATASETS
+from .coco import CocoDataset
+
+
+@DATASETS.register_module()
+class ConcatDataset(_ConcatDataset):
+ """A wrapper of concatenated dataset.
+
+ Same as :obj:`torch.utils.data.dataset.ConcatDataset`, but
+ concat the group flag for image aspect ratio.
+
+ Args:
+ datasets (list[:obj:`Dataset`]): A list of datasets.
+ separate_eval (bool): Whether to evaluate the results
+ separately if it is used as validation dataset.
+ Defaults to True.
+ """
+
+ def __init__(self, datasets, separate_eval=True):
+ super(ConcatDataset, self).__init__(datasets)
+ self.CLASSES = datasets[0].CLASSES
+ self.separate_eval = separate_eval
+ if not separate_eval:
+ if any([isinstance(ds, CocoDataset) for ds in datasets]):
+ raise NotImplementedError(
+ 'Evaluating concatenated CocoDataset as a whole is not'
+ ' supported! Please set "separate_eval=True"')
+ elif len(set([type(ds) for ds in datasets])) != 1:
+ raise NotImplementedError(
+ 'All the datasets should have same types')
+
+ if hasattr(datasets[0], 'flag'):
+ flags = []
+ for i in range(0, len(datasets)):
+ flags.append(datasets[i].flag)
+ self.flag = np.concatenate(flags)
+
+ def get_cat_ids(self, idx):
+ """Get category ids of concatenated dataset by index.
+
+ Args:
+ idx (int): Index of data.
+
+ Returns:
+ list[int]: All categories in the image of specified index.
+ """
+
+ if idx < 0:
+ if -idx > len(self):
+ raise ValueError(
+ 'absolute value of index should not exceed dataset length')
+ idx = len(self) + idx
+ dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx)
+ if dataset_idx == 0:
+ sample_idx = idx
+ else:
+ sample_idx = idx - self.cumulative_sizes[dataset_idx - 1]
+ return self.datasets[dataset_idx].get_cat_ids(sample_idx)
+
+ def evaluate(self, results, logger=None, **kwargs):
+ """Evaluate the results.
+
+ Args:
+ results (list[list | tuple]): Testing results of the dataset.
+ logger (logging.Logger | str | None): Logger used for printing
+ related information during evaluation. Default: None.
+
+ Returns:
+ dict[str: float]: AP results of the total dataset or each separate
+ dataset if `self.separate_eval=True`.
+ """
+ assert len(results) == self.cumulative_sizes[-1], \
+ ('Dataset and results have different sizes: '
+ f'{self.cumulative_sizes[-1]} v.s. {len(results)}')
+
+ # Check whether all the datasets support evaluation
+ for dataset in self.datasets:
+ assert hasattr(dataset, 'evaluate'), \
+ f'{type(dataset)} does not implement evaluate function'
+
+ if self.separate_eval:
+ dataset_idx = -1
+ total_eval_results = dict()
+ for size, dataset in zip(self.cumulative_sizes, self.datasets):
+ start_idx = 0 if dataset_idx == -1 else \
+ self.cumulative_sizes[dataset_idx]
+ end_idx = self.cumulative_sizes[dataset_idx + 1]
+
+ results_per_dataset = results[start_idx:end_idx]
+ print_log(
+                    f'\nEvaluating {dataset.ann_file} with '
+ f'{len(results_per_dataset)} images now',
+ logger=logger)
+
+ eval_results_per_dataset = dataset.evaluate(
+ results_per_dataset, logger=logger, **kwargs)
+ dataset_idx += 1
+ for k, v in eval_results_per_dataset.items():
+ total_eval_results.update({f'{dataset_idx}_{k}': v})
+
+ return total_eval_results
+ elif any([isinstance(ds, CocoDataset) for ds in self.datasets]):
+ raise NotImplementedError(
+ 'Evaluating concatenated CocoDataset as a whole is not'
+ ' supported! Please set "separate_eval=True"')
+ elif len(set([type(ds) for ds in self.datasets])) != 1:
+ raise NotImplementedError(
+ 'All the datasets should have same types')
+ else:
+ original_data_infos = self.datasets[0].data_infos
+ self.datasets[0].data_infos = sum(
+ [dataset.data_infos for dataset in self.datasets], [])
+ eval_results = self.datasets[0].evaluate(
+ results, logger=logger, **kwargs)
+ self.datasets[0].data_infos = original_data_infos
+ return eval_results
+
+
+@DATASETS.register_module()
+class RepeatDataset(object):
+ """A wrapper of repeated dataset.
+
+ The length of repeated dataset will be `times` larger than the original
+ dataset. This is useful when the data loading time is long but the dataset
+ is small. Using RepeatDataset can reduce the data loading time between
+ epochs.
+
+ Args:
+ dataset (:obj:`Dataset`): The dataset to be repeated.
+ times (int): Repeat times.
+ """
+
+ def __init__(self, dataset, times):
+ self.dataset = dataset
+ self.times = times
+ self.CLASSES = dataset.CLASSES
+ if hasattr(self.dataset, 'flag'):
+ self.flag = np.tile(self.dataset.flag, times)
+
+ self._ori_len = len(self.dataset)
+
+ def __getitem__(self, idx):
+ return self.dataset[idx % self._ori_len]
+
+ def get_cat_ids(self, idx):
+ """Get category ids of repeat dataset by index.
+
+ Args:
+ idx (int): Index of data.
+
+ Returns:
+ list[int]: All categories in the image of specified index.
+ """
+
+ return self.dataset.get_cat_ids(idx % self._ori_len)
+
+ def __len__(self):
+ """Length after repetition."""
+ return self.times * self._ori_len
+
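+# Hedged config-style sketch of wrapping a dataset with RepeatDataset (the
+# inner dataset dict is hypothetical):
+#
+#   train = dict(type='RepeatDataset', times=3,
+#                dataset=dict(type='CocoDataset', ...))
+#
+# so one epoch then iterates the underlying dataset three times.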
+
+# Modified from https://github.com/facebookresearch/detectron2/blob/41d475b75a230221e21d9cac5d69655e3415e3a4/detectron2/data/samplers/distributed_sampler.py#L57 # noqa
+@DATASETS.register_module()
+class ClassBalancedDataset(object):
+ """A wrapper of repeated dataset with repeat factor.
+
+ Suitable for training on class imbalanced datasets like LVIS. Following
+    the sampling strategy in the `paper <https://arxiv.org/abs/1908.03195>`_,
+ in each epoch, an image may appear multiple times based on its
+ "repeat factor".
+ The repeat factor for an image is a function of the frequency the rarest
+ category labeled in that image. The "frequency of category c" in [0, 1]
+ is defined by the fraction of images in the training set (without repeats)
+ in which category c appears.
+ The dataset needs to instantiate :func:`self.get_cat_ids` to support
+ ClassBalancedDataset.
+
+ The repeat factor is computed as followed.
+
+ 1. For each category c, compute the fraction # of images
+ that contain it: :math:`f(c)`
+ 2. For each category c, compute the category-level repeat factor:
+ :math:`r(c) = max(1, sqrt(t/f(c)))`
+ 3. For each image I, compute the image-level repeat factor:
+ :math:`r(I) = max_{c in I} r(c)`
+
+ Args:
+ dataset (:obj:`CustomDataset`): The dataset to be repeated.
+ oversample_thr (float): frequency threshold below which data is
+ repeated. For categories with ``f_c >= oversample_thr``, there is
+ no oversampling. For categories with ``f_c < oversample_thr``, the
+            degree of oversampling follows the square-root inverse frequency
+ heuristic above.
+ filter_empty_gt (bool, optional): If set true, images without bounding
+ boxes will not be oversampled. Otherwise, they will be categorized
+ as the pure background class and involved into the oversampling.
+ Default: True.
+ """
+
+ def __init__(self, dataset, oversample_thr, filter_empty_gt=True):
+ self.dataset = dataset
+ self.oversample_thr = oversample_thr
+ self.filter_empty_gt = filter_empty_gt
+ self.CLASSES = dataset.CLASSES
+
+ repeat_factors = self._get_repeat_factors(dataset, oversample_thr)
+ repeat_indices = []
+ for dataset_idx, repeat_factor in enumerate(repeat_factors):
+ repeat_indices.extend([dataset_idx] * math.ceil(repeat_factor))
+ self.repeat_indices = repeat_indices
+
+ flags = []
+ if hasattr(self.dataset, 'flag'):
+ for flag, repeat_factor in zip(self.dataset.flag, repeat_factors):
+ flags.extend([flag] * int(math.ceil(repeat_factor)))
+ assert len(flags) == len(repeat_indices)
+ self.flag = np.asarray(flags, dtype=np.uint8)
+
+ def _get_repeat_factors(self, dataset, repeat_thr):
+ """Get repeat factor for each images in the dataset.
+
+ Args:
+ dataset (:obj:`CustomDataset`): The dataset
+ repeat_thr (float): The threshold of frequency. If an image
+ contains the categories whose frequency below the threshold,
+ it would be repeated.
+
+ Returns:
+            list[float]: The repeat factors for each image in the dataset.
+ """
+
+ # 1. For each category c, compute the fraction # of images
+ # that contain it: f(c)
+ category_freq = defaultdict(int)
+ num_images = len(dataset)
+ for idx in range(num_images):
+ cat_ids = set(self.dataset.get_cat_ids(idx))
+ if len(cat_ids) == 0 and not self.filter_empty_gt:
+ cat_ids = set([len(self.CLASSES)])
+ for cat_id in cat_ids:
+ category_freq[cat_id] += 1
+ for k, v in category_freq.items():
+ category_freq[k] = v / num_images
+
+ # 2. For each category c, compute the category-level repeat factor:
+ # r(c) = max(1, sqrt(t/f(c)))
+ category_repeat = {
+ cat_id: max(1.0, math.sqrt(repeat_thr / cat_freq))
+ for cat_id, cat_freq in category_freq.items()
+ }
+
+ # 3. For each image I, compute the image-level repeat factor:
+ # r(I) = max_{c in I} r(c)
+ repeat_factors = []
+ for idx in range(num_images):
+ cat_ids = set(self.dataset.get_cat_ids(idx))
+ if len(cat_ids) == 0 and not self.filter_empty_gt:
+ cat_ids = set([len(self.CLASSES)])
+ repeat_factor = 1
+ if len(cat_ids) > 0:
+ repeat_factor = max(
+ {category_repeat[cat_id]
+ for cat_id in cat_ids})
+ repeat_factors.append(repeat_factor)
+
+ return repeat_factors
+
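+    # Worked example (illustrative numbers): with oversample_thr t = 0.001, a
+    # category present in 1% of images has f(c) = 0.01 >= t, so r(c) = 1 (no
+    # oversampling); a category present in 0.01% of images gives
+    # r(c) = sqrt(0.001 / 0.0001) = sqrt(10) ~= 3.16, and every image that
+    # contains it is repeated ceil(3.16) = 4 times in __init__, unless a
+    # rarer category raises r(I) further.
+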
+ def __getitem__(self, idx):
+ ori_index = self.repeat_indices[idx]
+ return self.dataset[ori_index]
+
+ def __len__(self):
+ """Length after repetition."""
+ return len(self.repeat_indices)
diff --git a/annotator/uniformer/mmdet_null/datasets/deepfashion.py b/annotator/uniformer/mmdet_null/datasets/deepfashion.py
new file mode 100644
index 0000000000000000000000000000000000000000..1125376091f2d4ee6843ae4f2156b3b0453be369
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/datasets/deepfashion.py
@@ -0,0 +1,10 @@
+from .builder import DATASETS
+from .coco import CocoDataset
+
+
+@DATASETS.register_module()
+class DeepFashionDataset(CocoDataset):
+
+ CLASSES = ('top', 'skirt', 'leggings', 'dress', 'outer', 'pants', 'bag',
+ 'neckwear', 'headwear', 'eyeglass', 'belt', 'footwear', 'hair',
+ 'skin', 'face')
diff --git a/annotator/uniformer/mmdet_null/datasets/lvis.py b/annotator/uniformer/mmdet_null/datasets/lvis.py
new file mode 100644
index 0000000000000000000000000000000000000000..122c64e79cf5f060d7ceddf4ad29c4debe40944b
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/datasets/lvis.py
@@ -0,0 +1,742 @@
+import itertools
+import logging
+import os.path as osp
+import tempfile
+from collections import OrderedDict
+
+import numpy as np
+from mmcv.utils import print_log
+from terminaltables import AsciiTable
+
+from .builder import DATASETS
+from .coco import CocoDataset
+
+
+@DATASETS.register_module()
+class LVISV05Dataset(CocoDataset):
+
+ CLASSES = (
+ 'acorn', 'aerosol_can', 'air_conditioner', 'airplane', 'alarm_clock',
+ 'alcohol', 'alligator', 'almond', 'ambulance', 'amplifier', 'anklet',
+ 'antenna', 'apple', 'apple_juice', 'applesauce', 'apricot', 'apron',
+ 'aquarium', 'armband', 'armchair', 'armoire', 'armor', 'artichoke',
+ 'trash_can', 'ashtray', 'asparagus', 'atomizer', 'avocado', 'award',
+ 'awning', 'ax', 'baby_buggy', 'basketball_backboard', 'backpack',
+ 'handbag', 'suitcase', 'bagel', 'bagpipe', 'baguet', 'bait', 'ball',
+ 'ballet_skirt', 'balloon', 'bamboo', 'banana', 'Band_Aid', 'bandage',
+ 'bandanna', 'banjo', 'banner', 'barbell', 'barge', 'barrel',
+ 'barrette', 'barrow', 'baseball_base', 'baseball', 'baseball_bat',
+ 'baseball_cap', 'baseball_glove', 'basket', 'basketball_hoop',
+ 'basketball', 'bass_horn', 'bat_(animal)', 'bath_mat', 'bath_towel',
+ 'bathrobe', 'bathtub', 'batter_(food)', 'battery', 'beachball', 'bead',
+ 'beaker', 'bean_curd', 'beanbag', 'beanie', 'bear', 'bed',
+ 'bedspread', 'cow', 'beef_(food)', 'beeper', 'beer_bottle', 'beer_can',
+ 'beetle', 'bell', 'bell_pepper', 'belt', 'belt_buckle', 'bench',
+ 'beret', 'bib', 'Bible', 'bicycle', 'visor', 'binder', 'binoculars',
+ 'bird', 'birdfeeder', 'birdbath', 'birdcage', 'birdhouse',
+ 'birthday_cake', 'birthday_card', 'biscuit_(bread)', 'pirate_flag',
+ 'black_sheep', 'blackboard', 'blanket', 'blazer', 'blender', 'blimp',
+ 'blinker', 'blueberry', 'boar', 'gameboard', 'boat', 'bobbin',
+ 'bobby_pin', 'boiled_egg', 'bolo_tie', 'deadbolt', 'bolt', 'bonnet',
+ 'book', 'book_bag', 'bookcase', 'booklet', 'bookmark',
+ 'boom_microphone', 'boot', 'bottle', 'bottle_opener', 'bouquet',
+ 'bow_(weapon)', 'bow_(decorative_ribbons)', 'bow-tie', 'bowl',
+ 'pipe_bowl', 'bowler_hat', 'bowling_ball', 'bowling_pin',
+ 'boxing_glove', 'suspenders', 'bracelet', 'brass_plaque', 'brassiere',
+ 'bread-bin', 'breechcloth', 'bridal_gown', 'briefcase',
+ 'bristle_brush', 'broccoli', 'broach', 'broom', 'brownie',
+ 'brussels_sprouts', 'bubble_gum', 'bucket', 'horse_buggy', 'bull',
+ 'bulldog', 'bulldozer', 'bullet_train', 'bulletin_board',
+ 'bulletproof_vest', 'bullhorn', 'corned_beef', 'bun', 'bunk_bed',
+ 'buoy', 'burrito', 'bus_(vehicle)', 'business_card', 'butcher_knife',
+ 'butter', 'butterfly', 'button', 'cab_(taxi)', 'cabana', 'cabin_car',
+ 'cabinet', 'locker', 'cake', 'calculator', 'calendar', 'calf',
+ 'camcorder', 'camel', 'camera', 'camera_lens', 'camper_(vehicle)',
+ 'can', 'can_opener', 'candelabrum', 'candle', 'candle_holder',
+ 'candy_bar', 'candy_cane', 'walking_cane', 'canister', 'cannon',
+ 'canoe', 'cantaloup', 'canteen', 'cap_(headwear)', 'bottle_cap',
+ 'cape', 'cappuccino', 'car_(automobile)', 'railcar_(part_of_a_train)',
+ 'elevator_car', 'car_battery', 'identity_card', 'card', 'cardigan',
+ 'cargo_ship', 'carnation', 'horse_carriage', 'carrot', 'tote_bag',
+ 'cart', 'carton', 'cash_register', 'casserole', 'cassette', 'cast',
+ 'cat', 'cauliflower', 'caviar', 'cayenne_(spice)', 'CD_player',
+ 'celery', 'cellular_telephone', 'chain_mail', 'chair', 'chaise_longue',
+ 'champagne', 'chandelier', 'chap', 'checkbook', 'checkerboard',
+ 'cherry', 'chessboard', 'chest_of_drawers_(furniture)',
+ 'chicken_(animal)', 'chicken_wire', 'chickpea', 'Chihuahua',
+ 'chili_(vegetable)', 'chime', 'chinaware', 'crisp_(potato_chip)',
+ 'poker_chip', 'chocolate_bar', 'chocolate_cake', 'chocolate_milk',
+ 'chocolate_mousse', 'choker', 'chopping_board', 'chopstick',
+ 'Christmas_tree', 'slide', 'cider', 'cigar_box', 'cigarette',
+ 'cigarette_case', 'cistern', 'clarinet', 'clasp', 'cleansing_agent',
+ 'clementine', 'clip', 'clipboard', 'clock', 'clock_tower',
+ 'clothes_hamper', 'clothespin', 'clutch_bag', 'coaster', 'coat',
+ 'coat_hanger', 'coatrack', 'cock', 'coconut', 'coffee_filter',
+ 'coffee_maker', 'coffee_table', 'coffeepot', 'coil', 'coin',
+ 'colander', 'coleslaw', 'coloring_material', 'combination_lock',
+ 'pacifier', 'comic_book', 'computer_keyboard', 'concrete_mixer',
+ 'cone', 'control', 'convertible_(automobile)', 'sofa_bed', 'cookie',
+ 'cookie_jar', 'cooking_utensil', 'cooler_(for_food)',
+ 'cork_(bottle_plug)', 'corkboard', 'corkscrew', 'edible_corn',
+ 'cornbread', 'cornet', 'cornice', 'cornmeal', 'corset',
+ 'romaine_lettuce', 'costume', 'cougar', 'coverall', 'cowbell',
+ 'cowboy_hat', 'crab_(animal)', 'cracker', 'crape', 'crate', 'crayon',
+ 'cream_pitcher', 'credit_card', 'crescent_roll', 'crib', 'crock_pot',
+ 'crossbar', 'crouton', 'crow', 'crown', 'crucifix', 'cruise_ship',
+ 'police_cruiser', 'crumb', 'crutch', 'cub_(animal)', 'cube',
+ 'cucumber', 'cufflink', 'cup', 'trophy_cup', 'cupcake', 'hair_curler',
+ 'curling_iron', 'curtain', 'cushion', 'custard', 'cutting_tool',
+ 'cylinder', 'cymbal', 'dachshund', 'dagger', 'dartboard',
+ 'date_(fruit)', 'deck_chair', 'deer', 'dental_floss', 'desk',
+ 'detergent', 'diaper', 'diary', 'die', 'dinghy', 'dining_table', 'tux',
+ 'dish', 'dish_antenna', 'dishrag', 'dishtowel', 'dishwasher',
+ 'dishwasher_detergent', 'diskette', 'dispenser', 'Dixie_cup', 'dog',
+ 'dog_collar', 'doll', 'dollar', 'dolphin', 'domestic_ass', 'eye_mask',
+ 'doorbell', 'doorknob', 'doormat', 'doughnut', 'dove', 'dragonfly',
+ 'drawer', 'underdrawers', 'dress', 'dress_hat', 'dress_suit',
+ 'dresser', 'drill', 'drinking_fountain', 'drone', 'dropper',
+ 'drum_(musical_instrument)', 'drumstick', 'duck', 'duckling',
+ 'duct_tape', 'duffel_bag', 'dumbbell', 'dumpster', 'dustpan',
+ 'Dutch_oven', 'eagle', 'earphone', 'earplug', 'earring', 'easel',
+ 'eclair', 'eel', 'egg', 'egg_roll', 'egg_yolk', 'eggbeater',
+ 'eggplant', 'electric_chair', 'refrigerator', 'elephant', 'elk',
+ 'envelope', 'eraser', 'escargot', 'eyepatch', 'falcon', 'fan',
+ 'faucet', 'fedora', 'ferret', 'Ferris_wheel', 'ferry', 'fig_(fruit)',
+ 'fighter_jet', 'figurine', 'file_cabinet', 'file_(tool)', 'fire_alarm',
+ 'fire_engine', 'fire_extinguisher', 'fire_hose', 'fireplace',
+ 'fireplug', 'fish', 'fish_(food)', 'fishbowl', 'fishing_boat',
+ 'fishing_rod', 'flag', 'flagpole', 'flamingo', 'flannel', 'flash',
+ 'flashlight', 'fleece', 'flip-flop_(sandal)', 'flipper_(footwear)',
+ 'flower_arrangement', 'flute_glass', 'foal', 'folding_chair',
+ 'food_processor', 'football_(American)', 'football_helmet',
+ 'footstool', 'fork', 'forklift', 'freight_car', 'French_toast',
+ 'freshener', 'frisbee', 'frog', 'fruit_juice', 'fruit_salad',
+ 'frying_pan', 'fudge', 'funnel', 'futon', 'gag', 'garbage',
+ 'garbage_truck', 'garden_hose', 'gargle', 'gargoyle', 'garlic',
+ 'gasmask', 'gazelle', 'gelatin', 'gemstone', 'giant_panda',
+ 'gift_wrap', 'ginger', 'giraffe', 'cincture',
+ 'glass_(drink_container)', 'globe', 'glove', 'goat', 'goggles',
+ 'goldfish', 'golf_club', 'golfcart', 'gondola_(boat)', 'goose',
+ 'gorilla', 'gourd', 'surgical_gown', 'grape', 'grasshopper', 'grater',
+ 'gravestone', 'gravy_boat', 'green_bean', 'green_onion', 'griddle',
+ 'grillroom', 'grinder_(tool)', 'grits', 'grizzly', 'grocery_bag',
+ 'guacamole', 'guitar', 'gull', 'gun', 'hair_spray', 'hairbrush',
+ 'hairnet', 'hairpin', 'ham', 'hamburger', 'hammer', 'hammock',
+ 'hamper', 'hamster', 'hair_dryer', 'hand_glass', 'hand_towel',
+ 'handcart', 'handcuff', 'handkerchief', 'handle', 'handsaw',
+ 'hardback_book', 'harmonium', 'hat', 'hatbox', 'hatch', 'veil',
+ 'headband', 'headboard', 'headlight', 'headscarf', 'headset',
+ 'headstall_(for_horses)', 'hearing_aid', 'heart', 'heater',
+ 'helicopter', 'helmet', 'heron', 'highchair', 'hinge', 'hippopotamus',
+ 'hockey_stick', 'hog', 'home_plate_(baseball)', 'honey', 'fume_hood',
+ 'hook', 'horse', 'hose', 'hot-air_balloon', 'hotplate', 'hot_sauce',
+ 'hourglass', 'houseboat', 'hummingbird', 'hummus', 'polar_bear',
+ 'icecream', 'popsicle', 'ice_maker', 'ice_pack', 'ice_skate',
+ 'ice_tea', 'igniter', 'incense', 'inhaler', 'iPod',
+ 'iron_(for_clothing)', 'ironing_board', 'jacket', 'jam', 'jean',
+ 'jeep', 'jelly_bean', 'jersey', 'jet_plane', 'jewelry', 'joystick',
+ 'jumpsuit', 'kayak', 'keg', 'kennel', 'kettle', 'key', 'keycard',
+ 'kilt', 'kimono', 'kitchen_sink', 'kitchen_table', 'kite', 'kitten',
+ 'kiwi_fruit', 'knee_pad', 'knife', 'knight_(chess_piece)',
+ 'knitting_needle', 'knob', 'knocker_(on_a_door)', 'koala', 'lab_coat',
+ 'ladder', 'ladle', 'ladybug', 'lamb_(animal)', 'lamb-chop', 'lamp',
+ 'lamppost', 'lampshade', 'lantern', 'lanyard', 'laptop_computer',
+ 'lasagna', 'latch', 'lawn_mower', 'leather', 'legging_(clothing)',
+ 'Lego', 'lemon', 'lemonade', 'lettuce', 'license_plate', 'life_buoy',
+ 'life_jacket', 'lightbulb', 'lightning_rod', 'lime', 'limousine',
+ 'linen_paper', 'lion', 'lip_balm', 'lipstick', 'liquor', 'lizard',
+ 'Loafer_(type_of_shoe)', 'log', 'lollipop', 'lotion',
+ 'speaker_(stero_equipment)', 'loveseat', 'machine_gun', 'magazine',
+ 'magnet', 'mail_slot', 'mailbox_(at_home)', 'mallet', 'mammoth',
+ 'mandarin_orange', 'manger', 'manhole', 'map', 'marker', 'martini',
+ 'mascot', 'mashed_potato', 'masher', 'mask', 'mast',
+ 'mat_(gym_equipment)', 'matchbox', 'mattress', 'measuring_cup',
+ 'measuring_stick', 'meatball', 'medicine', 'melon', 'microphone',
+ 'microscope', 'microwave_oven', 'milestone', 'milk', 'minivan',
+ 'mint_candy', 'mirror', 'mitten', 'mixer_(kitchen_tool)', 'money',
+ 'monitor_(computer_equipment) computer_monitor', 'monkey', 'motor',
+ 'motor_scooter', 'motor_vehicle', 'motorboat', 'motorcycle',
+ 'mound_(baseball)', 'mouse_(animal_rodent)',
+ 'mouse_(computer_equipment)', 'mousepad', 'muffin', 'mug', 'mushroom',
+ 'music_stool', 'musical_instrument', 'nailfile', 'nameplate', 'napkin',
+ 'neckerchief', 'necklace', 'necktie', 'needle', 'nest', 'newsstand',
+ 'nightshirt', 'nosebag_(for_animals)', 'noseband_(for_animals)',
+ 'notebook', 'notepad', 'nut', 'nutcracker', 'oar', 'octopus_(food)',
+ 'octopus_(animal)', 'oil_lamp', 'olive_oil', 'omelet', 'onion',
+ 'orange_(fruit)', 'orange_juice', 'oregano', 'ostrich', 'ottoman',
+ 'overalls_(clothing)', 'owl', 'packet', 'inkpad', 'pad', 'paddle',
+ 'padlock', 'paintbox', 'paintbrush', 'painting', 'pajamas', 'palette',
+ 'pan_(for_cooking)', 'pan_(metal_container)', 'pancake', 'pantyhose',
+ 'papaya', 'paperclip', 'paper_plate', 'paper_towel', 'paperback_book',
+ 'paperweight', 'parachute', 'parakeet', 'parasail_(sports)',
+ 'parchment', 'parka', 'parking_meter', 'parrot',
+ 'passenger_car_(part_of_a_train)', 'passenger_ship', 'passport',
+ 'pastry', 'patty_(food)', 'pea_(food)', 'peach', 'peanut_butter',
+ 'pear', 'peeler_(tool_for_fruit_and_vegetables)', 'pegboard',
+ 'pelican', 'pen', 'pencil', 'pencil_box', 'pencil_sharpener',
+ 'pendulum', 'penguin', 'pennant', 'penny_(coin)', 'pepper',
+ 'pepper_mill', 'perfume', 'persimmon', 'baby', 'pet', 'petfood',
+ 'pew_(church_bench)', 'phonebook', 'phonograph_record', 'piano',
+ 'pickle', 'pickup_truck', 'pie', 'pigeon', 'piggy_bank', 'pillow',
+ 'pin_(non_jewelry)', 'pineapple', 'pinecone', 'ping-pong_ball',
+ 'pinwheel', 'tobacco_pipe', 'pipe', 'pistol', 'pita_(bread)',
+ 'pitcher_(vessel_for_liquid)', 'pitchfork', 'pizza', 'place_mat',
+ 'plate', 'platter', 'playing_card', 'playpen', 'pliers',
+ 'plow_(farm_equipment)', 'pocket_watch', 'pocketknife',
+ 'poker_(fire_stirring_tool)', 'pole', 'police_van', 'polo_shirt',
+ 'poncho', 'pony', 'pool_table', 'pop_(soda)', 'portrait',
+ 'postbox_(public)', 'postcard', 'poster', 'pot', 'flowerpot', 'potato',
+ 'potholder', 'pottery', 'pouch', 'power_shovel', 'prawn', 'printer',
+ 'projectile_(weapon)', 'projector', 'propeller', 'prune', 'pudding',
+ 'puffer_(fish)', 'puffin', 'pug-dog', 'pumpkin', 'puncher', 'puppet',
+ 'puppy', 'quesadilla', 'quiche', 'quilt', 'rabbit', 'race_car',
+ 'racket', 'radar', 'radiator', 'radio_receiver', 'radish', 'raft',
+ 'rag_doll', 'raincoat', 'ram_(animal)', 'raspberry', 'rat',
+ 'razorblade', 'reamer_(juicer)', 'rearview_mirror', 'receipt',
+ 'recliner', 'record_player', 'red_cabbage', 'reflector',
+ 'remote_control', 'rhinoceros', 'rib_(food)', 'rifle', 'ring',
+ 'river_boat', 'road_map', 'robe', 'rocking_chair', 'roller_skate',
+ 'Rollerblade', 'rolling_pin', 'root_beer',
+ 'router_(computer_equipment)', 'rubber_band', 'runner_(carpet)',
+ 'plastic_bag', 'saddle_(on_an_animal)', 'saddle_blanket', 'saddlebag',
+ 'safety_pin', 'sail', 'salad', 'salad_plate', 'salami',
+ 'salmon_(fish)', 'salmon_(food)', 'salsa', 'saltshaker',
+ 'sandal_(type_of_shoe)', 'sandwich', 'satchel', 'saucepan', 'saucer',
+ 'sausage', 'sawhorse', 'saxophone', 'scale_(measuring_instrument)',
+ 'scarecrow', 'scarf', 'school_bus', 'scissors', 'scoreboard',
+ 'scrambled_eggs', 'scraper', 'scratcher', 'screwdriver',
+ 'scrubbing_brush', 'sculpture', 'seabird', 'seahorse', 'seaplane',
+ 'seashell', 'seedling', 'serving_dish', 'sewing_machine', 'shaker',
+ 'shampoo', 'shark', 'sharpener', 'Sharpie', 'shaver_(electric)',
+ 'shaving_cream', 'shawl', 'shears', 'sheep', 'shepherd_dog',
+ 'sherbert', 'shield', 'shirt', 'shoe', 'shopping_bag', 'shopping_cart',
+ 'short_pants', 'shot_glass', 'shoulder_bag', 'shovel', 'shower_head',
+ 'shower_curtain', 'shredder_(for_paper)', 'sieve', 'signboard', 'silo',
+ 'sink', 'skateboard', 'skewer', 'ski', 'ski_boot', 'ski_parka',
+ 'ski_pole', 'skirt', 'sled', 'sleeping_bag', 'sling_(bandage)',
+ 'slipper_(footwear)', 'smoothie', 'snake', 'snowboard', 'snowman',
+ 'snowmobile', 'soap', 'soccer_ball', 'sock', 'soda_fountain',
+ 'carbonated_water', 'sofa', 'softball', 'solar_array', 'sombrero',
+ 'soup', 'soup_bowl', 'soupspoon', 'sour_cream', 'soya_milk',
+ 'space_shuttle', 'sparkler_(fireworks)', 'spatula', 'spear',
+ 'spectacles', 'spice_rack', 'spider', 'sponge', 'spoon', 'sportswear',
+ 'spotlight', 'squirrel', 'stapler_(stapling_machine)', 'starfish',
+ 'statue_(sculpture)', 'steak_(food)', 'steak_knife',
+ 'steamer_(kitchen_appliance)', 'steering_wheel', 'stencil',
+ 'stepladder', 'step_stool', 'stereo_(sound_system)', 'stew', 'stirrer',
+ 'stirrup', 'stockings_(leg_wear)', 'stool', 'stop_sign', 'brake_light',
+ 'stove', 'strainer', 'strap', 'straw_(for_drinking)', 'strawberry',
+ 'street_sign', 'streetlight', 'string_cheese', 'stylus', 'subwoofer',
+ 'sugar_bowl', 'sugarcane_(plant)', 'suit_(clothing)', 'sunflower',
+ 'sunglasses', 'sunhat', 'sunscreen', 'surfboard', 'sushi', 'mop',
+ 'sweat_pants', 'sweatband', 'sweater', 'sweatshirt', 'sweet_potato',
+ 'swimsuit', 'sword', 'syringe', 'Tabasco_sauce', 'table-tennis_table',
+ 'table', 'table_lamp', 'tablecloth', 'tachometer', 'taco', 'tag',
+ 'taillight', 'tambourine', 'army_tank', 'tank_(storage_vessel)',
+ 'tank_top_(clothing)', 'tape_(sticky_cloth_or_paper)', 'tape_measure',
+ 'tapestry', 'tarp', 'tartan', 'tassel', 'tea_bag', 'teacup',
+ 'teakettle', 'teapot', 'teddy_bear', 'telephone', 'telephone_booth',
+ 'telephone_pole', 'telephoto_lens', 'television_camera',
+ 'television_set', 'tennis_ball', 'tennis_racket', 'tequila',
+ 'thermometer', 'thermos_bottle', 'thermostat', 'thimble', 'thread',
+ 'thumbtack', 'tiara', 'tiger', 'tights_(clothing)', 'timer', 'tinfoil',
+ 'tinsel', 'tissue_paper', 'toast_(food)', 'toaster', 'toaster_oven',
+ 'toilet', 'toilet_tissue', 'tomato', 'tongs', 'toolbox', 'toothbrush',
+ 'toothpaste', 'toothpick', 'cover', 'tortilla', 'tow_truck', 'towel',
+ 'towel_rack', 'toy', 'tractor_(farm_equipment)', 'traffic_light',
+ 'dirt_bike', 'trailer_truck', 'train_(railroad_vehicle)', 'trampoline',
+ 'tray', 'tree_house', 'trench_coat', 'triangle_(musical_instrument)',
+ 'tricycle', 'tripod', 'trousers', 'truck', 'truffle_(chocolate)',
+ 'trunk', 'vat', 'turban', 'turkey_(bird)', 'turkey_(food)', 'turnip',
+ 'turtle', 'turtleneck_(clothing)', 'typewriter', 'umbrella',
+ 'underwear', 'unicycle', 'urinal', 'urn', 'vacuum_cleaner', 'valve',
+ 'vase', 'vending_machine', 'vent', 'videotape', 'vinegar', 'violin',
+ 'vodka', 'volleyball', 'vulture', 'waffle', 'waffle_iron', 'wagon',
+ 'wagon_wheel', 'walking_stick', 'wall_clock', 'wall_socket', 'wallet',
+ 'walrus', 'wardrobe', 'wasabi', 'automatic_washer', 'watch',
+ 'water_bottle', 'water_cooler', 'water_faucet', 'water_filter',
+ 'water_heater', 'water_jug', 'water_gun', 'water_scooter', 'water_ski',
+ 'water_tower', 'watering_can', 'watermelon', 'weathervane', 'webcam',
+ 'wedding_cake', 'wedding_ring', 'wet_suit', 'wheel', 'wheelchair',
+ 'whipped_cream', 'whiskey', 'whistle', 'wick', 'wig', 'wind_chime',
+ 'windmill', 'window_box_(for_plants)', 'windshield_wiper', 'windsock',
+ 'wine_bottle', 'wine_bucket', 'wineglass', 'wing_chair',
+ 'blinder_(for_horses)', 'wok', 'wolf', 'wooden_spoon', 'wreath',
+ 'wrench', 'wristband', 'wristlet', 'yacht', 'yak', 'yogurt',
+ 'yoke_(animal_equipment)', 'zebra', 'zucchini')
+
+ def load_annotations(self, ann_file):
+ """Load annotation from lvis style annotation file.
+
+ Args:
+ ann_file (str): Path of annotation file.
+
+ Returns:
+ list[dict]: Annotation info from LVIS api.
+ """
+
+ try:
+ import lvis
+ assert lvis.__version__ >= '10.5.3'
+ from lvis import LVIS
+ except AssertionError:
+ raise AssertionError('Incompatible version of lvis is installed. '
+ 'Run pip uninstall lvis first. Then run pip '
+ 'install mmlvis to install open-mmlab forked '
+ 'lvis. ')
+ except ImportError:
+ raise ImportError('Package lvis is not installed. Please run pip '
+ 'install mmlvis to install open-mmlab forked '
+ 'lvis.')
+ self.coco = LVIS(ann_file)
+ self.cat_ids = self.coco.get_cat_ids()
+ self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}
+ self.img_ids = self.coco.get_img_ids()
+ data_infos = []
+ for i in self.img_ids:
+ info = self.coco.load_imgs([i])[0]
+ if info['file_name'].startswith('COCO'):
+                # Convert from the COCO 2014 file naming convention of
+ # COCO_[train/val/test]2014_000000000000.jpg to the 2017
+ # naming convention of 000000000000.jpg
+ # (LVIS v1 will fix this naming issue)
+ info['filename'] = info['file_name'][-16:]
+ else:
+ info['filename'] = info['file_name']
+ data_infos.append(info)
+ return data_infos
+
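+    # Filename-conversion sketch (illustrative): a v0.5 entry with
+    # file_name = 'COCO_val2014_000000000139.jpg' becomes
+    # filename = '000000000139.jpg' via info['file_name'][-16:], matching the
+    # COCO 2017 layout; names without the 'COCO' prefix are kept unchanged.
+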
+ def evaluate(self,
+ results,
+ metric='bbox',
+ logger=None,
+ jsonfile_prefix=None,
+ classwise=False,
+ proposal_nums=(100, 300, 1000),
+ iou_thrs=np.arange(0.5, 0.96, 0.05)):
+ """Evaluation in LVIS protocol.
+
+ Args:
+ results (list[list | tuple]): Testing results of the dataset.
+ metric (str | list[str]): Metrics to be evaluated. Options are
+ 'bbox', 'segm', 'proposal', 'proposal_fast'.
+ logger (logging.Logger | str | None): Logger used for printing
+ related information during evaluation. Default: None.
+            jsonfile_prefix (str | None): The prefix of json files. It includes
+                the file path and the prefix of filename, e.g., "a/b/prefix".
+                If not specified, a temp file will be created. Default: None.
+            classwise (bool): Whether to evaluate the AP for each class.
+ proposal_nums (Sequence[int]): Proposal number used for evaluating
+ recalls, such as recall@100, recall@1000.
+ Default: (100, 300, 1000).
+            iou_thrs (Sequence[float]): IoU thresholds used for evaluating
+                recalls/mAPs. If set to a list, the average recall of all IoUs
+                will also be computed. Default: np.arange(0.5, 0.96, 0.05).
+
+ Returns:
+ dict[str, float]: LVIS style metrics.
+ """
+
+ try:
+ import lvis
+ assert lvis.__version__ >= '10.5.3'
+ from lvis import LVISResults, LVISEval
+ except AssertionError:
+ raise AssertionError('Incompatible version of lvis is installed. '
+ 'Run pip uninstall lvis first. Then run pip '
+ 'install mmlvis to install open-mmlab forked '
+ 'lvis. ')
+ except ImportError:
+ raise ImportError('Package lvis is not installed. Please run pip '
+ 'install mmlvis to install open-mmlab forked '
+ 'lvis.')
+ assert isinstance(results, list), 'results must be a list'
+ assert len(results) == len(self), (
+ 'The length of results is not equal to the dataset len: {} != {}'.
+ format(len(results), len(self)))
+
+ metrics = metric if isinstance(metric, list) else [metric]
+ allowed_metrics = ['bbox', 'segm', 'proposal', 'proposal_fast']
+ for metric in metrics:
+ if metric not in allowed_metrics:
+ raise KeyError('metric {} is not supported'.format(metric))
+
+ if jsonfile_prefix is None:
+ tmp_dir = tempfile.TemporaryDirectory()
+ jsonfile_prefix = osp.join(tmp_dir.name, 'results')
+ else:
+ tmp_dir = None
+ result_files = self.results2json(results, jsonfile_prefix)
+
+ eval_results = OrderedDict()
+ # get original api
+ lvis_gt = self.coco
+ for metric in metrics:
+ msg = 'Evaluating {}...'.format(metric)
+ if logger is None:
+ msg = '\n' + msg
+ print_log(msg, logger=logger)
+
+ if metric == 'proposal_fast':
+ ar = self.fast_eval_recall(
+ results, proposal_nums, iou_thrs, logger='silent')
+ log_msg = []
+ for i, num in enumerate(proposal_nums):
+ eval_results['AR@{}'.format(num)] = ar[i]
+ log_msg.append('\nAR@{}\t{:.4f}'.format(num, ar[i]))
+ log_msg = ''.join(log_msg)
+ print_log(log_msg, logger=logger)
+ continue
+
+ if metric not in result_files:
+ raise KeyError('{} is not in results'.format(metric))
+ try:
+ lvis_dt = LVISResults(lvis_gt, result_files[metric])
+ except IndexError:
+ print_log(
+ 'The testing results of the whole dataset is empty.',
+ logger=logger,
+ level=logging.ERROR)
+ break
+
+ iou_type = 'bbox' if metric == 'proposal' else metric
+ lvis_eval = LVISEval(lvis_gt, lvis_dt, iou_type)
+ lvis_eval.params.imgIds = self.img_ids
+ if metric == 'proposal':
+ lvis_eval.params.useCats = 0
+ lvis_eval.params.maxDets = list(proposal_nums)
+ lvis_eval.evaluate()
+ lvis_eval.accumulate()
+ lvis_eval.summarize()
+ for k, v in lvis_eval.get_results().items():
+ if k.startswith('AR'):
+ val = float('{:.3f}'.format(float(v)))
+ eval_results[k] = val
+ else:
+ lvis_eval.evaluate()
+ lvis_eval.accumulate()
+ lvis_eval.summarize()
+ lvis_results = lvis_eval.get_results()
+ if classwise: # Compute per-category AP
+ # Compute per-category AP
+ # from https://github.com/facebookresearch/detectron2/
+ precisions = lvis_eval.eval['precision']
+ # precision: (iou, recall, cls, area range, max dets)
+ assert len(self.cat_ids) == precisions.shape[2]
+
+ results_per_category = []
+ for idx, catId in enumerate(self.cat_ids):
+ # area range index 0: all area ranges
+ # max dets index -1: typically 100 per image
+ nm = self.coco.load_cats(catId)[0]
+ precision = precisions[:, :, idx, 0, -1]
+ precision = precision[precision > -1]
+ if precision.size:
+ ap = np.mean(precision)
+ else:
+ ap = float('nan')
+ results_per_category.append(
+ (f'{nm["name"]}', f'{float(ap):0.3f}'))
+
+ num_columns = min(6, len(results_per_category) * 2)
+ results_flatten = list(
+ itertools.chain(*results_per_category))
+ headers = ['category', 'AP'] * (num_columns // 2)
+ results_2d = itertools.zip_longest(*[
+ results_flatten[i::num_columns]
+ for i in range(num_columns)
+ ])
+ table_data = [headers]
+ table_data += [result for result in results_2d]
+ table = AsciiTable(table_data)
+ print_log('\n' + table.table, logger=logger)
+
+ for k, v in lvis_results.items():
+ if k.startswith('AP'):
+ key = '{}_{}'.format(metric, k)
+ val = float('{:.3f}'.format(float(v)))
+ eval_results[key] = val
+ ap_summary = ' '.join([
+ '{}:{:.3f}'.format(k, float(v))
+ for k, v in lvis_results.items() if k.startswith('AP')
+ ])
+ eval_results['{}_mAP_copypaste'.format(metric)] = ap_summary
+ lvis_eval.print_results()
+ if tmp_dir is not None:
+ tmp_dir.cleanup()
+ return eval_results
+
+
+LVISDataset = LVISV05Dataset
+DATASETS.register_module(name='LVISDataset', module=LVISDataset)
+
+
+@DATASETS.register_module()
+class LVISV1Dataset(LVISDataset):
+
+ CLASSES = (
+ 'aerosol_can', 'air_conditioner', 'airplane', 'alarm_clock', 'alcohol',
+ 'alligator', 'almond', 'ambulance', 'amplifier', 'anklet', 'antenna',
+ 'apple', 'applesauce', 'apricot', 'apron', 'aquarium',
+ 'arctic_(type_of_shoe)', 'armband', 'armchair', 'armoire', 'armor',
+ 'artichoke', 'trash_can', 'ashtray', 'asparagus', 'atomizer',
+ 'avocado', 'award', 'awning', 'ax', 'baboon', 'baby_buggy',
+ 'basketball_backboard', 'backpack', 'handbag', 'suitcase', 'bagel',
+ 'bagpipe', 'baguet', 'bait', 'ball', 'ballet_skirt', 'balloon',
+ 'bamboo', 'banana', 'Band_Aid', 'bandage', 'bandanna', 'banjo',
+ 'banner', 'barbell', 'barge', 'barrel', 'barrette', 'barrow',
+ 'baseball_base', 'baseball', 'baseball_bat', 'baseball_cap',
+ 'baseball_glove', 'basket', 'basketball', 'bass_horn', 'bat_(animal)',
+ 'bath_mat', 'bath_towel', 'bathrobe', 'bathtub', 'batter_(food)',
+ 'battery', 'beachball', 'bead', 'bean_curd', 'beanbag', 'beanie',
+ 'bear', 'bed', 'bedpan', 'bedspread', 'cow', 'beef_(food)', 'beeper',
+ 'beer_bottle', 'beer_can', 'beetle', 'bell', 'bell_pepper', 'belt',
+ 'belt_buckle', 'bench', 'beret', 'bib', 'Bible', 'bicycle', 'visor',
+ 'billboard', 'binder', 'binoculars', 'bird', 'birdfeeder', 'birdbath',
+ 'birdcage', 'birdhouse', 'birthday_cake', 'birthday_card',
+ 'pirate_flag', 'black_sheep', 'blackberry', 'blackboard', 'blanket',
+ 'blazer', 'blender', 'blimp', 'blinker', 'blouse', 'blueberry',
+ 'gameboard', 'boat', 'bob', 'bobbin', 'bobby_pin', 'boiled_egg',
+ 'bolo_tie', 'deadbolt', 'bolt', 'bonnet', 'book', 'bookcase',
+ 'booklet', 'bookmark', 'boom_microphone', 'boot', 'bottle',
+ 'bottle_opener', 'bouquet', 'bow_(weapon)', 'bow_(decorative_ribbons)',
+ 'bow-tie', 'bowl', 'pipe_bowl', 'bowler_hat', 'bowling_ball', 'box',
+ 'boxing_glove', 'suspenders', 'bracelet', 'brass_plaque', 'brassiere',
+ 'bread-bin', 'bread', 'breechcloth', 'bridal_gown', 'briefcase',
+ 'broccoli', 'broach', 'broom', 'brownie', 'brussels_sprouts',
+ 'bubble_gum', 'bucket', 'horse_buggy', 'bull', 'bulldog', 'bulldozer',
+ 'bullet_train', 'bulletin_board', 'bulletproof_vest', 'bullhorn',
+ 'bun', 'bunk_bed', 'buoy', 'burrito', 'bus_(vehicle)', 'business_card',
+ 'butter', 'butterfly', 'button', 'cab_(taxi)', 'cabana', 'cabin_car',
+ 'cabinet', 'locker', 'cake', 'calculator', 'calendar', 'calf',
+ 'camcorder', 'camel', 'camera', 'camera_lens', 'camper_(vehicle)',
+ 'can', 'can_opener', 'candle', 'candle_holder', 'candy_bar',
+ 'candy_cane', 'walking_cane', 'canister', 'canoe', 'cantaloup',
+ 'canteen', 'cap_(headwear)', 'bottle_cap', 'cape', 'cappuccino',
+ 'car_(automobile)', 'railcar_(part_of_a_train)', 'elevator_car',
+ 'car_battery', 'identity_card', 'card', 'cardigan', 'cargo_ship',
+ 'carnation', 'horse_carriage', 'carrot', 'tote_bag', 'cart', 'carton',
+ 'cash_register', 'casserole', 'cassette', 'cast', 'cat', 'cauliflower',
+ 'cayenne_(spice)', 'CD_player', 'celery', 'cellular_telephone',
+ 'chain_mail', 'chair', 'chaise_longue', 'chalice', 'chandelier',
+ 'chap', 'checkbook', 'checkerboard', 'cherry', 'chessboard',
+ 'chicken_(animal)', 'chickpea', 'chili_(vegetable)', 'chime',
+ 'chinaware', 'crisp_(potato_chip)', 'poker_chip', 'chocolate_bar',
+ 'chocolate_cake', 'chocolate_milk', 'chocolate_mousse', 'choker',
+ 'chopping_board', 'chopstick', 'Christmas_tree', 'slide', 'cider',
+ 'cigar_box', 'cigarette', 'cigarette_case', 'cistern', 'clarinet',
+ 'clasp', 'cleansing_agent', 'cleat_(for_securing_rope)', 'clementine',
+ 'clip', 'clipboard', 'clippers_(for_plants)', 'cloak', 'clock',
+ 'clock_tower', 'clothes_hamper', 'clothespin', 'clutch_bag', 'coaster',
+ 'coat', 'coat_hanger', 'coatrack', 'cock', 'cockroach',
+ 'cocoa_(beverage)', 'coconut', 'coffee_maker', 'coffee_table',
+ 'coffeepot', 'coil', 'coin', 'colander', 'coleslaw',
+ 'coloring_material', 'combination_lock', 'pacifier', 'comic_book',
+ 'compass', 'computer_keyboard', 'condiment', 'cone', 'control',
+ 'convertible_(automobile)', 'sofa_bed', 'cooker', 'cookie',
+ 'cooking_utensil', 'cooler_(for_food)', 'cork_(bottle_plug)',
+ 'corkboard', 'corkscrew', 'edible_corn', 'cornbread', 'cornet',
+ 'cornice', 'cornmeal', 'corset', 'costume', 'cougar', 'coverall',
+ 'cowbell', 'cowboy_hat', 'crab_(animal)', 'crabmeat', 'cracker',
+ 'crape', 'crate', 'crayon', 'cream_pitcher', 'crescent_roll', 'crib',
+ 'crock_pot', 'crossbar', 'crouton', 'crow', 'crowbar', 'crown',
+ 'crucifix', 'cruise_ship', 'police_cruiser', 'crumb', 'crutch',
+ 'cub_(animal)', 'cube', 'cucumber', 'cufflink', 'cup', 'trophy_cup',
+ 'cupboard', 'cupcake', 'hair_curler', 'curling_iron', 'curtain',
+ 'cushion', 'cylinder', 'cymbal', 'dagger', 'dalmatian', 'dartboard',
+ 'date_(fruit)', 'deck_chair', 'deer', 'dental_floss', 'desk',
+ 'detergent', 'diaper', 'diary', 'die', 'dinghy', 'dining_table', 'tux',
+ 'dish', 'dish_antenna', 'dishrag', 'dishtowel', 'dishwasher',
+ 'dishwasher_detergent', 'dispenser', 'diving_board', 'Dixie_cup',
+ 'dog', 'dog_collar', 'doll', 'dollar', 'dollhouse', 'dolphin',
+ 'domestic_ass', 'doorknob', 'doormat', 'doughnut', 'dove', 'dragonfly',
+ 'drawer', 'underdrawers', 'dress', 'dress_hat', 'dress_suit',
+ 'dresser', 'drill', 'drone', 'dropper', 'drum_(musical_instrument)',
+ 'drumstick', 'duck', 'duckling', 'duct_tape', 'duffel_bag', 'dumbbell',
+ 'dumpster', 'dustpan', 'eagle', 'earphone', 'earplug', 'earring',
+ 'easel', 'eclair', 'eel', 'egg', 'egg_roll', 'egg_yolk', 'eggbeater',
+ 'eggplant', 'electric_chair', 'refrigerator', 'elephant', 'elk',
+ 'envelope', 'eraser', 'escargot', 'eyepatch', 'falcon', 'fan',
+ 'faucet', 'fedora', 'ferret', 'Ferris_wheel', 'ferry', 'fig_(fruit)',
+ 'fighter_jet', 'figurine', 'file_cabinet', 'file_(tool)', 'fire_alarm',
+ 'fire_engine', 'fire_extinguisher', 'fire_hose', 'fireplace',
+ 'fireplug', 'first-aid_kit', 'fish', 'fish_(food)', 'fishbowl',
+ 'fishing_rod', 'flag', 'flagpole', 'flamingo', 'flannel', 'flap',
+ 'flash', 'flashlight', 'fleece', 'flip-flop_(sandal)',
+ 'flipper_(footwear)', 'flower_arrangement', 'flute_glass', 'foal',
+ 'folding_chair', 'food_processor', 'football_(American)',
+ 'football_helmet', 'footstool', 'fork', 'forklift', 'freight_car',
+ 'French_toast', 'freshener', 'frisbee', 'frog', 'fruit_juice',
+ 'frying_pan', 'fudge', 'funnel', 'futon', 'gag', 'garbage',
+ 'garbage_truck', 'garden_hose', 'gargle', 'gargoyle', 'garlic',
+ 'gasmask', 'gazelle', 'gelatin', 'gemstone', 'generator',
+ 'giant_panda', 'gift_wrap', 'ginger', 'giraffe', 'cincture',
+ 'glass_(drink_container)', 'globe', 'glove', 'goat', 'goggles',
+ 'goldfish', 'golf_club', 'golfcart', 'gondola_(boat)', 'goose',
+ 'gorilla', 'gourd', 'grape', 'grater', 'gravestone', 'gravy_boat',
+ 'green_bean', 'green_onion', 'griddle', 'grill', 'grits', 'grizzly',
+ 'grocery_bag', 'guitar', 'gull', 'gun', 'hairbrush', 'hairnet',
+ 'hairpin', 'halter_top', 'ham', 'hamburger', 'hammer', 'hammock',
+ 'hamper', 'hamster', 'hair_dryer', 'hand_glass', 'hand_towel',
+ 'handcart', 'handcuff', 'handkerchief', 'handle', 'handsaw',
+ 'hardback_book', 'harmonium', 'hat', 'hatbox', 'veil', 'headband',
+ 'headboard', 'headlight', 'headscarf', 'headset',
+ 'headstall_(for_horses)', 'heart', 'heater', 'helicopter', 'helmet',
+ 'heron', 'highchair', 'hinge', 'hippopotamus', 'hockey_stick', 'hog',
+ 'home_plate_(baseball)', 'honey', 'fume_hood', 'hook', 'hookah',
+ 'hornet', 'horse', 'hose', 'hot-air_balloon', 'hotplate', 'hot_sauce',
+ 'hourglass', 'houseboat', 'hummingbird', 'hummus', 'polar_bear',
+ 'icecream', 'popsicle', 'ice_maker', 'ice_pack', 'ice_skate',
+ 'igniter', 'inhaler', 'iPod', 'iron_(for_clothing)', 'ironing_board',
+ 'jacket', 'jam', 'jar', 'jean', 'jeep', 'jelly_bean', 'jersey',
+ 'jet_plane', 'jewel', 'jewelry', 'joystick', 'jumpsuit', 'kayak',
+ 'keg', 'kennel', 'kettle', 'key', 'keycard', 'kilt', 'kimono',
+ 'kitchen_sink', 'kitchen_table', 'kite', 'kitten', 'kiwi_fruit',
+ 'knee_pad', 'knife', 'knitting_needle', 'knob', 'knocker_(on_a_door)',
+ 'koala', 'lab_coat', 'ladder', 'ladle', 'ladybug', 'lamb_(animal)',
+ 'lamb-chop', 'lamp', 'lamppost', 'lampshade', 'lantern', 'lanyard',
+ 'laptop_computer', 'lasagna', 'latch', 'lawn_mower', 'leather',
+ 'legging_(clothing)', 'Lego', 'legume', 'lemon', 'lemonade', 'lettuce',
+ 'license_plate', 'life_buoy', 'life_jacket', 'lightbulb',
+ 'lightning_rod', 'lime', 'limousine', 'lion', 'lip_balm', 'liquor',
+ 'lizard', 'log', 'lollipop', 'speaker_(stero_equipment)', 'loveseat',
+ 'machine_gun', 'magazine', 'magnet', 'mail_slot', 'mailbox_(at_home)',
+ 'mallard', 'mallet', 'mammoth', 'manatee', 'mandarin_orange', 'manger',
+ 'manhole', 'map', 'marker', 'martini', 'mascot', 'mashed_potato',
+ 'masher', 'mask', 'mast', 'mat_(gym_equipment)', 'matchbox',
+ 'mattress', 'measuring_cup', 'measuring_stick', 'meatball', 'medicine',
+ 'melon', 'microphone', 'microscope', 'microwave_oven', 'milestone',
+ 'milk', 'milk_can', 'milkshake', 'minivan', 'mint_candy', 'mirror',
+ 'mitten', 'mixer_(kitchen_tool)', 'money',
+ 'monitor_(computer_equipment) computer_monitor', 'monkey', 'motor',
+ 'motor_scooter', 'motor_vehicle', 'motorcycle', 'mound_(baseball)',
+ 'mouse_(computer_equipment)', 'mousepad', 'muffin', 'mug', 'mushroom',
+ 'music_stool', 'musical_instrument', 'nailfile', 'napkin',
+ 'neckerchief', 'necklace', 'necktie', 'needle', 'nest', 'newspaper',
+ 'newsstand', 'nightshirt', 'nosebag_(for_animals)',
+ 'noseband_(for_animals)', 'notebook', 'notepad', 'nut', 'nutcracker',
+ 'oar', 'octopus_(food)', 'octopus_(animal)', 'oil_lamp', 'olive_oil',
+ 'omelet', 'onion', 'orange_(fruit)', 'orange_juice', 'ostrich',
+ 'ottoman', 'oven', 'overalls_(clothing)', 'owl', 'packet', 'inkpad',
+ 'pad', 'paddle', 'padlock', 'paintbrush', 'painting', 'pajamas',
+ 'palette', 'pan_(for_cooking)', 'pan_(metal_container)', 'pancake',
+ 'pantyhose', 'papaya', 'paper_plate', 'paper_towel', 'paperback_book',
+ 'paperweight', 'parachute', 'parakeet', 'parasail_(sports)', 'parasol',
+ 'parchment', 'parka', 'parking_meter', 'parrot',
+ 'passenger_car_(part_of_a_train)', 'passenger_ship', 'passport',
+ 'pastry', 'patty_(food)', 'pea_(food)', 'peach', 'peanut_butter',
+ 'pear', 'peeler_(tool_for_fruit_and_vegetables)', 'wooden_leg',
+ 'pegboard', 'pelican', 'pen', 'pencil', 'pencil_box',
+ 'pencil_sharpener', 'pendulum', 'penguin', 'pennant', 'penny_(coin)',
+ 'pepper', 'pepper_mill', 'perfume', 'persimmon', 'person', 'pet',
+ 'pew_(church_bench)', 'phonebook', 'phonograph_record', 'piano',
+ 'pickle', 'pickup_truck', 'pie', 'pigeon', 'piggy_bank', 'pillow',
+ 'pin_(non_jewelry)', 'pineapple', 'pinecone', 'ping-pong_ball',
+ 'pinwheel', 'tobacco_pipe', 'pipe', 'pistol', 'pita_(bread)',
+ 'pitcher_(vessel_for_liquid)', 'pitchfork', 'pizza', 'place_mat',
+ 'plate', 'platter', 'playpen', 'pliers', 'plow_(farm_equipment)',
+ 'plume', 'pocket_watch', 'pocketknife', 'poker_(fire_stirring_tool)',
+ 'pole', 'polo_shirt', 'poncho', 'pony', 'pool_table', 'pop_(soda)',
+ 'postbox_(public)', 'postcard', 'poster', 'pot', 'flowerpot', 'potato',
+ 'potholder', 'pottery', 'pouch', 'power_shovel', 'prawn', 'pretzel',
+ 'printer', 'projectile_(weapon)', 'projector', 'propeller', 'prune',
+ 'pudding', 'puffer_(fish)', 'puffin', 'pug-dog', 'pumpkin', 'puncher',
+ 'puppet', 'puppy', 'quesadilla', 'quiche', 'quilt', 'rabbit',
+ 'race_car', 'racket', 'radar', 'radiator', 'radio_receiver', 'radish',
+ 'raft', 'rag_doll', 'raincoat', 'ram_(animal)', 'raspberry', 'rat',
+ 'razorblade', 'reamer_(juicer)', 'rearview_mirror', 'receipt',
+ 'recliner', 'record_player', 'reflector', 'remote_control',
+ 'rhinoceros', 'rib_(food)', 'rifle', 'ring', 'river_boat', 'road_map',
+ 'robe', 'rocking_chair', 'rodent', 'roller_skate', 'Rollerblade',
+ 'rolling_pin', 'root_beer', 'router_(computer_equipment)',
+ 'rubber_band', 'runner_(carpet)', 'plastic_bag',
+ 'saddle_(on_an_animal)', 'saddle_blanket', 'saddlebag', 'safety_pin',
+ 'sail', 'salad', 'salad_plate', 'salami', 'salmon_(fish)',
+ 'salmon_(food)', 'salsa', 'saltshaker', 'sandal_(type_of_shoe)',
+ 'sandwich', 'satchel', 'saucepan', 'saucer', 'sausage', 'sawhorse',
+ 'saxophone', 'scale_(measuring_instrument)', 'scarecrow', 'scarf',
+ 'school_bus', 'scissors', 'scoreboard', 'scraper', 'screwdriver',
+ 'scrubbing_brush', 'sculpture', 'seabird', 'seahorse', 'seaplane',
+ 'seashell', 'sewing_machine', 'shaker', 'shampoo', 'shark',
+ 'sharpener', 'Sharpie', 'shaver_(electric)', 'shaving_cream', 'shawl',
+ 'shears', 'sheep', 'shepherd_dog', 'sherbert', 'shield', 'shirt',
+ 'shoe', 'shopping_bag', 'shopping_cart', 'short_pants', 'shot_glass',
+ 'shoulder_bag', 'shovel', 'shower_head', 'shower_cap',
+ 'shower_curtain', 'shredder_(for_paper)', 'signboard', 'silo', 'sink',
+ 'skateboard', 'skewer', 'ski', 'ski_boot', 'ski_parka', 'ski_pole',
+ 'skirt', 'skullcap', 'sled', 'sleeping_bag', 'sling_(bandage)',
+ 'slipper_(footwear)', 'smoothie', 'snake', 'snowboard', 'snowman',
+ 'snowmobile', 'soap', 'soccer_ball', 'sock', 'sofa', 'softball',
+ 'solar_array', 'sombrero', 'soup', 'soup_bowl', 'soupspoon',
+ 'sour_cream', 'soya_milk', 'space_shuttle', 'sparkler_(fireworks)',
+ 'spatula', 'spear', 'spectacles', 'spice_rack', 'spider', 'crawfish',
+ 'sponge', 'spoon', 'sportswear', 'spotlight', 'squid_(food)',
+ 'squirrel', 'stagecoach', 'stapler_(stapling_machine)', 'starfish',
+ 'statue_(sculpture)', 'steak_(food)', 'steak_knife', 'steering_wheel',
+ 'stepladder', 'step_stool', 'stereo_(sound_system)', 'stew', 'stirrer',
+ 'stirrup', 'stool', 'stop_sign', 'brake_light', 'stove', 'strainer',
+ 'strap', 'straw_(for_drinking)', 'strawberry', 'street_sign',
+ 'streetlight', 'string_cheese', 'stylus', 'subwoofer', 'sugar_bowl',
+ 'sugarcane_(plant)', 'suit_(clothing)', 'sunflower', 'sunglasses',
+ 'sunhat', 'surfboard', 'sushi', 'mop', 'sweat_pants', 'sweatband',
+ 'sweater', 'sweatshirt', 'sweet_potato', 'swimsuit', 'sword',
+ 'syringe', 'Tabasco_sauce', 'table-tennis_table', 'table',
+ 'table_lamp', 'tablecloth', 'tachometer', 'taco', 'tag', 'taillight',
+ 'tambourine', 'army_tank', 'tank_(storage_vessel)',
+ 'tank_top_(clothing)', 'tape_(sticky_cloth_or_paper)', 'tape_measure',
+ 'tapestry', 'tarp', 'tartan', 'tassel', 'tea_bag', 'teacup',
+ 'teakettle', 'teapot', 'teddy_bear', 'telephone', 'telephone_booth',
+ 'telephone_pole', 'telephoto_lens', 'television_camera',
+ 'television_set', 'tennis_ball', 'tennis_racket', 'tequila',
+ 'thermometer', 'thermos_bottle', 'thermostat', 'thimble', 'thread',
+ 'thumbtack', 'tiara', 'tiger', 'tights_(clothing)', 'timer', 'tinfoil',
+ 'tinsel', 'tissue_paper', 'toast_(food)', 'toaster', 'toaster_oven',
+ 'toilet', 'toilet_tissue', 'tomato', 'tongs', 'toolbox', 'toothbrush',
+ 'toothpaste', 'toothpick', 'cover', 'tortilla', 'tow_truck', 'towel',
+ 'towel_rack', 'toy', 'tractor_(farm_equipment)', 'traffic_light',
+ 'dirt_bike', 'trailer_truck', 'train_(railroad_vehicle)', 'trampoline',
+ 'tray', 'trench_coat', 'triangle_(musical_instrument)', 'tricycle',
+ 'tripod', 'trousers', 'truck', 'truffle_(chocolate)', 'trunk', 'vat',
+ 'turban', 'turkey_(food)', 'turnip', 'turtle', 'turtleneck_(clothing)',
+ 'typewriter', 'umbrella', 'underwear', 'unicycle', 'urinal', 'urn',
+ 'vacuum_cleaner', 'vase', 'vending_machine', 'vent', 'vest',
+ 'videotape', 'vinegar', 'violin', 'vodka', 'volleyball', 'vulture',
+ 'waffle', 'waffle_iron', 'wagon', 'wagon_wheel', 'walking_stick',
+ 'wall_clock', 'wall_socket', 'wallet', 'walrus', 'wardrobe',
+ 'washbasin', 'automatic_washer', 'watch', 'water_bottle',
+ 'water_cooler', 'water_faucet', 'water_heater', 'water_jug',
+ 'water_gun', 'water_scooter', 'water_ski', 'water_tower',
+ 'watering_can', 'watermelon', 'weathervane', 'webcam', 'wedding_cake',
+ 'wedding_ring', 'wet_suit', 'wheel', 'wheelchair', 'whipped_cream',
+ 'whistle', 'wig', 'wind_chime', 'windmill', 'window_box_(for_plants)',
+ 'windshield_wiper', 'windsock', 'wine_bottle', 'wine_bucket',
+ 'wineglass', 'blinder_(for_horses)', 'wok', 'wolf', 'wooden_spoon',
+ 'wreath', 'wrench', 'wristband', 'wristlet', 'yacht', 'yogurt',
+ 'yoke_(animal_equipment)', 'zebra', 'zucchini')
+
+ def load_annotations(self, ann_file):
+ try:
+ import lvis
+ assert lvis.__version__ >= '10.5.3'
+ from lvis import LVIS
+ except AssertionError:
+ raise AssertionError('Incompatible version of lvis is installed. '
+ 'Run pip uninstall lvis first. Then run pip '
+ 'install mmlvis to install open-mmlab forked '
+ 'lvis. ')
+ except ImportError:
+ raise ImportError('Package lvis is not installed. Please run pip '
+ 'install mmlvis to install open-mmlab forked '
+ 'lvis.')
+ self.coco = LVIS(ann_file)
+ self.cat_ids = self.coco.get_cat_ids()
+ self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}
+ self.img_ids = self.coco.get_img_ids()
+ data_infos = []
+ for i in self.img_ids:
+ info = self.coco.load_imgs([i])[0]
+ # coco_url is used in LVISv1 instead of file_name
+ # e.g. http://images.cocodataset.org/train2017/000000391895.jpg
+            # the train/val split is specified in the url
+ info['filename'] = info['coco_url'].replace(
+ 'http://images.cocodataset.org/', '')
+ data_infos.append(info)
+ return data_infos
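
LVISv1 annotations only provide a `coco_url` per image, so `load_annotations` above derives the relative `filename` by stripping the host prefix. A minimal sketch of that mapping, using the illustrative URL from the comment above:

```python
# Sketch of the coco_url -> filename convention used by LVISV1Dataset above.
coco_url = 'http://images.cocodataset.org/train2017/000000391895.jpg'
filename = coco_url.replace('http://images.cocodataset.org/', '')
assert filename == 'train2017/000000391895.jpg'  # train/val split is part of the path
```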
diff --git a/annotator/uniformer/mmdet_null/datasets/pipelines/__init__.py b/annotator/uniformer/mmdet_null/datasets/pipelines/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..c6f424debd1623e7511dd77da464a6639d816745
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/datasets/pipelines/__init__.py
@@ -0,0 +1,25 @@
+from .auto_augment import (AutoAugment, BrightnessTransform, ColorTransform,
+ ContrastTransform, EqualizeTransform, Rotate, Shear,
+ Translate)
+from .compose import Compose
+from .formating import (Collect, DefaultFormatBundle, ImageToTensor,
+ ToDataContainer, ToTensor, Transpose, to_tensor)
+from .instaboost import InstaBoost
+from .loading import (LoadAnnotations, LoadImageFromFile, LoadImageFromWebcam,
+ LoadMultiChannelImageFromFiles, LoadProposals)
+from .test_time_aug import MultiScaleFlipAug
+from .transforms import (Albu, CutOut, Expand, MinIoURandomCrop, Normalize,
+ Pad, PhotoMetricDistortion, RandomCenterCropPad,
+ RandomCrop, RandomFlip, Resize, SegRescale)
+
+__all__ = [
+ 'Compose', 'to_tensor', 'ToTensor', 'ImageToTensor', 'ToDataContainer',
+ 'Transpose', 'Collect', 'DefaultFormatBundle', 'LoadAnnotations',
+ 'LoadImageFromFile', 'LoadImageFromWebcam',
+ 'LoadMultiChannelImageFromFiles', 'LoadProposals', 'MultiScaleFlipAug',
+ 'Resize', 'RandomFlip', 'Pad', 'RandomCrop', 'Normalize', 'SegRescale',
+ 'MinIoURandomCrop', 'Expand', 'PhotoMetricDistortion', 'Albu',
+ 'InstaBoost', 'RandomCenterCropPad', 'AutoAugment', 'CutOut', 'Shear',
+ 'Rotate', 'ColorTransform', 'EqualizeTransform', 'BrightnessTransform',
+ 'ContrastTransform', 'Translate'
+]
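
These registered transforms are normally assembled from config dicts and chained with `Compose`. A hedged sketch of a typical mmdet-style training pipeline follows; the values are illustrative defaults, not taken from this repository's configs, and it assumes the vendored `mmdet_null` package and `mmcv` import cleanly:

```python
from annotator.uniformer.mmdet_null.datasets.pipelines import Compose

# Illustrative values only; adapt to the actual dataset and model config.
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
# Each dict is built through the PIPELINES registry by Compose.
pipeline = Compose(train_pipeline)
print(pipeline)
```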
diff --git a/annotator/uniformer/mmdet_null/datasets/pipelines/auto_augment.py b/annotator/uniformer/mmdet_null/datasets/pipelines/auto_augment.py
new file mode 100644
index 0000000000000000000000000000000000000000..e19adaec18a96cac4dbe1d8c2c9193e9901be1fb
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/datasets/pipelines/auto_augment.py
@@ -0,0 +1,890 @@
+import copy
+
+import cv2
+import mmcv
+import numpy as np
+
+from ..builder import PIPELINES
+from .compose import Compose
+
+_MAX_LEVEL = 10
+
+
+def level_to_value(level, max_value):
+ """Map from level to values based on max_value."""
+ return (level / _MAX_LEVEL) * max_value
+
+
+def enhance_level_to_value(level, a=1.8, b=0.1):
+ """Map from level to values."""
+ return (level / _MAX_LEVEL) * a + b
+
+
+def random_negative(value, random_negative_prob):
+ """Randomly negate value based on random_negative_prob."""
+ return -value if np.random.rand() < random_negative_prob else value
+
+
+def bbox2fields():
+ """The key correspondence from bboxes to labels, masks and
+ segmentations."""
+ bbox2label = {
+ 'gt_bboxes': 'gt_labels',
+ 'gt_bboxes_ignore': 'gt_labels_ignore'
+ }
+ bbox2mask = {
+ 'gt_bboxes': 'gt_masks',
+ 'gt_bboxes_ignore': 'gt_masks_ignore'
+ }
+ bbox2seg = {
+ 'gt_bboxes': 'gt_semantic_seg',
+ }
+ return bbox2label, bbox2mask, bbox2seg
+
+
+@PIPELINES.register_module()
+class AutoAugment(object):
+ """Auto augmentation.
+
+ This data augmentation is proposed in `Learning Data Augmentation
+    Strategies for Object Detection <https://arxiv.org/abs/1906.11172>`_.
+
+    TODO: Implement the 'Sharpness' transform.
+
+ Args:
+ policies (list[list[dict]]): The policies of auto augmentation. Each
+ policy in ``policies`` is a specific augmentation policy, and is
+ composed by several augmentations (dict). When AutoAugment is
+ called, a random policy in ``policies`` will be selected to
+ augment images.
+
+ Examples:
+ >>> replace = (104, 116, 124)
+ >>> policies = [
+ >>> [
+ >>> dict(type='Sharpness', prob=0.0, level=8),
+ >>> dict(
+ >>> type='Shear',
+ >>> prob=0.4,
+ >>> level=0,
+ >>> replace=replace,
+ >>> axis='x')
+ >>> ],
+ >>> [
+ >>> dict(
+ >>> type='Rotate',
+ >>> prob=0.6,
+ >>> level=10,
+ >>> replace=replace),
+ >>> dict(type='Color', prob=1.0, level=6)
+ >>> ]
+ >>> ]
+ >>> augmentation = AutoAugment(policies)
+        >>> img = np.ones((100, 100, 3))
+        >>> gt_bboxes = np.ones((10, 4))
+ >>> results = dict(img=img, gt_bboxes=gt_bboxes)
+ >>> results = augmentation(results)
+ """
+
+ def __init__(self, policies):
+ assert isinstance(policies, list) and len(policies) > 0, \
+ 'Policies must be a non-empty list.'
+ for policy in policies:
+ assert isinstance(policy, list) and len(policy) > 0, \
+ 'Each policy in policies must be a non-empty list.'
+ for augment in policy:
+ assert isinstance(augment, dict) and 'type' in augment, \
+ 'Each specific augmentation must be a dict with key' \
+ ' "type".'
+
+ self.policies = copy.deepcopy(policies)
+ self.transforms = [Compose(policy) for policy in self.policies]
+
+ def __call__(self, results):
+ transform = np.random.choice(self.transforms)
+ return transform(results)
+
+ def __repr__(self):
+ return f'{self.__class__.__name__}(policies={self.policies})'
+
+
+@PIPELINES.register_module()
+class Shear(object):
+ """Apply Shear Transformation to image (and its corresponding bbox, mask,
+ segmentation).
+
+ Args:
+ level (int | float): The level should be in range [0,_MAX_LEVEL].
+ img_fill_val (int | float | tuple): The filled values for image border.
+ If float, the same fill value will be used for all the three
+            channels of the image. If tuple, it should have 3 elements.
+ seg_ignore_label (int): The fill value used for segmentation map.
+            Note this value must equal ``ignore_label`` in ``semantic_head``
+ of the corresponding config. Default 255.
+ prob (float): The probability for performing Shear and should be in
+ range [0, 1].
+ direction (str): The direction for shear, either "horizontal"
+ or "vertical".
+ max_shear_magnitude (float): The maximum magnitude for Shear
+ transformation.
+ random_negative_prob (float): The probability that turns the
+ offset negative. Should be in range [0,1]
+ interpolation (str): Same as in :func:`mmcv.imshear`.
+ """
+
+ def __init__(self,
+ level,
+ img_fill_val=128,
+ seg_ignore_label=255,
+ prob=0.5,
+ direction='horizontal',
+ max_shear_magnitude=0.3,
+ random_negative_prob=0.5,
+ interpolation='bilinear'):
+ assert isinstance(level, (int, float)), 'The level must be type ' \
+ f'int or float, got {type(level)}.'
+ assert 0 <= level <= _MAX_LEVEL, 'The level should be in range ' \
+ f'[0,{_MAX_LEVEL}], got {level}.'
+ if isinstance(img_fill_val, (float, int)):
+ img_fill_val = tuple([float(img_fill_val)] * 3)
+ elif isinstance(img_fill_val, tuple):
+ assert len(img_fill_val) == 3, 'img_fill_val as tuple must ' \
+ f'have 3 elements. got {len(img_fill_val)}.'
+ img_fill_val = tuple([float(val) for val in img_fill_val])
+ else:
+ raise ValueError(
+ 'img_fill_val must be float or tuple with 3 elements.')
+ assert np.all([0 <= val <= 255 for val in img_fill_val]), 'all ' \
+ 'elements of img_fill_val should between range [0,255].' \
+ f'got {img_fill_val}.'
+ assert 0 <= prob <= 1.0, 'The probability of shear should be in ' \
+ f'range [0,1]. got {prob}.'
+ assert direction in ('horizontal', 'vertical'), 'direction must ' \
+            f'be either "horizontal" or "vertical". got {direction}.'
+ assert isinstance(max_shear_magnitude, float), 'max_shear_magnitude ' \
+ f'should be type float. got {type(max_shear_magnitude)}.'
+        assert 0. <= max_shear_magnitude <= 1., \
+            'max_shear_magnitude should be in range [0,1]. ' \
+ f'got {max_shear_magnitude}.'
+ self.level = level
+ self.magnitude = level_to_value(level, max_shear_magnitude)
+ self.img_fill_val = img_fill_val
+ self.seg_ignore_label = seg_ignore_label
+ self.prob = prob
+ self.direction = direction
+ self.max_shear_magnitude = max_shear_magnitude
+ self.random_negative_prob = random_negative_prob
+ self.interpolation = interpolation
+
+ def _shear_img(self,
+ results,
+ magnitude,
+ direction='horizontal',
+ interpolation='bilinear'):
+ """Shear the image.
+
+ Args:
+ results (dict): Result dict from loading pipeline.
+ magnitude (int | float): The magnitude used for shear.
+ direction (str): The direction for shear, either "horizontal"
+ or "vertical".
+ interpolation (str): Same as in :func:`mmcv.imshear`.
+ """
+ for key in results.get('img_fields', ['img']):
+ img = results[key]
+ img_sheared = mmcv.imshear(
+ img,
+ magnitude,
+ direction,
+ border_value=self.img_fill_val,
+ interpolation=interpolation)
+ results[key] = img_sheared.astype(img.dtype)
+
+ def _shear_bboxes(self, results, magnitude):
+ """Shear the bboxes."""
+ h, w, c = results['img_shape']
+ if self.direction == 'horizontal':
+ shear_matrix = np.stack([[1, magnitude],
+ [0, 1]]).astype(np.float32) # [2, 2]
+ else:
+ shear_matrix = np.stack([[1, 0], [magnitude,
+ 1]]).astype(np.float32)
+ for key in results.get('bbox_fields', []):
+ min_x, min_y, max_x, max_y = np.split(
+ results[key], results[key].shape[-1], axis=-1)
+ coordinates = np.stack([[min_x, min_y], [max_x, min_y],
+ [min_x, max_y],
+ [max_x, max_y]]) # [4, 2, nb_box, 1]
+ coordinates = coordinates[..., 0].transpose(
+ (2, 1, 0)).astype(np.float32) # [nb_box, 2, 4]
+ new_coords = np.matmul(shear_matrix[None, :, :],
+ coordinates) # [nb_box, 2, 4]
+ min_x = np.min(new_coords[:, 0, :], axis=-1)
+ min_y = np.min(new_coords[:, 1, :], axis=-1)
+ max_x = np.max(new_coords[:, 0, :], axis=-1)
+ max_y = np.max(new_coords[:, 1, :], axis=-1)
+ min_x = np.clip(min_x, a_min=0, a_max=w)
+ min_y = np.clip(min_y, a_min=0, a_max=h)
+ max_x = np.clip(max_x, a_min=min_x, a_max=w)
+ max_y = np.clip(max_y, a_min=min_y, a_max=h)
+ results[key] = np.stack([min_x, min_y, max_x, max_y],
+ axis=-1).astype(results[key].dtype)
+
+ def _shear_masks(self,
+ results,
+ magnitude,
+ direction='horizontal',
+ fill_val=0,
+ interpolation='bilinear'):
+ """Shear the masks."""
+ h, w, c = results['img_shape']
+ for key in results.get('mask_fields', []):
+ masks = results[key]
+ results[key] = masks.shear((h, w),
+ magnitude,
+ direction,
+ border_value=fill_val,
+ interpolation=interpolation)
+
+ def _shear_seg(self,
+ results,
+ magnitude,
+ direction='horizontal',
+ fill_val=255,
+ interpolation='bilinear'):
+ """Shear the segmentation maps."""
+ for key in results.get('seg_fields', []):
+ seg = results[key]
+ results[key] = mmcv.imshear(
+ seg,
+ magnitude,
+ direction,
+ border_value=fill_val,
+ interpolation=interpolation).astype(seg.dtype)
+
+ def _filter_invalid(self, results, min_bbox_size=0):
+        """Filter out bboxes (and corresponding masks) that become too small
+        after the shear augmentation."""
+ bbox2label, bbox2mask, _ = bbox2fields()
+ for key in results.get('bbox_fields', []):
+ bbox_w = results[key][:, 2] - results[key][:, 0]
+ bbox_h = results[key][:, 3] - results[key][:, 1]
+ valid_inds = (bbox_w > min_bbox_size) & (bbox_h > min_bbox_size)
+ valid_inds = np.nonzero(valid_inds)[0]
+ results[key] = results[key][valid_inds]
+ # label fields. e.g. gt_labels and gt_labels_ignore
+ label_key = bbox2label.get(key)
+ if label_key in results:
+ results[label_key] = results[label_key][valid_inds]
+ # mask fields, e.g. gt_masks and gt_masks_ignore
+ mask_key = bbox2mask.get(key)
+ if mask_key in results:
+ results[mask_key] = results[mask_key][valid_inds]
+
+ def __call__(self, results):
+ """Call function to shear images, bounding boxes, masks and semantic
+ segmentation maps.
+
+ Args:
+ results (dict): Result dict from loading pipeline.
+
+ Returns:
+ dict: Sheared results.
+ """
+ if np.random.rand() > self.prob:
+ return results
+ magnitude = random_negative(self.magnitude, self.random_negative_prob)
+ self._shear_img(results, magnitude, self.direction, self.interpolation)
+ self._shear_bboxes(results, magnitude)
+ # fill_val set to 0 for background of mask.
+ self._shear_masks(
+ results,
+ magnitude,
+ self.direction,
+ fill_val=0,
+ interpolation=self.interpolation)
+ self._shear_seg(
+ results,
+ magnitude,
+ self.direction,
+ fill_val=self.seg_ignore_label,
+ interpolation=self.interpolation)
+ self._filter_invalid(results)
+ return results
+
+ def __repr__(self):
+ repr_str = self.__class__.__name__
+ repr_str += f'(level={self.level}, '
+ repr_str += f'img_fill_val={self.img_fill_val}, '
+ repr_str += f'seg_ignore_label={self.seg_ignore_label}, '
+ repr_str += f'prob={self.prob}, '
+ repr_str += f'direction={self.direction}, '
+ repr_str += f'max_shear_magnitude={self.max_shear_magnitude}, '
+ repr_str += f'random_negative_prob={self.random_negative_prob}, '
+ repr_str += f'interpolation={self.interpolation})'
+ return repr_str
+
+
+@PIPELINES.register_module()
+class Rotate(object):
+ """Apply Rotate Transformation to image (and its corresponding bbox, mask,
+ segmentation).
+
+ Args:
+        level (int | float): The level should be in range [0,_MAX_LEVEL].
+ scale (int | float): Isotropic scale factor. Same in
+ ``mmcv.imrotate``.
+ center (int | float | tuple[float]): Center point (w, h) of the
+ rotation in the source image. If None, the center of the
+ image will be used. Same in ``mmcv.imrotate``.
+ img_fill_val (int | float | tuple): The fill value for image border.
+ If float, the same value will be used for all the three
+            channels of the image. If tuple, it should have 3 elements
+            (i.e. equal to the number of channels of the image).
+ seg_ignore_label (int): The fill value used for segmentation map.
+            Note this value must equal ``ignore_label`` in ``semantic_head``
+ of the corresponding config. Default 255.
+        prob (float): The probability of performing the transformation,
+            which should be in range [0, 1].
+ max_rotate_angle (int | float): The maximum angles for rotate
+ transformation.
+ random_negative_prob (float): The probability that turns the
+ offset negative.
+ """
+
+ def __init__(self,
+ level,
+ scale=1,
+ center=None,
+ img_fill_val=128,
+ seg_ignore_label=255,
+ prob=0.5,
+ max_rotate_angle=30,
+ random_negative_prob=0.5):
+ assert isinstance(level, (int, float)), \
+ f'The level must be type int or float. got {type(level)}.'
+ assert 0 <= level <= _MAX_LEVEL, \
+            f'The level should be in range [0,{_MAX_LEVEL}]. got {level}.'
+ assert isinstance(scale, (int, float)), \
+ f'The scale must be type int or float. got type {type(scale)}.'
+ if isinstance(center, (int, float)):
+ center = (center, center)
+ elif isinstance(center, tuple):
+ assert len(center) == 2, 'center with type tuple must have '\
+ f'2 elements. got {len(center)} elements.'
+ else:
+ assert center is None, 'center must be None or type int, '\
+ f'float or tuple, got type {type(center)}.'
+ if isinstance(img_fill_val, (float, int)):
+ img_fill_val = tuple([float(img_fill_val)] * 3)
+ elif isinstance(img_fill_val, tuple):
+ assert len(img_fill_val) == 3, 'img_fill_val as tuple must '\
+ f'have 3 elements. got {len(img_fill_val)}.'
+ img_fill_val = tuple([float(val) for val in img_fill_val])
+ else:
+ raise ValueError(
+ 'img_fill_val must be float or tuple with 3 elements.')
+ assert np.all([0 <= val <= 255 for val in img_fill_val]), \
+ 'all elements of img_fill_val should between range [0,255]. '\
+ f'got {img_fill_val}.'
+ assert 0 <= prob <= 1.0, 'The probability should be in range [0,1]. '\
+            f'got {prob}.'
+ assert isinstance(max_rotate_angle, (int, float)), 'max_rotate_angle '\
+ f'should be type int or float. got type {type(max_rotate_angle)}.'
+ self.level = level
+ self.scale = scale
+ # Rotation angle in degrees. Positive values mean
+ # clockwise rotation.
+ self.angle = level_to_value(level, max_rotate_angle)
+ self.center = center
+ self.img_fill_val = img_fill_val
+ self.seg_ignore_label = seg_ignore_label
+ self.prob = prob
+ self.max_rotate_angle = max_rotate_angle
+ self.random_negative_prob = random_negative_prob
+
+ def _rotate_img(self, results, angle, center=None, scale=1.0):
+ """Rotate the image.
+
+ Args:
+ results (dict): Result dict from loading pipeline.
+ angle (float): Rotation angle in degrees, positive values
+ mean clockwise rotation. Same in ``mmcv.imrotate``.
+ center (tuple[float], optional): Center point (w, h) of the
+ rotation. Same in ``mmcv.imrotate``.
+ scale (int | float): Isotropic scale factor. Same in
+ ``mmcv.imrotate``.
+ """
+ for key in results.get('img_fields', ['img']):
+ img = results[key].copy()
+ img_rotated = mmcv.imrotate(
+ img, angle, center, scale, border_value=self.img_fill_val)
+ results[key] = img_rotated.astype(img.dtype)
+
+ def _rotate_bboxes(self, results, rotate_matrix):
+ """Rotate the bboxes."""
+ h, w, c = results['img_shape']
+ for key in results.get('bbox_fields', []):
+ min_x, min_y, max_x, max_y = np.split(
+ results[key], results[key].shape[-1], axis=-1)
+ coordinates = np.stack([[min_x, min_y], [max_x, min_y],
+ [min_x, max_y],
+ [max_x, max_y]]) # [4, 2, nb_bbox, 1]
+ # pad 1 to convert from format [x, y] to homogeneous
+ # coordinates format [x, y, 1]
+ coordinates = np.concatenate(
+ (coordinates,
+ np.ones((4, 1, coordinates.shape[2], 1), coordinates.dtype)),
+ axis=1) # [4, 3, nb_bbox, 1]
+ coordinates = coordinates.transpose(
+ (2, 0, 1, 3)) # [nb_bbox, 4, 3, 1]
+ rotated_coords = np.matmul(rotate_matrix,
+ coordinates) # [nb_bbox, 4, 2, 1]
+ rotated_coords = rotated_coords[..., 0] # [nb_bbox, 4, 2]
+ min_x, min_y = np.min(
+ rotated_coords[:, :, 0], axis=1), np.min(
+ rotated_coords[:, :, 1], axis=1)
+ max_x, max_y = np.max(
+ rotated_coords[:, :, 0], axis=1), np.max(
+ rotated_coords[:, :, 1], axis=1)
+ min_x, min_y = np.clip(
+ min_x, a_min=0, a_max=w), np.clip(
+ min_y, a_min=0, a_max=h)
+ max_x, max_y = np.clip(
+ max_x, a_min=min_x, a_max=w), np.clip(
+ max_y, a_min=min_y, a_max=h)
+ results[key] = np.stack([min_x, min_y, max_x, max_y],
+ axis=-1).astype(results[key].dtype)
+
+ def _rotate_masks(self,
+ results,
+ angle,
+ center=None,
+ scale=1.0,
+ fill_val=0):
+ """Rotate the masks."""
+ h, w, c = results['img_shape']
+ for key in results.get('mask_fields', []):
+ masks = results[key]
+ results[key] = masks.rotate((h, w), angle, center, scale, fill_val)
+
+ def _rotate_seg(self,
+ results,
+ angle,
+ center=None,
+ scale=1.0,
+ fill_val=255):
+ """Rotate the segmentation map."""
+ for key in results.get('seg_fields', []):
+ seg = results[key].copy()
+ results[key] = mmcv.imrotate(
+ seg, angle, center, scale,
+ border_value=fill_val).astype(seg.dtype)
+
+ def _filter_invalid(self, results, min_bbox_size=0):
+        """Filter out bboxes (and corresponding masks) that become too small
+        after the rotate augmentation."""
+ bbox2label, bbox2mask, _ = bbox2fields()
+ for key in results.get('bbox_fields', []):
+ bbox_w = results[key][:, 2] - results[key][:, 0]
+ bbox_h = results[key][:, 3] - results[key][:, 1]
+ valid_inds = (bbox_w > min_bbox_size) & (bbox_h > min_bbox_size)
+ valid_inds = np.nonzero(valid_inds)[0]
+ results[key] = results[key][valid_inds]
+ # label fields. e.g. gt_labels and gt_labels_ignore
+ label_key = bbox2label.get(key)
+ if label_key in results:
+ results[label_key] = results[label_key][valid_inds]
+ # mask fields, e.g. gt_masks and gt_masks_ignore
+ mask_key = bbox2mask.get(key)
+ if mask_key in results:
+ results[mask_key] = results[mask_key][valid_inds]
+
+ def __call__(self, results):
+ """Call function to rotate images, bounding boxes, masks and semantic
+ segmentation maps.
+
+ Args:
+ results (dict): Result dict from loading pipeline.
+
+ Returns:
+ dict: Rotated results.
+ """
+ if np.random.rand() > self.prob:
+ return results
+ h, w = results['img'].shape[:2]
+ center = self.center
+ if center is None:
+ center = ((w - 1) * 0.5, (h - 1) * 0.5)
+ angle = random_negative(self.angle, self.random_negative_prob)
+ self._rotate_img(results, angle, center, self.scale)
+ rotate_matrix = cv2.getRotationMatrix2D(center, -angle, self.scale)
+ self._rotate_bboxes(results, rotate_matrix)
+ self._rotate_masks(results, angle, center, self.scale, fill_val=0)
+ self._rotate_seg(
+ results, angle, center, self.scale, fill_val=self.seg_ignore_label)
+ self._filter_invalid(results)
+ return results
+
+ def __repr__(self):
+ repr_str = self.__class__.__name__
+ repr_str += f'(level={self.level}, '
+ repr_str += f'scale={self.scale}, '
+ repr_str += f'center={self.center}, '
+ repr_str += f'img_fill_val={self.img_fill_val}, '
+ repr_str += f'seg_ignore_label={self.seg_ignore_label}, '
+ repr_str += f'prob={self.prob}, '
+ repr_str += f'max_rotate_angle={self.max_rotate_angle}, '
+ repr_str += f'random_negative_prob={self.random_negative_prob})'
+ return repr_str
+
+
+@PIPELINES.register_module()
+class Translate(object):
+ """Translate the images, bboxes, masks and segmentation maps horizontally
+ or vertically.
+
+ Args:
+ level (int | float): The level for Translate and should be in
+ range [0,_MAX_LEVEL].
+ prob (float): The probability for performing translation and
+ should be in range [0, 1].
+ img_fill_val (int | float | tuple): The filled value for image
+ border. If float, the same fill value will be used for all
+            the three channels of the image. If tuple, it should have 3
+            elements (i.e. equal to the number of channels of the image).
+ seg_ignore_label (int): The fill value used for segmentation map.
+            Note this value must equal ``ignore_label`` in ``semantic_head``
+ of the corresponding config. Default 255.
+ direction (str): The translate direction, either "horizontal"
+ or "vertical".
+        max_translate_offset (int | float): The maximum pixel offset for
+ Translate.
+ random_negative_prob (float): The probability that turns the
+ offset negative.
+        min_size (int | float): The minimum bbox size (in pixels) used to
+            filter invalid bboxes after the translation.
+ """
+
+ def __init__(self,
+ level,
+ prob=0.5,
+ img_fill_val=128,
+ seg_ignore_label=255,
+ direction='horizontal',
+ max_translate_offset=250.,
+ random_negative_prob=0.5,
+ min_size=0):
+ assert isinstance(level, (int, float)), \
+ 'The level must be type int or float.'
+ assert 0 <= level <= _MAX_LEVEL, \
+ 'The level used for calculating Translate\'s offset should be ' \
+ 'in range [0,_MAX_LEVEL]'
+ assert 0 <= prob <= 1.0, \
+ 'The probability of translation should be in range [0, 1].'
+ if isinstance(img_fill_val, (float, int)):
+ img_fill_val = tuple([float(img_fill_val)] * 3)
+ elif isinstance(img_fill_val, tuple):
+ assert len(img_fill_val) == 3, \
+ 'img_fill_val as tuple must have 3 elements.'
+ img_fill_val = tuple([float(val) for val in img_fill_val])
+ else:
+ raise ValueError('img_fill_val must be type float or tuple.')
+ assert np.all([0 <= val <= 255 for val in img_fill_val]), \
+ 'all elements of img_fill_val should between range [0,255].'
+ assert direction in ('horizontal', 'vertical'), \
+ 'direction should be "horizontal" or "vertical".'
+ assert isinstance(max_translate_offset, (int, float)), \
+ 'The max_translate_offset must be type int or float.'
+ # the offset used for translation
+ self.offset = int(level_to_value(level, max_translate_offset))
+ self.level = level
+ self.prob = prob
+ self.img_fill_val = img_fill_val
+ self.seg_ignore_label = seg_ignore_label
+ self.direction = direction
+ self.max_translate_offset = max_translate_offset
+ self.random_negative_prob = random_negative_prob
+ self.min_size = min_size
+
+ def _translate_img(self, results, offset, direction='horizontal'):
+ """Translate the image.
+
+ Args:
+ results (dict): Result dict from loading pipeline.
+ offset (int | float): The offset for translate.
+ direction (str): The translate direction, either "horizontal"
+ or "vertical".
+ """
+ for key in results.get('img_fields', ['img']):
+ img = results[key].copy()
+ results[key] = mmcv.imtranslate(
+ img, offset, direction, self.img_fill_val).astype(img.dtype)
+
+ def _translate_bboxes(self, results, offset):
+ """Shift bboxes horizontally or vertically, according to offset."""
+ h, w, c = results['img_shape']
+ for key in results.get('bbox_fields', []):
+ min_x, min_y, max_x, max_y = np.split(
+ results[key], results[key].shape[-1], axis=-1)
+ if self.direction == 'horizontal':
+ min_x = np.maximum(0, min_x + offset)
+ max_x = np.minimum(w, max_x + offset)
+ elif self.direction == 'vertical':
+ min_y = np.maximum(0, min_y + offset)
+ max_y = np.minimum(h, max_y + offset)
+
+ # the boxes translated outside of image will be filtered along with
+ # the corresponding masks, by invoking ``_filter_invalid``.
+ results[key] = np.concatenate([min_x, min_y, max_x, max_y],
+ axis=-1)
+
+ def _translate_masks(self,
+ results,
+ offset,
+ direction='horizontal',
+ fill_val=0):
+ """Translate masks horizontally or vertically."""
+ h, w, c = results['img_shape']
+ for key in results.get('mask_fields', []):
+ masks = results[key]
+ results[key] = masks.translate((h, w), offset, direction, fill_val)
+
+ def _translate_seg(self,
+ results,
+ offset,
+ direction='horizontal',
+ fill_val=255):
+ """Translate segmentation maps horizontally or vertically."""
+ for key in results.get('seg_fields', []):
+ seg = results[key].copy()
+ results[key] = mmcv.imtranslate(seg, offset, direction,
+ fill_val).astype(seg.dtype)
+
+ def _filter_invalid(self, results, min_size=0):
+        """Filter out bboxes and masks that are too small or translated out
+        of the image."""
+ bbox2label, bbox2mask, _ = bbox2fields()
+ for key in results.get('bbox_fields', []):
+ bbox_w = results[key][:, 2] - results[key][:, 0]
+ bbox_h = results[key][:, 3] - results[key][:, 1]
+ valid_inds = (bbox_w > min_size) & (bbox_h > min_size)
+ valid_inds = np.nonzero(valid_inds)[0]
+ results[key] = results[key][valid_inds]
+ # label fields. e.g. gt_labels and gt_labels_ignore
+ label_key = bbox2label.get(key)
+ if label_key in results:
+ results[label_key] = results[label_key][valid_inds]
+ # mask fields, e.g. gt_masks and gt_masks_ignore
+ mask_key = bbox2mask.get(key)
+ if mask_key in results:
+ results[mask_key] = results[mask_key][valid_inds]
+ return results
+
+ def __call__(self, results):
+ """Call function to translate images, bounding boxes, masks and
+ semantic segmentation maps.
+
+ Args:
+ results (dict): Result dict from loading pipeline.
+
+ Returns:
+ dict: Translated results.
+ """
+ if np.random.rand() > self.prob:
+ return results
+ offset = random_negative(self.offset, self.random_negative_prob)
+ self._translate_img(results, offset, self.direction)
+ self._translate_bboxes(results, offset)
+        # fill_val defaults to 0 for BitmapMasks and None for PolygonMasks.
+ self._translate_masks(results, offset, self.direction)
+ # fill_val set to ``seg_ignore_label`` for the ignored value
+ # of segmentation map.
+ self._translate_seg(
+ results, offset, self.direction, fill_val=self.seg_ignore_label)
+ self._filter_invalid(results, min_size=self.min_size)
+ return results
+
+
+@PIPELINES.register_module()
+class ColorTransform(object):
+ """Apply Color transformation to image. The bboxes, masks, and
+ segmentations are not modified.
+
+ Args:
+ level (int | float): Should be in range [0,_MAX_LEVEL].
+ prob (float): The probability for performing Color transformation.
+ """
+
+ def __init__(self, level, prob=0.5):
+ assert isinstance(level, (int, float)), \
+ 'The level must be type int or float.'
+ assert 0 <= level <= _MAX_LEVEL, \
+ 'The level should be in range [0,_MAX_LEVEL].'
+ assert 0 <= prob <= 1.0, \
+ 'The probability should be in range [0,1].'
+ self.level = level
+ self.prob = prob
+ self.factor = enhance_level_to_value(level)
+
+ def _adjust_color_img(self, results, factor=1.0):
+ """Apply Color transformation to image."""
+ for key in results.get('img_fields', ['img']):
+            # NOTE: by default the image is assumed to be in BGR format
+ img = results[key]
+ results[key] = mmcv.adjust_color(img, factor).astype(img.dtype)
+
+ def __call__(self, results):
+ """Call function for Color transformation.
+
+ Args:
+ results (dict): Result dict from loading pipeline.
+
+ Returns:
+ dict: Colored results.
+ """
+ if np.random.rand() > self.prob:
+ return results
+ self._adjust_color_img(results, self.factor)
+ return results
+
+ def __repr__(self):
+ repr_str = self.__class__.__name__
+ repr_str += f'(level={self.level}, '
+ repr_str += f'prob={self.prob})'
+ return repr_str
+
+
+@PIPELINES.register_module()
+class EqualizeTransform(object):
+ """Apply Equalize transformation to image. The bboxes, masks and
+ segmentations are not modified.
+
+ Args:
+ prob (float): The probability for performing Equalize transformation.
+ """
+
+ def __init__(self, prob=0.5):
+ assert 0 <= prob <= 1.0, \
+ 'The probability should be in range [0,1].'
+ self.prob = prob
+
+ def _imequalize(self, results):
+ """Equalizes the histogram of one image."""
+ for key in results.get('img_fields', ['img']):
+ img = results[key]
+ results[key] = mmcv.imequalize(img).astype(img.dtype)
+
+ def __call__(self, results):
+ """Call function for Equalize transformation.
+
+ Args:
+ results (dict): Results dict from loading pipeline.
+
+ Returns:
+ dict: Results after the transformation.
+ """
+ if np.random.rand() > self.prob:
+ return results
+ self._imequalize(results)
+ return results
+
+ def __repr__(self):
+ repr_str = self.__class__.__name__
+        repr_str += f'(prob={self.prob})'
+        return repr_str
+
+
+@PIPELINES.register_module()
+class BrightnessTransform(object):
+ """Apply Brightness transformation to image. The bboxes, masks and
+ segmentations are not modified.
+
+ Args:
+ level (int | float): Should be in range [0,_MAX_LEVEL].
+ prob (float): The probability for performing Brightness transformation.
+ """
+
+ def __init__(self, level, prob=0.5):
+ assert isinstance(level, (int, float)), \
+ 'The level must be type int or float.'
+ assert 0 <= level <= _MAX_LEVEL, \
+ 'The level should be in range [0,_MAX_LEVEL].'
+ assert 0 <= prob <= 1.0, \
+ 'The probability should be in range [0,1].'
+ self.level = level
+ self.prob = prob
+ self.factor = enhance_level_to_value(level)
+
+ def _adjust_brightness_img(self, results, factor=1.0):
+ """Adjust the brightness of image."""
+ for key in results.get('img_fields', ['img']):
+ img = results[key]
+ results[key] = mmcv.adjust_brightness(img,
+ factor).astype(img.dtype)
+
+ def __call__(self, results):
+ """Call function for Brightness transformation.
+
+ Args:
+ results (dict): Results dict from loading pipeline.
+
+ Returns:
+ dict: Results after the transformation.
+ """
+ if np.random.rand() > self.prob:
+ return results
+ self._adjust_brightness_img(results, self.factor)
+ return results
+
+ def __repr__(self):
+ repr_str = self.__class__.__name__
+ repr_str += f'(level={self.level}, '
+ repr_str += f'prob={self.prob})'
+ return repr_str
+
+
+@PIPELINES.register_module()
+class ContrastTransform(object):
+ """Apply Contrast transformation to image. The bboxes, masks and
+ segmentations are not modified.
+
+ Args:
+ level (int | float): Should be in range [0,_MAX_LEVEL].
+ prob (float): The probability for performing Contrast transformation.
+ """
+
+ def __init__(self, level, prob=0.5):
+ assert isinstance(level, (int, float)), \
+ 'The level must be type int or float.'
+ assert 0 <= level <= _MAX_LEVEL, \
+ 'The level should be in range [0,_MAX_LEVEL].'
+ assert 0 <= prob <= 1.0, \
+ 'The probability should be in range [0,1].'
+ self.level = level
+ self.prob = prob
+ self.factor = enhance_level_to_value(level)
+
+ def _adjust_contrast_img(self, results, factor=1.0):
+ """Adjust the image contrast."""
+ for key in results.get('img_fields', ['img']):
+ img = results[key]
+ results[key] = mmcv.adjust_contrast(img, factor).astype(img.dtype)
+
+ def __call__(self, results):
+ """Call function for Contrast transformation.
+
+ Args:
+ results (dict): Results dict from loading pipeline.
+
+ Returns:
+ dict: Results after the transformation.
+ """
+ if np.random.rand() > self.prob:
+ return results
+ self._adjust_contrast_img(results, self.factor)
+ return results
+
+ def __repr__(self):
+ repr_str = self.__class__.__name__
+ repr_str += f'(level={self.level}, '
+ repr_str += f'prob={self.prob})'
+ return repr_str
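
For reference, `level_to_value` and `enhance_level_to_value` scale an integer level in `[0, _MAX_LEVEL]` to a transform magnitude, and `AutoAugment` picks one policy at random per call. Below is a minimal sketch under the assumption that `mmcv` and the vendored `mmdet_null` package import cleanly; the policy levels, probabilities and dummy inputs are illustrative only:

```python
import numpy as np
from annotator.uniformer.mmdet_null.datasets.pipelines.auto_augment import (
    AutoAugment, enhance_level_to_value, level_to_value)

# level 10 maps to the maximum magnitude, level 5 to half of it.
assert level_to_value(10, max_value=0.3) == 0.3
assert abs(level_to_value(5, max_value=0.3) - 0.15) < 1e-6
# enhancement factor: level 0 -> 0.1, level 10 -> 1.9
print(enhance_level_to_value(0), enhance_level_to_value(10))

# Two single-transform policies; AutoAugment chooses one at random per call.
policies = [
    [dict(type='Shear', level=4, prob=0.6, direction='horizontal')],
    [dict(type='Rotate', level=6, prob=0.6, max_rotate_angle=30)],
]
aug = AutoAugment(policies)

# Dummy sample with the fields the transforms above expect.
results = dict(
    img=np.random.randint(0, 255, (100, 100, 3), dtype=np.uint8),
    img_shape=(100, 100, 3),
    img_fields=['img'],
    gt_bboxes=np.array([[10., 10., 60., 60.]], dtype=np.float32),
    gt_labels=np.array([1], dtype=np.int64),
    bbox_fields=['gt_bboxes'],
    mask_fields=[],
    seg_fields=[])
results = aug(results)
print(results['gt_bboxes'])
```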
diff --git a/annotator/uniformer/mmdet_null/datasets/pipelines/compose.py b/annotator/uniformer/mmdet_null/datasets/pipelines/compose.py
new file mode 100644
index 0000000000000000000000000000000000000000..ca48f1c935755c486edc2744e1713e2b5ba3cdc8
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/datasets/pipelines/compose.py
@@ -0,0 +1,51 @@
+import collections
+
+from mmcv.utils import build_from_cfg
+
+from ..builder import PIPELINES
+
+
+@PIPELINES.register_module()
+class Compose(object):
+ """Compose multiple transforms sequentially.
+
+ Args:
+ transforms (Sequence[dict | callable]): Sequence of transform object or
+ config dict to be composed.
+ """
+
+ def __init__(self, transforms):
+ assert isinstance(transforms, collections.abc.Sequence)
+ self.transforms = []
+ for transform in transforms:
+ if isinstance(transform, dict):
+ transform = build_from_cfg(transform, PIPELINES)
+ self.transforms.append(transform)
+ elif callable(transform):
+ self.transforms.append(transform)
+ else:
+ raise TypeError('transform must be callable or a dict')
+
+ def __call__(self, data):
+ """Call function to apply transforms sequentially.
+
+ Args:
+ data (dict): A result dict contains the data to transform.
+
+ Returns:
+ dict: Transformed data.
+ """
+
+ for t in self.transforms:
+ data = t(data)
+ if data is None:
+ return None
+ return data
+
+ def __repr__(self):
+ format_string = self.__class__.__name__ + '('
+ for t in self.transforms:
+ format_string += '\n'
+ format_string += f' {t}'
+ format_string += '\n)'
+ return format_string
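
`Compose` accepts a mix of registry config dicts and plain callables, and a transform that returns `None` aborts the pipeline for that sample (the `None` is propagated to the caller). A small sketch; `drop_small_images` is a hypothetical callable, and the example assumes the vendored package and `mmcv` import cleanly:

```python
from annotator.uniformer.mmdet_null.datasets.pipelines import Compose

def drop_small_images(results):
    # Hypothetical filter: returning None stops the pipeline for this sample.
    h, w = results['img_shape'][:2]
    return None if min(h, w) < 32 else results

pipeline = Compose([
    dict(type='LoadImageFromFile'),   # built via the PIPELINES registry
    drop_small_images,                # plain callables are used as-is
    dict(type='ImageToTensor', keys=['img']),
])
print(pipeline)  # __repr__ lists the composed transforms
```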
diff --git a/annotator/uniformer/mmdet_null/datasets/pipelines/formating.py b/annotator/uniformer/mmdet_null/datasets/pipelines/formating.py
new file mode 100644
index 0000000000000000000000000000000000000000..5781341bd48766a740f23ebba7a85cf8993642d7
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/datasets/pipelines/formating.py
@@ -0,0 +1,364 @@
+from collections.abc import Sequence
+
+import mmcv
+import numpy as np
+import torch
+from mmcv.parallel import DataContainer as DC
+
+from ..builder import PIPELINES
+
+
+def to_tensor(data):
+ """Convert objects of various python types to :obj:`torch.Tensor`.
+
+ Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`,
+ :class:`Sequence`, :class:`int` and :class:`float`.
+
+ Args:
+ data (torch.Tensor | numpy.ndarray | Sequence | int | float): Data to
+ be converted.
+ """
+
+ if isinstance(data, torch.Tensor):
+ return data
+ elif isinstance(data, np.ndarray):
+ return torch.from_numpy(data)
+ elif isinstance(data, Sequence) and not mmcv.is_str(data):
+ return torch.tensor(data)
+ elif isinstance(data, int):
+ return torch.LongTensor([data])
+ elif isinstance(data, float):
+ return torch.FloatTensor([data])
+ else:
+ raise TypeError(f'type {type(data)} cannot be converted to tensor.')
+
+
+@PIPELINES.register_module()
+class ToTensor(object):
+ """Convert some results to :obj:`torch.Tensor` by given keys.
+
+ Args:
+ keys (Sequence[str]): Keys that need to be converted to Tensor.
+ """
+
+ def __init__(self, keys):
+ self.keys = keys
+
+ def __call__(self, results):
+ """Call function to convert data in results to :obj:`torch.Tensor`.
+
+ Args:
+ results (dict): Result dict contains the data to convert.
+
+ Returns:
+ dict: The result dict contains the data converted
+ to :obj:`torch.Tensor`.
+ """
+ for key in self.keys:
+ results[key] = to_tensor(results[key])
+ return results
+
+ def __repr__(self):
+ return self.__class__.__name__ + f'(keys={self.keys})'
+
+
+@PIPELINES.register_module()
+class ImageToTensor(object):
+ """Convert image to :obj:`torch.Tensor` by given keys.
+
+ The dimension order of input image is (H, W, C). The pipeline will convert
+    it to (C, H, W). If only 2 dimensions (H, W) are given, the output will be
+ (1, H, W).
+
+ Args:
+ keys (Sequence[str]): Key of images to be converted to Tensor.
+ """
+
+ def __init__(self, keys):
+ self.keys = keys
+
+ def __call__(self, results):
+ """Call function to convert image in results to :obj:`torch.Tensor` and
+ transpose the channel order.
+
+ Args:
+ results (dict): Result dict contains the image data to convert.
+
+ Returns:
+ dict: The result dict contains the image converted
+ to :obj:`torch.Tensor` and transposed to (C, H, W) order.
+ """
+ for key in self.keys:
+ img = results[key]
+ if len(img.shape) < 3:
+ img = np.expand_dims(img, -1)
+ results[key] = to_tensor(img.transpose(2, 0, 1))
+ return results
+
+ def __repr__(self):
+ return self.__class__.__name__ + f'(keys={self.keys})'
+
+
+@PIPELINES.register_module()
+class Transpose(object):
+ """Transpose some results by given keys.
+
+ Args:
+ keys (Sequence[str]): Keys of results to be transposed.
+ order (Sequence[int]): Order of transpose.
+ """
+
+ def __init__(self, keys, order):
+ self.keys = keys
+ self.order = order
+
+ def __call__(self, results):
+ """Call function to transpose the channel order of data in results.
+
+ Args:
+ results (dict): Result dict contains the data to transpose.
+
+ Returns:
+ dict: The result dict contains the data transposed to \
+ ``self.order``.
+ """
+ for key in self.keys:
+ results[key] = results[key].transpose(self.order)
+ return results
+
+ def __repr__(self):
+ return self.__class__.__name__ + \
+ f'(keys={self.keys}, order={self.order})'
+
+
+@PIPELINES.register_module()
+class ToDataContainer(object):
+ """Convert results to :obj:`mmcv.DataContainer` by given fields.
+
+ Args:
+ fields (Sequence[dict]): Each field is a dict like
+ ``dict(key='xxx', **kwargs)``. The ``key`` in result will
+ be converted to :obj:`mmcv.DataContainer` with ``**kwargs``.
+ Default: ``(dict(key='img', stack=True), dict(key='gt_bboxes'),
+ dict(key='gt_labels'))``.
+ """
+
+ def __init__(self,
+ fields=(dict(key='img', stack=True), dict(key='gt_bboxes'),
+ dict(key='gt_labels'))):
+ self.fields = fields
+
+ def __call__(self, results):
+ """Call function to convert data in results to
+ :obj:`mmcv.DataContainer`.
+
+ Args:
+ results (dict): Result dict contains the data to convert.
+
+ Returns:
+ dict: The result dict contains the data converted to \
+ :obj:`mmcv.DataContainer`.
+ """
+
+ for field in self.fields:
+ field = field.copy()
+ key = field.pop('key')
+ results[key] = DC(results[key], **field)
+ return results
+
+ def __repr__(self):
+ return self.__class__.__name__ + f'(fields={self.fields})'
+
+
+@PIPELINES.register_module()
+class DefaultFormatBundle(object):
+ """Default formatting bundle.
+
+ It simplifies the pipeline of formatting common fields, including "img",
+ "proposals", "gt_bboxes", "gt_labels", "gt_masks" and "gt_semantic_seg".
+ These fields are formatted as follows.
+
+ - img: (1)transpose, (2)to tensor, (3)to DataContainer (stack=True)
+ - proposals: (1)to tensor, (2)to DataContainer
+ - gt_bboxes: (1)to tensor, (2)to DataContainer
+ - gt_bboxes_ignore: (1)to tensor, (2)to DataContainer
+ - gt_labels: (1)to tensor, (2)to DataContainer
+ - gt_masks: (1)to tensor, (2)to DataContainer (cpu_only=True)
+ - gt_semantic_seg: (1)unsqueeze dim-0 (2)to tensor, \
+ (3)to DataContainer (stack=True)
+ """
+
+ def __call__(self, results):
+ """Call function to transform and format common fields in results.
+
+ Args:
+ results (dict): Result dict contains the data to convert.
+
+ Returns:
+ dict: The result dict contains the data that is formatted with \
+ default bundle.
+ """
+
+ if 'img' in results:
+ img = results['img']
+ # add default meta keys
+ results = self._add_default_meta_keys(results)
+ if len(img.shape) < 3:
+ img = np.expand_dims(img, -1)
+ img = np.ascontiguousarray(img.transpose(2, 0, 1))
+ results['img'] = DC(to_tensor(img), stack=True)
+ for key in ['proposals', 'gt_bboxes', 'gt_bboxes_ignore', 'gt_labels']:
+ if key not in results:
+ continue
+ results[key] = DC(to_tensor(results[key]))
+ if 'gt_masks' in results:
+ results['gt_masks'] = DC(results['gt_masks'], cpu_only=True)
+ if 'gt_semantic_seg' in results:
+ results['gt_semantic_seg'] = DC(
+ to_tensor(results['gt_semantic_seg'][None, ...]), stack=True)
+ return results
+
+ def _add_default_meta_keys(self, results):
+ """Add default meta keys.
+
+        We set default meta keys including `pad_shape`, `scale_factor` and
+        `img_norm_cfg` to cover the case where none of `Resize`, `Normalize`
+        and `Pad` are applied in the pipeline.
+
+ Args:
+ results (dict): Result dict contains the data to convert.
+
+ Returns:
+ results (dict): Updated result dict contains the data to convert.
+ """
+ img = results['img']
+ results.setdefault('pad_shape', img.shape)
+ results.setdefault('scale_factor', 1.0)
+ num_channels = 1 if len(img.shape) < 3 else img.shape[2]
+ results.setdefault(
+ 'img_norm_cfg',
+ dict(
+ mean=np.zeros(num_channels, dtype=np.float32),
+ std=np.ones(num_channels, dtype=np.float32),
+ to_rgb=False))
+ return results
+
+ def __repr__(self):
+ return self.__class__.__name__
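+
+
+# Illustrative sketch (not part of the upstream mmdet source): a minimal check
+# of what DefaultFormatBundle produces for a bare result dict. The dummy image
+# and its shape are assumptions chosen only for demonstration.
+def _demo_default_format_bundle():
+    dummy = dict(img=np.zeros((8, 8, 3), dtype=np.uint8))
+    out = DefaultFormatBundle()(dummy)
+    # 'img' is now a DataContainer wrapping a (3, 8, 8) tensor, and default
+    # meta keys such as 'pad_shape' and 'img_norm_cfg' have been filled in.
+    return out['img'].data.shape, out['pad_shape']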
+
+
+@PIPELINES.register_module()
+class Collect(object):
+ """Collect data from the loader relevant to the specific task.
+
+    This is usually the last stage of the data loader pipeline. Typically
+    ``keys`` is set to some subset of "img", "proposals", "gt_bboxes",
+    "gt_bboxes_ignore", "gt_labels", and/or "gt_masks".
+
+    The "img_meta" item is always populated. The contents of the "img_meta"
+    dictionary depend on "meta_keys". By default this includes:
+
+ - "img_shape": shape of the image input to the network as a tuple \
+ (h, w, c). Note that images may be zero padded on the \
+ bottom/right if the batch tensor is larger than this shape.
+
+ - "scale_factor": a float indicating the preprocessing scale
+
+ - "flip": a boolean indicating if image flip transform was used
+
+ - "filename": path to the image file
+
+ - "ori_shape": original shape of the image as a tuple (h, w, c)
+
+ - "pad_shape": image shape after padding
+
+ - "img_norm_cfg": a dict of normalization information:
+
+ - mean - per channel mean subtraction
+ - std - per channel std divisor
+ - to_rgb - bool indicating if bgr was converted to rgb
+
+ Args:
+ keys (Sequence[str]): Keys of results to be collected in ``data``.
+ meta_keys (Sequence[str], optional): Meta keys to be converted to
+ ``mmcv.DataContainer`` and collected in ``data[img_metas]``.
+ Default: ``('filename', 'ori_filename', 'ori_shape', 'img_shape',
+ 'pad_shape', 'scale_factor', 'flip', 'flip_direction',
+ 'img_norm_cfg')``
+ """
+
+ def __init__(self,
+ keys,
+ meta_keys=('filename', 'ori_filename', 'ori_shape',
+ 'img_shape', 'pad_shape', 'scale_factor', 'flip',
+ 'flip_direction', 'img_norm_cfg')):
+ self.keys = keys
+ self.meta_keys = meta_keys
+
+ def __call__(self, results):
+ """Call function to collect keys in results. The keys in ``meta_keys``
+ will be converted to :obj:mmcv.DataContainer.
+
+ Args:
+ results (dict): Result dict contains the data to collect.
+
+ Returns:
+ dict: The result dict contains the following keys
+
+            - keys in ``self.keys``
+ - ``img_metas``
+ """
+
+ data = {}
+ img_meta = {}
+ for key in self.meta_keys:
+ img_meta[key] = results[key]
+ data['img_metas'] = DC(img_meta, cpu_only=True)
+ for key in self.keys:
+ data[key] = results[key]
+ return data
+
+ def __repr__(self):
+ return self.__class__.__name__ + \
+ f'(keys={self.keys}, meta_keys={self.meta_keys})'
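+
+
+# Illustrative sketch (not part of the upstream mmdet source): shows that only
+# the requested keys plus the wrapped 'img_metas' DataContainer survive
+# Collect. The placeholder values below are assumptions for demonstration.
+def _demo_collect():
+    dummy = dict(img='placeholder', ori_shape=(8, 8, 3))
+    out = Collect(keys=['img'], meta_keys=('ori_shape',))(dummy)
+    return sorted(out), out['img_metas'].data  # ['img', 'img_metas'], {...}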
+
+
+@PIPELINES.register_module()
+class WrapFieldsToLists(object):
+ """Wrap fields of the data dictionary into lists for evaluation.
+
+ This class can be used as a last step of a test or validation
+ pipeline for single image evaluation or inference.
+
+ Example:
+ >>> test_pipeline = [
+ >>> dict(type='LoadImageFromFile'),
+        >>> dict(type='Normalize',
+        >>>      mean=[123.675, 116.28, 103.53],
+        >>>      std=[58.395, 57.12, 57.375],
+        >>>      to_rgb=True),
+ >>> dict(type='Pad', size_divisor=32),
+ >>> dict(type='ImageToTensor', keys=['img']),
+ >>> dict(type='Collect', keys=['img']),
+ >>> dict(type='WrapFieldsToLists')
+ >>> ]
+ """
+
+ def __call__(self, results):
+ """Call function to wrap fields into lists.
+
+ Args:
+ results (dict): Result dict contains the data to wrap.
+
+ Returns:
+            dict: The result dict where each value is wrapped \
+                into a list.
+ """
+
+ # Wrap dict fields into lists
+ for key, val in results.items():
+ results[key] = [val]
+ return results
+
+ def __repr__(self):
+ return f'{self.__class__.__name__}()'
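+
+
+# Illustrative sketch (not part of the upstream mmdet source): every value in
+# the result dict is wrapped into a single-element list, matching the
+# single-image evaluation format described above.
+def _demo_wrap_fields_to_lists():
+    out = WrapFieldsToLists()(dict(img='placeholder', img_shape=(8, 8, 3)))
+    return out  # {'img': ['placeholder'], 'img_shape': [(8, 8, 3)]}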
diff --git a/annotator/uniformer/mmdet_null/datasets/pipelines/instaboost.py b/annotator/uniformer/mmdet_null/datasets/pipelines/instaboost.py
new file mode 100644
index 0000000000000000000000000000000000000000..38b6819f60587a6e0c0f6d57bfda32bb3a7a4267
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/datasets/pipelines/instaboost.py
@@ -0,0 +1,98 @@
+import numpy as np
+
+from ..builder import PIPELINES
+
+
+@PIPELINES.register_module()
+class InstaBoost(object):
+ r"""Data augmentation method in `InstaBoost: Boosting Instance
+ Segmentation Via Probability Map Guided Copy-Pasting
+ `_.
+
+ Refer to https://github.com/GothicAi/Instaboost for implementation details.
+ """
+
+ def __init__(self,
+ action_candidate=('normal', 'horizontal', 'skip'),
+ action_prob=(1, 0, 0),
+ scale=(0.8, 1.2),
+ dx=15,
+ dy=15,
+ theta=(-1, 1),
+ color_prob=0.5,
+ hflag=False,
+ aug_ratio=0.5):
+ try:
+ import instaboostfast as instaboost
+ except ImportError:
+ raise ImportError(
+ 'Please run "pip install instaboostfast" '
+ 'to install instaboostfast first for instaboost augmentation.')
+ self.cfg = instaboost.InstaBoostConfig(action_candidate, action_prob,
+ scale, dx, dy, theta,
+ color_prob, hflag)
+ self.aug_ratio = aug_ratio
+
+ def _load_anns(self, results):
+ labels = results['ann_info']['labels']
+ masks = results['ann_info']['masks']
+ bboxes = results['ann_info']['bboxes']
+ n = len(labels)
+
+ anns = []
+ for i in range(n):
+ label = labels[i]
+ bbox = bboxes[i]
+ mask = masks[i]
+ x1, y1, x2, y2 = bbox
+ # assert (x2 - x1) >= 1 and (y2 - y1) >= 1
+ bbox = [x1, y1, x2 - x1, y2 - y1]
+ anns.append({
+ 'category_id': label,
+ 'segmentation': mask,
+ 'bbox': bbox
+ })
+
+ return anns
+
+ def _parse_anns(self, results, anns, img):
+ gt_bboxes = []
+ gt_labels = []
+ gt_masks_ann = []
+ for ann in anns:
+ x1, y1, w, h = ann['bbox']
+ # TODO: more essential bug need to be fixed in instaboost
+ if w <= 0 or h <= 0:
+ continue
+ bbox = [x1, y1, x1 + w, y1 + h]
+ gt_bboxes.append(bbox)
+ gt_labels.append(ann['category_id'])
+ gt_masks_ann.append(ann['segmentation'])
+ gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
+ gt_labels = np.array(gt_labels, dtype=np.int64)
+ results['ann_info']['labels'] = gt_labels
+ results['ann_info']['bboxes'] = gt_bboxes
+ results['ann_info']['masks'] = gt_masks_ann
+ results['img'] = img
+ return results
+
+ def __call__(self, results):
+ img = results['img']
+ orig_type = img.dtype
+ anns = self._load_anns(results)
+ if np.random.choice([0, 1], p=[1 - self.aug_ratio, self.aug_ratio]):
+ try:
+ import instaboostfast as instaboost
+ except ImportError:
+ raise ImportError('Please run "pip install instaboostfast" '
+ 'to install instaboostfast first.')
+ anns, img = instaboost.get_new_data(
+ anns, img.astype(np.uint8), self.cfg, background=None)
+
+ results = self._parse_anns(results, anns, img.astype(orig_type))
+ return results
+
+ def __repr__(self):
+ repr_str = self.__class__.__name__
+ repr_str += f'(cfg={self.cfg}, aug_ratio={self.aug_ratio})'
+ return repr_str
diff --git a/annotator/uniformer/mmdet_null/datasets/pipelines/loading.py b/annotator/uniformer/mmdet_null/datasets/pipelines/loading.py
new file mode 100644
index 0000000000000000000000000000000000000000..cfae701da3dd48c9a02e11b6a6f7cc627221fede
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/datasets/pipelines/loading.py
@@ -0,0 +1,458 @@
+import os.path as osp
+
+import mmcv
+import numpy as np
+import pycocotools.mask as maskUtils
+
+from annotator.uniformer.mmdet.core import BitmapMasks, PolygonMasks
+from ..builder import PIPELINES
+
+
+@PIPELINES.register_module()
+class LoadImageFromFile(object):
+ """Load an image from file.
+
+ Required keys are "img_prefix" and "img_info" (a dict that must contain the
+ key "filename"). Added or updated keys are "filename", "img", "img_shape",
+ "ori_shape" (same as `img_shape`), "pad_shape" (same as `img_shape`),
+ "scale_factor" (1.0) and "img_norm_cfg" (means=0 and stds=1).
+
+ Args:
+ to_float32 (bool): Whether to convert the loaded image to a float32
+            numpy array. If set to False, the loaded image is a uint8 array.
+ Defaults to False.
+ color_type (str): The flag argument for :func:`mmcv.imfrombytes`.
+ Defaults to 'color'.
+ file_client_args (dict): Arguments to instantiate a FileClient.
+ See :class:`mmcv.fileio.FileClient` for details.
+ Defaults to ``dict(backend='disk')``.
+ """
+
+ def __init__(self,
+ to_float32=False,
+ color_type='color',
+ file_client_args=dict(backend='disk')):
+ self.to_float32 = to_float32
+ self.color_type = color_type
+ self.file_client_args = file_client_args.copy()
+ self.file_client = None
+
+ def __call__(self, results):
+ """Call functions to load image and get image meta information.
+
+ Args:
+ results (dict): Result dict from :obj:`mmdet.CustomDataset`.
+
+ Returns:
+ dict: The dict contains loaded image and meta information.
+ """
+
+ if self.file_client is None:
+ self.file_client = mmcv.FileClient(**self.file_client_args)
+
+ if results['img_prefix'] is not None:
+ filename = osp.join(results['img_prefix'],
+ results['img_info']['filename'])
+ else:
+ filename = results['img_info']['filename']
+
+ img_bytes = self.file_client.get(filename)
+ img = mmcv.imfrombytes(img_bytes, flag=self.color_type)
+ if self.to_float32:
+ img = img.astype(np.float32)
+
+ results['filename'] = filename
+ results['ori_filename'] = results['img_info']['filename']
+ results['img'] = img
+ results['img_shape'] = img.shape
+ results['ori_shape'] = img.shape
+ results['img_fields'] = ['img']
+ return results
+
+ def __repr__(self):
+ repr_str = (f'{self.__class__.__name__}('
+ f'to_float32={self.to_float32}, '
+ f"color_type='{self.color_type}', "
+ f'file_client_args={self.file_client_args})')
+ return repr_str
+
+
+@PIPELINES.register_module()
+class LoadImageFromWebcam(LoadImageFromFile):
+ """Load an image from webcam.
+
+    Similar to :obj:`LoadImageFromFile`, but the image read from the webcam is
+    already stored in ``results['img']``.
+ """
+
+ def __call__(self, results):
+ """Call functions to add image meta information.
+
+ Args:
+ results (dict): Result dict with Webcam read image in
+ ``results['img']``.
+
+ Returns:
+ dict: The dict contains loaded image and meta information.
+ """
+
+ img = results['img']
+ if self.to_float32:
+ img = img.astype(np.float32)
+
+ results['filename'] = None
+ results['ori_filename'] = None
+ results['img'] = img
+ results['img_shape'] = img.shape
+ results['ori_shape'] = img.shape
+ results['img_fields'] = ['img']
+ return results
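+
+
+# Illustrative sketch (not part of the upstream mmdet source): a frame that is
+# already in memory only gets its meta information filled in; the dummy frame
+# below is an assumption for demonstration.
+def _demo_load_image_from_webcam():
+    frame = np.zeros((8, 8, 3), dtype=np.uint8)
+    out = LoadImageFromWebcam()(dict(img=frame))
+    return out['filename'], out['img_shape']  # None, (8, 8, 3)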
+
+
+@PIPELINES.register_module()
+class LoadMultiChannelImageFromFiles(object):
+ """Load multi-channel images from a list of separate channel files.
+
+ Required keys are "img_prefix" and "img_info" (a dict that must contain the
+ key "filename", which is expected to be a list of filenames).
+ Added or updated keys are "filename", "img", "img_shape",
+ "ori_shape" (same as `img_shape`), "pad_shape" (same as `img_shape`),
+ "scale_factor" (1.0) and "img_norm_cfg" (means=0 and stds=1).
+
+ Args:
+ to_float32 (bool): Whether to convert the loaded image to a float32
+            numpy array. If set to False, the loaded image is a uint8 array.
+ Defaults to False.
+ color_type (str): The flag argument for :func:`mmcv.imfrombytes`.
+ Defaults to 'color'.
+ file_client_args (dict): Arguments to instantiate a FileClient.
+ See :class:`mmcv.fileio.FileClient` for details.
+ Defaults to ``dict(backend='disk')``.
+ """
+
+ def __init__(self,
+ to_float32=False,
+ color_type='unchanged',
+ file_client_args=dict(backend='disk')):
+ self.to_float32 = to_float32
+ self.color_type = color_type
+ self.file_client_args = file_client_args.copy()
+ self.file_client = None
+
+ def __call__(self, results):
+ """Call functions to load multiple images and get images meta
+ information.
+
+ Args:
+ results (dict): Result dict from :obj:`mmdet.CustomDataset`.
+
+ Returns:
+ dict: The dict contains loaded images and meta information.
+ """
+
+ if self.file_client is None:
+ self.file_client = mmcv.FileClient(**self.file_client_args)
+
+ if results['img_prefix'] is not None:
+ filename = [
+ osp.join(results['img_prefix'], fname)
+ for fname in results['img_info']['filename']
+ ]
+ else:
+ filename = results['img_info']['filename']
+
+ img = []
+ for name in filename:
+ img_bytes = self.file_client.get(name)
+ img.append(mmcv.imfrombytes(img_bytes, flag=self.color_type))
+ img = np.stack(img, axis=-1)
+ if self.to_float32:
+ img = img.astype(np.float32)
+
+ results['filename'] = filename
+ results['ori_filename'] = results['img_info']['filename']
+ results['img'] = img
+ results['img_shape'] = img.shape
+ results['ori_shape'] = img.shape
+ # Set initial values for default meta_keys
+ results['pad_shape'] = img.shape
+ results['scale_factor'] = 1.0
+ num_channels = 1 if len(img.shape) < 3 else img.shape[2]
+ results['img_norm_cfg'] = dict(
+ mean=np.zeros(num_channels, dtype=np.float32),
+ std=np.ones(num_channels, dtype=np.float32),
+ to_rgb=False)
+ return results
+
+ def __repr__(self):
+ repr_str = (f'{self.__class__.__name__}('
+ f'to_float32={self.to_float32}, '
+ f"color_type='{self.color_type}', "
+ f'file_client_args={self.file_client_args})')
+ return repr_str
+
+
+@PIPELINES.register_module()
+class LoadAnnotations(object):
+ """Load mutiple types of annotations.
+
+ Args:
+ with_bbox (bool): Whether to parse and load the bbox annotation.
+ Default: True.
+ with_label (bool): Whether to parse and load the label annotation.
+ Default: True.
+ with_mask (bool): Whether to parse and load the mask annotation.
+ Default: False.
+ with_seg (bool): Whether to parse and load the semantic segmentation
+ annotation. Default: False.
+ poly2mask (bool): Whether to convert the instance masks from polygons
+ to bitmaps. Default: True.
+ file_client_args (dict): Arguments to instantiate a FileClient.
+ See :class:`mmcv.fileio.FileClient` for details.
+ Defaults to ``dict(backend='disk')``.
+ """
+
+ def __init__(self,
+ with_bbox=True,
+ with_label=True,
+ with_mask=False,
+ with_seg=False,
+ poly2mask=True,
+ file_client_args=dict(backend='disk')):
+ self.with_bbox = with_bbox
+ self.with_label = with_label
+ self.with_mask = with_mask
+ self.with_seg = with_seg
+ self.poly2mask = poly2mask
+ self.file_client_args = file_client_args.copy()
+ self.file_client = None
+
+ def _load_bboxes(self, results):
+ """Private function to load bounding box annotations.
+
+ Args:
+ results (dict): Result dict from :obj:`mmdet.CustomDataset`.
+
+ Returns:
+ dict: The dict contains loaded bounding box annotations.
+ """
+
+ ann_info = results['ann_info']
+ results['gt_bboxes'] = ann_info['bboxes'].copy()
+
+ gt_bboxes_ignore = ann_info.get('bboxes_ignore', None)
+ if gt_bboxes_ignore is not None:
+ results['gt_bboxes_ignore'] = gt_bboxes_ignore.copy()
+ results['bbox_fields'].append('gt_bboxes_ignore')
+ results['bbox_fields'].append('gt_bboxes')
+ return results
+
+ def _load_labels(self, results):
+ """Private function to load label annotations.
+
+ Args:
+ results (dict): Result dict from :obj:`mmdet.CustomDataset`.
+
+ Returns:
+ dict: The dict contains loaded label annotations.
+ """
+
+ results['gt_labels'] = results['ann_info']['labels'].copy()
+ return results
+
+ def _poly2mask(self, mask_ann, img_h, img_w):
+ """Private function to convert masks represented with polygon to
+ bitmaps.
+
+ Args:
+ mask_ann (list | dict): Polygon mask annotation input.
+ img_h (int): The height of output mask.
+ img_w (int): The width of output mask.
+
+ Returns:
+            numpy.ndarray: The decoded bitmap mask of shape (img_h, img_w).
+ """
+
+ if isinstance(mask_ann, list):
+ # polygon -- a single object might consist of multiple parts
+ # we merge all parts into one mask rle code
+ rles = maskUtils.frPyObjects(mask_ann, img_h, img_w)
+ rle = maskUtils.merge(rles)
+ elif isinstance(mask_ann['counts'], list):
+ # uncompressed RLE
+ rle = maskUtils.frPyObjects(mask_ann, img_h, img_w)
+ else:
+ # rle
+ rle = mask_ann
+ mask = maskUtils.decode(rle)
+ return mask
+
+ def process_polygons(self, polygons):
+ """Convert polygons to list of ndarray and filter invalid polygons.
+
+ Args:
+ polygons (list[list]): Polygons of one instance.
+
+ Returns:
+ list[numpy.ndarray]: Processed polygons.
+ """
+
+ polygons = [np.array(p) for p in polygons]
+ valid_polygons = []
+ for polygon in polygons:
+ if len(polygon) % 2 == 0 and len(polygon) >= 6:
+ valid_polygons.append(polygon)
+ return valid_polygons
+
+ def _load_masks(self, results):
+ """Private function to load mask annotations.
+
+ Args:
+ results (dict): Result dict from :obj:`mmdet.CustomDataset`.
+
+ Returns:
+            dict: The dict contains loaded mask annotations.
+                If ``self.poly2mask`` is set ``True``, `gt_mask` will contain
+                :obj:`BitmapMasks`. Otherwise, :obj:`PolygonMasks` is used.
+ """
+
+ h, w = results['img_info']['height'], results['img_info']['width']
+ gt_masks = results['ann_info']['masks']
+ if self.poly2mask:
+ gt_masks = BitmapMasks(
+ [self._poly2mask(mask, h, w) for mask in gt_masks], h, w)
+ else:
+ gt_masks = PolygonMasks(
+ [self.process_polygons(polygons) for polygons in gt_masks], h,
+ w)
+ results['gt_masks'] = gt_masks
+ results['mask_fields'].append('gt_masks')
+ return results
+
+ def _load_semantic_seg(self, results):
+ """Private function to load semantic segmentation annotations.
+
+ Args:
+ results (dict): Result dict from :obj:`dataset`.
+
+ Returns:
+ dict: The dict contains loaded semantic segmentation annotations.
+ """
+
+ if self.file_client is None:
+ self.file_client = mmcv.FileClient(**self.file_client_args)
+
+ filename = osp.join(results['seg_prefix'],
+ results['ann_info']['seg_map'])
+ img_bytes = self.file_client.get(filename)
+ results['gt_semantic_seg'] = mmcv.imfrombytes(
+ img_bytes, flag='unchanged').squeeze()
+ results['seg_fields'].append('gt_semantic_seg')
+ return results
+
+ def __call__(self, results):
+ """Call function to load multiple types annotations.
+
+ Args:
+ results (dict): Result dict from :obj:`mmdet.CustomDataset`.
+
+ Returns:
+ dict: The dict contains loaded bounding box, label, mask and
+ semantic segmentation annotations.
+ """
+
+ if self.with_bbox:
+ results = self._load_bboxes(results)
+ if results is None:
+ return None
+ if self.with_label:
+ results = self._load_labels(results)
+ if self.with_mask:
+ results = self._load_masks(results)
+ if self.with_seg:
+ results = self._load_semantic_seg(results)
+ return results
+
+ def __repr__(self):
+ repr_str = self.__class__.__name__
+ repr_str += f'(with_bbox={self.with_bbox}, '
+ repr_str += f'with_label={self.with_label}, '
+ repr_str += f'with_mask={self.with_mask}, '
+ repr_str += f'with_seg={self.with_seg}, '
+ repr_str += f'poly2mask={self.poly2mask}, '
+        repr_str += f'file_client_args={self.file_client_args})'
+ return repr_str
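+
+
+# Illustrative sketch (not part of the upstream mmdet source): with the default
+# flags, only bboxes and labels are copied out of 'ann_info' and 'gt_bboxes' is
+# registered in 'bbox_fields'. The toy annotation below is an assumption.
+def _demo_load_annotations():
+    ann = dict(bboxes=np.array([[0., 0., 4., 4.]], dtype=np.float32),
+               labels=np.array([1], dtype=np.int64))
+    out = LoadAnnotations()(dict(ann_info=ann, bbox_fields=[]))
+    return out['gt_bboxes'], out['gt_labels'], out['bbox_fields']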
+
+
+@PIPELINES.register_module()
+class LoadProposals(object):
+ """Load proposal pipeline.
+
+ Required key is "proposals". Updated keys are "proposals", "bbox_fields".
+
+ Args:
+ num_max_proposals (int, optional): Maximum number of proposals to load.
+ If not specified, all proposals will be loaded.
+ """
+
+ def __init__(self, num_max_proposals=None):
+ self.num_max_proposals = num_max_proposals
+
+ def __call__(self, results):
+ """Call function to load proposals from file.
+
+ Args:
+ results (dict): Result dict from :obj:`mmdet.CustomDataset`.
+
+ Returns:
+ dict: The dict contains loaded proposal annotations.
+ """
+
+ proposals = results['proposals']
+ if proposals.shape[1] not in (4, 5):
+ raise AssertionError(
+ 'proposals should have shapes (n, 4) or (n, 5), '
+ f'but found {proposals.shape}')
+ proposals = proposals[:, :4]
+
+ if self.num_max_proposals is not None:
+ proposals = proposals[:self.num_max_proposals]
+
+ if len(proposals) == 0:
+ proposals = np.array([[0, 0, 0, 0]], dtype=np.float32)
+ results['proposals'] = proposals
+ results['bbox_fields'].append('proposals')
+ return results
+
+ def __repr__(self):
+ return self.__class__.__name__ + \
+ f'(num_max_proposals={self.num_max_proposals})'
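+
+
+# Illustrative sketch (not part of the upstream mmdet source): proposals are
+# truncated to (num_max_proposals, 4) and registered in 'bbox_fields'. The
+# random proposals below are an assumption for demonstration.
+def _demo_load_proposals():
+    results = dict(proposals=np.random.rand(10, 4).astype(np.float32),
+                   bbox_fields=[])
+    out = LoadProposals(num_max_proposals=5)(results)
+    return out['proposals'].shape, out['bbox_fields']  # (5, 4), ['proposals']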
+
+
+@PIPELINES.register_module()
+class FilterAnnotations(object):
+ """Filter invalid annotations.
+
+ Args:
+ min_gt_bbox_wh (tuple[int]): Minimum width and height of ground truth
+ boxes.
+ """
+
+ def __init__(self, min_gt_bbox_wh):
+ # TODO: add more filter options
+ self.min_gt_bbox_wh = min_gt_bbox_wh
+
+ def __call__(self, results):
+ assert 'gt_bboxes' in results
+ gt_bboxes = results['gt_bboxes']
+ w = gt_bboxes[:, 2] - gt_bboxes[:, 0]
+ h = gt_bboxes[:, 3] - gt_bboxes[:, 1]
+ keep = (w > self.min_gt_bbox_wh[0]) & (h > self.min_gt_bbox_wh[1])
+ if not keep.any():
+ return None
+ else:
+ keys = ('gt_bboxes', 'gt_labels', 'gt_masks', 'gt_semantic_seg')
+ for key in keys:
+ if key in results:
+ results[key] = results[key][keep]
+ return results
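+
+
+# Illustrative sketch (not part of the upstream mmdet source): boxes smaller
+# than min_gt_bbox_wh are dropped together with their labels. The toy ground
+# truth below is an assumption for demonstration.
+def _demo_filter_annotations():
+    results = dict(
+        gt_bboxes=np.array([[0., 0., 10., 10.], [0., 0., 1., 1.]],
+                           dtype=np.float32),
+        gt_labels=np.array([0, 1], dtype=np.int64))
+    out = FilterAnnotations(min_gt_bbox_wh=(2, 2))(results)
+    return out['gt_bboxes'], out['gt_labels']  # one box and one label remain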
diff --git a/annotator/uniformer/mmdet_null/datasets/pipelines/test_time_aug.py b/annotator/uniformer/mmdet_null/datasets/pipelines/test_time_aug.py
new file mode 100644
index 0000000000000000000000000000000000000000..b6226e040499882c99f15594c66ebf3d07829168
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/datasets/pipelines/test_time_aug.py
@@ -0,0 +1,119 @@
+import warnings
+
+import mmcv
+
+from ..builder import PIPELINES
+from .compose import Compose
+
+
+@PIPELINES.register_module()
+class MultiScaleFlipAug(object):
+ """Test-time augmentation with multiple scales and flipping.
+
+    An example configuration is as follows:
+
+ .. code-block::
+
+ img_scale=[(1333, 400), (1333, 800)],
+ flip=True,
+ transforms=[
+ dict(type='Resize', keep_ratio=True),
+ dict(type='RandomFlip'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='Pad', size_divisor=32),
+ dict(type='ImageToTensor', keys=['img']),
+ dict(type='Collect', keys=['img']),
+ ]
+
+    After MultiScaleFlipAug with the above configuration, the results are
+    wrapped into lists of the same length, as follows:
+
+ .. code-block::
+
+ dict(
+ img=[...],
+ img_shape=[...],
+ scale=[(1333, 400), (1333, 400), (1333, 800), (1333, 800)]
+ flip=[False, True, False, True]
+ ...
+ )
+
+ Args:
+ transforms (list[dict]): Transforms to apply in each augmentation.
+        img_scale (tuple | list[tuple] | None): Image scales for resizing.
+        scale_factor (float | list[float] | None): Scale factors for resizing.
+        flip (bool): Whether to apply flip augmentation. Default: False.
+        flip_direction (str | list[str]): Flip augmentation directions,
+            options are "horizontal" and "vertical". If flip_direction is a
+            list, multiple flip augmentations will be applied.
+            It has no effect when ``flip == False``. Default: "horizontal".
+ """
+
+ def __init__(self,
+ transforms,
+ img_scale=None,
+ scale_factor=None,
+ flip=False,
+ flip_direction='horizontal'):
+ self.transforms = Compose(transforms)
+        assert (img_scale is None) ^ (scale_factor is None), (
+            'Exactly one of "img_scale" and "scale_factor" must be set')
+ if img_scale is not None:
+ self.img_scale = img_scale if isinstance(img_scale,
+ list) else [img_scale]
+ self.scale_key = 'scale'
+ assert mmcv.is_list_of(self.img_scale, tuple)
+ else:
+ self.img_scale = scale_factor if isinstance(
+ scale_factor, list) else [scale_factor]
+ self.scale_key = 'scale_factor'
+
+ self.flip = flip
+ self.flip_direction = flip_direction if isinstance(
+ flip_direction, list) else [flip_direction]
+ assert mmcv.is_list_of(self.flip_direction, str)
+ if not self.flip and self.flip_direction != ['horizontal']:
+ warnings.warn(
+ 'flip_direction has no effect when flip is set to False')
+ if (self.flip
+ and not any([t['type'] == 'RandomFlip' for t in transforms])):
+ warnings.warn(
+ 'flip has no effect when RandomFlip is not in transforms')
+
+ def __call__(self, results):
+ """Call function to apply test time augment transforms on results.
+
+ Args:
+ results (dict): Result dict contains the data to transform.
+
+ Returns:
+ dict[str: list]: The augmented data, where each value is wrapped
+ into a list.
+ """
+
+ aug_data = []
+ flip_args = [(False, None)]
+ if self.flip:
+ flip_args += [(True, direction)
+ for direction in self.flip_direction]
+ for scale in self.img_scale:
+ for flip, direction in flip_args:
+ _results = results.copy()
+ _results[self.scale_key] = scale
+ _results['flip'] = flip
+ _results['flip_direction'] = direction
+ data = self.transforms(_results)
+ aug_data.append(data)
+ # list of dict to dict of list
+ aug_data_dict = {key: [] for key in aug_data[0]}
+ for data in aug_data:
+ for key, val in data.items():
+ aug_data_dict[key].append(val)
+ return aug_data_dict
+
+ def __repr__(self):
+ repr_str = self.__class__.__name__
+ repr_str += f'(transforms={self.transforms}, '
+ repr_str += f'img_scale={self.img_scale}, flip={self.flip}, '
+ repr_str += f'flip_direction={self.flip_direction})'
+ return repr_str
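+
+
+# Illustrative sketch (not part of the upstream mmdet source): with two scales,
+# no flipping and an empty transform list (used here purely for illustration),
+# every value in the output is wrapped into a list with one entry per scale.
+def _demo_multi_scale_flip_aug():
+    import numpy as np  # local import; this module does not need numpy itself
+    tta = MultiScaleFlipAug(
+        transforms=[], img_scale=[(1333, 400), (1333, 800)], flip=False)
+    out = tta(dict(img=np.zeros((8, 8, 3), dtype=np.uint8)))
+    return out['scale'], out['flip']  # two entries each, one per scale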
diff --git a/annotator/uniformer/mmdet_null/datasets/pipelines/transforms.py b/annotator/uniformer/mmdet_null/datasets/pipelines/transforms.py
new file mode 100644
index 0000000000000000000000000000000000000000..caed51d89ffc1259d0b086954f03c3d4c0749cf2
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/datasets/pipelines/transforms.py
@@ -0,0 +1,1811 @@
+import copy
+import inspect
+
+import mmcv
+import numpy as np
+from numpy import random
+
+from annotator.uniformer.mmdet.core import PolygonMasks
+from annotator.uniformer.mmdet.core.evaluation.bbox_overlaps import bbox_overlaps
+from ..builder import PIPELINES
+
+try:
+ from imagecorruptions import corrupt
+except ImportError:
+ corrupt = None
+
+try:
+ import albumentations
+ from albumentations import Compose
+except ImportError:
+ albumentations = None
+ Compose = None
+
+
+@PIPELINES.register_module()
+class Resize(object):
+ """Resize images & bbox & mask.
+
+ This transform resizes the input image to some scale. Bboxes and masks are
+ then resized with the same scale factor. If the input dict contains the key
+ "scale", then the scale in the input dict is used, otherwise the specified
+ scale in the init method is used. If the input dict contains the key
+ "scale_factor" (if MultiScaleFlipAug does not give img_scale but
+ scale_factor), the actual scale will be computed by image shape and
+ scale_factor.
+
+ `img_scale` can either be a tuple (single-scale) or a list of tuple
+ (multi-scale). There are 3 multiscale modes:
+
+ - ``ratio_range is not None``: randomly sample a ratio from the ratio \
+ range and multiply it with the image scale.
+ - ``ratio_range is None`` and ``multiscale_mode == "range"``: randomly \
+ sample a scale from the multiscale range.
+ - ``ratio_range is None`` and ``multiscale_mode == "value"``: randomly \
+ sample a scale from multiple scales.
+
+ Args:
+        img_scale (tuple or list[tuple]): Image scales for resizing.
+ multiscale_mode (str): Either "range" or "value".
+ ratio_range (tuple[float]): (min_ratio, max_ratio)
+ keep_ratio (bool): Whether to keep the aspect ratio when resizing the
+ image.
+        bbox_clip_border (bool, optional): Whether to clip objects outside
+            the border of the image. Defaults to True.
+        backend (str): Image resize backend, choices are 'cv2' and 'pillow'.
+            These two backends generate slightly different results. Defaults
+            to 'cv2'.
+        override (bool, optional): Whether to override `scale` and
+            `scale_factor` so as to call resize twice. If True, after the
+            first resize, the existing `scale` and `scale_factor` will be
+            ignored so a second resize can be performed.
+            This option is a workaround for the repeated resizes in DETR.
+            Defaults to False.
+ """
+
+ def __init__(self,
+ img_scale=None,
+ multiscale_mode='range',
+ ratio_range=None,
+ keep_ratio=True,
+ bbox_clip_border=True,
+ backend='cv2',
+ override=False):
+ if img_scale is None:
+ self.img_scale = None
+ else:
+ if isinstance(img_scale, list):
+ self.img_scale = img_scale
+ else:
+ self.img_scale = [img_scale]
+ assert mmcv.is_list_of(self.img_scale, tuple)
+
+ if ratio_range is not None:
+ # mode 1: given a scale and a range of image ratio
+ assert len(self.img_scale) == 1
+ else:
+ # mode 2: given multiple scales or a range of scales
+ assert multiscale_mode in ['value', 'range']
+
+ self.backend = backend
+ self.multiscale_mode = multiscale_mode
+ self.ratio_range = ratio_range
+ self.keep_ratio = keep_ratio
+ # TODO: refactor the override option in Resize
+ self.override = override
+ self.bbox_clip_border = bbox_clip_border
+
+ @staticmethod
+ def random_select(img_scales):
+ """Randomly select an img_scale from given candidates.
+
+ Args:
+            img_scales (list[tuple]): Image scales for selection.
+
+        Returns:
+            (tuple, int): Returns a tuple ``(img_scale, scale_idx)``, \
+ where ``img_scale`` is the selected image scale and \
+ ``scale_idx`` is the selected index in the given candidates.
+ """
+
+ assert mmcv.is_list_of(img_scales, tuple)
+ scale_idx = np.random.randint(len(img_scales))
+ img_scale = img_scales[scale_idx]
+ return img_scale, scale_idx
+
+ @staticmethod
+ def random_sample(img_scales):
+ """Randomly sample an img_scale when ``multiscale_mode=='range'``.
+
+ Args:
+            img_scales (list[tuple]): Image scale range for sampling.
+ There must be two tuples in img_scales, which specify the lower
+ and upper bound of image scales.
+
+ Returns:
+ (tuple, None): Returns a tuple ``(img_scale, None)``, where \
+ ``img_scale`` is sampled scale and None is just a placeholder \
+ to be consistent with :func:`random_select`.
+ """
+
+ assert mmcv.is_list_of(img_scales, tuple) and len(img_scales) == 2
+ img_scale_long = [max(s) for s in img_scales]
+ img_scale_short = [min(s) for s in img_scales]
+ long_edge = np.random.randint(
+ min(img_scale_long),
+ max(img_scale_long) + 1)
+ short_edge = np.random.randint(
+ min(img_scale_short),
+ max(img_scale_short) + 1)
+ img_scale = (long_edge, short_edge)
+ return img_scale, None
+
+ @staticmethod
+ def random_sample_ratio(img_scale, ratio_range):
+ """Randomly sample an img_scale when ``ratio_range`` is specified.
+
+ A ratio will be randomly sampled from the range specified by
+ ``ratio_range``. Then it would be multiplied with ``img_scale`` to
+ generate sampled scale.
+
+ Args:
+            img_scale (tuple): Image scale base to multiply with the ratio.
+ ratio_range (tuple[float]): The minimum and maximum ratio to scale
+ the ``img_scale``.
+
+ Returns:
+ (tuple, None): Returns a tuple ``(scale, None)``, where \
+ ``scale`` is sampled ratio multiplied with ``img_scale`` and \
+ None is just a placeholder to be consistent with \
+ :func:`random_select`.
+ """
+
+ assert isinstance(img_scale, tuple) and len(img_scale) == 2
+ min_ratio, max_ratio = ratio_range
+ assert min_ratio <= max_ratio
+ ratio = np.random.random_sample() * (max_ratio - min_ratio) + min_ratio
+ scale = int(img_scale[0] * ratio), int(img_scale[1] * ratio)
+ return scale, None
+
+ def _random_scale(self, results):
+ """Randomly sample an img_scale according to ``ratio_range`` and
+ ``multiscale_mode``.
+
+ If ``ratio_range`` is specified, a ratio will be sampled and be
+ multiplied with ``img_scale``.
+ If multiple scales are specified by ``img_scale``, a scale will be
+ sampled according to ``multiscale_mode``.
+ Otherwise, single scale will be used.
+
+ Args:
+ results (dict): Result dict from :obj:`dataset`.
+
+ Returns:
+            dict: Two new keys ``scale`` and ``scale_idx`` are added into \
+                ``results``, which will be used by subsequent pipelines.
+ """
+
+ if self.ratio_range is not None:
+ scale, scale_idx = self.random_sample_ratio(
+ self.img_scale[0], self.ratio_range)
+ elif len(self.img_scale) == 1:
+ scale, scale_idx = self.img_scale[0], 0
+ elif self.multiscale_mode == 'range':
+ scale, scale_idx = self.random_sample(self.img_scale)
+ elif self.multiscale_mode == 'value':
+ scale, scale_idx = self.random_select(self.img_scale)
+ else:
+ raise NotImplementedError
+
+ results['scale'] = scale
+ results['scale_idx'] = scale_idx
+
+ def _resize_img(self, results):
+ """Resize images with ``results['scale']``."""
+ for key in results.get('img_fields', ['img']):
+ if self.keep_ratio:
+ img, scale_factor = mmcv.imrescale(
+ results[key],
+ results['scale'],
+ return_scale=True,
+ backend=self.backend)
+                # the w_scale and h_scale have a minor difference
+                # a real fix should be done in mmcv.imrescale in the future
+ new_h, new_w = img.shape[:2]
+ h, w = results[key].shape[:2]
+ w_scale = new_w / w
+ h_scale = new_h / h
+ else:
+ img, w_scale, h_scale = mmcv.imresize(
+ results[key],
+ results['scale'],
+ return_scale=True,
+ backend=self.backend)
+ results[key] = img
+
+ scale_factor = np.array([w_scale, h_scale, w_scale, h_scale],
+ dtype=np.float32)
+ results['img_shape'] = img.shape
+ # in case that there is no padding
+ results['pad_shape'] = img.shape
+ results['scale_factor'] = scale_factor
+ results['keep_ratio'] = self.keep_ratio
+
+ def _resize_bboxes(self, results):
+ """Resize bounding boxes with ``results['scale_factor']``."""
+ for key in results.get('bbox_fields', []):
+ bboxes = results[key] * results['scale_factor']
+ if self.bbox_clip_border:
+ img_shape = results['img_shape']
+ bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, img_shape[1])
+ bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, img_shape[0])
+ results[key] = bboxes
+
+ def _resize_masks(self, results):
+ """Resize masks with ``results['scale']``"""
+ for key in results.get('mask_fields', []):
+ if results[key] is None:
+ continue
+ if self.keep_ratio:
+ results[key] = results[key].rescale(results['scale'])
+ else:
+ results[key] = results[key].resize(results['img_shape'][:2])
+
+ def _resize_seg(self, results):
+ """Resize semantic segmentation map with ``results['scale']``."""
+ for key in results.get('seg_fields', []):
+ if self.keep_ratio:
+ gt_seg = mmcv.imrescale(
+ results[key],
+ results['scale'],
+ interpolation='nearest',
+ backend=self.backend)
+ else:
+ gt_seg = mmcv.imresize(
+ results[key],
+ results['scale'],
+ interpolation='nearest',
+ backend=self.backend)
+ results['gt_semantic_seg'] = gt_seg
+
+ def __call__(self, results):
+ """Call function to resize images, bounding boxes, masks, semantic
+ segmentation map.
+
+ Args:
+ results (dict): Result dict from loading pipeline.
+
+ Returns:
+ dict: Resized results, 'img_shape', 'pad_shape', 'scale_factor', \
+ 'keep_ratio' keys are added into result dict.
+ """
+
+ if 'scale' not in results:
+ if 'scale_factor' in results:
+ img_shape = results['img'].shape[:2]
+ scale_factor = results['scale_factor']
+ assert isinstance(scale_factor, float)
+ results['scale'] = tuple(
+ [int(x * scale_factor) for x in img_shape][::-1])
+ else:
+ self._random_scale(results)
+ else:
+ if not self.override:
+ assert 'scale_factor' not in results, (
+ 'scale and scale_factor cannot be both set.')
+ else:
+ results.pop('scale')
+ if 'scale_factor' in results:
+ results.pop('scale_factor')
+ self._random_scale(results)
+
+ self._resize_img(results)
+ self._resize_bboxes(results)
+ self._resize_masks(results)
+ self._resize_seg(results)
+ return results
+
+ def __repr__(self):
+ repr_str = self.__class__.__name__
+ repr_str += f'(img_scale={self.img_scale}, '
+ repr_str += f'multiscale_mode={self.multiscale_mode}, '
+ repr_str += f'ratio_range={self.ratio_range}, '
+ repr_str += f'keep_ratio={self.keep_ratio}, '
+ repr_str += f'bbox_clip_border={self.bbox_clip_border})'
+ return repr_str
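+
+
+# Illustrative sketch (not part of the upstream mmdet source): when a
+# ratio_range is given, the sampled scale is the base img_scale multiplied by
+# a random ratio. The base scale and range below are assumptions.
+def _demo_resize_ratio_sampling():
+    scale, _ = Resize.random_sample_ratio((1333, 800), ratio_range=(0.5, 2.0))
+    return scale  # lies between 0.5x and 2.0x of (1333, 800)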
+
+
+@PIPELINES.register_module()
+class RandomFlip(object):
+ """Flip the image & bbox & mask.
+
+ If the input dict contains the key "flip", then the flag will be used,
+ otherwise it will be randomly decided by a ratio specified in the init
+ method.
+
+    When random flip is enabled, ``flip_ratio``/``direction`` can either be a
+    float/string or a list of floats/strings. There are 3 flip modes:
+
+ - ``flip_ratio`` is float, ``direction`` is string: the image will be
+ ``direction``ly flipped with probability of ``flip_ratio`` .
+ E.g., ``flip_ratio=0.5``, ``direction='horizontal'``,
+ then image will be horizontally flipped with probability of 0.5.
+    - ``flip_ratio`` is float, ``direction`` is list of string: the image will
+ be ``direction[i]``ly flipped with probability of
+ ``flip_ratio/len(direction)``.
+ E.g., ``flip_ratio=0.5``, ``direction=['horizontal', 'vertical']``,
+ then image will be horizontally flipped with probability of 0.25,
+ vertically with probability of 0.25.
+ - ``flip_ratio`` is list of float, ``direction`` is list of string:
+      given ``len(flip_ratio) == len(direction)``, the image will
+ be ``direction[i]``ly flipped with probability of ``flip_ratio[i]``.
+ E.g., ``flip_ratio=[0.3, 0.5]``, ``direction=['horizontal',
+      'vertical']``, then the image will be horizontally flipped with
+      probability of 0.3, and vertically with probability of 0.5.
+
+ Args:
+ flip_ratio (float | list[float], optional): The flipping probability.
+ Default: None.
+ direction(str | list[str], optional): The flipping direction. Options
+ are 'horizontal', 'vertical', 'diagonal'. Default: 'horizontal'.
+ If input is a list, the length must equal ``flip_ratio``. Each
+ element in ``flip_ratio`` indicates the flip probability of
+ corresponding direction.
+ """
+
+ def __init__(self, flip_ratio=None, direction='horizontal'):
+ if isinstance(flip_ratio, list):
+ assert mmcv.is_list_of(flip_ratio, float)
+ assert 0 <= sum(flip_ratio) <= 1
+ elif isinstance(flip_ratio, float):
+ assert 0 <= flip_ratio <= 1
+ elif flip_ratio is None:
+ pass
+ else:
+ raise ValueError('flip_ratios must be None, float, '
+ 'or list of float')
+ self.flip_ratio = flip_ratio
+
+ valid_directions = ['horizontal', 'vertical', 'diagonal']
+ if isinstance(direction, str):
+ assert direction in valid_directions
+ elif isinstance(direction, list):
+ assert mmcv.is_list_of(direction, str)
+ assert set(direction).issubset(set(valid_directions))
+ else:
+ raise ValueError('direction must be either str or list of str')
+ self.direction = direction
+
+ if isinstance(flip_ratio, list):
+ assert len(self.flip_ratio) == len(self.direction)
+
+ def bbox_flip(self, bboxes, img_shape, direction):
+ """Flip bboxes horizontally.
+
+ Args:
+ bboxes (numpy.ndarray): Bounding boxes, shape (..., 4*k)
+ img_shape (tuple[int]): Image shape (height, width)
+ direction (str): Flip direction. Options are 'horizontal',
+ 'vertical'.
+
+ Returns:
+ numpy.ndarray: Flipped bounding boxes.
+ """
+
+ assert bboxes.shape[-1] % 4 == 0
+ flipped = bboxes.copy()
+ if direction == 'horizontal':
+ w = img_shape[1]
+ flipped[..., 0::4] = w - bboxes[..., 2::4]
+ flipped[..., 2::4] = w - bboxes[..., 0::4]
+ elif direction == 'vertical':
+ h = img_shape[0]
+ flipped[..., 1::4] = h - bboxes[..., 3::4]
+ flipped[..., 3::4] = h - bboxes[..., 1::4]
+ elif direction == 'diagonal':
+ w = img_shape[1]
+ h = img_shape[0]
+ flipped[..., 0::4] = w - bboxes[..., 2::4]
+ flipped[..., 1::4] = h - bboxes[..., 3::4]
+ flipped[..., 2::4] = w - bboxes[..., 0::4]
+ flipped[..., 3::4] = h - bboxes[..., 1::4]
+ else:
+ raise ValueError(f"Invalid flipping direction '{direction}'")
+ return flipped
+
+ def __call__(self, results):
+ """Call function to flip bounding boxes, masks, semantic segmentation
+ maps.
+
+ Args:
+ results (dict): Result dict from loading pipeline.
+
+ Returns:
+ dict: Flipped results, 'flip', 'flip_direction' keys are added \
+ into result dict.
+ """
+
+ if 'flip' not in results:
+ if isinstance(self.direction, list):
+ # None means non-flip
+ direction_list = self.direction + [None]
+ else:
+ # None means non-flip
+ direction_list = [self.direction, None]
+
+ if isinstance(self.flip_ratio, list):
+ non_flip_ratio = 1 - sum(self.flip_ratio)
+ flip_ratio_list = self.flip_ratio + [non_flip_ratio]
+ else:
+ non_flip_ratio = 1 - self.flip_ratio
+ # exclude non-flip
+ single_ratio = self.flip_ratio / (len(direction_list) - 1)
+ flip_ratio_list = [single_ratio] * (len(direction_list) -
+ 1) + [non_flip_ratio]
+
+ cur_dir = np.random.choice(direction_list, p=flip_ratio_list)
+
+ results['flip'] = cur_dir is not None
+ if 'flip_direction' not in results:
+ results['flip_direction'] = cur_dir
+ if results['flip']:
+ # flip image
+ for key in results.get('img_fields', ['img']):
+ results[key] = mmcv.imflip(
+ results[key], direction=results['flip_direction'])
+ # flip bboxes
+ for key in results.get('bbox_fields', []):
+ results[key] = self.bbox_flip(results[key],
+ results['img_shape'],
+ results['flip_direction'])
+ # flip masks
+ for key in results.get('mask_fields', []):
+ results[key] = results[key].flip(results['flip_direction'])
+
+ # flip segs
+ for key in results.get('seg_fields', []):
+ results[key] = mmcv.imflip(
+ results[key], direction=results['flip_direction'])
+ return results
+
+ def __repr__(self):
+ return self.__class__.__name__ + f'(flip_ratio={self.flip_ratio})'
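+
+
+# Illustrative sketch (not part of the upstream mmdet source): horizontal
+# flipping mirrors box x-coordinates around the image width. The toy box and
+# image shape below are assumptions for demonstration.
+def _demo_bbox_flip():
+    boxes = np.array([[10., 20., 30., 40.]], dtype=np.float32)
+    flipped = RandomFlip(flip_ratio=1.0).bbox_flip(
+        boxes, img_shape=(100, 100), direction='horizontal')
+    return flipped  # [[70., 20., 90., 40.]]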
+
+
+@PIPELINES.register_module()
+class Pad(object):
+ """Pad the image & mask.
+
+ There are two padding modes: (1) pad to a fixed size and (2) pad to the
+ minimum size that is divisible by some number.
+ Added keys are "pad_shape", "pad_fixed_size", "pad_size_divisor",
+
+ Args:
+ size (tuple, optional): Fixed padding size.
+ size_divisor (int, optional): The divisor of padded size.
+ pad_val (float, optional): Padding value, 0 by default.
+ """
+
+ def __init__(self, size=None, size_divisor=None, pad_val=0):
+ self.size = size
+ self.size_divisor = size_divisor
+ self.pad_val = pad_val
+ # only one of size and size_divisor should be valid
+ assert size is not None or size_divisor is not None
+ assert size is None or size_divisor is None
+
+ def _pad_img(self, results):
+ """Pad images according to ``self.size``."""
+ for key in results.get('img_fields', ['img']):
+ if self.size is not None:
+ padded_img = mmcv.impad(
+ results[key], shape=self.size, pad_val=self.pad_val)
+ elif self.size_divisor is not None:
+ padded_img = mmcv.impad_to_multiple(
+ results[key], self.size_divisor, pad_val=self.pad_val)
+ results[key] = padded_img
+ results['pad_shape'] = padded_img.shape
+ results['pad_fixed_size'] = self.size
+ results['pad_size_divisor'] = self.size_divisor
+
+ def _pad_masks(self, results):
+ """Pad masks according to ``results['pad_shape']``."""
+ pad_shape = results['pad_shape'][:2]
+ for key in results.get('mask_fields', []):
+ results[key] = results[key].pad(pad_shape, pad_val=self.pad_val)
+
+ def _pad_seg(self, results):
+ """Pad semantic segmentation map according to
+ ``results['pad_shape']``."""
+ for key in results.get('seg_fields', []):
+ results[key] = mmcv.impad(
+ results[key], shape=results['pad_shape'][:2])
+
+ def __call__(self, results):
+ """Call function to pad images, masks, semantic segmentation maps.
+
+ Args:
+ results (dict): Result dict from loading pipeline.
+
+ Returns:
+ dict: Updated result dict.
+ """
+ self._pad_img(results)
+ self._pad_masks(results)
+ self._pad_seg(results)
+ return results
+
+ def __repr__(self):
+ repr_str = self.__class__.__name__
+ repr_str += f'(size={self.size}, '
+ repr_str += f'size_divisor={self.size_divisor}, '
+ repr_str += f'pad_val={self.pad_val})'
+ return repr_str
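+
+
+# Illustrative sketch (not part of the upstream mmdet source): with
+# size_divisor=32, a 100x120 image is padded up to the next multiples of 32.
+# The dummy image below is an assumption for demonstration.
+def _demo_pad():
+    out = Pad(size_divisor=32)(dict(img=np.zeros((100, 120, 3), np.uint8)))
+    return out['pad_shape']  # (128, 128, 3)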
+
+
+@PIPELINES.register_module()
+class Normalize(object):
+ """Normalize the image.
+
+ Added key is "img_norm_cfg".
+
+ Args:
+ mean (sequence): Mean values of 3 channels.
+ std (sequence): Std values of 3 channels.
+ to_rgb (bool): Whether to convert the image from BGR to RGB,
+ default is true.
+ """
+
+ def __init__(self, mean, std, to_rgb=True):
+ self.mean = np.array(mean, dtype=np.float32)
+ self.std = np.array(std, dtype=np.float32)
+ self.to_rgb = to_rgb
+
+ def __call__(self, results):
+ """Call function to normalize images.
+
+ Args:
+ results (dict): Result dict from loading pipeline.
+
+ Returns:
+ dict: Normalized results, 'img_norm_cfg' key is added into
+ result dict.
+ """
+ for key in results.get('img_fields', ['img']):
+ results[key] = mmcv.imnormalize(results[key], self.mean, self.std,
+ self.to_rgb)
+ results['img_norm_cfg'] = dict(
+ mean=self.mean, std=self.std, to_rgb=self.to_rgb)
+ return results
+
+ def __repr__(self):
+ repr_str = self.__class__.__name__
+ repr_str += f'(mean={self.mean}, std={self.std}, to_rgb={self.to_rgb})'
+ return repr_str
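+
+
+# Illustrative sketch (not part of the upstream mmdet source): normalization
+# subtracts the mean and divides by the std per channel, and records the
+# settings under 'img_norm_cfg'. The constant image below is an assumption.
+def _demo_normalize():
+    img = np.full((8, 8, 3), 127.5, dtype=np.float32)
+    out = Normalize(mean=[127.5] * 3, std=[127.5] * 3, to_rgb=False)(
+        dict(img=img))
+    return float(out['img'].mean()), out['img_norm_cfg']  # ~0.0, dict(...)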
+
+
+@PIPELINES.register_module()
+class RandomCrop(object):
+ """Random crop the image & bboxes & masks.
+
+ The absolute `crop_size` is sampled based on `crop_type` and `image_size`,
+ then the cropped results are generated.
+
+ Args:
+ crop_size (tuple): The relative ratio or absolute pixels of
+ height and width.
+ crop_type (str, optional): one of "relative_range", "relative",
+ "absolute", "absolute_range". "relative" randomly crops
+ (h * crop_size[0], w * crop_size[1]) part from an input of size
+ (h, w). "relative_range" uniformly samples relative crop size from
+ range [crop_size[0], 1] and [crop_size[1], 1] for height and width
+ respectively. "absolute" crops from an input with absolute size
+ (crop_size[0], crop_size[1]). "absolute_range" uniformly samples
+ crop_h in range [crop_size[0], min(h, crop_size[1])] and crop_w
+ in range [crop_size[0], min(w, crop_size[1])]. Default "absolute".
+ allow_negative_crop (bool, optional): Whether to allow a crop that does
+ not contain any bbox area. Default False.
+        bbox_clip_border (bool, optional): Whether to clip objects outside
+ the border of the image. Defaults to True.
+
+ Note:
+ - If the image is smaller than the absolute crop size, return the
+ original image.
+ - The keys for bboxes, labels and masks must be aligned. That is,
+ `gt_bboxes` corresponds to `gt_labels` and `gt_masks`, and
+ `gt_bboxes_ignore` corresponds to `gt_labels_ignore` and
+ `gt_masks_ignore`.
+ - If the crop does not contain any gt-bbox region and
+ `allow_negative_crop` is set to False, skip this image.
+ """
+
+ def __init__(self,
+ crop_size,
+ crop_type='absolute',
+ allow_negative_crop=False,
+ bbox_clip_border=True):
+ if crop_type not in [
+ 'relative_range', 'relative', 'absolute', 'absolute_range'
+ ]:
+ raise ValueError(f'Invalid crop_type {crop_type}.')
+ if crop_type in ['absolute', 'absolute_range']:
+ assert crop_size[0] > 0 and crop_size[1] > 0
+ assert isinstance(crop_size[0], int) and isinstance(
+ crop_size[1], int)
+ else:
+ assert 0 < crop_size[0] <= 1 and 0 < crop_size[1] <= 1
+ self.crop_size = crop_size
+ self.crop_type = crop_type
+ self.allow_negative_crop = allow_negative_crop
+ self.bbox_clip_border = bbox_clip_border
+ # The key correspondence from bboxes to labels and masks.
+ self.bbox2label = {
+ 'gt_bboxes': 'gt_labels',
+ 'gt_bboxes_ignore': 'gt_labels_ignore'
+ }
+ self.bbox2mask = {
+ 'gt_bboxes': 'gt_masks',
+ 'gt_bboxes_ignore': 'gt_masks_ignore'
+ }
+
+ def _crop_data(self, results, crop_size, allow_negative_crop):
+ """Function to randomly crop images, bounding boxes, masks, semantic
+ segmentation maps.
+
+ Args:
+ results (dict): Result dict from loading pipeline.
+ crop_size (tuple): Expected absolute size after cropping, (h, w).
+ allow_negative_crop (bool): Whether to allow a crop that does not
+ contain any bbox area. Default to False.
+
+ Returns:
+ dict: Randomly cropped results, 'img_shape' key in result dict is
+ updated according to crop size.
+ """
+ assert crop_size[0] > 0 and crop_size[1] > 0
+ for key in results.get('img_fields', ['img']):
+ img = results[key]
+ margin_h = max(img.shape[0] - crop_size[0], 0)
+ margin_w = max(img.shape[1] - crop_size[1], 0)
+ offset_h = np.random.randint(0, margin_h + 1)
+ offset_w = np.random.randint(0, margin_w + 1)
+ crop_y1, crop_y2 = offset_h, offset_h + crop_size[0]
+ crop_x1, crop_x2 = offset_w, offset_w + crop_size[1]
+
+ # crop the image
+ img = img[crop_y1:crop_y2, crop_x1:crop_x2, ...]
+ img_shape = img.shape
+ results[key] = img
+ results['img_shape'] = img_shape
+
+ # crop bboxes accordingly and clip to the image boundary
+ for key in results.get('bbox_fields', []):
+ # e.g. gt_bboxes and gt_bboxes_ignore
+ bbox_offset = np.array([offset_w, offset_h, offset_w, offset_h],
+ dtype=np.float32)
+ bboxes = results[key] - bbox_offset
+ if self.bbox_clip_border:
+ bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, img_shape[1])
+ bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, img_shape[0])
+ valid_inds = (bboxes[:, 2] > bboxes[:, 0]) & (
+ bboxes[:, 3] > bboxes[:, 1])
+ # If the crop does not contain any gt-bbox area and
+ # allow_negative_crop is False, skip this image.
+ if (key == 'gt_bboxes' and not valid_inds.any()
+ and not allow_negative_crop):
+ return None
+ results[key] = bboxes[valid_inds, :]
+ # label fields. e.g. gt_labels and gt_labels_ignore
+ label_key = self.bbox2label.get(key)
+ if label_key in results:
+ results[label_key] = results[label_key][valid_inds]
+
+ # mask fields, e.g. gt_masks and gt_masks_ignore
+ mask_key = self.bbox2mask.get(key)
+ if mask_key in results:
+ results[mask_key] = results[mask_key][
+ valid_inds.nonzero()[0]].crop(
+ np.asarray([crop_x1, crop_y1, crop_x2, crop_y2]))
+
+ # crop semantic seg
+ for key in results.get('seg_fields', []):
+ results[key] = results[key][crop_y1:crop_y2, crop_x1:crop_x2]
+
+ return results
+
+ def _get_crop_size(self, image_size):
+ """Randomly generates the absolute crop size based on `crop_type` and
+ `image_size`.
+
+ Args:
+ image_size (tuple): (h, w).
+
+ Returns:
+ crop_size (tuple): (crop_h, crop_w) in absolute pixels.
+ """
+ h, w = image_size
+ if self.crop_type == 'absolute':
+ return (min(self.crop_size[0], h), min(self.crop_size[1], w))
+ elif self.crop_type == 'absolute_range':
+ assert self.crop_size[0] <= self.crop_size[1]
+ crop_h = np.random.randint(
+ min(h, self.crop_size[0]),
+ min(h, self.crop_size[1]) + 1)
+ crop_w = np.random.randint(
+ min(w, self.crop_size[0]),
+ min(w, self.crop_size[1]) + 1)
+ return crop_h, crop_w
+ elif self.crop_type == 'relative':
+ crop_h, crop_w = self.crop_size
+ return int(h * crop_h + 0.5), int(w * crop_w + 0.5)
+ elif self.crop_type == 'relative_range':
+ crop_size = np.asarray(self.crop_size, dtype=np.float32)
+ crop_h, crop_w = crop_size + np.random.rand(2) * (1 - crop_size)
+ return int(h * crop_h + 0.5), int(w * crop_w + 0.5)
+
+ def __call__(self, results):
+ """Call function to randomly crop images, bounding boxes, masks,
+ semantic segmentation maps.
+
+ Args:
+ results (dict): Result dict from loading pipeline.
+
+ Returns:
+ dict: Randomly cropped results, 'img_shape' key in result dict is
+ updated according to crop size.
+ """
+ image_size = results['img'].shape[:2]
+ crop_size = self._get_crop_size(image_size)
+ results = self._crop_data(results, crop_size, self.allow_negative_crop)
+ return results
+
+ def __repr__(self):
+ repr_str = self.__class__.__name__
+ repr_str += f'(crop_size={self.crop_size}, '
+ repr_str += f'crop_type={self.crop_type}, '
+ repr_str += f'allow_negative_crop={self.allow_negative_crop}, '
+ repr_str += f'bbox_clip_border={self.bbox_clip_border})'
+ return repr_str
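+
+
+# Illustrative sketch (not part of the upstream mmdet source): with the default
+# 'absolute' crop_type the output image shape equals crop_size (clipped to the
+# input size). The dummy image below is an assumption for demonstration.
+def _demo_random_crop():
+    out = RandomCrop(crop_size=(50, 60))(
+        dict(img=np.zeros((100, 100, 3), dtype=np.uint8)))
+    return out['img_shape']  # (50, 60, 3)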
+
+
+@PIPELINES.register_module()
+class SegRescale(object):
+ """Rescale semantic segmentation maps.
+
+ Args:
+ scale_factor (float): The scale factor of the final output.
+ backend (str): Image rescale backend, choices are 'cv2' and 'pillow'.
+            These two backends generate slightly different results. Defaults
+ to 'cv2'.
+ """
+
+ def __init__(self, scale_factor=1, backend='cv2'):
+ self.scale_factor = scale_factor
+ self.backend = backend
+
+ def __call__(self, results):
+ """Call function to scale the semantic segmentation map.
+
+ Args:
+ results (dict): Result dict from loading pipeline.
+
+ Returns:
+ dict: Result dict with semantic segmentation map scaled.
+ """
+
+ for key in results.get('seg_fields', []):
+ if self.scale_factor != 1:
+ results[key] = mmcv.imrescale(
+ results[key],
+ self.scale_factor,
+ interpolation='nearest',
+ backend=self.backend)
+ return results
+
+ def __repr__(self):
+ return self.__class__.__name__ + f'(scale_factor={self.scale_factor})'
+
+
+@PIPELINES.register_module()
+class PhotoMetricDistortion(object):
+ """Apply photometric distortion to image sequentially, every transformation
+ is applied with a probability of 0.5. The position of random contrast is in
+ second or second to last.
+
+ 1. random brightness
+ 2. random contrast (mode 0)
+ 3. convert color from BGR to HSV
+ 4. random saturation
+ 5. random hue
+ 6. convert color from HSV to BGR
+ 7. random contrast (mode 1)
+ 8. randomly swap channels
+
+ Args:
+ brightness_delta (int): delta of brightness.
+ contrast_range (tuple): range of contrast.
+ saturation_range (tuple): range of saturation.
+ hue_delta (int): delta of hue.
+ """
+
+ def __init__(self,
+ brightness_delta=32,
+ contrast_range=(0.5, 1.5),
+ saturation_range=(0.5, 1.5),
+ hue_delta=18):
+ self.brightness_delta = brightness_delta
+ self.contrast_lower, self.contrast_upper = contrast_range
+ self.saturation_lower, self.saturation_upper = saturation_range
+ self.hue_delta = hue_delta
+
+ def __call__(self, results):
+ """Call function to perform photometric distortion on images.
+
+ Args:
+ results (dict): Result dict from loading pipeline.
+
+ Returns:
+ dict: Result dict with images distorted.
+ """
+
+ if 'img_fields' in results:
+ assert results['img_fields'] == ['img'], \
+ 'Only single img_fields is allowed'
+ img = results['img']
+ assert img.dtype == np.float32, \
+ 'PhotoMetricDistortion needs the input image of dtype np.float32,'\
+ ' please set "to_float32=True" in "LoadImageFromFile" pipeline'
+ # random brightness
+ if random.randint(2):
+ delta = random.uniform(-self.brightness_delta,
+ self.brightness_delta)
+ img += delta
+
+ # mode == 0 --> do random contrast first
+ # mode == 1 --> do random contrast last
+ mode = random.randint(2)
+ if mode == 1:
+ if random.randint(2):
+ alpha = random.uniform(self.contrast_lower,
+ self.contrast_upper)
+ img *= alpha
+
+ # convert color from BGR to HSV
+ img = mmcv.bgr2hsv(img)
+
+ # random saturation
+ if random.randint(2):
+ img[..., 1] *= random.uniform(self.saturation_lower,
+ self.saturation_upper)
+
+ # random hue
+ if random.randint(2):
+ img[..., 0] += random.uniform(-self.hue_delta, self.hue_delta)
+ img[..., 0][img[..., 0] > 360] -= 360
+ img[..., 0][img[..., 0] < 0] += 360
+
+ # convert color from HSV to BGR
+ img = mmcv.hsv2bgr(img)
+
+ # random contrast
+ if mode == 0:
+ if random.randint(2):
+ alpha = random.uniform(self.contrast_lower,
+ self.contrast_upper)
+ img *= alpha
+
+ # randomly swap channels
+ if random.randint(2):
+ img = img[..., random.permutation(3)]
+
+ results['img'] = img
+ return results
+
+ def __repr__(self):
+ repr_str = self.__class__.__name__
+ repr_str += f'(\nbrightness_delta={self.brightness_delta},\n'
+ repr_str += 'contrast_range='
+ repr_str += f'{(self.contrast_lower, self.contrast_upper)},\n'
+ repr_str += 'saturation_range='
+ repr_str += f'{(self.saturation_lower, self.saturation_upper)},\n'
+ repr_str += f'hue_delta={self.hue_delta})'
+ return repr_str
+
+
+@PIPELINES.register_module()
+class Expand(object):
+ """Random expand the image & bboxes.
+
+ Randomly place the original image on a canvas of 'ratio' x original image
+ size filled with mean values. The ratio is in the range of ratio_range.
+
+    Args:
+        mean (tuple): mean value of dataset.
+        to_rgb (bool): whether to convert the order of mean values to align
+            with RGB.
+        ratio_range (tuple): range of expand ratio.
+        seg_ignore_label (int, optional): label used to fill the expanded
+            region of semantic segmentation maps. Default: None.
+        prob (float): probability of applying this transformation.
+ """
+
+ def __init__(self,
+ mean=(0, 0, 0),
+ to_rgb=True,
+ ratio_range=(1, 4),
+ seg_ignore_label=None,
+ prob=0.5):
+ self.to_rgb = to_rgb
+ self.ratio_range = ratio_range
+ if to_rgb:
+ self.mean = mean[::-1]
+ else:
+ self.mean = mean
+ self.min_ratio, self.max_ratio = ratio_range
+ self.seg_ignore_label = seg_ignore_label
+ self.prob = prob
+
+ def __call__(self, results):
+ """Call function to expand images, bounding boxes.
+
+ Args:
+ results (dict): Result dict from loading pipeline.
+
+ Returns:
+ dict: Result dict with images, bounding boxes expanded
+ """
+
+ if random.uniform(0, 1) > self.prob:
+ return results
+
+ if 'img_fields' in results:
+ assert results['img_fields'] == ['img'], \
+ 'Only single img_fields is allowed'
+ img = results['img']
+
+ h, w, c = img.shape
+ ratio = random.uniform(self.min_ratio, self.max_ratio)
+        # speed up expand when the image is large
+ if np.all(self.mean == self.mean[0]):
+ expand_img = np.empty((int(h * ratio), int(w * ratio), c),
+ img.dtype)
+ expand_img.fill(self.mean[0])
+ else:
+ expand_img = np.full((int(h * ratio), int(w * ratio), c),
+ self.mean,
+ dtype=img.dtype)
+ left = int(random.uniform(0, w * ratio - w))
+ top = int(random.uniform(0, h * ratio - h))
+ expand_img[top:top + h, left:left + w] = img
+
+ results['img'] = expand_img
+ # expand bboxes
+ for key in results.get('bbox_fields', []):
+ results[key] = results[key] + np.tile(
+ (left, top), 2).astype(results[key].dtype)
+
+ # expand masks
+ for key in results.get('mask_fields', []):
+ results[key] = results[key].expand(
+ int(h * ratio), int(w * ratio), top, left)
+
+ # expand segs
+ for key in results.get('seg_fields', []):
+ gt_seg = results[key]
+ expand_gt_seg = np.full((int(h * ratio), int(w * ratio)),
+ self.seg_ignore_label,
+ dtype=gt_seg.dtype)
+ expand_gt_seg[top:top + h, left:left + w] = gt_seg
+ results[key] = expand_gt_seg
+ return results
+
+ def __repr__(self):
+ repr_str = self.__class__.__name__
+ repr_str += f'(mean={self.mean}, to_rgb={self.to_rgb}, '
+ repr_str += f'ratio_range={self.ratio_range}, '
+ repr_str += f'seg_ignore_label={self.seg_ignore_label})'
+ return repr_str
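+
+
+# Illustrative usage sketch, not part of the upstream module: Expand pastes
+# the image onto a larger mean-filled canvas and shifts the boxes by the same
+# offset. Shapes and values below are arbitrary placeholders.
+def _example_expand():
+    import numpy as np
+    transform = Expand(
+        mean=(123.675, 116.28, 103.53), to_rgb=True,
+        ratio_range=(1, 4), prob=1.0)
+    results = dict(
+        img=np.zeros((100, 100, 3), dtype=np.uint8),
+        img_fields=['img'],
+        bbox_fields=['gt_bboxes'],
+        gt_bboxes=np.array([[10., 10., 40., 40.]], dtype=np.float32))
+    out = transform(results)
+    # out['img'].shape is (100 * ratio, 100 * ratio, 3) for the sampled ratio
+    return out['img'].shape, out['gt_bboxes']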
+
+
+@PIPELINES.register_module()
+class MinIoURandomCrop(object):
+ """Random crop the image & bboxes, the cropped patches have minimum IoU
+ requirement with original image & bboxes, the IoU threshold is randomly
+ selected from min_ious.
+
+ Args:
+ min_ious (tuple): minimum IoU threshold for all intersections with
+ bounding boxes
+ min_crop_size (float): minimum crop's size (i.e. h,w := a*h, a*w,
+ where a >= min_crop_size).
+ bbox_clip_border (bool, optional): Whether clip the objects outside
+ the border of the image. Defaults to True.
+
+ Note:
+ The keys for bboxes, labels and masks should be paired. That is, \
+ `gt_bboxes` corresponds to `gt_labels` and `gt_masks`, and \
+ `gt_bboxes_ignore` to `gt_labels_ignore` and `gt_masks_ignore`.
+ """
+
+ def __init__(self,
+ min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
+ min_crop_size=0.3,
+ bbox_clip_border=True):
+ # 1: return ori img
+ self.min_ious = min_ious
+ self.sample_mode = (1, *min_ious, 0)
+ self.min_crop_size = min_crop_size
+ self.bbox_clip_border = bbox_clip_border
+ self.bbox2label = {
+ 'gt_bboxes': 'gt_labels',
+ 'gt_bboxes_ignore': 'gt_labels_ignore'
+ }
+ self.bbox2mask = {
+ 'gt_bboxes': 'gt_masks',
+ 'gt_bboxes_ignore': 'gt_masks_ignore'
+ }
+
+ def __call__(self, results):
+ """Call function to crop images and bounding boxes with minimum IoU
+ constraint.
+
+ Args:
+ results (dict): Result dict from loading pipeline.
+
+ Returns:
+ dict: Result dict with images and bounding boxes cropped, \
+ 'img_shape' key is updated.
+ """
+
+ if 'img_fields' in results:
+ assert results['img_fields'] == ['img'], \
+ 'Only single img_fields is allowed'
+ img = results['img']
+ assert 'bbox_fields' in results
+ boxes = [results[key] for key in results['bbox_fields']]
+ boxes = np.concatenate(boxes, 0)
+ h, w, c = img.shape
+ while True:
+ mode = random.choice(self.sample_mode)
+ self.mode = mode
+ if mode == 1:
+ return results
+
+ min_iou = mode
+ for i in range(50):
+ new_w = random.uniform(self.min_crop_size * w, w)
+ new_h = random.uniform(self.min_crop_size * h, h)
+
+ # h / w in [0.5, 2]
+ if new_h / new_w < 0.5 or new_h / new_w > 2:
+ continue
+
+ left = random.uniform(w - new_w)
+ top = random.uniform(h - new_h)
+
+ patch = np.array(
+ (int(left), int(top), int(left + new_w), int(top + new_h)))
+ # Line or point crop is not allowed
+ if patch[2] == patch[0] or patch[3] == patch[1]:
+ continue
+ overlaps = bbox_overlaps(
+ patch.reshape(-1, 4), boxes.reshape(-1, 4)).reshape(-1)
+ if len(overlaps) > 0 and overlaps.min() < min_iou:
+ continue
+
+                # centers of boxes should be inside the cropped img
+ # only adjust boxes and instance masks when the gt is not empty
+ if len(overlaps) > 0:
+ # adjust boxes
+ def is_center_of_bboxes_in_patch(boxes, patch):
+ center = (boxes[:, :2] + boxes[:, 2:]) / 2
+ mask = ((center[:, 0] > patch[0]) *
+ (center[:, 1] > patch[1]) *
+ (center[:, 0] < patch[2]) *
+ (center[:, 1] < patch[3]))
+ return mask
+
+ mask = is_center_of_bboxes_in_patch(boxes, patch)
+ if not mask.any():
+ continue
+ for key in results.get('bbox_fields', []):
+ boxes = results[key].copy()
+ mask = is_center_of_bboxes_in_patch(boxes, patch)
+ boxes = boxes[mask]
+ if self.bbox_clip_border:
+ boxes[:, 2:] = boxes[:, 2:].clip(max=patch[2:])
+ boxes[:, :2] = boxes[:, :2].clip(min=patch[:2])
+ boxes -= np.tile(patch[:2], 2)
+
+ results[key] = boxes
+ # labels
+ label_key = self.bbox2label.get(key)
+ if label_key in results:
+ results[label_key] = results[label_key][mask]
+
+ # mask fields
+ mask_key = self.bbox2mask.get(key)
+ if mask_key in results:
+ results[mask_key] = results[mask_key][
+ mask.nonzero()[0]].crop(patch)
+                # crop the img regardless of whether the gt is empty
+ img = img[patch[1]:patch[3], patch[0]:patch[2]]
+ results['img'] = img
+ results['img_shape'] = img.shape
+
+ # seg fields
+ for key in results.get('seg_fields', []):
+ results[key] = results[key][patch[1]:patch[3],
+ patch[0]:patch[2]]
+ return results
+
+ def __repr__(self):
+ repr_str = self.__class__.__name__
+ repr_str += f'(min_ious={self.min_ious}, '
+ repr_str += f'min_crop_size={self.min_crop_size}, '
+ repr_str += f'bbox_clip_border={self.bbox_clip_border})'
+ return repr_str
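+
+
+# Illustrative usage sketch, not part of the upstream module: crop candidates
+# are resampled until every kept ground-truth box meets the sampled IoU with
+# the patch and at least one box center lies inside it (or the original image
+# is returned unchanged).
+def _example_min_iou_random_crop():
+    import numpy as np
+    transform = MinIoURandomCrop(min_ious=(0.4, 0.6), min_crop_size=0.3)
+    results = dict(
+        img=np.zeros((120, 160, 3), dtype=np.uint8),
+        img_fields=['img'],
+        bbox_fields=['gt_bboxes'],
+        gt_bboxes=np.array([[20., 20., 80., 80.]], dtype=np.float32),
+        gt_labels=np.array([1]))
+    out = transform(results)
+    return out['img'].shape, out['gt_bboxes']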
+
+
+@PIPELINES.register_module()
+class Corrupt(object):
+ """Corruption augmentation.
+
+    Corruption transforms implemented based on the ``imagecorruptions``
+    package.
+
+ Args:
+ corruption (str): Corruption name.
+ severity (int, optional): The severity of corruption. Default: 1.
+ """
+
+ def __init__(self, corruption, severity=1):
+ self.corruption = corruption
+ self.severity = severity
+
+ def __call__(self, results):
+ """Call function to corrupt image.
+
+ Args:
+ results (dict): Result dict from loading pipeline.
+
+ Returns:
+ dict: Result dict with images corrupted.
+ """
+
+ if corrupt is None:
+ raise RuntimeError('imagecorruptions is not installed')
+ if 'img_fields' in results:
+ assert results['img_fields'] == ['img'], \
+ 'Only single img_fields is allowed'
+ results['img'] = corrupt(
+ results['img'].astype(np.uint8),
+ corruption_name=self.corruption,
+ severity=self.severity)
+ return results
+
+ def __repr__(self):
+ repr_str = self.__class__.__name__
+ repr_str += f'(corruption={self.corruption}, '
+ repr_str += f'severity={self.severity})'
+ return repr_str
+
+
+@PIPELINES.register_module()
+class Albu(object):
+ """Albumentation augmentation.
+
+    Adds custom transformations from the Albumentations library.
+    Please visit https://albumentations.readthedocs.io
+    for more information.
+
+    An example of ``transforms`` is as follows:
+
+ .. code-block::
+
+ [
+ dict(
+ type='ShiftScaleRotate',
+ shift_limit=0.0625,
+ scale_limit=0.0,
+ rotate_limit=0,
+ interpolation=1,
+ p=0.5),
+ dict(
+ type='RandomBrightnessContrast',
+ brightness_limit=[0.1, 0.3],
+ contrast_limit=[0.1, 0.3],
+ p=0.2),
+ dict(type='ChannelShuffle', p=0.1),
+ dict(
+ type='OneOf',
+ transforms=[
+ dict(type='Blur', blur_limit=3, p=1.0),
+ dict(type='MedianBlur', blur_limit=3, p=1.0)
+ ],
+ p=0.1),
+ ]
+
+ Args:
+ transforms (list[dict]): A list of albu transformations
+ bbox_params (dict): Bbox_params for albumentation `Compose`
+ keymap (dict): Contains {'input key':'albumentation-style key'}
+        skip_img_without_anno (bool): Whether to skip the image if no
+            annotations are left after augmentation.
+ """
+
+ def __init__(self,
+ transforms,
+ bbox_params=None,
+ keymap=None,
+ update_pad_shape=False,
+ skip_img_without_anno=False):
+ if Compose is None:
+ raise RuntimeError('albumentations is not installed')
+
+ # Args will be modified later, copying it will be safer
+ transforms = copy.deepcopy(transforms)
+ if bbox_params is not None:
+ bbox_params = copy.deepcopy(bbox_params)
+ if keymap is not None:
+ keymap = copy.deepcopy(keymap)
+ self.transforms = transforms
+ self.filter_lost_elements = False
+ self.update_pad_shape = update_pad_shape
+ self.skip_img_without_anno = skip_img_without_anno
+
+ # A simple workaround to remove masks without boxes
+ if (isinstance(bbox_params, dict) and 'label_fields' in bbox_params
+ and 'filter_lost_elements' in bbox_params):
+ self.filter_lost_elements = True
+ self.origin_label_fields = bbox_params['label_fields']
+ bbox_params['label_fields'] = ['idx_mapper']
+ del bbox_params['filter_lost_elements']
+
+ self.bbox_params = (
+ self.albu_builder(bbox_params) if bbox_params else None)
+ self.aug = Compose([self.albu_builder(t) for t in self.transforms],
+ bbox_params=self.bbox_params)
+
+ if not keymap:
+ self.keymap_to_albu = {
+ 'img': 'image',
+ 'gt_masks': 'masks',
+ 'gt_bboxes': 'bboxes'
+ }
+ else:
+ self.keymap_to_albu = keymap
+ self.keymap_back = {v: k for k, v in self.keymap_to_albu.items()}
+
+ def albu_builder(self, cfg):
+ """Import a module from albumentations.
+
+ It inherits some of :func:`build_from_cfg` logic.
+
+ Args:
+ cfg (dict): Config dict. It should at least contain the key "type".
+
+ Returns:
+ obj: The constructed object.
+ """
+
+ assert isinstance(cfg, dict) and 'type' in cfg
+ args = cfg.copy()
+
+ obj_type = args.pop('type')
+ if mmcv.is_str(obj_type):
+ if albumentations is None:
+ raise RuntimeError('albumentations is not installed')
+ obj_cls = getattr(albumentations, obj_type)
+ elif inspect.isclass(obj_type):
+ obj_cls = obj_type
+ else:
+ raise TypeError(
+ f'type must be a str or valid type, but got {type(obj_type)}')
+
+ if 'transforms' in args:
+ args['transforms'] = [
+ self.albu_builder(transform)
+ for transform in args['transforms']
+ ]
+
+ return obj_cls(**args)
+
+ @staticmethod
+ def mapper(d, keymap):
+ """Dictionary mapper. Renames keys according to keymap provided.
+
+ Args:
+ d (dict): old dict
+ keymap (dict): {'old_key':'new_key'}
+ Returns:
+ dict: new dict.
+ """
+
+ updated_dict = {}
+        for k, v in d.items():
+            new_k = keymap.get(k, k)
+            updated_dict[new_k] = v
+ return updated_dict
+
+ def __call__(self, results):
+ # dict to albumentations format
+ results = self.mapper(results, self.keymap_to_albu)
+ # TODO: add bbox_fields
+ if 'bboxes' in results:
+ # to list of boxes
+ if isinstance(results['bboxes'], np.ndarray):
+ results['bboxes'] = [x for x in results['bboxes']]
+ # add pseudo-field for filtration
+ if self.filter_lost_elements:
+ results['idx_mapper'] = np.arange(len(results['bboxes']))
+
+ # TODO: Support mask structure in albu
+ if 'masks' in results:
+ if isinstance(results['masks'], PolygonMasks):
+ raise NotImplementedError(
+ 'Albu only supports BitMap masks now')
+ ori_masks = results['masks']
+ if albumentations.__version__ < '0.5':
+ results['masks'] = results['masks'].masks
+ else:
+ results['masks'] = [mask for mask in results['masks'].masks]
+
+ results = self.aug(**results)
+
+ if 'bboxes' in results:
+ if isinstance(results['bboxes'], list):
+ results['bboxes'] = np.array(
+ results['bboxes'], dtype=np.float32)
+ results['bboxes'] = results['bboxes'].reshape(-1, 4)
+
+ # filter label_fields
+ if self.filter_lost_elements:
+
+ for label in self.origin_label_fields:
+ results[label] = np.array(
+ [results[label][i] for i in results['idx_mapper']])
+ if 'masks' in results:
+ results['masks'] = np.array(
+ [results['masks'][i] for i in results['idx_mapper']])
+ results['masks'] = ori_masks.__class__(
+ results['masks'], results['image'].shape[0],
+ results['image'].shape[1])
+
+ if (not len(results['idx_mapper'])
+ and self.skip_img_without_anno):
+ return None
+
+ if 'gt_labels' in results:
+ if isinstance(results['gt_labels'], list):
+ results['gt_labels'] = np.array(results['gt_labels'])
+ results['gt_labels'] = results['gt_labels'].astype(np.int64)
+
+ # back to the original format
+ results = self.mapper(results, self.keymap_back)
+
+ # update final shape
+ if self.update_pad_shape:
+ results['pad_shape'] = results['img'].shape
+
+ return results
+
+ def __repr__(self):
+ repr_str = self.__class__.__name__ + f'(transforms={self.transforms})'
+ return repr_str
+
+
+@PIPELINES.register_module()
+class RandomCenterCropPad(object):
+ """Random center crop and random around padding for CornerNet.
+
+    This operation generates a randomly cropped image from the original image
+    and pads it simultaneously. Different from :class:`RandomCrop`, the output
+    shape may not strictly equal ``crop_size``. We choose a random value
+    from ``ratios``, so the output shape can be larger or smaller than
+    ``crop_size``. The padding operation is also different from :class:`Pad`:
+ here we use around padding instead of right-bottom padding.
+
+ The relation between output image (padding image) and original image:
+
+ .. code:: text
+
+ output image
+
+ +----------------------------+
+ | padded area |
+ +------|----------------------------|----------+
+ | | cropped area | |
+ | | +---------------+ | |
+ | | | . center | | | original image
+ | | | range | | |
+ | | +---------------+ | |
+ +------|----------------------------|----------+
+ | padded area |
+ +----------------------------+
+
+ There are 5 main areas in the figure:
+
+ - output image: output image of this operation, also called padding
+ image in following instruction.
+ - original image: input image of this operation.
+ - padded area: non-intersect area of output image and original image.
+ - cropped area: the overlap of output image and original image.
+    - center range: a smaller area from which the random center is chosen.
+      The center range is computed from ``border`` and the original image's
+      shape to avoid placing the random center too close to the image border.
+
+    This operation also acts differently in train and test mode; the two
+    pipelines are summarized below.
+
+ Train pipeline:
+
+    1. Choose a ``random_ratio`` from ``ratios``; the shape of the padding
+       image will be ``random_ratio * crop_size``.
+    2. Choose a ``random_center`` in the center range.
+    3. Generate the padding image whose center matches the ``random_center``.
+    4. Initialize the padding image with pixel values equal to ``mean``.
+ 5. Copy the cropped area to padding image.
+ 6. Refine annotations.
+
+ Test pipeline:
+
+ 1. Compute output shape according to ``test_pad_mode``.
+    2. Generate the padding image whose center matches the original image
+       center.
+    3. Initialize the padding image with pixel values equal to ``mean``.
+ 4. Copy the ``cropped area`` to padding image.
+
+ Args:
+        crop_size (tuple | None): expected size after crop; the final size
+            will be computed according to ratio. Requires (h, w) in train
+            mode, and None in test mode.
+        ratios (tuple): a ratio is randomly selected from the tuple and the
+            image is cropped to (crop_size[0] * ratio) x (crop_size[1] * ratio).
+            Only available in train mode.
+        border (int): max distance from the center-selection area to the
+            image border.
+ Only available in train mode.
+ mean (sequence): Mean values of 3 channels.
+ std (sequence): Std values of 3 channels.
+ to_rgb (bool): Whether to convert the image from BGR to RGB.
+        test_mode (bool): whether to involve random variables in the
+            transform. In train mode, crop_size is fixed, and the center
+            coords and ratio are randomly selected from predefined lists. In
+            test mode, crop_size is the image's original shape, and the
+            center coords and ratio are fixed.
+ test_pad_mode (tuple): padding method and padding shape value, only
+ available in test mode. Default is using 'logical_or' with
+ 127 as padding shape value.
+
+ - 'logical_or': final_shape = input_shape | padding_shape_value
+ - 'size_divisor': final_shape = int(
+ ceil(input_shape / padding_shape_value) * padding_shape_value)
+ bbox_clip_border (bool, optional): Whether clip the objects outside
+ the border of the image. Defaults to True.
+ """
+
+ def __init__(self,
+ crop_size=None,
+ ratios=(0.9, 1.0, 1.1),
+ border=128,
+ mean=None,
+ std=None,
+ to_rgb=None,
+ test_mode=False,
+ test_pad_mode=('logical_or', 127),
+ bbox_clip_border=True):
+ if test_mode:
+ assert crop_size is None, 'crop_size must be None in test mode'
+ assert ratios is None, 'ratios must be None in test mode'
+ assert border is None, 'border must be None in test mode'
+ assert isinstance(test_pad_mode, (list, tuple))
+ assert test_pad_mode[0] in ['logical_or', 'size_divisor']
+ else:
+ assert isinstance(crop_size, (list, tuple))
+ assert crop_size[0] > 0 and crop_size[1] > 0, (
+                'crop_size must be > 0 in train mode')
+ assert isinstance(ratios, (list, tuple))
+ assert test_pad_mode is None, (
+ 'test_pad_mode must be None in train mode')
+
+ self.crop_size = crop_size
+ self.ratios = ratios
+ self.border = border
+ # We do not set default value to mean, std and to_rgb because these
+ # hyper-parameters are easy to forget but could affect the performance.
+ # Please use the same setting as Normalize for performance assurance.
+ assert mean is not None and std is not None and to_rgb is not None
+ self.to_rgb = to_rgb
+ self.input_mean = mean
+ self.input_std = std
+ if to_rgb:
+ self.mean = mean[::-1]
+ self.std = std[::-1]
+ else:
+ self.mean = mean
+ self.std = std
+ self.test_mode = test_mode
+ self.test_pad_mode = test_pad_mode
+ self.bbox_clip_border = bbox_clip_border
+
+ def _get_border(self, border, size):
+ """Get final border for the target size.
+
+ This function generates a ``final_border`` according to image's shape.
+ The area between ``final_border`` and ``size - final_border`` is the
+        ``center range``. We randomly choose the center from the
+        ``center range`` to avoid the random center being too close to the
+        original image's border. The ``center range`` should also be larger
+        than 0.
+
+ Args:
+ border (int): The initial border, default is 128.
+ size (int): The width or height of original image.
+ Returns:
+ int: The final border.
+ """
+ k = 2 * border / size
+ i = pow(2, np.ceil(np.log2(np.ceil(k))) + (k == int(k)))
+ return border // i
+
+ def _filter_boxes(self, patch, boxes):
+ """Check whether the center of each box is in the patch.
+
+ Args:
+ patch (list[int]): The cropped area, [left, top, right, bottom].
+ boxes (numpy array, (N x 4)): Ground truth boxes.
+
+ Returns:
+            mask (numpy array, (N,)): Whether the center of each box is
+                inside the patch.
+ """
+ center = (boxes[:, :2] + boxes[:, 2:]) / 2
+ mask = (center[:, 0] > patch[0]) * (center[:, 1] > patch[1]) * (
+ center[:, 0] < patch[2]) * (
+ center[:, 1] < patch[3])
+ return mask
+
+ def _crop_image_and_paste(self, image, center, size):
+ """Crop image with a given center and size, then paste the cropped
+        image onto a blank image with the two centers aligned.
+
+ This function is equivalent to generating a blank image with ``size``
+        as its shape, then overlaying it on the original image with the two
+        centers (the center of the blank image and the random center of the
+        original image) aligned. The overlapping area is pasted from the
+        original image and the outside area is filled with the ``mean pixel``.
+
+ Args:
+ image (np array, H x W x C): Original image.
+ center (list[int]): Target crop center coord.
+ size (list[int]): Target crop size. [target_h, target_w]
+
+ Returns:
+ cropped_img (np array, target_h x target_w x C): Cropped image.
+ border (np array, 4): The distance of four border of
+ ``cropped_img`` to the original image area, [top, bottom,
+ left, right]
+ patch (list[int]): The cropped area, [left, top, right, bottom].
+ """
+ center_y, center_x = center
+ target_h, target_w = size
+ img_h, img_w, img_c = image.shape
+
+ x0 = max(0, center_x - target_w // 2)
+ x1 = min(center_x + target_w // 2, img_w)
+ y0 = max(0, center_y - target_h // 2)
+ y1 = min(center_y + target_h // 2, img_h)
+ patch = np.array((int(x0), int(y0), int(x1), int(y1)))
+
+ left, right = center_x - x0, x1 - center_x
+ top, bottom = center_y - y0, y1 - center_y
+
+ cropped_center_y, cropped_center_x = target_h // 2, target_w // 2
+ cropped_img = np.zeros((target_h, target_w, img_c), dtype=image.dtype)
+ for i in range(img_c):
+ cropped_img[:, :, i] += self.mean[i]
+ y_slice = slice(cropped_center_y - top, cropped_center_y + bottom)
+ x_slice = slice(cropped_center_x - left, cropped_center_x + right)
+ cropped_img[y_slice, x_slice, :] = image[y0:y1, x0:x1, :]
+
+ border = np.array([
+ cropped_center_y - top, cropped_center_y + bottom,
+ cropped_center_x - left, cropped_center_x + right
+ ],
+ dtype=np.float32)
+
+ return cropped_img, border, patch
+
+ def _train_aug(self, results):
+ """Random crop and around padding the original image.
+
+ Args:
+ results (dict): Image infomations in the augment pipeline.
+
+ Returns:
+ results (dict): The updated dict.
+ """
+ img = results['img']
+ h, w, c = img.shape
+ boxes = results['gt_bboxes']
+ while True:
+ scale = random.choice(self.ratios)
+ new_h = int(self.crop_size[0] * scale)
+ new_w = int(self.crop_size[1] * scale)
+ h_border = self._get_border(self.border, h)
+ w_border = self._get_border(self.border, w)
+
+ for i in range(50):
+ center_x = random.randint(low=w_border, high=w - w_border)
+ center_y = random.randint(low=h_border, high=h - h_border)
+
+ cropped_img, border, patch = self._crop_image_and_paste(
+ img, [center_y, center_x], [new_h, new_w])
+
+ mask = self._filter_boxes(patch, boxes)
+                # if the image does not have a valid bbox, any crop patch is valid.
+ if not mask.any() and len(boxes) > 0:
+ continue
+
+ results['img'] = cropped_img
+ results['img_shape'] = cropped_img.shape
+ results['pad_shape'] = cropped_img.shape
+
+ x0, y0, x1, y1 = patch
+
+ left_w, top_h = center_x - x0, center_y - y0
+ cropped_center_x, cropped_center_y = new_w // 2, new_h // 2
+
+ # crop bboxes accordingly and clip to the image boundary
+ for key in results.get('bbox_fields', []):
+ mask = self._filter_boxes(patch, results[key])
+ bboxes = results[key][mask]
+ bboxes[:, 0:4:2] += cropped_center_x - left_w - x0
+ bboxes[:, 1:4:2] += cropped_center_y - top_h - y0
+ if self.bbox_clip_border:
+ bboxes[:, 0:4:2] = np.clip(bboxes[:, 0:4:2], 0, new_w)
+ bboxes[:, 1:4:2] = np.clip(bboxes[:, 1:4:2], 0, new_h)
+ keep = (bboxes[:, 2] > bboxes[:, 0]) & (
+ bboxes[:, 3] > bboxes[:, 1])
+ bboxes = bboxes[keep]
+ results[key] = bboxes
+ if key in ['gt_bboxes']:
+ if 'gt_labels' in results:
+ labels = results['gt_labels'][mask]
+ labels = labels[keep]
+ results['gt_labels'] = labels
+ if 'gt_masks' in results:
+ raise NotImplementedError(
+ 'RandomCenterCropPad only supports bbox.')
+
+ # crop semantic seg
+ for key in results.get('seg_fields', []):
+ raise NotImplementedError(
+ 'RandomCenterCropPad only supports bbox.')
+ return results
+
+ def _test_aug(self, results):
+ """Around padding the original image without cropping.
+
+ The padding mode and value are from ``test_pad_mode``.
+
+ Args:
+ results (dict): Image infomations in the augment pipeline.
+
+ Returns:
+ results (dict): The updated dict.
+ """
+ img = results['img']
+ h, w, c = img.shape
+ results['img_shape'] = img.shape
+ if self.test_pad_mode[0] in ['logical_or']:
+ target_h = h | self.test_pad_mode[1]
+ target_w = w | self.test_pad_mode[1]
+ elif self.test_pad_mode[0] in ['size_divisor']:
+ divisor = self.test_pad_mode[1]
+ target_h = int(np.ceil(h / divisor)) * divisor
+ target_w = int(np.ceil(w / divisor)) * divisor
+ else:
+ raise NotImplementedError(
+                'RandomCenterCropPad only supports two testing pad modes: '
+                'logical_or and size_divisor.')
+
+ cropped_img, border, _ = self._crop_image_and_paste(
+ img, [h // 2, w // 2], [target_h, target_w])
+ results['img'] = cropped_img
+ results['pad_shape'] = cropped_img.shape
+ results['border'] = border
+ return results
+
+ def __call__(self, results):
+ img = results['img']
+ assert img.dtype == np.float32, (
+ 'RandomCenterCropPad needs the input image of dtype np.float32,'
+ ' please set "to_float32=True" in "LoadImageFromFile" pipeline')
+ h, w, c = img.shape
+ assert c == len(self.mean)
+ if self.test_mode:
+ return self._test_aug(results)
+ else:
+ return self._train_aug(results)
+
+ def __repr__(self):
+ repr_str = self.__class__.__name__
+ repr_str += f'(crop_size={self.crop_size}, '
+ repr_str += f'ratios={self.ratios}, '
+ repr_str += f'border={self.border}, '
+ repr_str += f'mean={self.input_mean}, '
+ repr_str += f'std={self.input_std}, '
+ repr_str += f'to_rgb={self.to_rgb}, '
+ repr_str += f'test_mode={self.test_mode}, '
+ repr_str += f'test_pad_mode={self.test_pad_mode}, '
+ repr_str += f'bbox_clip_border={self.bbox_clip_border})'
+ return repr_str
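+
+
+# Illustrative train-mode sketch, not part of the upstream module: mean, std
+# and to_rgb should match the Normalize settings of the real pipeline, and the
+# input image must already be float32. All values below are placeholders.
+def _example_random_center_crop_pad():
+    import numpy as np
+    transform = RandomCenterCropPad(
+        crop_size=(512, 512),
+        ratios=(0.9, 1.0, 1.1),
+        border=128,
+        mean=[0, 0, 0],
+        std=[1, 1, 1],
+        to_rgb=True,
+        test_mode=False,
+        test_pad_mode=None)
+    results = dict(
+        img=np.zeros((480, 640, 3), dtype=np.float32),
+        bbox_fields=['gt_bboxes'],
+        gt_bboxes=np.array([[100., 100., 200., 200.]], dtype=np.float32),
+        gt_labels=np.array([0]))
+    out = transform(results)
+    # the output shape is roughly crop_size scaled by the sampled ratio
+    return out['img'].shape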
+
+
+@PIPELINES.register_module()
+class CutOut(object):
+ """CutOut operation.
+
+    Randomly drop some regions of the image, as proposed in the Cutout paper.
+
+ Args:
+ n_holes (int | tuple[int, int]): Number of regions to be dropped.
+        If it is given as a tuple, the number of holes will be randomly
+ selected from the closed interval [`n_holes[0]`, `n_holes[1]`].
+ cutout_shape (tuple[int, int] | list[tuple[int, int]]): The candidate
+ shape of dropped regions. It can be `tuple[int, int]` to use a
+ fixed cutout shape, or `list[tuple[int, int]]` to randomly choose
+ shape from the list.
+ cutout_ratio (tuple[float, float] | list[tuple[float, float]]): The
+ candidate ratio of dropped regions. It can be `tuple[float, float]`
+ to use a fixed ratio or `list[tuple[float, float]]` to randomly
+ choose ratio from the list. Please note that `cutout_shape`
+            and `cutout_ratio` cannot both be given at the same time.
+ fill_in (tuple[float, float, float] | tuple[int, int, int]): The value
+ of pixel to fill in the dropped regions. Default: (0, 0, 0).
+ """
+
+ def __init__(self,
+ n_holes,
+ cutout_shape=None,
+ cutout_ratio=None,
+ fill_in=(0, 0, 0)):
+
+ assert (cutout_shape is None) ^ (cutout_ratio is None), \
+ 'Either cutout_shape or cutout_ratio should be specified.'
+ assert (isinstance(cutout_shape, (list, tuple))
+ or isinstance(cutout_ratio, (list, tuple)))
+ if isinstance(n_holes, tuple):
+ assert len(n_holes) == 2 and 0 <= n_holes[0] < n_holes[1]
+ else:
+ n_holes = (n_holes, n_holes)
+ self.n_holes = n_holes
+ self.fill_in = fill_in
+ self.with_ratio = cutout_ratio is not None
+ self.candidates = cutout_ratio if self.with_ratio else cutout_shape
+ if not isinstance(self.candidates, list):
+ self.candidates = [self.candidates]
+
+ def __call__(self, results):
+ """Call function to drop some regions of image."""
+ h, w, c = results['img'].shape
+ n_holes = np.random.randint(self.n_holes[0], self.n_holes[1] + 1)
+ for _ in range(n_holes):
+ x1 = np.random.randint(0, w)
+ y1 = np.random.randint(0, h)
+ index = np.random.randint(0, len(self.candidates))
+ if not self.with_ratio:
+ cutout_w, cutout_h = self.candidates[index]
+ else:
+ cutout_w = int(self.candidates[index][0] * w)
+ cutout_h = int(self.candidates[index][1] * h)
+
+ x2 = np.clip(x1 + cutout_w, 0, w)
+ y2 = np.clip(y1 + cutout_h, 0, h)
+ results['img'][y1:y2, x1:x2, :] = self.fill_in
+
+ return results
+
+ def __repr__(self):
+ repr_str = self.__class__.__name__
+ repr_str += f'(n_holes={self.n_holes}, '
+ repr_str += (f'cutout_ratio={self.candidates}, ' if self.with_ratio
+ else f'cutout_shape={self.candidates}, ')
+ repr_str += f'fill_in={self.fill_in})'
+ return repr_str
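+
+
+# Illustrative usage sketch, not part of the upstream module: it drops 1 to 3
+# rectangular regions, each covering 20% of the image in both dimensions, and
+# fills them with zeros.
+def _example_cutout():
+    import numpy as np
+    transform = CutOut(
+        n_holes=(1, 3), cutout_ratio=(0.2, 0.2), fill_in=(0, 0, 0))
+    results = dict(img=np.full((64, 64, 3), 255, dtype=np.uint8))
+    return transform(results)['img']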
diff --git a/annotator/uniformer/mmdet_null/datasets/samplers/__init__.py b/annotator/uniformer/mmdet_null/datasets/samplers/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..2596aeb2ccfc85b58624713c04453d34e94a4062
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/datasets/samplers/__init__.py
@@ -0,0 +1,4 @@
+from .distributed_sampler import DistributedSampler
+from .group_sampler import DistributedGroupSampler, GroupSampler
+
+__all__ = ['DistributedSampler', 'DistributedGroupSampler', 'GroupSampler']
diff --git a/annotator/uniformer/mmdet_null/datasets/samplers/distributed_sampler.py b/annotator/uniformer/mmdet_null/datasets/samplers/distributed_sampler.py
new file mode 100644
index 0000000000000000000000000000000000000000..cc61019484655ee2829f7908dc442caa20cf1d54
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/datasets/samplers/distributed_sampler.py
@@ -0,0 +1,39 @@
+import math
+
+import torch
+from torch.utils.data import DistributedSampler as _DistributedSampler
+
+
+class DistributedSampler(_DistributedSampler):
+
+ def __init__(self,
+ dataset,
+ num_replicas=None,
+ rank=None,
+ shuffle=True,
+ seed=0):
+ super().__init__(
+ dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle)
+ # for the compatibility from PyTorch 1.3+
+ self.seed = seed if seed is not None else 0
+
+ def __iter__(self):
+ # deterministically shuffle based on epoch
+ if self.shuffle:
+ g = torch.Generator()
+ g.manual_seed(self.epoch + self.seed)
+ indices = torch.randperm(len(self.dataset), generator=g).tolist()
+ else:
+ indices = torch.arange(len(self.dataset)).tolist()
+
+ # add extra samples to make it evenly divisible
+ # in case that indices is shorter than half of total_size
+ indices = (indices *
+ math.ceil(self.total_size / len(indices)))[:self.total_size]
+ assert len(indices) == self.total_size
+
+ # subsample
+ indices = indices[self.rank:self.total_size:self.num_replicas]
+ assert len(indices) == self.num_samples
+
+ return iter(indices)
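+
+
+# Illustrative usage sketch, not part of the upstream module: in distributed
+# training every rank builds the same sampler and receives a disjoint,
+# evenly sized slice of the epoch-seeded shuffled indices. `dataset`, `rank`
+# and `world_size` are placeholders supplied by the caller.
+def _example_distributed_sampler(dataset, rank, world_size):
+    from torch.utils.data import DataLoader
+    sampler = DistributedSampler(
+        dataset, num_replicas=world_size, rank=rank, shuffle=True, seed=0)
+    sampler.set_epoch(0)  # re-seed the shuffle at the start of each epoch
+    return DataLoader(dataset, batch_size=2, sampler=sampler)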
diff --git a/annotator/uniformer/mmdet_null/datasets/samplers/group_sampler.py b/annotator/uniformer/mmdet_null/datasets/samplers/group_sampler.py
new file mode 100644
index 0000000000000000000000000000000000000000..f88cf3439446a2eb7d8656388ddbe93196315f5b
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/datasets/samplers/group_sampler.py
@@ -0,0 +1,148 @@
+from __future__ import division
+import math
+
+import numpy as np
+import torch
+from mmcv.runner import get_dist_info
+from torch.utils.data import Sampler
+
+
+class GroupSampler(Sampler):
+
+ def __init__(self, dataset, samples_per_gpu=1):
+ assert hasattr(dataset, 'flag')
+ self.dataset = dataset
+ self.samples_per_gpu = samples_per_gpu
+ self.flag = dataset.flag.astype(np.int64)
+ self.group_sizes = np.bincount(self.flag)
+ self.num_samples = 0
+ for i, size in enumerate(self.group_sizes):
+ self.num_samples += int(np.ceil(
+ size / self.samples_per_gpu)) * self.samples_per_gpu
+
+ def __iter__(self):
+ indices = []
+ for i, size in enumerate(self.group_sizes):
+ if size == 0:
+ continue
+ indice = np.where(self.flag == i)[0]
+ assert len(indice) == size
+ np.random.shuffle(indice)
+ num_extra = int(np.ceil(size / self.samples_per_gpu)
+ ) * self.samples_per_gpu - len(indice)
+ indice = np.concatenate(
+ [indice, np.random.choice(indice, num_extra)])
+ indices.append(indice)
+ indices = np.concatenate(indices)
+ indices = [
+ indices[i * self.samples_per_gpu:(i + 1) * self.samples_per_gpu]
+ for i in np.random.permutation(
+ range(len(indices) // self.samples_per_gpu))
+ ]
+ indices = np.concatenate(indices)
+ indices = indices.astype(np.int64).tolist()
+ assert len(indices) == self.num_samples
+ return iter(indices)
+
+ def __len__(self):
+ return self.num_samples
+
+
+class DistributedGroupSampler(Sampler):
+ """Sampler that restricts data loading to a subset of the dataset.
+
+ It is especially useful in conjunction with
+    :class:`torch.nn.parallel.DistributedDataParallel`. In such a case, each
+ process can pass a DistributedSampler instance as a DataLoader sampler,
+ and load a subset of the original dataset that is exclusive to it.
+
+ .. note::
+ Dataset is assumed to be of constant size.
+
+ Arguments:
+ dataset: Dataset used for sampling.
+ num_replicas (optional): Number of processes participating in
+ distributed training.
+ rank (optional): Rank of the current process within num_replicas.
+ seed (int, optional): random seed used to shuffle the sampler if
+ ``shuffle=True``. This number should be identical across all
+ processes in the distributed group. Default: 0.
+ """
+
+ def __init__(self,
+ dataset,
+ samples_per_gpu=1,
+ num_replicas=None,
+ rank=None,
+ seed=0):
+ _rank, _num_replicas = get_dist_info()
+ if num_replicas is None:
+ num_replicas = _num_replicas
+ if rank is None:
+ rank = _rank
+ self.dataset = dataset
+ self.samples_per_gpu = samples_per_gpu
+ self.num_replicas = num_replicas
+ self.rank = rank
+ self.epoch = 0
+ self.seed = seed if seed is not None else 0
+
+ assert hasattr(self.dataset, 'flag')
+ self.flag = self.dataset.flag
+ self.group_sizes = np.bincount(self.flag)
+
+ self.num_samples = 0
+ for i, j in enumerate(self.group_sizes):
+ self.num_samples += int(
+ math.ceil(self.group_sizes[i] * 1.0 / self.samples_per_gpu /
+ self.num_replicas)) * self.samples_per_gpu
+ self.total_size = self.num_samples * self.num_replicas
+
+ def __iter__(self):
+ # deterministically shuffle based on epoch
+ g = torch.Generator()
+ g.manual_seed(self.epoch + self.seed)
+
+ indices = []
+ for i, size in enumerate(self.group_sizes):
+ if size > 0:
+ indice = np.where(self.flag == i)[0]
+ assert len(indice) == size
+ # add .numpy() to avoid bug when selecting indice in parrots.
+ # TODO: check whether torch.randperm() can be replaced by
+ # numpy.random.permutation().
+ indice = indice[list(
+ torch.randperm(int(size), generator=g).numpy())].tolist()
+ extra = int(
+ math.ceil(
+ size * 1.0 / self.samples_per_gpu / self.num_replicas)
+ ) * self.samples_per_gpu * self.num_replicas - len(indice)
+ # pad indice
+ tmp = indice.copy()
+ for _ in range(extra // size):
+ indice.extend(tmp)
+ indice.extend(tmp[:extra % size])
+ indices.extend(indice)
+
+ assert len(indices) == self.total_size
+
+ indices = [
+ indices[j] for i in list(
+ torch.randperm(
+ len(indices) // self.samples_per_gpu, generator=g))
+ for j in range(i * self.samples_per_gpu, (i + 1) *
+ self.samples_per_gpu)
+ ]
+
+ # subsample
+ offset = self.num_samples * self.rank
+ indices = indices[offset:offset + self.num_samples]
+ assert len(indices) == self.num_samples
+
+ return iter(indices)
+
+ def __len__(self):
+ return self.num_samples
+
+ def set_epoch(self, epoch):
+ self.epoch = epoch
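+
+
+# Illustrative usage sketch, not part of the upstream module: both samplers
+# expect the dataset to expose a per-sample `flag` array (mmdet uses 0/1 for
+# image aspect-ratio groups) so that each batch is drawn from a single group.
+# The toy dataset below is a placeholder.
+def _example_group_sampler():
+    import numpy as np
+
+    class _ToyDataset:
+        flag = np.array([0, 0, 1, 1, 1], dtype=np.int64)
+
+        def __len__(self):
+            return len(self.flag)
+
+    sampler = GroupSampler(_ToyDataset(), samples_per_gpu=2)
+    # indices come out grouped so that every pair shares the same flag
+    return list(iter(sampler))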
diff --git a/annotator/uniformer/mmdet_null/datasets/utils.py b/annotator/uniformer/mmdet_null/datasets/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..74b97d6044eeac7466d41b599c8c9809929a792a
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/datasets/utils.py
@@ -0,0 +1,158 @@
+import copy
+import warnings
+
+from annotator.uniformer.mmcv.cnn import VGG
+from annotator.uniformer.mmcv.runner.hooks import HOOKS, Hook
+
+from annotator.uniformer.mmdet.datasets.builder import PIPELINES
+from annotator.uniformer.mmdet.datasets.pipelines import LoadAnnotations, LoadImageFromFile
+from annotator.uniformer.mmdet.models.dense_heads import GARPNHead, RPNHead
+from annotator.uniformer.mmdet.models.roi_heads.mask_heads import FusedSemanticHead
+
+
+def replace_ImageToTensor(pipelines):
+ """Replace the ImageToTensor transform in a data pipeline to
+ DefaultFormatBundle, which is normally useful in batch inference.
+
+ Args:
+ pipelines (list[dict]): Data pipeline configs.
+
+ Returns:
+ list: The new pipeline list with all ImageToTensor replaced by
+ DefaultFormatBundle.
+
+ Examples:
+ >>> pipelines = [
+ ... dict(type='LoadImageFromFile'),
+ ... dict(
+ ... type='MultiScaleFlipAug',
+ ... img_scale=(1333, 800),
+ ... flip=False,
+ ... transforms=[
+ ... dict(type='Resize', keep_ratio=True),
+ ... dict(type='RandomFlip'),
+ ... dict(type='Normalize', mean=[0, 0, 0], std=[1, 1, 1]),
+ ... dict(type='Pad', size_divisor=32),
+ ... dict(type='ImageToTensor', keys=['img']),
+ ... dict(type='Collect', keys=['img']),
+ ... ])
+ ... ]
+ >>> expected_pipelines = [
+ ... dict(type='LoadImageFromFile'),
+ ... dict(
+ ... type='MultiScaleFlipAug',
+ ... img_scale=(1333, 800),
+ ... flip=False,
+ ... transforms=[
+ ... dict(type='Resize', keep_ratio=True),
+ ... dict(type='RandomFlip'),
+ ... dict(type='Normalize', mean=[0, 0, 0], std=[1, 1, 1]),
+ ... dict(type='Pad', size_divisor=32),
+ ... dict(type='DefaultFormatBundle'),
+ ... dict(type='Collect', keys=['img']),
+ ... ])
+ ... ]
+ >>> assert expected_pipelines == replace_ImageToTensor(pipelines)
+ """
+ pipelines = copy.deepcopy(pipelines)
+ for i, pipeline in enumerate(pipelines):
+ if pipeline['type'] == 'MultiScaleFlipAug':
+ assert 'transforms' in pipeline
+ pipeline['transforms'] = replace_ImageToTensor(
+ pipeline['transforms'])
+ elif pipeline['type'] == 'ImageToTensor':
+ warnings.warn(
+ '"ImageToTensor" pipeline is replaced by '
+ '"DefaultFormatBundle" for batch inference. It is '
+ 'recommended to manually replace it in the test '
+ 'data pipeline in your config file.', UserWarning)
+ pipelines[i] = {'type': 'DefaultFormatBundle'}
+ return pipelines
+
+
+def get_loading_pipeline(pipeline):
+ """Only keep loading image and annotations related configuration.
+
+ Args:
+ pipeline (list[dict]): Data pipeline configs.
+
+ Returns:
+        list[dict]: The new pipeline list that keeps only the
+            loading-image and annotations-related configuration.
+
+ Examples:
+ >>> pipelines = [
+ ... dict(type='LoadImageFromFile'),
+ ... dict(type='LoadAnnotations', with_bbox=True),
+ ... dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+ ... dict(type='RandomFlip', flip_ratio=0.5),
+ ... dict(type='Normalize', **img_norm_cfg),
+ ... dict(type='Pad', size_divisor=32),
+ ... dict(type='DefaultFormatBundle'),
+ ... dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
+ ... ]
+ >>> expected_pipelines = [
+ ... dict(type='LoadImageFromFile'),
+ ... dict(type='LoadAnnotations', with_bbox=True)
+ ... ]
+ >>> assert expected_pipelines ==\
+ ... get_loading_pipeline(pipelines)
+ """
+ loading_pipeline_cfg = []
+ for cfg in pipeline:
+ obj_cls = PIPELINES.get(cfg['type'])
+        # TODO: use a more elegant way to distinguish loading modules
+ if obj_cls is not None and obj_cls in (LoadImageFromFile,
+ LoadAnnotations):
+ loading_pipeline_cfg.append(cfg)
+ assert len(loading_pipeline_cfg) == 2, \
+ 'The data pipeline in your config file must include ' \
+ 'loading image and annotations related pipeline.'
+ return loading_pipeline_cfg
+
+
+@HOOKS.register_module()
+class NumClassCheckHook(Hook):
+
+ def _check_head(self, runner):
+ """Check whether the `num_classes` in head matches the length of
+        `CLASSES` in `dataset`.
+
+ Args:
+ runner (obj:`EpochBasedRunner`): Epoch based Runner.
+ """
+ model = runner.model
+ dataset = runner.data_loader.dataset
+ if dataset.CLASSES is None:
+ runner.logger.warning(
+ f'Please set `CLASSES` '
+                f'in the {dataset.__class__.__name__} and '
+ f'check if it is consistent with the `num_classes` '
+ f'of head')
+ else:
+ for name, module in model.named_modules():
+ if hasattr(module, 'num_classes') and not isinstance(
+ module, (RPNHead, VGG, FusedSemanticHead, GARPNHead)):
+ assert module.num_classes == len(dataset.CLASSES), \
+ (f'The `num_classes` ({module.num_classes}) in '
+ f'{module.__class__.__name__} of '
+                         f'{model.__class__.__name__} does not match '
+                         f'the length of `CLASSES` '
+                         f'({len(dataset.CLASSES)}) in '
+ f'{dataset.__class__.__name__}')
+
+ def before_train_epoch(self, runner):
+ """Check whether the training dataset is compatible with head.
+
+ Args:
+ runner (obj:`EpochBasedRunner`): Epoch based Runner.
+ """
+ self._check_head(runner)
+
+ def before_val_epoch(self, runner):
+ """Check whether the dataset in val epoch is compatible with head.
+
+ Args:
+ runner (obj:`EpochBasedRunner`): Epoch based Runner.
+ """
+ self._check_head(runner)
diff --git a/annotator/uniformer/mmdet_null/datasets/voc.py b/annotator/uniformer/mmdet_null/datasets/voc.py
new file mode 100644
index 0000000000000000000000000000000000000000..abd4cb8947238936faff48fc92c093c8ae06daff
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/datasets/voc.py
@@ -0,0 +1,93 @@
+from collections import OrderedDict
+
+from mmcv.utils import print_log
+
+from mmdet.core import eval_map, eval_recalls
+from .builder import DATASETS
+from .xml_style import XMLDataset
+
+
+@DATASETS.register_module()
+class VOCDataset(XMLDataset):
+
+ CLASSES = ('aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car',
+ 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse',
+ 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train',
+ 'tvmonitor')
+
+ def __init__(self, **kwargs):
+ super(VOCDataset, self).__init__(**kwargs)
+ if 'VOC2007' in self.img_prefix:
+ self.year = 2007
+ elif 'VOC2012' in self.img_prefix:
+ self.year = 2012
+ else:
+ raise ValueError('Cannot infer dataset year from img_prefix')
+
+ def evaluate(self,
+ results,
+ metric='mAP',
+ logger=None,
+ proposal_nums=(100, 300, 1000),
+ iou_thr=0.5,
+ scale_ranges=None):
+ """Evaluate in VOC protocol.
+
+ Args:
+ results (list[list | tuple]): Testing results of the dataset.
+ metric (str | list[str]): Metrics to be evaluated. Options are
+ 'mAP', 'recall'.
+ logger (logging.Logger | str, optional): Logger used for printing
+ related information during evaluation. Default: None.
+ proposal_nums (Sequence[int]): Proposal number used for evaluating
+ recalls, such as recall@100, recall@1000.
+ Default: (100, 300, 1000).
+ iou_thr (float | list[float]): IoU threshold. Default: 0.5.
+ scale_ranges (list[tuple], optional): Scale ranges for evaluating
+ mAP. If not specified, all bounding boxes would be included in
+ evaluation. Default: None.
+
+ Returns:
+ dict[str, float]: AP/recall metrics.
+ """
+
+ if not isinstance(metric, str):
+ assert len(metric) == 1
+ metric = metric[0]
+ allowed_metrics = ['mAP', 'recall']
+ if metric not in allowed_metrics:
+ raise KeyError(f'metric {metric} is not supported')
+ annotations = [self.get_ann_info(i) for i in range(len(self))]
+ eval_results = OrderedDict()
+ iou_thrs = [iou_thr] if isinstance(iou_thr, float) else iou_thr
+ if metric == 'mAP':
+ assert isinstance(iou_thrs, list)
+ if self.year == 2007:
+ ds_name = 'voc07'
+ else:
+ ds_name = self.CLASSES
+ mean_aps = []
+ for iou_thr in iou_thrs:
+ print_log(f'\n{"-" * 15}iou_thr: {iou_thr}{"-" * 15}')
+ mean_ap, _ = eval_map(
+ results,
+ annotations,
+ scale_ranges=None,
+ iou_thr=iou_thr,
+ dataset=ds_name,
+ logger=logger)
+ mean_aps.append(mean_ap)
+ eval_results[f'AP{int(iou_thr * 100):02d}'] = round(mean_ap, 3)
+ eval_results['mAP'] = sum(mean_aps) / len(mean_aps)
+ elif metric == 'recall':
+ gt_bboxes = [ann['bboxes'] for ann in annotations]
+ recalls = eval_recalls(
+ gt_bboxes, results, proposal_nums, iou_thr, logger=logger)
+ for i, num in enumerate(proposal_nums):
+                for j, iou in enumerate(iou_thrs):
+ eval_results[f'recall@{num}@{iou}'] = recalls[i, j]
+ if recalls.shape[1] > 1:
+ ar = recalls.mean(axis=1)
+ for i, num in enumerate(proposal_nums):
+ eval_results[f'AR@{num}'] = ar[i]
+ return eval_results
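+
+
+# Illustrative config sketch, not part of the upstream module: the dataset is
+# normally built from a config dict like the one below; the year (and hence
+# the VOC07-style mAP protocol) is inferred from `img_prefix`. Paths and the
+# empty pipeline are placeholders.
+def _example_voc_config():
+    return dict(
+        type='VOCDataset',
+        ann_file='data/VOCdevkit/VOC2007/ImageSets/Main/trainval.txt',
+        img_prefix='data/VOCdevkit/VOC2007/',
+        pipeline=[])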
diff --git a/annotator/uniformer/mmdet_null/datasets/wider_face.py b/annotator/uniformer/mmdet_null/datasets/wider_face.py
new file mode 100644
index 0000000000000000000000000000000000000000..3a13907db87a9986a7d701837259a0b712fc9dca
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/datasets/wider_face.py
@@ -0,0 +1,51 @@
+import os.path as osp
+import xml.etree.ElementTree as ET
+
+import mmcv
+
+from .builder import DATASETS
+from .xml_style import XMLDataset
+
+
+@DATASETS.register_module()
+class WIDERFaceDataset(XMLDataset):
+ """Reader for the WIDER Face dataset in PASCAL VOC format.
+
+ Conversion scripts can be found in
+ https://github.com/sovrasov/wider-face-pascal-voc-annotations
+ """
+ CLASSES = ('face', )
+
+ def __init__(self, **kwargs):
+ super(WIDERFaceDataset, self).__init__(**kwargs)
+
+ def load_annotations(self, ann_file):
+ """Load annotation from WIDERFace XML style annotation file.
+
+ Args:
+ ann_file (str): Path of XML file.
+
+ Returns:
+ list[dict]: Annotation info from XML file.
+ """
+
+ data_infos = []
+ img_ids = mmcv.list_from_file(ann_file)
+ for img_id in img_ids:
+ filename = f'{img_id}.jpg'
+ xml_path = osp.join(self.img_prefix, 'Annotations',
+ f'{img_id}.xml')
+ tree = ET.parse(xml_path)
+ root = tree.getroot()
+ size = root.find('size')
+ width = int(size.find('width').text)
+ height = int(size.find('height').text)
+ folder = root.find('folder').text
+ data_infos.append(
+ dict(
+ id=img_id,
+ filename=osp.join(folder, filename),
+ width=width,
+ height=height))
+
+ return data_infos
diff --git a/annotator/uniformer/mmdet_null/datasets/xml_style.py b/annotator/uniformer/mmdet_null/datasets/xml_style.py
new file mode 100644
index 0000000000000000000000000000000000000000..71069488b0f6da3b37e588228f44460ce5f00679
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/datasets/xml_style.py
@@ -0,0 +1,170 @@
+import os.path as osp
+import xml.etree.ElementTree as ET
+
+import mmcv
+import numpy as np
+from PIL import Image
+
+from .builder import DATASETS
+from .custom import CustomDataset
+
+
+@DATASETS.register_module()
+class XMLDataset(CustomDataset):
+ """XML dataset for detection.
+
+ Args:
+ min_size (int | float, optional): The minimum size of bounding
+ boxes in the images. If the size of a bounding box is less than
+            ``min_size``, it will be added to the ignored field.
+ """
+
+ def __init__(self, min_size=None, **kwargs):
+ assert self.CLASSES or kwargs.get(
+ 'classes', None), 'CLASSES in `XMLDataset` can not be None.'
+ super(XMLDataset, self).__init__(**kwargs)
+ self.cat2label = {cat: i for i, cat in enumerate(self.CLASSES)}
+ self.min_size = min_size
+
+ def load_annotations(self, ann_file):
+ """Load annotation from XML style ann_file.
+
+ Args:
+ ann_file (str): Path of XML file.
+
+ Returns:
+ list[dict]: Annotation info from XML file.
+ """
+
+ data_infos = []
+ img_ids = mmcv.list_from_file(ann_file)
+ for img_id in img_ids:
+ filename = f'JPEGImages/{img_id}.jpg'
+ xml_path = osp.join(self.img_prefix, 'Annotations',
+ f'{img_id}.xml')
+ tree = ET.parse(xml_path)
+ root = tree.getroot()
+ size = root.find('size')
+ if size is not None:
+ width = int(size.find('width').text)
+ height = int(size.find('height').text)
+ else:
+ img_path = osp.join(self.img_prefix, 'JPEGImages',
+ '{}.jpg'.format(img_id))
+ img = Image.open(img_path)
+ width, height = img.size
+ data_infos.append(
+ dict(id=img_id, filename=filename, width=width, height=height))
+
+ return data_infos
+
+ def _filter_imgs(self, min_size=32):
+ """Filter images too small or without annotation."""
+ valid_inds = []
+ for i, img_info in enumerate(self.data_infos):
+ if min(img_info['width'], img_info['height']) < min_size:
+ continue
+ if self.filter_empty_gt:
+ img_id = img_info['id']
+ xml_path = osp.join(self.img_prefix, 'Annotations',
+ f'{img_id}.xml')
+ tree = ET.parse(xml_path)
+ root = tree.getroot()
+ for obj in root.findall('object'):
+ name = obj.find('name').text
+ if name in self.CLASSES:
+ valid_inds.append(i)
+ break
+ else:
+ valid_inds.append(i)
+ return valid_inds
+
+ def get_ann_info(self, idx):
+ """Get annotation from XML file by index.
+
+ Args:
+ idx (int): Index of data.
+
+ Returns:
+ dict: Annotation info of specified index.
+ """
+
+ img_id = self.data_infos[idx]['id']
+ xml_path = osp.join(self.img_prefix, 'Annotations', f'{img_id}.xml')
+ tree = ET.parse(xml_path)
+ root = tree.getroot()
+ bboxes = []
+ labels = []
+ bboxes_ignore = []
+ labels_ignore = []
+ for obj in root.findall('object'):
+ name = obj.find('name').text
+ if name not in self.CLASSES:
+ continue
+ label = self.cat2label[name]
+ difficult = obj.find('difficult')
+ difficult = 0 if difficult is None else int(difficult.text)
+ bnd_box = obj.find('bndbox')
+ # TODO: check whether it is necessary to use int
+ # Coordinates may be float type
+ bbox = [
+ int(float(bnd_box.find('xmin').text)),
+ int(float(bnd_box.find('ymin').text)),
+ int(float(bnd_box.find('xmax').text)),
+ int(float(bnd_box.find('ymax').text))
+ ]
+ ignore = False
+ if self.min_size:
+ assert not self.test_mode
+ w = bbox[2] - bbox[0]
+ h = bbox[3] - bbox[1]
+ if w < self.min_size or h < self.min_size:
+ ignore = True
+ if difficult or ignore:
+ bboxes_ignore.append(bbox)
+ labels_ignore.append(label)
+ else:
+ bboxes.append(bbox)
+ labels.append(label)
+ if not bboxes:
+ bboxes = np.zeros((0, 4))
+ labels = np.zeros((0, ))
+ else:
+ bboxes = np.array(bboxes, ndmin=2) - 1
+ labels = np.array(labels)
+ if not bboxes_ignore:
+ bboxes_ignore = np.zeros((0, 4))
+ labels_ignore = np.zeros((0, ))
+ else:
+ bboxes_ignore = np.array(bboxes_ignore, ndmin=2) - 1
+ labels_ignore = np.array(labels_ignore)
+ ann = dict(
+ bboxes=bboxes.astype(np.float32),
+ labels=labels.astype(np.int64),
+ bboxes_ignore=bboxes_ignore.astype(np.float32),
+ labels_ignore=labels_ignore.astype(np.int64))
+ return ann
+
+ def get_cat_ids(self, idx):
+ """Get category ids in XML file by index.
+
+ Args:
+ idx (int): Index of data.
+
+ Returns:
+ list[int]: All categories in the image of specified index.
+ """
+
+ cat_ids = []
+ img_id = self.data_infos[idx]['id']
+ xml_path = osp.join(self.img_prefix, 'Annotations', f'{img_id}.xml')
+ tree = ET.parse(xml_path)
+ root = tree.getroot()
+ for obj in root.findall('object'):
+ name = obj.find('name').text
+ if name not in self.CLASSES:
+ continue
+ label = self.cat2label[name]
+ cat_ids.append(label)
+
+ return cat_ids
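+
+
+# Illustrative sketch, not part of the upstream module: a custom VOC-style
+# dataset only needs to define CLASSES; annotations and images are expected
+# under the `Annotations/<id>.xml` and `JPEGImages/<id>.jpg` layout assumed
+# above. The class names are placeholders.
+def _example_xml_dataset_subclass():
+    class TrafficSignDataset(XMLDataset):
+        CLASSES = ('stop_sign', 'speed_limit')
+
+    return TrafficSignDataset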
diff --git a/annotator/uniformer/mmdet_null/models/__init__.py b/annotator/uniformer/mmdet_null/models/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..44ac99855ae52101c91be167fa78d8219fc47259
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/__init__.py
@@ -0,0 +1,16 @@
+from .backbones import * # noqa: F401,F403
+from .builder import (BACKBONES, DETECTORS, HEADS, LOSSES, NECKS,
+ ROI_EXTRACTORS, SHARED_HEADS, build_backbone,
+ build_detector, build_head, build_loss, build_neck,
+ build_roi_extractor, build_shared_head)
+from .dense_heads import * # noqa: F401,F403
+from .detectors import * # noqa: F401,F403
+from .losses import * # noqa: F401,F403
+from .necks import * # noqa: F401,F403
+from .roi_heads import * # noqa: F401,F403
+
+__all__ = [
+ 'BACKBONES', 'NECKS', 'ROI_EXTRACTORS', 'SHARED_HEADS', 'HEADS', 'LOSSES',
+ 'DETECTORS', 'build_backbone', 'build_neck', 'build_roi_extractor',
+ 'build_shared_head', 'build_head', 'build_loss', 'build_detector'
+]
diff --git a/annotator/uniformer/mmdet_null/models/backbones/__init__.py b/annotator/uniformer/mmdet_null/models/backbones/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e54b088acf644d285ecbeb1440c414e722b9db58
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/backbones/__init__.py
@@ -0,0 +1,20 @@
+from .darknet import Darknet
+from .detectors_resnet import DetectoRS_ResNet
+from .detectors_resnext import DetectoRS_ResNeXt
+from .hourglass import HourglassNet
+from .hrnet import HRNet
+from .regnet import RegNet
+from .res2net import Res2Net
+from .resnest import ResNeSt
+from .resnet import ResNet, ResNetV1d
+from .resnext import ResNeXt
+from .ssd_vgg import SSDVGG
+from .trident_resnet import TridentResNet
+from .swin_transformer import SwinTransformer
+from .uniformer import UniFormer
+
+__all__ = [
+ 'RegNet', 'ResNet', 'ResNetV1d', 'ResNeXt', 'SSDVGG', 'HRNet', 'Res2Net',
+ 'HourglassNet', 'DetectoRS_ResNet', 'DetectoRS_ResNeXt', 'Darknet',
+ 'ResNeSt', 'TridentResNet', 'SwinTransformer', 'UniFormer'
+]
diff --git a/annotator/uniformer/mmdet_null/models/backbones/darknet.py b/annotator/uniformer/mmdet_null/models/backbones/darknet.py
new file mode 100644
index 0000000000000000000000000000000000000000..517fe26259217792e0dad80ca3824d914cfe3904
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/backbones/darknet.py
@@ -0,0 +1,199 @@
+# Copyright (c) 2019 Western Digital Corporation or its affiliates.
+
+import logging
+
+import torch.nn as nn
+from mmcv.cnn import ConvModule, constant_init, kaiming_init
+from mmcv.runner import load_checkpoint
+from torch.nn.modules.batchnorm import _BatchNorm
+
+from ..builder import BACKBONES
+
+
+class ResBlock(nn.Module):
+ """The basic residual block used in Darknet. Each ResBlock consists of two
+ ConvModules and the input is added to the final output. Each ConvModule is
+    composed of Conv, BN, and LeakyReLU. In the YOLOv3 paper, the first conv
+    layer has half as many filters as the second one; the first conv layer
+    uses a 1x1 kernel and the second a 3x3 kernel.
+
+ Args:
+ in_channels (int): The input channels. Must be even.
+ conv_cfg (dict): Config dict for convolution layer. Default: None.
+ norm_cfg (dict): Dictionary to construct and config norm layer.
+ Default: dict(type='BN', requires_grad=True)
+ act_cfg (dict): Config dict for activation layer.
+ Default: dict(type='LeakyReLU', negative_slope=0.1).
+ """
+
+ def __init__(self,
+ in_channels,
+ conv_cfg=None,
+ norm_cfg=dict(type='BN', requires_grad=True),
+ act_cfg=dict(type='LeakyReLU', negative_slope=0.1)):
+ super(ResBlock, self).__init__()
+ assert in_channels % 2 == 0 # ensure the in_channels is even
+ half_in_channels = in_channels // 2
+
+ # shortcut
+ cfg = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)
+
+ self.conv1 = ConvModule(in_channels, half_in_channels, 1, **cfg)
+ self.conv2 = ConvModule(
+ half_in_channels, in_channels, 3, padding=1, **cfg)
+
+ def forward(self, x):
+ residual = x
+ out = self.conv1(x)
+ out = self.conv2(out)
+ out = out + residual
+
+ return out
+
+
+@BACKBONES.register_module()
+class Darknet(nn.Module):
+ """Darknet backbone.
+
+ Args:
+        depth (int): Depth of Darknet. Currently only 53 is supported.
+ out_indices (Sequence[int]): Output from which stages.
+ frozen_stages (int): Stages to be frozen (stop grad and set eval mode).
+ -1 means not freezing any parameters. Default: -1.
+ conv_cfg (dict): Config dict for convolution layer. Default: None.
+ norm_cfg (dict): Dictionary to construct and config norm layer.
+ Default: dict(type='BN', requires_grad=True)
+ act_cfg (dict): Config dict for activation layer.
+ Default: dict(type='LeakyReLU', negative_slope=0.1).
+ norm_eval (bool): Whether to set norm layers to eval mode, namely,
+ freeze running stats (mean and var). Note: Effect on Batch Norm
+ and its variants only.
+
+ Example:
+ >>> from mmdet.models import Darknet
+ >>> import torch
+ >>> self = Darknet(depth=53)
+ >>> self.eval()
+ >>> inputs = torch.rand(1, 3, 416, 416)
+ >>> level_outputs = self.forward(inputs)
+ >>> for level_out in level_outputs:
+ ... print(tuple(level_out.shape))
+ ...
+ (1, 256, 52, 52)
+ (1, 512, 26, 26)
+ (1, 1024, 13, 13)
+ """
+
+ # Dict(depth: (layers, channels))
+ arch_settings = {
+ 53: ((1, 2, 8, 8, 4), ((32, 64), (64, 128), (128, 256), (256, 512),
+ (512, 1024)))
+ }
+
+ def __init__(self,
+ depth=53,
+ out_indices=(3, 4, 5),
+ frozen_stages=-1,
+ conv_cfg=None,
+ norm_cfg=dict(type='BN', requires_grad=True),
+ act_cfg=dict(type='LeakyReLU', negative_slope=0.1),
+ norm_eval=True):
+ super(Darknet, self).__init__()
+ if depth not in self.arch_settings:
+ raise KeyError(f'invalid depth {depth} for darknet')
+ self.depth = depth
+ self.out_indices = out_indices
+ self.frozen_stages = frozen_stages
+ self.layers, self.channels = self.arch_settings[depth]
+
+ cfg = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)
+
+ self.conv1 = ConvModule(3, 32, 3, padding=1, **cfg)
+
+ self.cr_blocks = ['conv1']
+ for i, n_layers in enumerate(self.layers):
+ layer_name = f'conv_res_block{i + 1}'
+ in_c, out_c = self.channels[i]
+ self.add_module(
+ layer_name,
+ self.make_conv_res_block(in_c, out_c, n_layers, **cfg))
+ self.cr_blocks.append(layer_name)
+
+ self.norm_eval = norm_eval
+
+ def forward(self, x):
+ outs = []
+ for i, layer_name in enumerate(self.cr_blocks):
+ cr_block = getattr(self, layer_name)
+ x = cr_block(x)
+ if i in self.out_indices:
+ outs.append(x)
+
+ return tuple(outs)
+
+ def init_weights(self, pretrained=None):
+ if isinstance(pretrained, str):
+ logger = logging.getLogger()
+ load_checkpoint(self, pretrained, strict=False, logger=logger)
+ elif pretrained is None:
+ for m in self.modules():
+ if isinstance(m, nn.Conv2d):
+ kaiming_init(m)
+ elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
+ constant_init(m, 1)
+
+ else:
+ raise TypeError('pretrained must be a str or None')
+
+ def _freeze_stages(self):
+ if self.frozen_stages >= 0:
+ for i in range(self.frozen_stages):
+ m = getattr(self, self.cr_blocks[i])
+ m.eval()
+ for param in m.parameters():
+ param.requires_grad = False
+
+ def train(self, mode=True):
+ super(Darknet, self).train(mode)
+ self._freeze_stages()
+ if mode and self.norm_eval:
+ for m in self.modules():
+ if isinstance(m, _BatchNorm):
+ m.eval()
+
+ @staticmethod
+ def make_conv_res_block(in_channels,
+ out_channels,
+ res_repeat,
+ conv_cfg=None,
+ norm_cfg=dict(type='BN', requires_grad=True),
+ act_cfg=dict(type='LeakyReLU',
+ negative_slope=0.1)):
+ """In Darknet backbone, ConvLayer is usually followed by ResBlock. This
+ function will make that. The Conv layers always have 3x3 filters with
+ stride=2. The number of the filters in Conv layer is the same as the
+ out channels of the ResBlock.
+
+ Args:
+ in_channels (int): The number of input channels.
+ out_channels (int): The number of output channels.
+ res_repeat (int): The number of ResBlocks.
+ conv_cfg (dict): Config dict for convolution layer. Default: None.
+ norm_cfg (dict): Dictionary to construct and config norm layer.
+ Default: dict(type='BN', requires_grad=True)
+ act_cfg (dict): Config dict for activation layer.
+ Default: dict(type='LeakyReLU', negative_slope=0.1).
+ """
+
+ cfg = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)
+
+ model = nn.Sequential()
+ model.add_module(
+ 'conv',
+ ConvModule(
+ in_channels, out_channels, 3, stride=2, padding=1, **cfg))
+ for idx in range(res_repeat):
+ model.add_module('res{}'.format(idx),
+ ResBlock(out_channels, **cfg))
+ return model
diff --git a/annotator/uniformer/mmdet_null/models/backbones/detectors_resnet.py b/annotator/uniformer/mmdet_null/models/backbones/detectors_resnet.py
new file mode 100644
index 0000000000000000000000000000000000000000..519db464493c7c7b60fc34be1d21add2235ec341
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/backbones/detectors_resnet.py
@@ -0,0 +1,305 @@
+import torch.nn as nn
+import torch.utils.checkpoint as cp
+from mmcv.cnn import build_conv_layer, build_norm_layer, constant_init
+
+from ..builder import BACKBONES
+from .resnet import Bottleneck as _Bottleneck
+from .resnet import ResNet
+
+
+class Bottleneck(_Bottleneck):
+    r"""Bottleneck for the ResNet backbone in `DetectoRS
+    <https://arxiv.org/abs/2006.02334>`_.
+
+ This bottleneck allows the users to specify whether to use
+ SAC (Switchable Atrous Convolution) and RFP (Recursive Feature Pyramid).
+
+ Args:
+ inplanes (int): The number of input channels.
+ planes (int): The number of output channels before expansion.
+ rfp_inplanes (int, optional): The number of channels from RFP.
+ Default: None. If specified, an additional conv layer will be
+ added for ``rfp_feat``. Otherwise, the structure is the same as
+ base class.
+ sac (dict, optional): Dictionary to construct SAC. Default: None.
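+
+    Example (illustrative sketch, assuming the upstream ``mmdet`` import
+        path; ``rfp_inplanes=64`` is an arbitrary choice):
+        >>> import torch
+        >>> from mmdet.models.backbones.detectors_resnet import Bottleneck
+        >>> self = Bottleneck(inplanes=256, planes=64, rfp_inplanes=64)
+        >>> self.eval()
+        >>> x = torch.rand(1, 256, 8, 8)
+        >>> rfp_feat = torch.rand(1, 64, 8, 8)
+        >>> out = self.rfp_forward(x, rfp_feat)
+        >>> tuple(out.shape)
+        (1, 256, 8, 8)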
+ """
+ expansion = 4
+
+ def __init__(self,
+ inplanes,
+ planes,
+ rfp_inplanes=None,
+ sac=None,
+ **kwargs):
+ super(Bottleneck, self).__init__(inplanes, planes, **kwargs)
+
+ assert sac is None or isinstance(sac, dict)
+ self.sac = sac
+ self.with_sac = sac is not None
+ if self.with_sac:
+ self.conv2 = build_conv_layer(
+ self.sac,
+ planes,
+ planes,
+ kernel_size=3,
+ stride=self.conv2_stride,
+ padding=self.dilation,
+ dilation=self.dilation,
+ bias=False)
+
+ self.rfp_inplanes = rfp_inplanes
+ if self.rfp_inplanes:
+ self.rfp_conv = build_conv_layer(
+ None,
+ self.rfp_inplanes,
+ planes * self.expansion,
+ 1,
+ stride=1,
+ bias=True)
+ self.init_weights()
+
+ def init_weights(self):
+ """Initialize the weights."""
+ if self.rfp_inplanes:
+ constant_init(self.rfp_conv, 0)
+
+ def rfp_forward(self, x, rfp_feat):
+ """The forward function that also takes the RFP features as input."""
+
+ def _inner_forward(x):
+ identity = x
+
+ out = self.conv1(x)
+ out = self.norm1(out)
+ out = self.relu(out)
+
+ if self.with_plugins:
+ out = self.forward_plugin(out, self.after_conv1_plugin_names)
+
+ out = self.conv2(out)
+ out = self.norm2(out)
+ out = self.relu(out)
+
+ if self.with_plugins:
+ out = self.forward_plugin(out, self.after_conv2_plugin_names)
+
+ out = self.conv3(out)
+ out = self.norm3(out)
+
+ if self.with_plugins:
+ out = self.forward_plugin(out, self.after_conv3_plugin_names)
+
+ if self.downsample is not None:
+ identity = self.downsample(x)
+
+ out += identity
+
+ return out
+
+ if self.with_cp and x.requires_grad:
+ out = cp.checkpoint(_inner_forward, x)
+ else:
+ out = _inner_forward(x)
+
+ if self.rfp_inplanes:
+ rfp_feat = self.rfp_conv(rfp_feat)
+ out = out + rfp_feat
+
+ out = self.relu(out)
+
+ return out
+
+
+class ResLayer(nn.Sequential):
+    """ResLayer to build a ResNet-style backbone for RFP in DetectoRS.
+
+ The difference between this module and base class is that we pass
+ ``rfp_inplanes`` to the first block.
+
+ Args:
+ block (nn.Module): block used to build ResLayer.
+ inplanes (int): inplanes of block.
+ planes (int): planes of block.
+ num_blocks (int): number of blocks.
+ stride (int): stride of the first block. Default: 1
+ avg_down (bool): Use AvgPool instead of stride conv when
+ downsampling in the bottleneck. Default: False
+ conv_cfg (dict): dictionary to construct and config conv layer.
+ Default: None
+ norm_cfg (dict): dictionary to construct and config norm layer.
+ Default: dict(type='BN')
+ downsample_first (bool): Downsample at the first block or last block.
+ False for Hourglass, True for ResNet. Default: True
+ rfp_inplanes (int, optional): The number of channels from RFP.
+ Default: None. If specified, an additional conv layer will be
+ added for ``rfp_feat``. Otherwise, the structure is the same as
+ base class.
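+
+    Example (illustrative sketch, assuming the upstream ``mmdet`` import
+        path):
+        >>> import torch
+        >>> from mmdet.models.backbones.detectors_resnet import Bottleneck
+        >>> from mmdet.models.backbones.detectors_resnet import ResLayer
+        >>> layer = ResLayer(Bottleneck, inplanes=64, planes=16,
+        ...                  num_blocks=2, rfp_inplanes=64)
+        >>> layer.eval()
+        >>> x = torch.rand(1, 64, 16, 16)
+        >>> tuple(layer(x).shape)
+        (1, 64, 16, 16)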
+ """
+
+ def __init__(self,
+ block,
+ inplanes,
+ planes,
+ num_blocks,
+ stride=1,
+ avg_down=False,
+ conv_cfg=None,
+ norm_cfg=dict(type='BN'),
+ downsample_first=True,
+ rfp_inplanes=None,
+ **kwargs):
+ self.block = block
+ assert downsample_first, f'downsample_first={downsample_first} is ' \
+ 'not supported in DetectoRS'
+
+ downsample = None
+ if stride != 1 or inplanes != planes * block.expansion:
+ downsample = []
+ conv_stride = stride
+ if avg_down and stride != 1:
+ conv_stride = 1
+ downsample.append(
+ nn.AvgPool2d(
+ kernel_size=stride,
+ stride=stride,
+ ceil_mode=True,
+ count_include_pad=False))
+ downsample.extend([
+ build_conv_layer(
+ conv_cfg,
+ inplanes,
+ planes * block.expansion,
+ kernel_size=1,
+ stride=conv_stride,
+ bias=False),
+ build_norm_layer(norm_cfg, planes * block.expansion)[1]
+ ])
+ downsample = nn.Sequential(*downsample)
+
+ layers = []
+ layers.append(
+ block(
+ inplanes=inplanes,
+ planes=planes,
+ stride=stride,
+ downsample=downsample,
+ conv_cfg=conv_cfg,
+ norm_cfg=norm_cfg,
+ rfp_inplanes=rfp_inplanes,
+ **kwargs))
+ inplanes = planes * block.expansion
+ for _ in range(1, num_blocks):
+ layers.append(
+ block(
+ inplanes=inplanes,
+ planes=planes,
+ stride=1,
+ conv_cfg=conv_cfg,
+ norm_cfg=norm_cfg,
+ **kwargs))
+
+ super(ResLayer, self).__init__(*layers)
+
+
+@BACKBONES.register_module()
+class DetectoRS_ResNet(ResNet):
+ """ResNet backbone for DetectoRS.
+
+ Args:
+ sac (dict, optional): Dictionary to construct SAC (Switchable Atrous
+ Convolution). Default: None.
+ stage_with_sac (list): Which stage to use sac. Default: (False, False,
+ False, False).
+ rfp_inplanes (int, optional): The number of channels from RFP.
+ Default: None. If specified, an additional conv layer will be
+ added for ``rfp_feat``. Otherwise, the structure is the same as
+ base class.
+ output_img (bool): If ``True``, the input image will be inserted into
+ the starting position of output. Default: False.
+ pretrained (str, optional): The pretrained model to load.
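+
+    Example (illustrative sketch, assuming the upstream ``mmdet`` import
+        path and the ResNet-50 settings inherited from ``ResNet``):
+        >>> from mmdet.models import DetectoRS_ResNet
+        >>> import torch
+        >>> self = DetectoRS_ResNet(depth=50, output_img=True)
+        >>> self.eval()
+        >>> inputs = torch.rand(1, 3, 32, 32)
+        >>> level_outputs = self.forward(inputs)
+        >>> for level_out in level_outputs:
+        ...     print(tuple(level_out.shape))
+        (1, 3, 32, 32)
+        (1, 256, 8, 8)
+        (1, 512, 4, 4)
+        (1, 1024, 2, 2)
+        (1, 2048, 1, 1)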
+ """
+
+ arch_settings = {
+ 50: (Bottleneck, (3, 4, 6, 3)),
+ 101: (Bottleneck, (3, 4, 23, 3)),
+ 152: (Bottleneck, (3, 8, 36, 3))
+ }
+
+ def __init__(self,
+ sac=None,
+ stage_with_sac=(False, False, False, False),
+ rfp_inplanes=None,
+ output_img=False,
+ pretrained=None,
+ **kwargs):
+ self.sac = sac
+ self.stage_with_sac = stage_with_sac
+ self.rfp_inplanes = rfp_inplanes
+ self.output_img = output_img
+ self.pretrained = pretrained
+ super(DetectoRS_ResNet, self).__init__(**kwargs)
+
+ self.inplanes = self.stem_channels
+ self.res_layers = []
+ for i, num_blocks in enumerate(self.stage_blocks):
+ stride = self.strides[i]
+ dilation = self.dilations[i]
+ dcn = self.dcn if self.stage_with_dcn[i] else None
+ sac = self.sac if self.stage_with_sac[i] else None
+ if self.plugins is not None:
+ stage_plugins = self.make_stage_plugins(self.plugins, i)
+ else:
+ stage_plugins = None
+ planes = self.base_channels * 2**i
+ res_layer = self.make_res_layer(
+ block=self.block,
+ inplanes=self.inplanes,
+ planes=planes,
+ num_blocks=num_blocks,
+ stride=stride,
+ dilation=dilation,
+ style=self.style,
+ avg_down=self.avg_down,
+ with_cp=self.with_cp,
+ conv_cfg=self.conv_cfg,
+ norm_cfg=self.norm_cfg,
+ dcn=dcn,
+ sac=sac,
+ rfp_inplanes=rfp_inplanes if i > 0 else None,
+ plugins=stage_plugins)
+ self.inplanes = planes * self.block.expansion
+ layer_name = f'layer{i + 1}'
+ self.add_module(layer_name, res_layer)
+ self.res_layers.append(layer_name)
+
+ self._freeze_stages()
+
+ def make_res_layer(self, **kwargs):
+ """Pack all blocks in a stage into a ``ResLayer`` for DetectoRS."""
+ return ResLayer(**kwargs)
+
+ def forward(self, x):
+ """Forward function."""
+ outs = list(super(DetectoRS_ResNet, self).forward(x))
+ if self.output_img:
+ outs.insert(0, x)
+ return tuple(outs)
+
+ def rfp_forward(self, x, rfp_feats):
+ """Forward function for RFP."""
+ if self.deep_stem:
+ x = self.stem(x)
+ else:
+ x = self.conv1(x)
+ x = self.norm1(x)
+ x = self.relu(x)
+ x = self.maxpool(x)
+ outs = []
+ for i, layer_name in enumerate(self.res_layers):
+ res_layer = getattr(self, layer_name)
+ rfp_feat = rfp_feats[i] if i > 0 else None
+ for layer in res_layer:
+ x = layer.rfp_forward(x, rfp_feat)
+ if i in self.out_indices:
+ outs.append(x)
+ return tuple(outs)
diff --git a/annotator/uniformer/mmdet_null/models/backbones/detectors_resnext.py b/annotator/uniformer/mmdet_null/models/backbones/detectors_resnext.py
new file mode 100644
index 0000000000000000000000000000000000000000..57d032fe37ed82d5ba24e761bdc014cc0ee5ac64
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/backbones/detectors_resnext.py
@@ -0,0 +1,122 @@
+import math
+
+from mmcv.cnn import build_conv_layer, build_norm_layer
+
+from ..builder import BACKBONES
+from .detectors_resnet import Bottleneck as _Bottleneck
+from .detectors_resnet import DetectoRS_ResNet
+
+
+class Bottleneck(_Bottleneck):
+ expansion = 4
+
+ def __init__(self,
+ inplanes,
+ planes,
+ groups=1,
+ base_width=4,
+ base_channels=64,
+ **kwargs):
+ """Bottleneck block for ResNeXt.
+
+        If style is "pytorch", the stride-two layer is the 3x3 conv layer;
+        if it is "caffe", the stride-two layer is the first 1x1 conv layer.
+ """
+ super(Bottleneck, self).__init__(inplanes, planes, **kwargs)
+
+ if groups == 1:
+ width = self.planes
+ else:
+ width = math.floor(self.planes *
+ (base_width / base_channels)) * groups
+
+ self.norm1_name, norm1 = build_norm_layer(
+ self.norm_cfg, width, postfix=1)
+ self.norm2_name, norm2 = build_norm_layer(
+ self.norm_cfg, width, postfix=2)
+ self.norm3_name, norm3 = build_norm_layer(
+ self.norm_cfg, self.planes * self.expansion, postfix=3)
+
+ self.conv1 = build_conv_layer(
+ self.conv_cfg,
+ self.inplanes,
+ width,
+ kernel_size=1,
+ stride=self.conv1_stride,
+ bias=False)
+ self.add_module(self.norm1_name, norm1)
+ fallback_on_stride = False
+ self.with_modulated_dcn = False
+ if self.with_dcn:
+ fallback_on_stride = self.dcn.pop('fallback_on_stride', False)
+ if self.with_sac:
+ self.conv2 = build_conv_layer(
+ self.sac,
+ width,
+ width,
+ kernel_size=3,
+ stride=self.conv2_stride,
+ padding=self.dilation,
+ dilation=self.dilation,
+ groups=groups,
+ bias=False)
+ elif not self.with_dcn or fallback_on_stride:
+ self.conv2 = build_conv_layer(
+ self.conv_cfg,
+ width,
+ width,
+ kernel_size=3,
+ stride=self.conv2_stride,
+ padding=self.dilation,
+ dilation=self.dilation,
+ groups=groups,
+ bias=False)
+ else:
+ assert self.conv_cfg is None, 'conv_cfg must be None for DCN'
+ self.conv2 = build_conv_layer(
+ self.dcn,
+ width,
+ width,
+ kernel_size=3,
+ stride=self.conv2_stride,
+ padding=self.dilation,
+ dilation=self.dilation,
+ groups=groups,
+ bias=False)
+
+ self.add_module(self.norm2_name, norm2)
+ self.conv3 = build_conv_layer(
+ self.conv_cfg,
+ width,
+ self.planes * self.expansion,
+ kernel_size=1,
+ bias=False)
+ self.add_module(self.norm3_name, norm3)
+
+
+@BACKBONES.register_module()
+class DetectoRS_ResNeXt(DetectoRS_ResNet):
+ """ResNeXt backbone for DetectoRS.
+
+ Args:
+ groups (int): The number of groups in ResNeXt.
+ base_width (int): The base width of ResNeXt.
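+
+    Example (illustrative sketch, assuming the upstream ``mmdet`` import
+        path; the 32x4d setting is used purely for illustration):
+        >>> from mmdet.models import DetectoRS_ResNeXt
+        >>> import torch
+        >>> self = DetectoRS_ResNeXt(depth=50, groups=32, base_width=4)
+        >>> self.eval()
+        >>> inputs = torch.rand(1, 3, 32, 32)
+        >>> level_outputs = self.forward(inputs)
+        >>> for level_out in level_outputs:
+        ...     print(tuple(level_out.shape))
+        (1, 256, 8, 8)
+        (1, 512, 4, 4)
+        (1, 1024, 2, 2)
+        (1, 2048, 1, 1)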
+ """
+
+ arch_settings = {
+ 50: (Bottleneck, (3, 4, 6, 3)),
+ 101: (Bottleneck, (3, 4, 23, 3)),
+ 152: (Bottleneck, (3, 8, 36, 3))
+ }
+
+ def __init__(self, groups=1, base_width=4, **kwargs):
+ self.groups = groups
+ self.base_width = base_width
+ super(DetectoRS_ResNeXt, self).__init__(**kwargs)
+
+ def make_res_layer(self, **kwargs):
+ return super().make_res_layer(
+ groups=self.groups,
+ base_width=self.base_width,
+ base_channels=self.base_channels,
+ **kwargs)
diff --git a/annotator/uniformer/mmdet_null/models/backbones/hourglass.py b/annotator/uniformer/mmdet_null/models/backbones/hourglass.py
new file mode 100644
index 0000000000000000000000000000000000000000..3422acee35e3c6f8731cdb310f188e671b5be12f
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/backbones/hourglass.py
@@ -0,0 +1,198 @@
+import torch.nn as nn
+from mmcv.cnn import ConvModule
+
+from ..builder import BACKBONES
+from ..utils import ResLayer
+from .resnet import BasicBlock
+
+
+class HourglassModule(nn.Module):
+ """Hourglass Module for HourglassNet backbone.
+
+ Generate module recursively and use BasicBlock as the base unit.
+
+ Args:
+ depth (int): Depth of current HourglassModule.
+ stage_channels (list[int]): Feature channels of sub-modules in current
+ and follow-up HourglassModule.
+ stage_blocks (list[int]): Number of sub-modules stacked in current and
+ follow-up HourglassModule.
+ norm_cfg (dict): Dictionary to construct and config norm layer.
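+
+    Example (illustrative sketch, assuming the upstream ``mmdet`` import
+        path; the channels and block counts are arbitrary):
+        >>> import torch
+        >>> from mmdet.models.backbones.hourglass import HourglassModule
+        >>> self = HourglassModule(2, [16, 16, 32], [1, 1, 1])
+        >>> self.eval()
+        >>> inputs = torch.rand(1, 16, 64, 64)
+        >>> out = self.forward(inputs)
+        >>> tuple(out.shape)
+        (1, 16, 64, 64)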
+ """
+
+ def __init__(self,
+ depth,
+ stage_channels,
+ stage_blocks,
+ norm_cfg=dict(type='BN', requires_grad=True)):
+ super(HourglassModule, self).__init__()
+
+ self.depth = depth
+
+ cur_block = stage_blocks[0]
+ next_block = stage_blocks[1]
+
+ cur_channel = stage_channels[0]
+ next_channel = stage_channels[1]
+
+ self.up1 = ResLayer(
+ BasicBlock, cur_channel, cur_channel, cur_block, norm_cfg=norm_cfg)
+
+ self.low1 = ResLayer(
+ BasicBlock,
+ cur_channel,
+ next_channel,
+ cur_block,
+ stride=2,
+ norm_cfg=norm_cfg)
+
+ if self.depth > 1:
+ self.low2 = HourglassModule(depth - 1, stage_channels[1:],
+ stage_blocks[1:])
+ else:
+ self.low2 = ResLayer(
+ BasicBlock,
+ next_channel,
+ next_channel,
+ next_block,
+ norm_cfg=norm_cfg)
+
+ self.low3 = ResLayer(
+ BasicBlock,
+ next_channel,
+ cur_channel,
+ cur_block,
+ norm_cfg=norm_cfg,
+ downsample_first=False)
+
+ self.up2 = nn.Upsample(scale_factor=2)
+
+ def forward(self, x):
+ """Forward function."""
+ up1 = self.up1(x)
+ low1 = self.low1(x)
+ low2 = self.low2(low1)
+ low3 = self.low3(low2)
+ up2 = self.up2(low3)
+ return up1 + up2
+
+
+@BACKBONES.register_module()
+class HourglassNet(nn.Module):
+ """HourglassNet backbone.
+
+ Stacked Hourglass Networks for Human Pose Estimation.
+    More details can be found in the `paper
+    <https://arxiv.org/abs/1603.06937>`_ .
+
+ Args:
+ downsample_times (int): Downsample times in a HourglassModule.
+ num_stacks (int): Number of HourglassModule modules stacked,
+ 1 for Hourglass-52, 2 for Hourglass-104.
+ stage_channels (list[int]): Feature channel of each sub-module in a
+ HourglassModule.
+ stage_blocks (list[int]): Number of sub-modules stacked in a
+ HourglassModule.
+ feat_channel (int): Feature channel of conv after a HourglassModule.
+ norm_cfg (dict): Dictionary to construct and config norm layer.
+
+ Example:
+ >>> from mmdet.models import HourglassNet
+ >>> import torch
+ >>> self = HourglassNet()
+ >>> self.eval()
+ >>> inputs = torch.rand(1, 3, 511, 511)
+ >>> level_outputs = self.forward(inputs)
+ >>> for level_output in level_outputs:
+ ... print(tuple(level_output.shape))
+ (1, 256, 128, 128)
+ (1, 256, 128, 128)
+ """
+
+ def __init__(self,
+ downsample_times=5,
+ num_stacks=2,
+ stage_channels=(256, 256, 384, 384, 384, 512),
+ stage_blocks=(2, 2, 2, 2, 2, 4),
+ feat_channel=256,
+ norm_cfg=dict(type='BN', requires_grad=True)):
+ super(HourglassNet, self).__init__()
+
+ self.num_stacks = num_stacks
+ assert self.num_stacks >= 1
+ assert len(stage_channels) == len(stage_blocks)
+ assert len(stage_channels) > downsample_times
+
+ cur_channel = stage_channels[0]
+
+ self.stem = nn.Sequential(
+ ConvModule(3, 128, 7, padding=3, stride=2, norm_cfg=norm_cfg),
+ ResLayer(BasicBlock, 128, 256, 1, stride=2, norm_cfg=norm_cfg))
+
+ self.hourglass_modules = nn.ModuleList([
+ HourglassModule(downsample_times, stage_channels, stage_blocks)
+ for _ in range(num_stacks)
+ ])
+
+ self.inters = ResLayer(
+ BasicBlock,
+ cur_channel,
+ cur_channel,
+ num_stacks - 1,
+ norm_cfg=norm_cfg)
+
+ self.conv1x1s = nn.ModuleList([
+ ConvModule(
+ cur_channel, cur_channel, 1, norm_cfg=norm_cfg, act_cfg=None)
+ for _ in range(num_stacks - 1)
+ ])
+
+ self.out_convs = nn.ModuleList([
+ ConvModule(
+ cur_channel, feat_channel, 3, padding=1, norm_cfg=norm_cfg)
+ for _ in range(num_stacks)
+ ])
+
+ self.remap_convs = nn.ModuleList([
+ ConvModule(
+ feat_channel, cur_channel, 1, norm_cfg=norm_cfg, act_cfg=None)
+ for _ in range(num_stacks - 1)
+ ])
+
+ self.relu = nn.ReLU(inplace=True)
+
+ def init_weights(self, pretrained=None):
+ """Init module weights.
+
+        No pretrained weights are loaded here, because all the modules used
+        (ConvModule, BasicBlock, etc.) come with default initialization, and
+        no pretrained model of HourglassNet is currently provided.
+
+ Detector's __init__() will call backbone's init_weights() with
+ pretrained as input, so we keep this function.
+ """
+ # Training Centripetal Model needs to reset parameters for Conv2d
+ for m in self.modules():
+ if isinstance(m, nn.Conv2d):
+ m.reset_parameters()
+
+ def forward(self, x):
+ """Forward function."""
+ inter_feat = self.stem(x)
+ out_feats = []
+
+ for ind in range(self.num_stacks):
+ single_hourglass = self.hourglass_modules[ind]
+ out_conv = self.out_convs[ind]
+
+ hourglass_feat = single_hourglass(inter_feat)
+ out_feat = out_conv(hourglass_feat)
+ out_feats.append(out_feat)
+
+ if ind < self.num_stacks - 1:
+ inter_feat = self.conv1x1s[ind](
+ inter_feat) + self.remap_convs[ind](
+ out_feat)
+ inter_feat = self.inters[ind](self.relu(inter_feat))
+
+ return out_feats
diff --git a/annotator/uniformer/mmdet_null/models/backbones/hrnet.py b/annotator/uniformer/mmdet_null/models/backbones/hrnet.py
new file mode 100644
index 0000000000000000000000000000000000000000..c0fd0a974192231506aa68b1e1719f618b78a1b3
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/backbones/hrnet.py
@@ -0,0 +1,537 @@
+import torch.nn as nn
+from mmcv.cnn import (build_conv_layer, build_norm_layer, constant_init,
+ kaiming_init)
+from mmcv.runner import load_checkpoint
+from torch.nn.modules.batchnorm import _BatchNorm
+
+from mmdet.utils import get_root_logger
+from ..builder import BACKBONES
+from .resnet import BasicBlock, Bottleneck
+
+
+class HRModule(nn.Module):
+ """High-Resolution Module for HRNet.
+
+ In this module, every branch has 4 BasicBlocks/Bottlenecks. Fusion/Exchange
+ is in this module.
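+
+    Example (illustrative sketch, assuming the upstream ``mmdet`` import
+        path; the two-branch configuration is arbitrary):
+        >>> import torch
+        >>> from mmdet.models.backbones.hrnet import HRModule
+        >>> from mmdet.models.backbones.resnet import BasicBlock
+        >>> self = HRModule(
+        ...     num_branches=2,
+        ...     blocks=BasicBlock,
+        ...     num_blocks=(2, 2),
+        ...     in_channels=[32, 64],
+        ...     num_channels=(32, 64))
+        >>> self.eval()
+        >>> x = [torch.rand(1, 32, 16, 16), torch.rand(1, 64, 8, 8)]
+        >>> outs = self.forward(x)
+        >>> [tuple(out.shape) for out in outs]
+        [(1, 32, 16, 16), (1, 64, 8, 8)]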
+ """
+
+ def __init__(self,
+ num_branches,
+ blocks,
+ num_blocks,
+ in_channels,
+ num_channels,
+ multiscale_output=True,
+ with_cp=False,
+ conv_cfg=None,
+ norm_cfg=dict(type='BN')):
+ super(HRModule, self).__init__()
+ self._check_branches(num_branches, num_blocks, in_channels,
+ num_channels)
+
+ self.in_channels = in_channels
+ self.num_branches = num_branches
+
+ self.multiscale_output = multiscale_output
+ self.norm_cfg = norm_cfg
+ self.conv_cfg = conv_cfg
+ self.with_cp = with_cp
+ self.branches = self._make_branches(num_branches, blocks, num_blocks,
+ num_channels)
+ self.fuse_layers = self._make_fuse_layers()
+ self.relu = nn.ReLU(inplace=False)
+
+ def _check_branches(self, num_branches, num_blocks, in_channels,
+ num_channels):
+ if num_branches != len(num_blocks):
+ error_msg = f'NUM_BRANCHES({num_branches}) ' \
+ f'!= NUM_BLOCKS({len(num_blocks)})'
+ raise ValueError(error_msg)
+
+ if num_branches != len(num_channels):
+ error_msg = f'NUM_BRANCHES({num_branches}) ' \
+ f'!= NUM_CHANNELS({len(num_channels)})'
+ raise ValueError(error_msg)
+
+ if num_branches != len(in_channels):
+ error_msg = f'NUM_BRANCHES({num_branches}) ' \
+ f'!= NUM_INCHANNELS({len(in_channels)})'
+ raise ValueError(error_msg)
+
+ def _make_one_branch(self,
+ branch_index,
+ block,
+ num_blocks,
+ num_channels,
+ stride=1):
+ downsample = None
+ if stride != 1 or \
+ self.in_channels[branch_index] != \
+ num_channels[branch_index] * block.expansion:
+ downsample = nn.Sequential(
+ build_conv_layer(
+ self.conv_cfg,
+ self.in_channels[branch_index],
+ num_channels[branch_index] * block.expansion,
+ kernel_size=1,
+ stride=stride,
+ bias=False),
+ build_norm_layer(self.norm_cfg, num_channels[branch_index] *
+ block.expansion)[1])
+
+ layers = []
+ layers.append(
+ block(
+ self.in_channels[branch_index],
+ num_channels[branch_index],
+ stride,
+ downsample=downsample,
+ with_cp=self.with_cp,
+ norm_cfg=self.norm_cfg,
+ conv_cfg=self.conv_cfg))
+ self.in_channels[branch_index] = \
+ num_channels[branch_index] * block.expansion
+ for i in range(1, num_blocks[branch_index]):
+ layers.append(
+ block(
+ self.in_channels[branch_index],
+ num_channels[branch_index],
+ with_cp=self.with_cp,
+ norm_cfg=self.norm_cfg,
+ conv_cfg=self.conv_cfg))
+
+ return nn.Sequential(*layers)
+
+ def _make_branches(self, num_branches, block, num_blocks, num_channels):
+ branches = []
+
+ for i in range(num_branches):
+ branches.append(
+ self._make_one_branch(i, block, num_blocks, num_channels))
+
+ return nn.ModuleList(branches)
+
+ def _make_fuse_layers(self):
+ if self.num_branches == 1:
+ return None
+
+ num_branches = self.num_branches
+ in_channels = self.in_channels
+ fuse_layers = []
+ num_out_branches = num_branches if self.multiscale_output else 1
+ for i in range(num_out_branches):
+ fuse_layer = []
+ for j in range(num_branches):
+ if j > i:
+ fuse_layer.append(
+ nn.Sequential(
+ build_conv_layer(
+ self.conv_cfg,
+ in_channels[j],
+ in_channels[i],
+ kernel_size=1,
+ stride=1,
+ padding=0,
+ bias=False),
+ build_norm_layer(self.norm_cfg, in_channels[i])[1],
+ nn.Upsample(
+ scale_factor=2**(j - i), mode='nearest')))
+ elif j == i:
+ fuse_layer.append(None)
+ else:
+ conv_downsamples = []
+ for k in range(i - j):
+ if k == i - j - 1:
+ conv_downsamples.append(
+ nn.Sequential(
+ build_conv_layer(
+ self.conv_cfg,
+ in_channels[j],
+ in_channels[i],
+ kernel_size=3,
+ stride=2,
+ padding=1,
+ bias=False),
+ build_norm_layer(self.norm_cfg,
+ in_channels[i])[1]))
+ else:
+ conv_downsamples.append(
+ nn.Sequential(
+ build_conv_layer(
+ self.conv_cfg,
+ in_channels[j],
+ in_channels[j],
+ kernel_size=3,
+ stride=2,
+ padding=1,
+ bias=False),
+ build_norm_layer(self.norm_cfg,
+ in_channels[j])[1],
+ nn.ReLU(inplace=False)))
+ fuse_layer.append(nn.Sequential(*conv_downsamples))
+ fuse_layers.append(nn.ModuleList(fuse_layer))
+
+ return nn.ModuleList(fuse_layers)
+
+ def forward(self, x):
+ """Forward function."""
+ if self.num_branches == 1:
+ return [self.branches[0](x[0])]
+
+ for i in range(self.num_branches):
+ x[i] = self.branches[i](x[i])
+
+ x_fuse = []
+ for i in range(len(self.fuse_layers)):
+ y = 0
+ for j in range(self.num_branches):
+ if i == j:
+ y += x[j]
+ else:
+ y += self.fuse_layers[i][j](x[j])
+ x_fuse.append(self.relu(y))
+ return x_fuse
+
+
+@BACKBONES.register_module()
+class HRNet(nn.Module):
+ """HRNet backbone.
+
+ High-Resolution Representations for Labeling Pixels and Regions
+ arXiv: https://arxiv.org/abs/1904.04514
+
+ Args:
+ extra (dict): detailed configuration for each stage of HRNet.
+ in_channels (int): Number of input image channels. Default: 3.
+ conv_cfg (dict): dictionary to construct and config conv layer.
+ norm_cfg (dict): dictionary to construct and config norm layer.
+ norm_eval (bool): Whether to set norm layers to eval mode, namely,
+ freeze running stats (mean and var). Note: Effect on Batch Norm
+ and its variants only.
+ with_cp (bool): Use checkpoint or not. Using checkpoint will save some
+ memory while slowing down the training speed.
+ zero_init_residual (bool): whether to use zero init for last norm layer
+ in resblocks to let them behave as identity.
+
+ Example:
+ >>> from mmdet.models import HRNet
+ >>> import torch
+ >>> extra = dict(
+ >>> stage1=dict(
+ >>> num_modules=1,
+ >>> num_branches=1,
+ >>> block='BOTTLENECK',
+ >>> num_blocks=(4, ),
+ >>> num_channels=(64, )),
+ >>> stage2=dict(
+ >>> num_modules=1,
+ >>> num_branches=2,
+ >>> block='BASIC',
+ >>> num_blocks=(4, 4),
+ >>> num_channels=(32, 64)),
+ >>> stage3=dict(
+ >>> num_modules=4,
+ >>> num_branches=3,
+ >>> block='BASIC',
+ >>> num_blocks=(4, 4, 4),
+ >>> num_channels=(32, 64, 128)),
+ >>> stage4=dict(
+ >>> num_modules=3,
+ >>> num_branches=4,
+ >>> block='BASIC',
+ >>> num_blocks=(4, 4, 4, 4),
+ >>> num_channels=(32, 64, 128, 256)))
+ >>> self = HRNet(extra, in_channels=1)
+ >>> self.eval()
+ >>> inputs = torch.rand(1, 1, 32, 32)
+ >>> level_outputs = self.forward(inputs)
+ >>> for level_out in level_outputs:
+ ... print(tuple(level_out.shape))
+ (1, 32, 8, 8)
+ (1, 64, 4, 4)
+ (1, 128, 2, 2)
+ (1, 256, 1, 1)
+ """
+
+ blocks_dict = {'BASIC': BasicBlock, 'BOTTLENECK': Bottleneck}
+
+ def __init__(self,
+ extra,
+ in_channels=3,
+ conv_cfg=None,
+ norm_cfg=dict(type='BN'),
+ norm_eval=True,
+ with_cp=False,
+ zero_init_residual=False):
+ super(HRNet, self).__init__()
+ self.extra = extra
+ self.conv_cfg = conv_cfg
+ self.norm_cfg = norm_cfg
+ self.norm_eval = norm_eval
+ self.with_cp = with_cp
+ self.zero_init_residual = zero_init_residual
+
+ # stem net
+ self.norm1_name, norm1 = build_norm_layer(self.norm_cfg, 64, postfix=1)
+ self.norm2_name, norm2 = build_norm_layer(self.norm_cfg, 64, postfix=2)
+
+ self.conv1 = build_conv_layer(
+ self.conv_cfg,
+ in_channels,
+ 64,
+ kernel_size=3,
+ stride=2,
+ padding=1,
+ bias=False)
+
+ self.add_module(self.norm1_name, norm1)
+ self.conv2 = build_conv_layer(
+ self.conv_cfg,
+ 64,
+ 64,
+ kernel_size=3,
+ stride=2,
+ padding=1,
+ bias=False)
+
+ self.add_module(self.norm2_name, norm2)
+ self.relu = nn.ReLU(inplace=True)
+
+ # stage 1
+ self.stage1_cfg = self.extra['stage1']
+ num_channels = self.stage1_cfg['num_channels'][0]
+ block_type = self.stage1_cfg['block']
+ num_blocks = self.stage1_cfg['num_blocks'][0]
+
+ block = self.blocks_dict[block_type]
+ stage1_out_channels = num_channels * block.expansion
+ self.layer1 = self._make_layer(block, 64, num_channels, num_blocks)
+
+ # stage 2
+ self.stage2_cfg = self.extra['stage2']
+ num_channels = self.stage2_cfg['num_channels']
+ block_type = self.stage2_cfg['block']
+
+ block = self.blocks_dict[block_type]
+ num_channels = [channel * block.expansion for channel in num_channels]
+ self.transition1 = self._make_transition_layer([stage1_out_channels],
+ num_channels)
+ self.stage2, pre_stage_channels = self._make_stage(
+ self.stage2_cfg, num_channels)
+
+ # stage 3
+ self.stage3_cfg = self.extra['stage3']
+ num_channels = self.stage3_cfg['num_channels']
+ block_type = self.stage3_cfg['block']
+
+ block = self.blocks_dict[block_type]
+ num_channels = [channel * block.expansion for channel in num_channels]
+ self.transition2 = self._make_transition_layer(pre_stage_channels,
+ num_channels)
+ self.stage3, pre_stage_channels = self._make_stage(
+ self.stage3_cfg, num_channels)
+
+ # stage 4
+ self.stage4_cfg = self.extra['stage4']
+ num_channels = self.stage4_cfg['num_channels']
+ block_type = self.stage4_cfg['block']
+
+ block = self.blocks_dict[block_type]
+ num_channels = [channel * block.expansion for channel in num_channels]
+ self.transition3 = self._make_transition_layer(pre_stage_channels,
+ num_channels)
+ self.stage4, pre_stage_channels = self._make_stage(
+ self.stage4_cfg, num_channels)
+
+ @property
+ def norm1(self):
+ """nn.Module: the normalization layer named "norm1" """
+ return getattr(self, self.norm1_name)
+
+ @property
+ def norm2(self):
+ """nn.Module: the normalization layer named "norm2" """
+ return getattr(self, self.norm2_name)
+
+ def _make_transition_layer(self, num_channels_pre_layer,
+ num_channels_cur_layer):
+ num_branches_cur = len(num_channels_cur_layer)
+ num_branches_pre = len(num_channels_pre_layer)
+
+ transition_layers = []
+ for i in range(num_branches_cur):
+ if i < num_branches_pre:
+ if num_channels_cur_layer[i] != num_channels_pre_layer[i]:
+ transition_layers.append(
+ nn.Sequential(
+ build_conv_layer(
+ self.conv_cfg,
+ num_channels_pre_layer[i],
+ num_channels_cur_layer[i],
+ kernel_size=3,
+ stride=1,
+ padding=1,
+ bias=False),
+ build_norm_layer(self.norm_cfg,
+ num_channels_cur_layer[i])[1],
+ nn.ReLU(inplace=True)))
+ else:
+ transition_layers.append(None)
+ else:
+ conv_downsamples = []
+ for j in range(i + 1 - num_branches_pre):
+ in_channels = num_channels_pre_layer[-1]
+ out_channels = num_channels_cur_layer[i] \
+ if j == i - num_branches_pre else in_channels
+ conv_downsamples.append(
+ nn.Sequential(
+ build_conv_layer(
+ self.conv_cfg,
+ in_channels,
+ out_channels,
+ kernel_size=3,
+ stride=2,
+ padding=1,
+ bias=False),
+ build_norm_layer(self.norm_cfg, out_channels)[1],
+ nn.ReLU(inplace=True)))
+ transition_layers.append(nn.Sequential(*conv_downsamples))
+
+ return nn.ModuleList(transition_layers)
+
+ def _make_layer(self, block, inplanes, planes, blocks, stride=1):
+ downsample = None
+ if stride != 1 or inplanes != planes * block.expansion:
+ downsample = nn.Sequential(
+ build_conv_layer(
+ self.conv_cfg,
+ inplanes,
+ planes * block.expansion,
+ kernel_size=1,
+ stride=stride,
+ bias=False),
+ build_norm_layer(self.norm_cfg, planes * block.expansion)[1])
+
+ layers = []
+ layers.append(
+ block(
+ inplanes,
+ planes,
+ stride,
+ downsample=downsample,
+ with_cp=self.with_cp,
+ norm_cfg=self.norm_cfg,
+ conv_cfg=self.conv_cfg))
+ inplanes = planes * block.expansion
+ for i in range(1, blocks):
+ layers.append(
+ block(
+ inplanes,
+ planes,
+ with_cp=self.with_cp,
+ norm_cfg=self.norm_cfg,
+ conv_cfg=self.conv_cfg))
+
+ return nn.Sequential(*layers)
+
+ def _make_stage(self, layer_config, in_channels, multiscale_output=True):
+ num_modules = layer_config['num_modules']
+ num_branches = layer_config['num_branches']
+ num_blocks = layer_config['num_blocks']
+ num_channels = layer_config['num_channels']
+ block = self.blocks_dict[layer_config['block']]
+
+ hr_modules = []
+ for i in range(num_modules):
+ # multi_scale_output is only used for the last module
+ if not multiscale_output and i == num_modules - 1:
+ reset_multiscale_output = False
+ else:
+ reset_multiscale_output = True
+
+ hr_modules.append(
+ HRModule(
+ num_branches,
+ block,
+ num_blocks,
+ in_channels,
+ num_channels,
+ reset_multiscale_output,
+ with_cp=self.with_cp,
+ norm_cfg=self.norm_cfg,
+ conv_cfg=self.conv_cfg))
+
+ return nn.Sequential(*hr_modules), in_channels
+
+ def init_weights(self, pretrained=None):
+ """Initialize the weights in backbone.
+
+ Args:
+ pretrained (str, optional): Path to pre-trained weights.
+ Defaults to None.
+ """
+ if isinstance(pretrained, str):
+ logger = get_root_logger()
+ load_checkpoint(self, pretrained, strict=False, logger=logger)
+ elif pretrained is None:
+ for m in self.modules():
+ if isinstance(m, nn.Conv2d):
+ kaiming_init(m)
+ elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
+ constant_init(m, 1)
+
+ if self.zero_init_residual:
+ for m in self.modules():
+ if isinstance(m, Bottleneck):
+ constant_init(m.norm3, 0)
+ elif isinstance(m, BasicBlock):
+ constant_init(m.norm2, 0)
+ else:
+ raise TypeError('pretrained must be a str or None')
+
+ def forward(self, x):
+ """Forward function."""
+ x = self.conv1(x)
+ x = self.norm1(x)
+ x = self.relu(x)
+ x = self.conv2(x)
+ x = self.norm2(x)
+ x = self.relu(x)
+ x = self.layer1(x)
+
+ x_list = []
+ for i in range(self.stage2_cfg['num_branches']):
+ if self.transition1[i] is not None:
+ x_list.append(self.transition1[i](x))
+ else:
+ x_list.append(x)
+ y_list = self.stage2(x_list)
+
+ x_list = []
+ for i in range(self.stage3_cfg['num_branches']):
+ if self.transition2[i] is not None:
+ x_list.append(self.transition2[i](y_list[-1]))
+ else:
+ x_list.append(y_list[i])
+ y_list = self.stage3(x_list)
+
+ x_list = []
+ for i in range(self.stage4_cfg['num_branches']):
+ if self.transition3[i] is not None:
+ x_list.append(self.transition3[i](y_list[-1]))
+ else:
+ x_list.append(y_list[i])
+ y_list = self.stage4(x_list)
+
+ return y_list
+
+ def train(self, mode=True):
+        """Convert the model into training mode while keeping the
+        normalization layers frozen."""
+ super(HRNet, self).train(mode)
+ if mode and self.norm_eval:
+ for m in self.modules():
+ # trick: eval have effect on BatchNorm only
+ if isinstance(m, _BatchNorm):
+ m.eval()
diff --git a/annotator/uniformer/mmdet_null/models/backbones/regnet.py b/annotator/uniformer/mmdet_null/models/backbones/regnet.py
new file mode 100644
index 0000000000000000000000000000000000000000..91a602a952226cebb5fd0e3e282c6f98ae4fa455
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/backbones/regnet.py
@@ -0,0 +1,325 @@
+import numpy as np
+import torch.nn as nn
+from mmcv.cnn import build_conv_layer, build_norm_layer
+
+from ..builder import BACKBONES
+from .resnet import ResNet
+from .resnext import Bottleneck
+
+
+@BACKBONES.register_module()
+class RegNet(ResNet):
+ """RegNet backbone.
+
+    More details can be found in the `paper
+    <https://arxiv.org/abs/2003.13678>`_ .
+
+ Args:
+ arch (dict): The parameter of RegNets.
+
+ - w0 (int): initial width
+ - wa (float): slope of width
+ - wm (float): quantization parameter to quantize the width
+ - depth (int): depth of the backbone
+ - group_w (int): width of group
+ - bot_mul (float): bottleneck ratio, i.e. expansion of bottleneck.
+ strides (Sequence[int]): Strides of the first block of each stage.
+ base_channels (int): Base channels after stem layer.
+ in_channels (int): Number of input image channels. Default: 3.
+ dilations (Sequence[int]): Dilation of each stage.
+ out_indices (Sequence[int]): Output from which stages.
+ style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two
+ layer is the 3x3 conv layer, otherwise the stride-two layer is
+ the first 1x1 conv layer.
+ frozen_stages (int): Stages to be frozen (all param fixed). -1 means
+ not freezing any parameters.
+ norm_cfg (dict): dictionary to construct and config norm layer.
+ norm_eval (bool): Whether to set norm layers to eval mode, namely,
+ freeze running stats (mean and var). Note: Effect on Batch Norm
+ and its variants only.
+ with_cp (bool): Use checkpoint or not. Using checkpoint will save some
+ memory while slowing down the training speed.
+ zero_init_residual (bool): whether to use zero init for last norm layer
+ in resblocks to let them behave as identity.
+
+ Example:
+ >>> from mmdet.models import RegNet
+ >>> import torch
+ >>> self = RegNet(
+ arch=dict(
+ w0=88,
+ wa=26.31,
+ wm=2.25,
+ group_w=48,
+ depth=25,
+ bot_mul=1.0))
+ >>> self.eval()
+ >>> inputs = torch.rand(1, 3, 32, 32)
+ >>> level_outputs = self.forward(inputs)
+ >>> for level_out in level_outputs:
+ ... print(tuple(level_out.shape))
+ (1, 96, 8, 8)
+ (1, 192, 4, 4)
+ (1, 432, 2, 2)
+ (1, 1008, 1, 1)
+ """
+ arch_settings = {
+ 'regnetx_400mf':
+ dict(w0=24, wa=24.48, wm=2.54, group_w=16, depth=22, bot_mul=1.0),
+ 'regnetx_800mf':
+ dict(w0=56, wa=35.73, wm=2.28, group_w=16, depth=16, bot_mul=1.0),
+ 'regnetx_1.6gf':
+ dict(w0=80, wa=34.01, wm=2.25, group_w=24, depth=18, bot_mul=1.0),
+ 'regnetx_3.2gf':
+ dict(w0=88, wa=26.31, wm=2.25, group_w=48, depth=25, bot_mul=1.0),
+ 'regnetx_4.0gf':
+ dict(w0=96, wa=38.65, wm=2.43, group_w=40, depth=23, bot_mul=1.0),
+ 'regnetx_6.4gf':
+ dict(w0=184, wa=60.83, wm=2.07, group_w=56, depth=17, bot_mul=1.0),
+ 'regnetx_8.0gf':
+ dict(w0=80, wa=49.56, wm=2.88, group_w=120, depth=23, bot_mul=1.0),
+ 'regnetx_12gf':
+ dict(w0=168, wa=73.36, wm=2.37, group_w=112, depth=19, bot_mul=1.0),
+ }
+
+ def __init__(self,
+ arch,
+ in_channels=3,
+ stem_channels=32,
+ base_channels=32,
+ strides=(2, 2, 2, 2),
+ dilations=(1, 1, 1, 1),
+ out_indices=(0, 1, 2, 3),
+ style='pytorch',
+ deep_stem=False,
+ avg_down=False,
+ frozen_stages=-1,
+ conv_cfg=None,
+ norm_cfg=dict(type='BN', requires_grad=True),
+ norm_eval=True,
+ dcn=None,
+ stage_with_dcn=(False, False, False, False),
+ plugins=None,
+ with_cp=False,
+ zero_init_residual=True):
+ super(ResNet, self).__init__()
+
+ # Generate RegNet parameters first
+ if isinstance(arch, str):
+ assert arch in self.arch_settings, \
+ f'"arch": "{arch}" is not one of the' \
+ ' arch_settings'
+ arch = self.arch_settings[arch]
+ elif not isinstance(arch, dict):
+ raise ValueError('Expect "arch" to be either a string '
+ f'or a dict, got {type(arch)}')
+
+ widths, num_stages = self.generate_regnet(
+ arch['w0'],
+ arch['wa'],
+ arch['wm'],
+ arch['depth'],
+ )
+ # Convert to per stage format
+ stage_widths, stage_blocks = self.get_stages_from_blocks(widths)
+ # Generate group widths and bot muls
+ group_widths = [arch['group_w'] for _ in range(num_stages)]
+ self.bottleneck_ratio = [arch['bot_mul'] for _ in range(num_stages)]
+ # Adjust the compatibility of stage_widths and group_widths
+ stage_widths, group_widths = self.adjust_width_group(
+ stage_widths, self.bottleneck_ratio, group_widths)
+
+ # Group params by stage
+ self.stage_widths = stage_widths
+ self.group_widths = group_widths
+ self.depth = sum(stage_blocks)
+ self.stem_channels = stem_channels
+ self.base_channels = base_channels
+ self.num_stages = num_stages
+ assert num_stages >= 1 and num_stages <= 4
+ self.strides = strides
+ self.dilations = dilations
+ assert len(strides) == len(dilations) == num_stages
+ self.out_indices = out_indices
+ assert max(out_indices) < num_stages
+ self.style = style
+ self.deep_stem = deep_stem
+ self.avg_down = avg_down
+ self.frozen_stages = frozen_stages
+ self.conv_cfg = conv_cfg
+ self.norm_cfg = norm_cfg
+ self.with_cp = with_cp
+ self.norm_eval = norm_eval
+ self.dcn = dcn
+ self.stage_with_dcn = stage_with_dcn
+ if dcn is not None:
+ assert len(stage_with_dcn) == num_stages
+ self.plugins = plugins
+ self.zero_init_residual = zero_init_residual
+ self.block = Bottleneck
+ expansion_bak = self.block.expansion
+ self.block.expansion = 1
+ self.stage_blocks = stage_blocks[:num_stages]
+
+ self._make_stem_layer(in_channels, stem_channels)
+
+ self.inplanes = stem_channels
+ self.res_layers = []
+ for i, num_blocks in enumerate(self.stage_blocks):
+ stride = self.strides[i]
+ dilation = self.dilations[i]
+ group_width = self.group_widths[i]
+ width = int(round(self.stage_widths[i] * self.bottleneck_ratio[i]))
+ stage_groups = width // group_width
+
+ dcn = self.dcn if self.stage_with_dcn[i] else None
+ if self.plugins is not None:
+ stage_plugins = self.make_stage_plugins(self.plugins, i)
+ else:
+ stage_plugins = None
+
+ res_layer = self.make_res_layer(
+ block=self.block,
+ inplanes=self.inplanes,
+ planes=self.stage_widths[i],
+ num_blocks=num_blocks,
+ stride=stride,
+ dilation=dilation,
+ style=self.style,
+ avg_down=self.avg_down,
+ with_cp=self.with_cp,
+ conv_cfg=self.conv_cfg,
+ norm_cfg=self.norm_cfg,
+ dcn=dcn,
+ plugins=stage_plugins,
+ groups=stage_groups,
+ base_width=group_width,
+ base_channels=self.stage_widths[i])
+ self.inplanes = self.stage_widths[i]
+ layer_name = f'layer{i + 1}'
+ self.add_module(layer_name, res_layer)
+ self.res_layers.append(layer_name)
+
+ self._freeze_stages()
+
+ self.feat_dim = stage_widths[-1]
+ self.block.expansion = expansion_bak
+
+ def _make_stem_layer(self, in_channels, base_channels):
+ self.conv1 = build_conv_layer(
+ self.conv_cfg,
+ in_channels,
+ base_channels,
+ kernel_size=3,
+ stride=2,
+ padding=1,
+ bias=False)
+ self.norm1_name, norm1 = build_norm_layer(
+ self.norm_cfg, base_channels, postfix=1)
+ self.add_module(self.norm1_name, norm1)
+ self.relu = nn.ReLU(inplace=True)
+
+ def generate_regnet(self,
+ initial_width,
+ width_slope,
+ width_parameter,
+ depth,
+ divisor=8):
+ """Generates per block width from RegNet parameters.
+
+ Args:
+ initial_width ([int]): Initial width of the backbone
+ width_slope ([float]): Slope of the quantized linear function
+ width_parameter ([int]): Parameter used to quantize the width.
+ depth ([int]): Depth of the backbone.
+ divisor (int, optional): The divisor of channels. Defaults to 8.
+
+ Returns:
+ list, int: return a list of widths of each stage and the number \
+ of stages
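+
+        Example (illustrative sketch, assuming the upstream ``mmdet`` import
+            path; the parameters are those of ``regnetx_400mf``):
+            >>> from mmdet.models import RegNet
+            >>> self = RegNet(arch='regnetx_400mf')
+            >>> widths, num_stages = self.generate_regnet(24, 24.48, 2.54, 22)
+            >>> len(widths)
+            22
+            >>> num_stages
+            4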
+ """
+ assert width_slope >= 0
+ assert initial_width > 0
+ assert width_parameter > 1
+ assert initial_width % divisor == 0
+ widths_cont = np.arange(depth) * width_slope + initial_width
+ ks = np.round(
+ np.log(widths_cont / initial_width) / np.log(width_parameter))
+ widths = initial_width * np.power(width_parameter, ks)
+ widths = np.round(np.divide(widths, divisor)) * divisor
+ num_stages = len(np.unique(widths))
+ widths, widths_cont = widths.astype(int).tolist(), widths_cont.tolist()
+ return widths, num_stages
+
+ @staticmethod
+ def quantize_float(number, divisor):
+        """Converts a float to the closest non-zero int divisible by divisor.
+
+        Args:
+            number (float): Original number to be quantized.
+            divisor (int): Divisor used to quantize the number.
+
+        Returns:
+            int: quantized number that is divisible by divisor.
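+
+        Example (illustrative, assuming the upstream ``mmdet`` import path):
+            >>> from mmdet.models import RegNet
+            >>> RegNet.quantize_float(100, 24)
+            96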
+ """
+ return int(round(number / divisor) * divisor)
+
+ def adjust_width_group(self, widths, bottleneck_ratio, groups):
+ """Adjusts the compatibility of widths and groups.
+
+ Args:
+ widths (list[int]): Width of each stage.
+ bottleneck_ratio (float): Bottleneck ratio.
+ groups (int): number of groups in each stage
+
+ Returns:
+ tuple(list): The adjusted widths and groups of each stage.
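+
+        Example (illustrative sketch, assuming the upstream ``mmdet`` import
+            path; the single-stage inputs are arbitrary):
+            >>> from mmdet.models import RegNet
+            >>> self = RegNet(arch='regnetx_400mf')
+            >>> self.adjust_width_group([40], [1.0], [24])
+            ([48], [24])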
+ """
+ bottleneck_width = [
+ int(w * b) for w, b in zip(widths, bottleneck_ratio)
+ ]
+ groups = [min(g, w_bot) for g, w_bot in zip(groups, bottleneck_width)]
+ bottleneck_width = [
+ self.quantize_float(w_bot, g)
+ for w_bot, g in zip(bottleneck_width, groups)
+ ]
+ widths = [
+ int(w_bot / b)
+ for w_bot, b in zip(bottleneck_width, bottleneck_ratio)
+ ]
+ return widths, groups
+
+ def get_stages_from_blocks(self, widths):
+ """Gets widths/stage_blocks of network at each stage.
+
+ Args:
+ widths (list[int]): Width in each stage.
+
+ Returns:
+ tuple(list): width and depth of each stage
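+
+        Example (illustrative sketch, assuming the upstream ``mmdet`` import
+            path; the per-block widths are arbitrary):
+            >>> from mmdet.models import RegNet
+            >>> self = RegNet(arch='regnetx_400mf')
+            >>> self.get_stages_from_blocks([24, 24, 48, 48, 48])
+            ([24, 48], [2, 3])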
+ """
+ width_diff = [
+ width != width_prev
+ for width, width_prev in zip(widths + [0], [0] + widths)
+ ]
+ stage_widths = [
+ width for width, diff in zip(widths, width_diff[:-1]) if diff
+ ]
+ stage_blocks = np.diff([
+ depth for depth, diff in zip(range(len(width_diff)), width_diff)
+ if diff
+ ]).tolist()
+ return stage_widths, stage_blocks
+
+ def forward(self, x):
+ """Forward function."""
+ x = self.conv1(x)
+ x = self.norm1(x)
+ x = self.relu(x)
+
+ outs = []
+ for i, layer_name in enumerate(self.res_layers):
+ res_layer = getattr(self, layer_name)
+ x = res_layer(x)
+ if i in self.out_indices:
+ outs.append(x)
+ return tuple(outs)
diff --git a/annotator/uniformer/mmdet_null/models/backbones/res2net.py b/annotator/uniformer/mmdet_null/models/backbones/res2net.py
new file mode 100644
index 0000000000000000000000000000000000000000..7901b7f2fa29741d72328bdbdbf92fc4d5c5f847
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/backbones/res2net.py
@@ -0,0 +1,351 @@
+import math
+
+import torch
+import torch.nn as nn
+import torch.utils.checkpoint as cp
+from mmcv.cnn import (build_conv_layer, build_norm_layer, constant_init,
+ kaiming_init)
+from mmcv.runner import load_checkpoint
+from torch.nn.modules.batchnorm import _BatchNorm
+
+from mmdet.utils import get_root_logger
+from ..builder import BACKBONES
+from .resnet import Bottleneck as _Bottleneck
+from .resnet import ResNet
+
+
+class Bottle2neck(_Bottleneck):
+ expansion = 4
+
+ def __init__(self,
+ inplanes,
+ planes,
+ scales=4,
+ base_width=26,
+ base_channels=64,
+ stage_type='normal',
+ **kwargs):
+ """Bottle2neck block for Res2Net.
+
+        If style is "pytorch", the stride-two layer is the 3x3 conv layer;
+        if it is "caffe", the stride-two layer is the first 1x1 conv layer.
+ """
+ super(Bottle2neck, self).__init__(inplanes, planes, **kwargs)
+ assert scales > 1, 'Res2Net degenerates to ResNet when scales = 1.'
+ width = int(math.floor(self.planes * (base_width / base_channels)))
+
+ self.norm1_name, norm1 = build_norm_layer(
+ self.norm_cfg, width * scales, postfix=1)
+ self.norm3_name, norm3 = build_norm_layer(
+ self.norm_cfg, self.planes * self.expansion, postfix=3)
+
+ self.conv1 = build_conv_layer(
+ self.conv_cfg,
+ self.inplanes,
+ width * scales,
+ kernel_size=1,
+ stride=self.conv1_stride,
+ bias=False)
+ self.add_module(self.norm1_name, norm1)
+
+ if stage_type == 'stage' and self.conv2_stride != 1:
+ self.pool = nn.AvgPool2d(
+ kernel_size=3, stride=self.conv2_stride, padding=1)
+ convs = []
+ bns = []
+
+ fallback_on_stride = False
+ if self.with_dcn:
+ fallback_on_stride = self.dcn.pop('fallback_on_stride', False)
+ if not self.with_dcn or fallback_on_stride:
+ for i in range(scales - 1):
+ convs.append(
+ build_conv_layer(
+ self.conv_cfg,
+ width,
+ width,
+ kernel_size=3,
+ stride=self.conv2_stride,
+ padding=self.dilation,
+ dilation=self.dilation,
+ bias=False))
+ bns.append(
+ build_norm_layer(self.norm_cfg, width, postfix=i + 1)[1])
+ self.convs = nn.ModuleList(convs)
+ self.bns = nn.ModuleList(bns)
+ else:
+ assert self.conv_cfg is None, 'conv_cfg must be None for DCN'
+ for i in range(scales - 1):
+ convs.append(
+ build_conv_layer(
+ self.dcn,
+ width,
+ width,
+ kernel_size=3,
+ stride=self.conv2_stride,
+ padding=self.dilation,
+ dilation=self.dilation,
+ bias=False))
+ bns.append(
+ build_norm_layer(self.norm_cfg, width, postfix=i + 1)[1])
+ self.convs = nn.ModuleList(convs)
+ self.bns = nn.ModuleList(bns)
+
+ self.conv3 = build_conv_layer(
+ self.conv_cfg,
+ width * scales,
+ self.planes * self.expansion,
+ kernel_size=1,
+ bias=False)
+ self.add_module(self.norm3_name, norm3)
+
+ self.stage_type = stage_type
+ self.scales = scales
+ self.width = width
+ delattr(self, 'conv2')
+ delattr(self, self.norm2_name)
+
+ def forward(self, x):
+ """Forward function."""
+
+ def _inner_forward(x):
+ identity = x
+
+ out = self.conv1(x)
+ out = self.norm1(out)
+ out = self.relu(out)
+
+ if self.with_plugins:
+ out = self.forward_plugin(out, self.after_conv1_plugin_names)
+
+ spx = torch.split(out, self.width, 1)
+ sp = self.convs[0](spx[0].contiguous())
+ sp = self.relu(self.bns[0](sp))
+ out = sp
+ for i in range(1, self.scales - 1):
+ if self.stage_type == 'stage':
+ sp = spx[i]
+ else:
+ sp = sp + spx[i]
+ sp = self.convs[i](sp.contiguous())
+ sp = self.relu(self.bns[i](sp))
+ out = torch.cat((out, sp), 1)
+
+ if self.stage_type == 'normal' or self.conv2_stride == 1:
+ out = torch.cat((out, spx[self.scales - 1]), 1)
+ elif self.stage_type == 'stage':
+ out = torch.cat((out, self.pool(spx[self.scales - 1])), 1)
+
+ if self.with_plugins:
+ out = self.forward_plugin(out, self.after_conv2_plugin_names)
+
+ out = self.conv3(out)
+ out = self.norm3(out)
+
+ if self.with_plugins:
+ out = self.forward_plugin(out, self.after_conv3_plugin_names)
+
+ if self.downsample is not None:
+ identity = self.downsample(x)
+
+ out += identity
+
+ return out
+
+ if self.with_cp and x.requires_grad:
+ out = cp.checkpoint(_inner_forward, x)
+ else:
+ out = _inner_forward(x)
+
+ out = self.relu(out)
+
+ return out
+
+
+class Res2Layer(nn.Sequential):
+ """Res2Layer to build Res2Net style backbone.
+
+ Args:
+ block (nn.Module): block used to build ResLayer.
+ inplanes (int): inplanes of block.
+ planes (int): planes of block.
+ num_blocks (int): number of blocks.
+ stride (int): stride of the first block. Default: 1
+ avg_down (bool): Use AvgPool instead of stride conv when
+ downsampling in the bottle2neck. Default: False
+ conv_cfg (dict): dictionary to construct and config conv layer.
+ Default: None
+ norm_cfg (dict): dictionary to construct and config norm layer.
+ Default: dict(type='BN')
+ scales (int): Scales used in Res2Net. Default: 4
+ base_width (int): Basic width of each scale. Default: 26
+ """
+
+ def __init__(self,
+ block,
+ inplanes,
+ planes,
+ num_blocks,
+ stride=1,
+ avg_down=True,
+ conv_cfg=None,
+ norm_cfg=dict(type='BN'),
+ scales=4,
+ base_width=26,
+ **kwargs):
+ self.block = block
+
+ downsample = None
+ if stride != 1 or inplanes != planes * block.expansion:
+ downsample = nn.Sequential(
+ nn.AvgPool2d(
+ kernel_size=stride,
+ stride=stride,
+ ceil_mode=True,
+ count_include_pad=False),
+ build_conv_layer(
+ conv_cfg,
+ inplanes,
+ planes * block.expansion,
+ kernel_size=1,
+ stride=1,
+ bias=False),
+ build_norm_layer(norm_cfg, planes * block.expansion)[1],
+ )
+
+ layers = []
+ layers.append(
+ block(
+ inplanes=inplanes,
+ planes=planes,
+ stride=stride,
+ downsample=downsample,
+ conv_cfg=conv_cfg,
+ norm_cfg=norm_cfg,
+ scales=scales,
+ base_width=base_width,
+ stage_type='stage',
+ **kwargs))
+ inplanes = planes * block.expansion
+ for i in range(1, num_blocks):
+ layers.append(
+ block(
+ inplanes=inplanes,
+ planes=planes,
+ stride=1,
+ conv_cfg=conv_cfg,
+ norm_cfg=norm_cfg,
+ scales=scales,
+ base_width=base_width,
+ **kwargs))
+ super(Res2Layer, self).__init__(*layers)
+
+
+@BACKBONES.register_module()
+class Res2Net(ResNet):
+ """Res2Net backbone.
+
+ Args:
+ scales (int): Scales used in Res2Net. Default: 4
+ base_width (int): Basic width of each scale. Default: 26
+ depth (int): Depth of res2net, from {50, 101, 152}.
+ in_channels (int): Number of input image channels. Default: 3.
+ num_stages (int): Res2net stages. Default: 4.
+ strides (Sequence[int]): Strides of the first block of each stage.
+ dilations (Sequence[int]): Dilation of each stage.
+ out_indices (Sequence[int]): Output from which stages.
+ style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two
+ layer is the 3x3 conv layer, otherwise the stride-two layer is
+ the first 1x1 conv layer.
+ deep_stem (bool): Replace 7x7 conv in input stem with 3 3x3 conv
+ avg_down (bool): Use AvgPool instead of stride conv when
+ downsampling in the bottle2neck.
+ frozen_stages (int): Stages to be frozen (stop grad and set eval mode).
+ -1 means not freezing any parameters.
+ norm_cfg (dict): Dictionary to construct and config norm layer.
+ norm_eval (bool): Whether to set norm layers to eval mode, namely,
+ freeze running stats (mean and var). Note: Effect on Batch Norm
+ and its variants only.
+ plugins (list[dict]): List of plugins for stages, each dict contains:
+
+ - cfg (dict, required): Cfg dict to build plugin.
+ - position (str, required): Position inside block to insert
+ plugin, options are 'after_conv1', 'after_conv2', 'after_conv3'.
+ - stages (tuple[bool], optional): Stages to apply plugin, length
+ should be same as 'num_stages'.
+ with_cp (bool): Use checkpoint or not. Using checkpoint will save some
+ memory while slowing down the training speed.
+ zero_init_residual (bool): Whether to use zero init for last norm layer
+ in resblocks to let them behave as identity.
+
+ Example:
+ >>> from mmdet.models import Res2Net
+ >>> import torch
+ >>> self = Res2Net(depth=50, scales=4, base_width=26)
+ >>> self.eval()
+ >>> inputs = torch.rand(1, 3, 32, 32)
+ >>> level_outputs = self.forward(inputs)
+ >>> for level_out in level_outputs:
+ ... print(tuple(level_out.shape))
+ (1, 256, 8, 8)
+ (1, 512, 4, 4)
+ (1, 1024, 2, 2)
+ (1, 2048, 1, 1)
+ """
+
+ arch_settings = {
+ 50: (Bottle2neck, (3, 4, 6, 3)),
+ 101: (Bottle2neck, (3, 4, 23, 3)),
+ 152: (Bottle2neck, (3, 8, 36, 3))
+ }
+
+ def __init__(self,
+ scales=4,
+ base_width=26,
+ style='pytorch',
+ deep_stem=True,
+ avg_down=True,
+ **kwargs):
+ self.scales = scales
+ self.base_width = base_width
+ super(Res2Net, self).__init__(
+ style='pytorch', deep_stem=True, avg_down=True, **kwargs)
+
+ def make_res_layer(self, **kwargs):
+ return Res2Layer(
+ scales=self.scales,
+ base_width=self.base_width,
+ base_channels=self.base_channels,
+ **kwargs)
+
+ def init_weights(self, pretrained=None):
+ """Initialize the weights in backbone.
+
+ Args:
+ pretrained (str, optional): Path to pre-trained weights.
+ Defaults to None.
+ """
+ if isinstance(pretrained, str):
+ logger = get_root_logger()
+ load_checkpoint(self, pretrained, strict=False, logger=logger)
+ elif pretrained is None:
+ for m in self.modules():
+ if isinstance(m, nn.Conv2d):
+ kaiming_init(m)
+ elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
+ constant_init(m, 1)
+
+ if self.dcn is not None:
+ for m in self.modules():
+ if isinstance(m, Bottle2neck):
+ # dcn in Res2Net bottle2neck is in ModuleList
+ for n in m.convs:
+ if hasattr(n, 'conv_offset'):
+ constant_init(n.conv_offset, 0)
+
+ if self.zero_init_residual:
+ for m in self.modules():
+ if isinstance(m, Bottle2neck):
+ constant_init(m.norm3, 0)
+ else:
+ raise TypeError('pretrained must be a str or None')
diff --git a/annotator/uniformer/mmdet_null/models/backbones/resnest.py b/annotator/uniformer/mmdet_null/models/backbones/resnest.py
new file mode 100644
index 0000000000000000000000000000000000000000..48e1d8bfa47348a13f0da0b9ecf32354fa270340
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/backbones/resnest.py
@@ -0,0 +1,317 @@
+import math
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+import torch.utils.checkpoint as cp
+from mmcv.cnn import build_conv_layer, build_norm_layer
+
+from ..builder import BACKBONES
+from ..utils import ResLayer
+from .resnet import Bottleneck as _Bottleneck
+from .resnet import ResNetV1d
+
+
+class RSoftmax(nn.Module):
+ """Radix Softmax module in ``SplitAttentionConv2d``.
+
+ Args:
+ radix (int): Radix of input.
+ groups (int): Groups of input.
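+
+    Example (illustrative sketch, assuming the upstream ``mmdet`` import
+        path):
+        >>> import torch
+        >>> from mmdet.models.backbones.resnest import RSoftmax
+        >>> m = RSoftmax(radix=2, groups=1)
+        >>> x = torch.rand(2, 8, 1, 1)
+        >>> tuple(m(x).shape)
+        (2, 8)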
+ """
+
+ def __init__(self, radix, groups):
+ super().__init__()
+ self.radix = radix
+ self.groups = groups
+
+ def forward(self, x):
+ batch = x.size(0)
+ if self.radix > 1:
+ x = x.view(batch, self.groups, self.radix, -1).transpose(1, 2)
+ x = F.softmax(x, dim=1)
+ x = x.reshape(batch, -1)
+ else:
+ x = torch.sigmoid(x)
+ return x
+
+
+class SplitAttentionConv2d(nn.Module):
+ """Split-Attention Conv2d in ResNeSt.
+
+ Args:
+ in_channels (int): Number of channels in the input feature map.
+ channels (int): Number of intermediate channels.
+ kernel_size (int | tuple[int]): Size of the convolution kernel.
+ stride (int | tuple[int]): Stride of the convolution.
+        padding (int | tuple[int]): Zero-padding added to both sides of
+            the input. Same as nn.Conv2d.
+        dilation (int | tuple[int]): Spacing between kernel elements.
+        groups (int): Number of blocked connections from input channels to
+            output channels. Same as nn.Conv2d.
+        radix (int): Radix of SplitAttentionConv2d. Default: 2
+ reduction_factor (int): Reduction factor of inter_channels. Default: 4.
+ conv_cfg (dict): Config dict for convolution layer. Default: None,
+ which means using conv2d.
+ norm_cfg (dict): Config dict for normalization layer. Default: None.
+ dcn (dict): Config dict for DCN. Default: None.
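+
+    Example (illustrative sketch, assuming the upstream ``mmdet`` import
+        path and the default BN config):
+        >>> import torch
+        >>> from mmdet.models.backbones.resnest import SplitAttentionConv2d
+        >>> self = SplitAttentionConv2d(64, 64, kernel_size=3, padding=1)
+        >>> self.eval()
+        >>> x = torch.rand(1, 64, 8, 8)
+        >>> tuple(self(x).shape)
+        (1, 64, 8, 8)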
+ """
+
+ def __init__(self,
+ in_channels,
+ channels,
+ kernel_size,
+ stride=1,
+ padding=0,
+ dilation=1,
+ groups=1,
+ radix=2,
+ reduction_factor=4,
+ conv_cfg=None,
+ norm_cfg=dict(type='BN'),
+ dcn=None):
+ super(SplitAttentionConv2d, self).__init__()
+ inter_channels = max(in_channels * radix // reduction_factor, 32)
+ self.radix = radix
+ self.groups = groups
+ self.channels = channels
+ self.with_dcn = dcn is not None
+ self.dcn = dcn
+ fallback_on_stride = False
+ if self.with_dcn:
+ fallback_on_stride = self.dcn.pop('fallback_on_stride', False)
+ if self.with_dcn and not fallback_on_stride:
+ assert conv_cfg is None, 'conv_cfg must be None for DCN'
+ conv_cfg = dcn
+ self.conv = build_conv_layer(
+ conv_cfg,
+ in_channels,
+ channels * radix,
+ kernel_size,
+ stride=stride,
+ padding=padding,
+ dilation=dilation,
+ groups=groups * radix,
+ bias=False)
+ # To be consistent with original implementation, starting from 0
+ self.norm0_name, norm0 = build_norm_layer(
+ norm_cfg, channels * radix, postfix=0)
+ self.add_module(self.norm0_name, norm0)
+ self.relu = nn.ReLU(inplace=True)
+ self.fc1 = build_conv_layer(
+ None, channels, inter_channels, 1, groups=self.groups)
+ self.norm1_name, norm1 = build_norm_layer(
+ norm_cfg, inter_channels, postfix=1)
+ self.add_module(self.norm1_name, norm1)
+ self.fc2 = build_conv_layer(
+ None, inter_channels, channels * radix, 1, groups=self.groups)
+ self.rsoftmax = RSoftmax(radix, groups)
+
+ @property
+ def norm0(self):
+ """nn.Module: the normalization layer named "norm0" """
+ return getattr(self, self.norm0_name)
+
+ @property
+ def norm1(self):
+ """nn.Module: the normalization layer named "norm1" """
+ return getattr(self, self.norm1_name)
+
+ def forward(self, x):
+ x = self.conv(x)
+ x = self.norm0(x)
+ x = self.relu(x)
+
+ batch, rchannel = x.shape[:2]
+ batch = x.size(0)
+ if self.radix > 1:
+ splits = x.view(batch, self.radix, -1, *x.shape[2:])
+ gap = splits.sum(dim=1)
+ else:
+ gap = x
+ gap = F.adaptive_avg_pool2d(gap, 1)
+ gap = self.fc1(gap)
+
+ gap = self.norm1(gap)
+ gap = self.relu(gap)
+
+ atten = self.fc2(gap)
+ atten = self.rsoftmax(atten).view(batch, -1, 1, 1)
+
+ if self.radix > 1:
+ attens = atten.view(batch, self.radix, -1, *atten.shape[2:])
+ out = torch.sum(attens * splits, dim=1)
+ else:
+ out = atten * x
+ return out.contiguous()
+
+
+class Bottleneck(_Bottleneck):
+ """Bottleneck block for ResNeSt.
+
+ Args:
+        inplanes (int): Input planes of this block.
+ planes (int): Middle planes of this block.
+ groups (int): Groups of conv2.
+ base_width (int): Base of width in terms of base channels. Default: 4.
+ base_channels (int): Base of channels for calculating width.
+ Default: 64.
+ radix (int): Radix of SplitAttentionConv2d. Default: 2.
+ reduction_factor (int): Reduction factor of inter_channels in
+ SplitAttentionConv2d. Default: 4.
+ avg_down_stride (bool): Whether to use average pool for stride in
+ Bottleneck. Default: True.
+ kwargs (dict): Key word arguments for base class.
+ """
+ expansion = 4
+
+ def __init__(self,
+ inplanes,
+ planes,
+ groups=1,
+ base_width=4,
+ base_channels=64,
+ radix=2,
+ reduction_factor=4,
+ avg_down_stride=True,
+ **kwargs):
+ """Bottleneck block for ResNeSt."""
+ super(Bottleneck, self).__init__(inplanes, planes, **kwargs)
+
+ if groups == 1:
+ width = self.planes
+ else:
+ width = math.floor(self.planes *
+ (base_width / base_channels)) * groups
+
+ self.avg_down_stride = avg_down_stride and self.conv2_stride > 1
+
+ self.norm1_name, norm1 = build_norm_layer(
+ self.norm_cfg, width, postfix=1)
+ self.norm3_name, norm3 = build_norm_layer(
+ self.norm_cfg, self.planes * self.expansion, postfix=3)
+
+ self.conv1 = build_conv_layer(
+ self.conv_cfg,
+ self.inplanes,
+ width,
+ kernel_size=1,
+ stride=self.conv1_stride,
+ bias=False)
+ self.add_module(self.norm1_name, norm1)
+ self.with_modulated_dcn = False
+ self.conv2 = SplitAttentionConv2d(
+ width,
+ width,
+ kernel_size=3,
+ stride=1 if self.avg_down_stride else self.conv2_stride,
+ padding=self.dilation,
+ dilation=self.dilation,
+ groups=groups,
+ radix=radix,
+ reduction_factor=reduction_factor,
+ conv_cfg=self.conv_cfg,
+ norm_cfg=self.norm_cfg,
+ dcn=self.dcn)
+ delattr(self, self.norm2_name)
+
+ if self.avg_down_stride:
+ self.avd_layer = nn.AvgPool2d(3, self.conv2_stride, padding=1)
+
+ self.conv3 = build_conv_layer(
+ self.conv_cfg,
+ width,
+ self.planes * self.expansion,
+ kernel_size=1,
+ bias=False)
+ self.add_module(self.norm3_name, norm3)
+
+ def forward(self, x):
+
+ def _inner_forward(x):
+ identity = x
+
+ out = self.conv1(x)
+ out = self.norm1(out)
+ out = self.relu(out)
+
+ if self.with_plugins:
+ out = self.forward_plugin(out, self.after_conv1_plugin_names)
+
+ out = self.conv2(out)
+
+ if self.avg_down_stride:
+ out = self.avd_layer(out)
+
+ if self.with_plugins:
+ out = self.forward_plugin(out, self.after_conv2_plugin_names)
+
+ out = self.conv3(out)
+ out = self.norm3(out)
+
+ if self.with_plugins:
+ out = self.forward_plugin(out, self.after_conv3_plugin_names)
+
+ if self.downsample is not None:
+ identity = self.downsample(x)
+
+ out += identity
+
+ return out
+
+ if self.with_cp and x.requires_grad:
+ out = cp.checkpoint(_inner_forward, x)
+ else:
+ out = _inner_forward(x)
+
+ out = self.relu(out)
+
+ return out
+
+
+@BACKBONES.register_module()
+class ResNeSt(ResNetV1d):
+ """ResNeSt backbone.
+
+ Args:
+ groups (int): Number of groups of Bottleneck. Default: 1
+ base_width (int): Base width of Bottleneck. Default: 4
+ radix (int): Radix of SplitAttentionConv2d. Default: 2
+ reduction_factor (int): Reduction factor of inter_channels in
+ SplitAttentionConv2d. Default: 4.
+ avg_down_stride (bool): Whether to use average pool for stride in
+ Bottleneck. Default: True.
+ kwargs (dict): Keyword arguments for ResNet.
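+
+ Example:
+ Illustrative usage sketch (mirrors the ResNet example; the shapes
+ assume a randomly initialized ``depth=50`` model and a 64x64 input):
+
+ >>> import torch
+ >>> self = ResNeSt(depth=50)
+ >>> self.eval()
+ >>> inputs = torch.rand(1, 3, 64, 64)
+ >>> level_outputs = self.forward(inputs)
+ >>> for level_out in level_outputs:
+ ... print(tuple(level_out.shape))
+ (1, 256, 16, 16)
+ (1, 512, 8, 8)
+ (1, 1024, 4, 4)
+ (1, 2048, 2, 2)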
+ """
+
+ arch_settings = {
+ 50: (Bottleneck, (3, 4, 6, 3)),
+ 101: (Bottleneck, (3, 4, 23, 3)),
+ 152: (Bottleneck, (3, 8, 36, 3)),
+ 200: (Bottleneck, (3, 24, 36, 3))
+ }
+
+ def __init__(self,
+ groups=1,
+ base_width=4,
+ radix=2,
+ reduction_factor=4,
+ avg_down_stride=True,
+ **kwargs):
+ self.groups = groups
+ self.base_width = base_width
+ self.radix = radix
+ self.reduction_factor = reduction_factor
+ self.avg_down_stride = avg_down_stride
+ super(ResNeSt, self).__init__(**kwargs)
+
+ def make_res_layer(self, **kwargs):
+ """Pack all blocks in a stage into a ``ResLayer``."""
+ return ResLayer(
+ groups=self.groups,
+ base_width=self.base_width,
+ base_channels=self.base_channels,
+ radix=self.radix,
+ reduction_factor=self.reduction_factor,
+ avg_down_stride=self.avg_down_stride,
+ **kwargs)
diff --git a/annotator/uniformer/mmdet_null/models/backbones/resnet.py b/annotator/uniformer/mmdet_null/models/backbones/resnet.py
new file mode 100644
index 0000000000000000000000000000000000000000..3826815a6d94fdc4c54001d4c186d10ca3380e80
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/backbones/resnet.py
@@ -0,0 +1,663 @@
+import torch.nn as nn
+import torch.utils.checkpoint as cp
+from mmcv.cnn import (build_conv_layer, build_norm_layer, build_plugin_layer,
+ constant_init, kaiming_init)
+from mmcv.runner import load_checkpoint
+from torch.nn.modules.batchnorm import _BatchNorm
+
+from mmdet.utils import get_root_logger
+from ..builder import BACKBONES
+from ..utils import ResLayer
+
+
+class BasicBlock(nn.Module):
+ expansion = 1
+
+ def __init__(self,
+ inplanes,
+ planes,
+ stride=1,
+ dilation=1,
+ downsample=None,
+ style='pytorch',
+ with_cp=False,
+ conv_cfg=None,
+ norm_cfg=dict(type='BN'),
+ dcn=None,
+ plugins=None):
+ super(BasicBlock, self).__init__()
+ assert dcn is None, 'Not implemented yet.'
+ assert plugins is None, 'Not implemented yet.'
+
+ self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1)
+ self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2)
+
+ self.conv1 = build_conv_layer(
+ conv_cfg,
+ inplanes,
+ planes,
+ 3,
+ stride=stride,
+ padding=dilation,
+ dilation=dilation,
+ bias=False)
+ self.add_module(self.norm1_name, norm1)
+ self.conv2 = build_conv_layer(
+ conv_cfg, planes, planes, 3, padding=1, bias=False)
+ self.add_module(self.norm2_name, norm2)
+
+ self.relu = nn.ReLU(inplace=True)
+ self.downsample = downsample
+ self.stride = stride
+ self.dilation = dilation
+ self.with_cp = with_cp
+
+ @property
+ def norm1(self):
+ """nn.Module: normalization layer after the first convolution layer"""
+ return getattr(self, self.norm1_name)
+
+ @property
+ def norm2(self):
+ """nn.Module: normalization layer after the second convolution layer"""
+ return getattr(self, self.norm2_name)
+
+ def forward(self, x):
+ """Forward function."""
+
+ def _inner_forward(x):
+ identity = x
+
+ out = self.conv1(x)
+ out = self.norm1(out)
+ out = self.relu(out)
+
+ out = self.conv2(out)
+ out = self.norm2(out)
+
+ if self.downsample is not None:
+ identity = self.downsample(x)
+
+ out += identity
+
+ return out
+
+ if self.with_cp and x.requires_grad:
+ out = cp.checkpoint(_inner_forward, x)
+ else:
+ out = _inner_forward(x)
+
+ out = self.relu(out)
+
+ return out
+
+
+class Bottleneck(nn.Module):
+ expansion = 4
+
+ def __init__(self,
+ inplanes,
+ planes,
+ stride=1,
+ dilation=1,
+ downsample=None,
+ style='pytorch',
+ with_cp=False,
+ conv_cfg=None,
+ norm_cfg=dict(type='BN'),
+ dcn=None,
+ plugins=None):
+ """Bottleneck block for ResNet.
+
+ If style is "pytorch", the stride-two layer is the 3x3 conv layer, if
+ it is "caffe", the stride-two layer is the first 1x1 conv layer.
+ """
+ super(Bottleneck, self).__init__()
+ assert style in ['pytorch', 'caffe']
+ assert dcn is None or isinstance(dcn, dict)
+ assert plugins is None or isinstance(plugins, list)
+ if plugins is not None:
+ allowed_position = ['after_conv1', 'after_conv2', 'after_conv3']
+ assert all(p['position'] in allowed_position for p in plugins)
+
+ self.inplanes = inplanes
+ self.planes = planes
+ self.stride = stride
+ self.dilation = dilation
+ self.style = style
+ self.with_cp = with_cp
+ self.conv_cfg = conv_cfg
+ self.norm_cfg = norm_cfg
+ self.dcn = dcn
+ self.with_dcn = dcn is not None
+ self.plugins = plugins
+ self.with_plugins = plugins is not None
+
+ if self.with_plugins:
+ # collect plugins for conv1/conv2/conv3
+ self.after_conv1_plugins = [
+ plugin['cfg'] for plugin in plugins
+ if plugin['position'] == 'after_conv1'
+ ]
+ self.after_conv2_plugins = [
+ plugin['cfg'] for plugin in plugins
+ if plugin['position'] == 'after_conv2'
+ ]
+ self.after_conv3_plugins = [
+ plugin['cfg'] for plugin in plugins
+ if plugin['position'] == 'after_conv3'
+ ]
+
+ if self.style == 'pytorch':
+ self.conv1_stride = 1
+ self.conv2_stride = stride
+ else:
+ self.conv1_stride = stride
+ self.conv2_stride = 1
+
+ self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1)
+ self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2)
+ self.norm3_name, norm3 = build_norm_layer(
+ norm_cfg, planes * self.expansion, postfix=3)
+
+ self.conv1 = build_conv_layer(
+ conv_cfg,
+ inplanes,
+ planes,
+ kernel_size=1,
+ stride=self.conv1_stride,
+ bias=False)
+ self.add_module(self.norm1_name, norm1)
+ fallback_on_stride = False
+ if self.with_dcn:
+ fallback_on_stride = dcn.pop('fallback_on_stride', False)
+ if not self.with_dcn or fallback_on_stride:
+ self.conv2 = build_conv_layer(
+ conv_cfg,
+ planes,
+ planes,
+ kernel_size=3,
+ stride=self.conv2_stride,
+ padding=dilation,
+ dilation=dilation,
+ bias=False)
+ else:
+ assert self.conv_cfg is None, 'conv_cfg must be None for DCN'
+ self.conv2 = build_conv_layer(
+ dcn,
+ planes,
+ planes,
+ kernel_size=3,
+ stride=self.conv2_stride,
+ padding=dilation,
+ dilation=dilation,
+ bias=False)
+
+ self.add_module(self.norm2_name, norm2)
+ self.conv3 = build_conv_layer(
+ conv_cfg,
+ planes,
+ planes * self.expansion,
+ kernel_size=1,
+ bias=False)
+ self.add_module(self.norm3_name, norm3)
+
+ self.relu = nn.ReLU(inplace=True)
+ self.downsample = downsample
+
+ if self.with_plugins:
+ self.after_conv1_plugin_names = self.make_block_plugins(
+ planes, self.after_conv1_plugins)
+ self.after_conv2_plugin_names = self.make_block_plugins(
+ planes, self.after_conv2_plugins)
+ self.after_conv3_plugin_names = self.make_block_plugins(
+ planes * self.expansion, self.after_conv3_plugins)
+
+ def make_block_plugins(self, in_channels, plugins):
+ """make plugins for block.
+
+ Args:
+ in_channels (int): Input channels of plugin.
+ plugins (list[dict]): List of plugins cfg to build.
+
+ Returns:
+ list[str]: List of the names of plugin.
+ """
+ assert isinstance(plugins, list)
+ plugin_names = []
+ for plugin in plugins:
+ plugin = plugin.copy()
+ name, layer = build_plugin_layer(
+ plugin,
+ in_channels=in_channels,
+ postfix=plugin.pop('postfix', ''))
+ assert not hasattr(self, name), f'duplicate plugin {name}'
+ self.add_module(name, layer)
+ plugin_names.append(name)
+ return plugin_names
+
+ def forward_plugin(self, x, plugin_names):
+ out = x
+ for name in plugin_names:
+ # apply the plugins sequentially so that their outputs chain
+ out = getattr(self, name)(out)
+ return out
+
+ @property
+ def norm1(self):
+ """nn.Module: normalization layer after the first convolution layer"""
+ return getattr(self, self.norm1_name)
+
+ @property
+ def norm2(self):
+ """nn.Module: normalization layer after the second convolution layer"""
+ return getattr(self, self.norm2_name)
+
+ @property
+ def norm3(self):
+ """nn.Module: normalization layer after the third convolution layer"""
+ return getattr(self, self.norm3_name)
+
+ def forward(self, x):
+ """Forward function."""
+
+ def _inner_forward(x):
+ identity = x
+ out = self.conv1(x)
+ out = self.norm1(out)
+ out = self.relu(out)
+
+ if self.with_plugins:
+ out = self.forward_plugin(out, self.after_conv1_plugin_names)
+
+ out = self.conv2(out)
+ out = self.norm2(out)
+ out = self.relu(out)
+
+ if self.with_plugins:
+ out = self.forward_plugin(out, self.after_conv2_plugin_names)
+
+ out = self.conv3(out)
+ out = self.norm3(out)
+
+ if self.with_plugins:
+ out = self.forward_plugin(out, self.after_conv3_plugin_names)
+
+ if self.downsample is not None:
+ identity = self.downsample(x)
+
+ out += identity
+
+ return out
+
+ if self.with_cp and x.requires_grad:
+ out = cp.checkpoint(_inner_forward, x)
+ else:
+ out = _inner_forward(x)
+
+ out = self.relu(out)
+
+ return out
+
+
+@BACKBONES.register_module()
+class ResNet(nn.Module):
+ """ResNet backbone.
+
+ Args:
+ depth (int): Depth of resnet, from {18, 34, 50, 101, 152}.
+ stem_channels (int | None): Number of stem channels. If not specified,
+ it will be the same as `base_channels`. Default: None.
+ base_channels (int): Number of base channels of res layer. Default: 64.
+ in_channels (int): Number of input image channels. Default: 3.
+ num_stages (int): Resnet stages. Default: 4.
+ strides (Sequence[int]): Strides of the first block of each stage.
+ dilations (Sequence[int]): Dilation of each stage.
+ out_indices (Sequence[int]): Output from which stages.
+ style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two
+ layer is the 3x3 conv layer, otherwise the stride-two layer is
+ the first 1x1 conv layer.
+ deep_stem (bool): Replace the 7x7 conv in the input stem with three 3x3 convs.
+ avg_down (bool): Use AvgPool instead of stride conv when
+ downsampling in the bottleneck.
+ frozen_stages (int): Stages to be frozen (stop grad and set eval mode).
+ -1 means not freezing any parameters.
+ norm_cfg (dict): Dictionary to construct and config norm layer.
+ norm_eval (bool): Whether to set norm layers to eval mode, namely,
+ freeze running stats (mean and var). Note: Effect on Batch Norm
+ and its variants only.
+ plugins (list[dict]): List of plugins for stages, each dict contains:
+
+ - cfg (dict, required): Cfg dict to build plugin.
+ - position (str, required): Position inside block to insert
+ plugin, options are 'after_conv1', 'after_conv2', 'after_conv3'.
+ - stages (tuple[bool], optional): Stages to apply plugin, length
+ should be same as 'num_stages'.
+ with_cp (bool): Use checkpoint or not. Using checkpoint will save some
+ memory while slowing down the training speed.
+ zero_init_residual (bool): Whether to use zero init for last norm layer
+ in resblocks to let them behave as identity.
+
+ Example:
+ >>> from mmdet.models import ResNet
+ >>> import torch
+ >>> self = ResNet(depth=18)
+ >>> self.eval()
+ >>> inputs = torch.rand(1, 3, 32, 32)
+ >>> level_outputs = self.forward(inputs)
+ >>> for level_out in level_outputs:
+ ... print(tuple(level_out.shape))
+ (1, 64, 8, 8)
+ (1, 128, 4, 4)
+ (1, 256, 2, 2)
+ (1, 512, 1, 1)
+ """
+
+ arch_settings = {
+ 18: (BasicBlock, (2, 2, 2, 2)),
+ 34: (BasicBlock, (3, 4, 6, 3)),
+ 50: (Bottleneck, (3, 4, 6, 3)),
+ 101: (Bottleneck, (3, 4, 23, 3)),
+ 152: (Bottleneck, (3, 8, 36, 3))
+ }
+
+ def __init__(self,
+ depth,
+ in_channels=3,
+ stem_channels=None,
+ base_channels=64,
+ num_stages=4,
+ strides=(1, 2, 2, 2),
+ dilations=(1, 1, 1, 1),
+ out_indices=(0, 1, 2, 3),
+ style='pytorch',
+ deep_stem=False,
+ avg_down=False,
+ frozen_stages=-1,
+ conv_cfg=None,
+ norm_cfg=dict(type='BN', requires_grad=True),
+ norm_eval=True,
+ dcn=None,
+ stage_with_dcn=(False, False, False, False),
+ plugins=None,
+ with_cp=False,
+ zero_init_residual=True):
+ super(ResNet, self).__init__()
+ if depth not in self.arch_settings:
+ raise KeyError(f'invalid depth {depth} for resnet')
+ self.depth = depth
+ if stem_channels is None:
+ stem_channels = base_channels
+ self.stem_channels = stem_channels
+ self.base_channels = base_channels
+ self.num_stages = num_stages
+ assert num_stages >= 1 and num_stages <= 4
+ self.strides = strides
+ self.dilations = dilations
+ assert len(strides) == len(dilations) == num_stages
+ self.out_indices = out_indices
+ assert max(out_indices) < num_stages
+ self.style = style
+ self.deep_stem = deep_stem
+ self.avg_down = avg_down
+ self.frozen_stages = frozen_stages
+ self.conv_cfg = conv_cfg
+ self.norm_cfg = norm_cfg
+ self.with_cp = with_cp
+ self.norm_eval = norm_eval
+ self.dcn = dcn
+ self.stage_with_dcn = stage_with_dcn
+ if dcn is not None:
+ assert len(stage_with_dcn) == num_stages
+ self.plugins = plugins
+ self.zero_init_residual = zero_init_residual
+ self.block, stage_blocks = self.arch_settings[depth]
+ self.stage_blocks = stage_blocks[:num_stages]
+ self.inplanes = stem_channels
+
+ self._make_stem_layer(in_channels, stem_channels)
+
+ self.res_layers = []
+ for i, num_blocks in enumerate(self.stage_blocks):
+ stride = strides[i]
+ dilation = dilations[i]
+ dcn = self.dcn if self.stage_with_dcn[i] else None
+ if plugins is not None:
+ stage_plugins = self.make_stage_plugins(plugins, i)
+ else:
+ stage_plugins = None
+ planes = base_channels * 2**i
+ res_layer = self.make_res_layer(
+ block=self.block,
+ inplanes=self.inplanes,
+ planes=planes,
+ num_blocks=num_blocks,
+ stride=stride,
+ dilation=dilation,
+ style=self.style,
+ avg_down=self.avg_down,
+ with_cp=with_cp,
+ conv_cfg=conv_cfg,
+ norm_cfg=norm_cfg,
+ dcn=dcn,
+ plugins=stage_plugins)
+ self.inplanes = planes * self.block.expansion
+ layer_name = f'layer{i + 1}'
+ self.add_module(layer_name, res_layer)
+ self.res_layers.append(layer_name)
+
+ self._freeze_stages()
+
+ self.feat_dim = self.block.expansion * base_channels * 2**(
+ len(self.stage_blocks) - 1)
+
+ def make_stage_plugins(self, plugins, stage_idx):
+ """Make plugins for ResNet ``stage_idx`` th stage.
+
+ Currently we support to insert ``context_block``,
+ ``empirical_attention_block``, ``nonlocal_block`` into the backbone
+ like ResNet/ResNeXt. They could be inserted after conv1/conv2/conv3 of
+ Bottleneck.
+
+ An example of plugins format could be:
+
+ Examples:
+ >>> plugins=[
+ ... dict(cfg=dict(type='xxx', arg1='xxx'),
+ ... stages=(False, True, True, True),
+ ... position='after_conv2'),
+ ... dict(cfg=dict(type='yyy'),
+ ... stages=(True, True, True, True),
+ ... position='after_conv3'),
+ ... dict(cfg=dict(type='zzz', postfix='1'),
+ ... stages=(True, True, True, True),
+ ... position='after_conv3'),
+ ... dict(cfg=dict(type='zzz', postfix='2'),
+ ... stages=(True, True, True, True),
+ ... position='after_conv3')
+ ... ]
+ >>> self = ResNet(depth=18)
+ >>> stage_plugins = self.make_stage_plugins(plugins, 0)
+ >>> assert len(stage_plugins) == 3
+
+ Suppose ``stage_idx=0``, the structure of blocks in the stage would be:
+
+ .. code-block:: none
+
+ conv1->conv2->conv3->yyy->zzz1->zzz2
+
+ Suppose ``stage_idx=1``, the structure of blocks in the stage would be:
+
+ .. code-block:: none
+
+ conv1->conv2->xxx->conv3->yyy->zzz1->zzz2
+
+ If stages is missing, the plugin would be applied to all stages.
+
+ Args:
+ plugins (list[dict]): List of plugins cfg to build. The postfix is
+ required if multiple same type plugins are inserted.
+ stage_idx (int): Index of stage to build
+
+ Returns:
+ list[dict]: Plugins for current stage
+ """
+ stage_plugins = []
+ for plugin in plugins:
+ plugin = plugin.copy()
+ stages = plugin.pop('stages', None)
+ assert stages is None or len(stages) == self.num_stages
+ # whether to insert plugin into current stage
+ if stages is None or stages[stage_idx]:
+ stage_plugins.append(plugin)
+
+ return stage_plugins
+
+ def make_res_layer(self, **kwargs):
+ """Pack all blocks in a stage into a ``ResLayer``."""
+ return ResLayer(**kwargs)
+
+ @property
+ def norm1(self):
+ """nn.Module: the normalization layer named "norm1" """
+ return getattr(self, self.norm1_name)
+
+ def _make_stem_layer(self, in_channels, stem_channels):
+ if self.deep_stem:
+ self.stem = nn.Sequential(
+ build_conv_layer(
+ self.conv_cfg,
+ in_channels,
+ stem_channels // 2,
+ kernel_size=3,
+ stride=2,
+ padding=1,
+ bias=False),
+ build_norm_layer(self.norm_cfg, stem_channels // 2)[1],
+ nn.ReLU(inplace=True),
+ build_conv_layer(
+ self.conv_cfg,
+ stem_channels // 2,
+ stem_channels // 2,
+ kernel_size=3,
+ stride=1,
+ padding=1,
+ bias=False),
+ build_norm_layer(self.norm_cfg, stem_channels // 2)[1],
+ nn.ReLU(inplace=True),
+ build_conv_layer(
+ self.conv_cfg,
+ stem_channels // 2,
+ stem_channels,
+ kernel_size=3,
+ stride=1,
+ padding=1,
+ bias=False),
+ build_norm_layer(self.norm_cfg, stem_channels)[1],
+ nn.ReLU(inplace=True))
+ else:
+ self.conv1 = build_conv_layer(
+ self.conv_cfg,
+ in_channels,
+ stem_channels,
+ kernel_size=7,
+ stride=2,
+ padding=3,
+ bias=False)
+ self.norm1_name, norm1 = build_norm_layer(
+ self.norm_cfg, stem_channels, postfix=1)
+ self.add_module(self.norm1_name, norm1)
+ self.relu = nn.ReLU(inplace=True)
+ self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
+
+ def _freeze_stages(self):
+ if self.frozen_stages >= 0:
+ if self.deep_stem:
+ self.stem.eval()
+ for param in self.stem.parameters():
+ param.requires_grad = False
+ else:
+ self.norm1.eval()
+ for m in [self.conv1, self.norm1]:
+ for param in m.parameters():
+ param.requires_grad = False
+
+ for i in range(1, self.frozen_stages + 1):
+ m = getattr(self, f'layer{i}')
+ m.eval()
+ for param in m.parameters():
+ param.requires_grad = False
+
+ def init_weights(self, pretrained=None):
+ """Initialize the weights in backbone.
+
+ Args:
+ pretrained (str, optional): Path to pre-trained weights.
+ Defaults to None.
+ """
+ if isinstance(pretrained, str):
+ logger = get_root_logger()
+ load_checkpoint(self, pretrained, strict=False, logger=logger)
+ elif pretrained is None:
+ for m in self.modules():
+ if isinstance(m, nn.Conv2d):
+ kaiming_init(m)
+ elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
+ constant_init(m, 1)
+
+ if self.dcn is not None:
+ for m in self.modules():
+ if isinstance(m, Bottleneck) and hasattr(
+ m.conv2, 'conv_offset'):
+ constant_init(m.conv2.conv_offset, 0)
+
+ if self.zero_init_residual:
+ for m in self.modules():
+ if isinstance(m, Bottleneck):
+ constant_init(m.norm3, 0)
+ elif isinstance(m, BasicBlock):
+ constant_init(m.norm2, 0)
+ else:
+ raise TypeError('pretrained must be a str or None')
+
+ def forward(self, x):
+ """Forward function."""
+ if self.deep_stem:
+ x = self.stem(x)
+ else:
+ x = self.conv1(x)
+ x = self.norm1(x)
+ x = self.relu(x)
+ x = self.maxpool(x)
+ outs = []
+ for i, layer_name in enumerate(self.res_layers):
+ res_layer = getattr(self, layer_name)
+ x = res_layer(x)
+ if i in self.out_indices:
+ outs.append(x)
+ return tuple(outs)
+
+ def train(self, mode=True):
+ """Convert the model into training mode while keep normalization layer
+ freezed."""
+ super(ResNet, self).train(mode)
+ self._freeze_stages()
+ if mode and self.norm_eval:
+ for m in self.modules():
+ # trick: eval have effect on BatchNorm only
+ if isinstance(m, _BatchNorm):
+ m.eval()
+
+
+@BACKBONES.register_module()
+class ResNetV1d(ResNet):
+ r"""ResNetV1d variant described in `Bag of Tricks
+ <https://arxiv.org/abs/1812.01187>`_.
+
+ Compared with default ResNet(ResNetV1b), ResNetV1d replaces the 7x7 conv in
+ the input stem with three 3x3 convs. And in the downsampling block, a 2x2
+ avg_pool with stride 2 is added before conv, whose stride is changed to 1.
+ """
+
+ def __init__(self, **kwargs):
+ super(ResNetV1d, self).__init__(
+ deep_stem=True, avg_down=True, **kwargs)
diff --git a/annotator/uniformer/mmdet_null/models/backbones/resnext.py b/annotator/uniformer/mmdet_null/models/backbones/resnext.py
new file mode 100644
index 0000000000000000000000000000000000000000..6dbcbd516fd308b1d703eecb83ab275f6b159516
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/backbones/resnext.py
@@ -0,0 +1,153 @@
+import math
+
+from mmcv.cnn import build_conv_layer, build_norm_layer
+
+from ..builder import BACKBONES
+from ..utils import ResLayer
+from .resnet import Bottleneck as _Bottleneck
+from .resnet import ResNet
+
+
+class Bottleneck(_Bottleneck):
+ expansion = 4
+
+ def __init__(self,
+ inplanes,
+ planes,
+ groups=1,
+ base_width=4,
+ base_channels=64,
+ **kwargs):
+ """Bottleneck block for ResNeXt.
+
+ If style is "pytorch", the stride-two layer is the 3x3 conv layer, if
+ it is "caffe", the stride-two layer is the first 1x1 conv layer.
+ """
+ super(Bottleneck, self).__init__(inplanes, planes, **kwargs)
+
+ if groups == 1:
+ width = self.planes
+ else:
+ width = math.floor(self.planes *
+ (base_width / base_channels)) * groups
+
+ self.norm1_name, norm1 = build_norm_layer(
+ self.norm_cfg, width, postfix=1)
+ self.norm2_name, norm2 = build_norm_layer(
+ self.norm_cfg, width, postfix=2)
+ self.norm3_name, norm3 = build_norm_layer(
+ self.norm_cfg, self.planes * self.expansion, postfix=3)
+
+ self.conv1 = build_conv_layer(
+ self.conv_cfg,
+ self.inplanes,
+ width,
+ kernel_size=1,
+ stride=self.conv1_stride,
+ bias=False)
+ self.add_module(self.norm1_name, norm1)
+ fallback_on_stride = False
+ self.with_modulated_dcn = False
+ if self.with_dcn:
+ fallback_on_stride = self.dcn.pop('fallback_on_stride', False)
+ if not self.with_dcn or fallback_on_stride:
+ self.conv2 = build_conv_layer(
+ self.conv_cfg,
+ width,
+ width,
+ kernel_size=3,
+ stride=self.conv2_stride,
+ padding=self.dilation,
+ dilation=self.dilation,
+ groups=groups,
+ bias=False)
+ else:
+ assert self.conv_cfg is None, 'conv_cfg must be None for DCN'
+ self.conv2 = build_conv_layer(
+ self.dcn,
+ width,
+ width,
+ kernel_size=3,
+ stride=self.conv2_stride,
+ padding=self.dilation,
+ dilation=self.dilation,
+ groups=groups,
+ bias=False)
+
+ self.add_module(self.norm2_name, norm2)
+ self.conv3 = build_conv_layer(
+ self.conv_cfg,
+ width,
+ self.planes * self.expansion,
+ kernel_size=1,
+ bias=False)
+ self.add_module(self.norm3_name, norm3)
+
+ if self.with_plugins:
+ self._del_block_plugins(self.after_conv1_plugin_names +
+ self.after_conv2_plugin_names +
+ self.after_conv3_plugin_names)
+ self.after_conv1_plugin_names = self.make_block_plugins(
+ width, self.after_conv1_plugins)
+ self.after_conv2_plugin_names = self.make_block_plugins(
+ width, self.after_conv2_plugins)
+ self.after_conv3_plugin_names = self.make_block_plugins(
+ self.planes * self.expansion, self.after_conv3_plugins)
+
+ def _del_block_plugins(self, plugin_names):
+ """delete plugins for block if exist.
+
+ Args:
+ plugin_names (list[str]): List of plugins name to delete.
+ """
+ assert isinstance(plugin_names, list)
+ for plugin_name in plugin_names:
+ del self._modules[plugin_name]
+
+
+@BACKBONES.register_module()
+class ResNeXt(ResNet):
+ """ResNeXt backbone.
+
+ Args:
+ depth (int): Depth of resnext, from {50, 101, 152}.
+ in_channels (int): Number of input image channels. Default: 3.
+ num_stages (int): Resnet stages. Default: 4.
+ groups (int): Groups of resnext.
+ base_width (int): Base width of resnext.
+ strides (Sequence[int]): Strides of the first block of each stage.
+ dilations (Sequence[int]): Dilation of each stage.
+ out_indices (Sequence[int]): Output from which stages.
+ style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two
+ layer is the 3x3 conv layer, otherwise the stride-two layer is
+ the first 1x1 conv layer.
+ frozen_stages (int): Stages to be frozen (all param fixed). -1 means
+ not freezing any parameters.
+ norm_cfg (dict): Dictionary to construct and config norm layer.
+ norm_eval (bool): Whether to set norm layers to eval mode, namely,
+ freeze running stats (mean and var). Note: Effect on Batch Norm
+ and its variants only.
+ with_cp (bool): Use checkpoint or not. Using checkpoint will save some
+ memory while slowing down the training speed.
+ zero_init_residual (bool): Whether to use zero init for last norm layer
+ in resblocks to let them behave as identity.
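+
+ Example:
+ Illustrative usage sketch (the shapes assume a randomly initialized
+ ``depth=50`` model with ``groups=32``, ``base_width=4`` and a 64x64
+ input):
+
+ >>> import torch
+ >>> self = ResNeXt(depth=50, groups=32, base_width=4)
+ >>> self.eval()
+ >>> inputs = torch.rand(1, 3, 64, 64)
+ >>> level_outputs = self.forward(inputs)
+ >>> for level_out in level_outputs:
+ ... print(tuple(level_out.shape))
+ (1, 256, 16, 16)
+ (1, 512, 8, 8)
+ (1, 1024, 4, 4)
+ (1, 2048, 2, 2)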
+ """
+
+ arch_settings = {
+ 50: (Bottleneck, (3, 4, 6, 3)),
+ 101: (Bottleneck, (3, 4, 23, 3)),
+ 152: (Bottleneck, (3, 8, 36, 3))
+ }
+
+ def __init__(self, groups=1, base_width=4, **kwargs):
+ self.groups = groups
+ self.base_width = base_width
+ super(ResNeXt, self).__init__(**kwargs)
+
+ def make_res_layer(self, **kwargs):
+ """Pack all blocks in a stage into a ``ResLayer``"""
+ return ResLayer(
+ groups=self.groups,
+ base_width=self.base_width,
+ base_channels=self.base_channels,
+ **kwargs)
diff --git a/annotator/uniformer/mmdet_null/models/backbones/ssd_vgg.py b/annotator/uniformer/mmdet_null/models/backbones/ssd_vgg.py
new file mode 100644
index 0000000000000000000000000000000000000000..cbc4fbb2301afc002f47abb9ed133a500d6cf23f
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/backbones/ssd_vgg.py
@@ -0,0 +1,169 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from mmcv.cnn import VGG, constant_init, kaiming_init, normal_init, xavier_init
+from mmcv.runner import load_checkpoint
+
+from mmdet.utils import get_root_logger
+from ..builder import BACKBONES
+
+
+@BACKBONES.register_module()
+class SSDVGG(VGG):
+ """VGG Backbone network for single-shot-detection.
+
+ Args:
+ input_size (int): Width and height of the input, from {300, 512}.
+ depth (int): Depth of vgg, from {11, 13, 16, 19}.
+ out_indices (Sequence[int]): Output from which stages.
+
+ Example:
+ >>> self = SSDVGG(input_size=300, depth=11)
+ >>> self.eval()
+ >>> inputs = torch.rand(1, 3, 300, 300)
+ >>> level_outputs = self.forward(inputs)
+ >>> for level_out in level_outputs:
+ ... print(tuple(level_out.shape))
+ (1, 1024, 19, 19)
+ (1, 512, 10, 10)
+ (1, 256, 5, 5)
+ (1, 256, 3, 3)
+ (1, 256, 1, 1)
+ """
+ extra_setting = {
+ 300: (256, 'S', 512, 128, 'S', 256, 128, 256, 128, 256),
+ 512: (256, 'S', 512, 128, 'S', 256, 128, 'S', 256, 128, 'S', 256, 128),
+ }
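+ # Note on the ``extra_setting`` convention (descriptive comment): an 'S'
+ # entry marks a stride-2 conv (padding 1) whose out_channels is the entry
+ # that follows it; the remaining entries produce stride-1 convs, with the
+ # kernel size alternating between 1 and 3 (see ``_make_extra_layers``).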
+
+ def __init__(self,
+ input_size,
+ depth,
+ with_last_pool=False,
+ ceil_mode=True,
+ out_indices=(3, 4),
+ out_feature_indices=(22, 34),
+ l2_norm_scale=20.):
+ # TODO: in_channels for mmcv.VGG
+ super(SSDVGG, self).__init__(
+ depth,
+ with_last_pool=with_last_pool,
+ ceil_mode=ceil_mode,
+ out_indices=out_indices)
+ assert input_size in (300, 512)
+ self.input_size = input_size
+
+ self.features.add_module(
+ str(len(self.features)),
+ nn.MaxPool2d(kernel_size=3, stride=1, padding=1))
+ self.features.add_module(
+ str(len(self.features)),
+ nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6))
+ self.features.add_module(
+ str(len(self.features)), nn.ReLU(inplace=True))
+ self.features.add_module(
+ str(len(self.features)), nn.Conv2d(1024, 1024, kernel_size=1))
+ self.features.add_module(
+ str(len(self.features)), nn.ReLU(inplace=True))
+ self.out_feature_indices = out_feature_indices
+
+ self.inplanes = 1024
+ self.extra = self._make_extra_layers(self.extra_setting[input_size])
+ self.l2_norm = L2Norm(
+ self.features[out_feature_indices[0] - 1].out_channels,
+ l2_norm_scale)
+
+ def init_weights(self, pretrained=None):
+ """Initialize the weights in backbone.
+
+ Args:
+ pretrained (str, optional): Path to pre-trained weights.
+ Defaults to None.
+ """
+ if isinstance(pretrained, str):
+ logger = get_root_logger()
+ load_checkpoint(self, pretrained, strict=False, logger=logger)
+ elif pretrained is None:
+ for m in self.features.modules():
+ if isinstance(m, nn.Conv2d):
+ kaiming_init(m)
+ elif isinstance(m, nn.BatchNorm2d):
+ constant_init(m, 1)
+ elif isinstance(m, nn.Linear):
+ normal_init(m, std=0.01)
+ else:
+ raise TypeError('pretrained must be a str or None')
+
+ for m in self.extra.modules():
+ if isinstance(m, nn.Conv2d):
+ xavier_init(m, distribution='uniform')
+
+ constant_init(self.l2_norm, self.l2_norm.scale)
+
+ def forward(self, x):
+ """Forward function."""
+ outs = []
+ for i, layer in enumerate(self.features):
+ x = layer(x)
+ if i in self.out_feature_indices:
+ outs.append(x)
+ for i, layer in enumerate(self.extra):
+ x = F.relu(layer(x), inplace=True)
+ if i % 2 == 1:
+ outs.append(x)
+ outs[0] = self.l2_norm(outs[0])
+ if len(outs) == 1:
+ return outs[0]
+ else:
+ return tuple(outs)
+
+ def _make_extra_layers(self, outplanes):
+ layers = []
+ kernel_sizes = (1, 3)
+ num_layers = 0
+ outplane = None
+ for i in range(len(outplanes)):
+ if self.inplanes == 'S':
+ self.inplanes = outplane
+ continue
+ k = kernel_sizes[num_layers % 2]
+ if outplanes[i] == 'S':
+ outplane = outplanes[i + 1]
+ conv = nn.Conv2d(
+ self.inplanes, outplane, k, stride=2, padding=1)
+ else:
+ outplane = outplanes[i]
+ conv = nn.Conv2d(
+ self.inplanes, outplane, k, stride=1, padding=0)
+ layers.append(conv)
+ self.inplanes = outplanes[i]
+ num_layers += 1
+ if self.input_size == 512:
+ layers.append(nn.Conv2d(self.inplanes, 256, 4, padding=1))
+
+ return nn.Sequential(*layers)
+
+
+class L2Norm(nn.Module):
+
+ def __init__(self, n_dims, scale=20., eps=1e-10):
+ """L2 normalization layer.
+
+ Args:
+ n_dims (int): Number of dimensions to be normalized.
+ scale (float, optional): Scale factor. Defaults to 20.0.
+ eps (float, optional): Used to avoid division by zero.
+ Defaults to 1e-10.
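+
+ Note:
+ Descriptive sketch of the forward pass: each channel ``c`` is
+ rescaled as ``weight[c] * x[:, c] / (||x||_2 + eps)``, where the
+ L2 norm is taken over the channel dimension per spatial location
+ and is computed in FP32.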
+ """
+ super(L2Norm, self).__init__()
+ self.n_dims = n_dims
+ self.weight = nn.Parameter(torch.Tensor(self.n_dims))
+ self.eps = eps
+ self.scale = scale
+
+ def forward(self, x):
+ """Forward function."""
+ # the normalization is computed in FP32 when training in FP16
+ x_float = x.float()
+ norm = x_float.pow(2).sum(1, keepdim=True).sqrt() + self.eps
+ return (self.weight[None, :, None, None].float().expand_as(x_float) *
+ x_float / norm).type_as(x)
diff --git a/annotator/uniformer/mmdet_null/models/backbones/swin_transformer.py b/annotator/uniformer/mmdet_null/models/backbones/swin_transformer.py
new file mode 100644
index 0000000000000000000000000000000000000000..bb41850d8480a08a6a7698bf6129ffd1ab239681
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/backbones/swin_transformer.py
@@ -0,0 +1,630 @@
+# --------------------------------------------------------
+# Swin Transformer
+# Copyright (c) 2021 Microsoft
+# Licensed under The MIT License [see LICENSE for details]
+# Written by Ze Liu, Yutong Lin, Yixuan Wei
+# --------------------------------------------------------
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+import torch.utils.checkpoint as checkpoint
+import numpy as np
+from timm.models.layers import DropPath, to_2tuple, trunc_normal_
+
+from mmcv_custom import load_checkpoint
+from mmdet.utils import get_root_logger
+from ..builder import BACKBONES
+
+
+class Mlp(nn.Module):
+ """ Multilayer perceptron."""
+
+ def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
+ super().__init__()
+ out_features = out_features or in_features
+ hidden_features = hidden_features or in_features
+ self.fc1 = nn.Linear(in_features, hidden_features)
+ self.act = act_layer()
+ self.fc2 = nn.Linear(hidden_features, out_features)
+ self.drop = nn.Dropout(drop)
+
+ def forward(self, x):
+ x = self.fc1(x)
+ x = self.act(x)
+ x = self.drop(x)
+ x = self.fc2(x)
+ x = self.drop(x)
+ return x
+
+
+def window_partition(x, window_size):
+ """
+ Args:
+ x: (B, H, W, C)
+ window_size (int): window size
+
+ Returns:
+ windows: (num_windows*B, window_size, window_size, C)
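+
+ Example:
+ Illustrative shape check (a 14x14 feature map split into 7x7 windows):
+
+ >>> import torch
+ >>> x = torch.rand(2, 14, 14, 96)
+ >>> tuple(window_partition(x, 7).shape)
+ (8, 7, 7, 96)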
+ """
+ B, H, W, C = x.shape
+ x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
+ windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
+ return windows
+
+
+def window_reverse(windows, window_size, H, W):
+ """
+ Args:
+ windows: (num_windows*B, window_size, window_size, C)
+ window_size (int): Window size
+ H (int): Height of image
+ W (int): Width of image
+
+ Returns:
+ x: (B, H, W, C)
+ """
+ B = int(windows.shape[0] / (H * W / window_size / window_size))
+ x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)
+ x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
+ return x
+
+
+class WindowAttention(nn.Module):
+ """ Window based multi-head self attention (W-MSA) module with relative position bias.
+ It supports both shifted and non-shifted windows.
+
+ Args:
+ dim (int): Number of input channels.
+ window_size (tuple[int]): The height and width of the window.
+ num_heads (int): Number of attention heads.
+ qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
+ qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set
+ attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0
+ proj_drop (float, optional): Dropout ratio of output. Default: 0.0
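+
+ Example:
+ Illustrative shape check (a batch of eight 7x7 windows with ``dim=96``
+ and ``num_heads=3``; weights are randomly initialized):
+
+ >>> import torch
+ >>> attn = WindowAttention(dim=96, window_size=(7, 7), num_heads=3)
+ >>> x = torch.rand(8, 49, 96)  # (num_windows*B, N, C)
+ >>> tuple(attn(x).shape)
+ (8, 49, 96)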
+ """
+
+ def __init__(self, dim, window_size, num_heads, qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0.):
+
+ super().__init__()
+ self.dim = dim
+ self.window_size = window_size # Wh, Ww
+ self.num_heads = num_heads
+ head_dim = dim // num_heads
+ self.scale = qk_scale or head_dim ** -0.5
+
+ # define a parameter table of relative position bias
+ self.relative_position_bias_table = nn.Parameter(
+ torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads)) # 2*Wh-1 * 2*Ww-1, nH
+
+ # get pair-wise relative position index for each token inside the window
+ coords_h = torch.arange(self.window_size[0])
+ coords_w = torch.arange(self.window_size[1])
+ coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww
+ coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
+ relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
+ relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2
+ relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0
+ relative_coords[:, :, 1] += self.window_size[1] - 1
+ relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
+ relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
+ self.register_buffer("relative_position_index", relative_position_index)
+
+ self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
+ self.attn_drop = nn.Dropout(attn_drop)
+ self.proj = nn.Linear(dim, dim)
+ self.proj_drop = nn.Dropout(proj_drop)
+
+ trunc_normal_(self.relative_position_bias_table, std=.02)
+ self.softmax = nn.Softmax(dim=-1)
+
+ def forward(self, x, mask=None):
+ """ Forward function.
+
+ Args:
+ x: input features with shape of (num_windows*B, N, C)
+ mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None
+ """
+ B_, N, C = x.shape
+ qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
+ q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)
+
+ q = q * self.scale
+ attn = (q @ k.transpose(-2, -1))
+
+ relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
+ self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1) # Wh*Ww,Wh*Ww,nH
+ relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
+ attn = attn + relative_position_bias.unsqueeze(0)
+
+ if mask is not None:
+ nW = mask.shape[0]
+ attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0)
+ attn = attn.view(-1, self.num_heads, N, N)
+ attn = self.softmax(attn)
+ else:
+ attn = self.softmax(attn)
+
+ attn = self.attn_drop(attn)
+
+ x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
+ x = self.proj(x)
+ x = self.proj_drop(x)
+ return x
+
+
+class SwinTransformerBlock(nn.Module):
+ """ Swin Transformer Block.
+
+ Args:
+ dim (int): Number of input channels.
+ num_heads (int): Number of attention heads.
+ window_size (int): Window size.
+ shift_size (int): Shift size for SW-MSA.
+ mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
+ qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
+ qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
+ drop (float, optional): Dropout rate. Default: 0.0
+ attn_drop (float, optional): Attention dropout rate. Default: 0.0
+ drop_path (float, optional): Stochastic depth rate. Default: 0.0
+ act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
+ norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
+ """
+
+ def __init__(self, dim, num_heads, window_size=7, shift_size=0,
+ mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., drop_path=0.,
+ act_layer=nn.GELU, norm_layer=nn.LayerNorm):
+ super().__init__()
+ self.dim = dim
+ self.num_heads = num_heads
+ self.window_size = window_size
+ self.shift_size = shift_size
+ self.mlp_ratio = mlp_ratio
+ assert 0 <= self.shift_size < self.window_size, "shift_size must be in [0, window_size)"
+
+ self.norm1 = norm_layer(dim)
+ self.attn = WindowAttention(
+ dim, window_size=to_2tuple(self.window_size), num_heads=num_heads,
+ qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
+
+ self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
+ self.norm2 = norm_layer(dim)
+ mlp_hidden_dim = int(dim * mlp_ratio)
+ self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
+
+ self.H = None
+ self.W = None
+
+ def forward(self, x, mask_matrix):
+ """ Forward function.
+
+ Args:
+ x: Input feature, tensor size (B, H*W, C).
+ H, W: Spatial resolution of the input feature.
+ mask_matrix: Attention mask for cyclic shift.
+ """
+ B, L, C = x.shape
+ H, W = self.H, self.W
+ assert L == H * W, "input feature has wrong size"
+
+ shortcut = x
+ x = self.norm1(x)
+ x = x.view(B, H, W, C)
+
+ # pad feature maps to multiples of window size
+ pad_l = pad_t = 0
+ pad_r = (self.window_size - W % self.window_size) % self.window_size
+ pad_b = (self.window_size - H % self.window_size) % self.window_size
+ x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b))
+ _, Hp, Wp, _ = x.shape
+
+ # cyclic shift
+ if self.shift_size > 0:
+ shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
+ attn_mask = mask_matrix
+ else:
+ shifted_x = x
+ attn_mask = None
+
+ # partition windows
+ x_windows = window_partition(shifted_x, self.window_size) # nW*B, window_size, window_size, C
+ x_windows = x_windows.view(-1, self.window_size * self.window_size, C) # nW*B, window_size*window_size, C
+
+ # W-MSA/SW-MSA
+ attn_windows = self.attn(x_windows, mask=attn_mask) # nW*B, window_size*window_size, C
+
+ # merge windows
+ attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)
+ shifted_x = window_reverse(attn_windows, self.window_size, Hp, Wp) # B H' W' C
+
+ # reverse cyclic shift
+ if self.shift_size > 0:
+ x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
+ else:
+ x = shifted_x
+
+ if pad_r > 0 or pad_b > 0:
+ x = x[:, :H, :W, :].contiguous()
+
+ x = x.view(B, H * W, C)
+
+ # FFN
+ x = shortcut + self.drop_path(x)
+ x = x + self.drop_path(self.mlp(self.norm2(x)))
+
+ return x
+
+
+class PatchMerging(nn.Module):
+ """ Patch Merging Layer
+
+ Args:
+ dim (int): Number of input channels.
+ norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
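+
+ Example:
+ Illustrative shape check (merging halves the resolution and doubles
+ the channel dimension):
+
+ >>> import torch
+ >>> pm = PatchMerging(dim=96)
+ >>> x = torch.rand(1, 56 * 56, 96)
+ >>> tuple(pm(x, 56, 56).shape)
+ (1, 784, 192)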
+ """
+ def __init__(self, dim, norm_layer=nn.LayerNorm):
+ super().__init__()
+ self.dim = dim
+ self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
+ self.norm = norm_layer(4 * dim)
+
+ def forward(self, x, H, W):
+ """ Forward function.
+
+ Args:
+ x: Input feature, tensor size (B, H*W, C).
+ H, W: Spatial resolution of the input feature.
+ """
+ B, L, C = x.shape
+ assert L == H * W, "input feature has wrong size"
+
+ x = x.view(B, H, W, C)
+
+ # padding
+ pad_input = (H % 2 == 1) or (W % 2 == 1)
+ if pad_input:
+ x = F.pad(x, (0, 0, 0, W % 2, 0, H % 2))
+
+ x0 = x[:, 0::2, 0::2, :] # B H/2 W/2 C
+ x1 = x[:, 1::2, 0::2, :] # B H/2 W/2 C
+ x2 = x[:, 0::2, 1::2, :] # B H/2 W/2 C
+ x3 = x[:, 1::2, 1::2, :] # B H/2 W/2 C
+ x = torch.cat([x0, x1, x2, x3], -1) # B H/2 W/2 4*C
+ x = x.view(B, -1, 4 * C) # B H/2*W/2 4*C
+
+ x = self.norm(x)
+ x = self.reduction(x)
+
+ return x
+
+
+class BasicLayer(nn.Module):
+ """ A basic Swin Transformer layer for one stage.
+
+ Args:
+ dim (int): Number of feature channels.
+ depth (int): Number of blocks in this stage.
+ num_heads (int): Number of attention heads.
+ window_size (int): Local window size. Default: 7.
+ mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4.
+ qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
+ qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
+ drop (float, optional): Dropout rate. Default: 0.0
+ attn_drop (float, optional): Attention dropout rate. Default: 0.0
+ drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
+ norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
+ downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
+ use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
+ """
+
+ def __init__(self,
+ dim,
+ depth,
+ num_heads,
+ window_size=7,
+ mlp_ratio=4.,
+ qkv_bias=True,
+ qk_scale=None,
+ drop=0.,
+ attn_drop=0.,
+ drop_path=0.,
+ norm_layer=nn.LayerNorm,
+ downsample=None,
+ use_checkpoint=False):
+ super().__init__()
+ self.window_size = window_size
+ self.shift_size = window_size // 2
+ self.depth = depth
+ self.use_checkpoint = use_checkpoint
+
+ # build blocks
+ self.blocks = nn.ModuleList([
+ SwinTransformerBlock(
+ dim=dim,
+ num_heads=num_heads,
+ window_size=window_size,
+ shift_size=0 if (i % 2 == 0) else window_size // 2,
+ mlp_ratio=mlp_ratio,
+ qkv_bias=qkv_bias,
+ qk_scale=qk_scale,
+ drop=drop,
+ attn_drop=attn_drop,
+ drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
+ norm_layer=norm_layer)
+ for i in range(depth)])
+
+ # patch merging layer
+ if downsample is not None:
+ self.downsample = downsample(dim=dim, norm_layer=norm_layer)
+ else:
+ self.downsample = None
+
+ def forward(self, x, H, W):
+ """ Forward function.
+
+ Args:
+ x: Input feature, tensor size (B, H*W, C).
+ H, W: Spatial resolution of the input feature.
+ """
+
+ # calculate attention mask for SW-MSA
+ Hp = int(np.ceil(H / self.window_size)) * self.window_size
+ Wp = int(np.ceil(W / self.window_size)) * self.window_size
+ img_mask = torch.zeros((1, Hp, Wp, 1), device=x.device) # 1 Hp Wp 1
+ h_slices = (slice(0, -self.window_size),
+ slice(-self.window_size, -self.shift_size),
+ slice(-self.shift_size, None))
+ w_slices = (slice(0, -self.window_size),
+ slice(-self.window_size, -self.shift_size),
+ slice(-self.shift_size, None))
+ cnt = 0
+ for h in h_slices:
+ for w in w_slices:
+ img_mask[:, h, w, :] = cnt
+ cnt += 1
+
+ mask_windows = window_partition(img_mask, self.window_size) # nW, window_size, window_size, 1
+ mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
+ attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
+ attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
+
+ for blk in self.blocks:
+ blk.H, blk.W = H, W
+ if self.use_checkpoint:
+ x = checkpoint.checkpoint(blk, x, attn_mask)
+ else:
+ x = blk(x, attn_mask)
+ if self.downsample is not None:
+ x_down = self.downsample(x, H, W)
+ Wh, Ww = (H + 1) // 2, (W + 1) // 2
+ return x, H, W, x_down, Wh, Ww
+ else:
+ return x, H, W, x, H, W
+
+
+class PatchEmbed(nn.Module):
+ """ Image to Patch Embedding
+
+ Args:
+ patch_size (int): Patch token size. Default: 4.
+ in_chans (int): Number of input image channels. Default: 3.
+ embed_dim (int): Number of linear projection output channels. Default: 96.
+ norm_layer (nn.Module, optional): Normalization layer. Default: None
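+
+ Example:
+ Illustrative shape check (a 224x224 image with ``patch_size=4`` yields
+ a 56x56 grid of patch tokens):
+
+ >>> import torch
+ >>> pe = PatchEmbed(patch_size=4, in_chans=3, embed_dim=96)
+ >>> tuple(pe(torch.rand(1, 3, 224, 224)).shape)
+ (1, 96, 56, 56)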
+ """
+
+ def __init__(self, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None):
+ super().__init__()
+ patch_size = to_2tuple(patch_size)
+ self.patch_size = patch_size
+
+ self.in_chans = in_chans
+ self.embed_dim = embed_dim
+
+ self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
+ if norm_layer is not None:
+ self.norm = norm_layer(embed_dim)
+ else:
+ self.norm = None
+
+ def forward(self, x):
+ """Forward function."""
+ # padding
+ _, _, H, W = x.size()
+ if W % self.patch_size[1] != 0:
+ x = F.pad(x, (0, self.patch_size[1] - W % self.patch_size[1]))
+ if H % self.patch_size[0] != 0:
+ x = F.pad(x, (0, 0, 0, self.patch_size[0] - H % self.patch_size[0]))
+
+ x = self.proj(x) # B C Wh Ww
+ if self.norm is not None:
+ Wh, Ww = x.size(2), x.size(3)
+ x = x.flatten(2).transpose(1, 2)
+ x = self.norm(x)
+ x = x.transpose(1, 2).view(-1, self.embed_dim, Wh, Ww)
+
+ return x
+
+
+@BACKBONES.register_module()
+class SwinTransformer(nn.Module):
+ """ Swin Transformer backbone.
+ A PyTorch implementation of: `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` -
+ https://arxiv.org/pdf/2103.14030
+
+ Args:
+ pretrain_img_size (int): Input image size for training the pretrained model,
+ used in absolute position embedding. Default: 224.
+ patch_size (int | tuple(int)): Patch size. Default: 4.
+ in_chans (int): Number of input image channels. Default: 3.
+ embed_dim (int): Number of linear projection output channels. Default: 96.
+ depths (tuple[int]): Depths of each Swin Transformer stage.
+ num_heads (tuple[int]): Number of attention heads of each stage.
+ window_size (int): Window size. Default: 7.
+ mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4.
+ qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True
+ qk_scale (float): Override default qk scale of head_dim ** -0.5 if set.
+ drop_rate (float): Dropout rate.
+ attn_drop_rate (float): Attention dropout rate. Default: 0.
+ drop_path_rate (float): Stochastic depth rate. Default: 0.2.
+ norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
+ ape (bool): If True, add absolute position embedding to the patch embedding. Default: False.
+ patch_norm (bool): If True, add normalization after patch embedding. Default: True.
+ out_indices (Sequence[int]): Output from which stages.
+ frozen_stages (int): Stages to be frozen (stop grad and set eval mode).
+ -1 means not freezing any parameters.
+ use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
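+
+ Example:
+ Illustrative usage sketch (a randomly initialized Swin-T sized model;
+ the output shapes follow from ``patch_size=4`` and the 2x downsampling
+ between stages):
+
+ >>> import torch
+ >>> self = SwinTransformer(embed_dim=96, depths=[2, 2, 6, 2],
+ ... num_heads=[3, 6, 12, 24])
+ >>> self.eval()
+ >>> inputs = torch.rand(1, 3, 224, 224)
+ >>> level_outputs = self.forward(inputs)
+ >>> for level_out in level_outputs:
+ ... print(tuple(level_out.shape))
+ (1, 96, 56, 56)
+ (1, 192, 28, 28)
+ (1, 384, 14, 14)
+ (1, 768, 7, 7)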
+ """
+
+ def __init__(self,
+ pretrain_img_size=224,
+ patch_size=4,
+ in_chans=3,
+ embed_dim=96,
+ depths=[2, 2, 6, 2],
+ num_heads=[3, 6, 12, 24],
+ window_size=7,
+ mlp_ratio=4.,
+ qkv_bias=True,
+ qk_scale=None,
+ drop_rate=0.,
+ attn_drop_rate=0.,
+ drop_path_rate=0.2,
+ norm_layer=nn.LayerNorm,
+ ape=False,
+ patch_norm=True,
+ out_indices=(0, 1, 2, 3),
+ frozen_stages=-1,
+ use_checkpoint=False):
+ super().__init__()
+
+ self.pretrain_img_size = pretrain_img_size
+ self.num_layers = len(depths)
+ self.embed_dim = embed_dim
+ self.ape = ape
+ self.patch_norm = patch_norm
+ self.out_indices = out_indices
+ self.frozen_stages = frozen_stages
+
+ # split image into non-overlapping patches
+ self.patch_embed = PatchEmbed(
+ patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim,
+ norm_layer=norm_layer if self.patch_norm else None)
+
+ # absolute position embedding
+ if self.ape:
+ pretrain_img_size = to_2tuple(pretrain_img_size)
+ patch_size = to_2tuple(patch_size)
+ patches_resolution = [pretrain_img_size[0] // patch_size[0], pretrain_img_size[1] // patch_size[1]]
+
+ self.absolute_pos_embed = nn.Parameter(torch.zeros(1, embed_dim, patches_resolution[0], patches_resolution[1]))
+ trunc_normal_(self.absolute_pos_embed, std=.02)
+
+ self.pos_drop = nn.Dropout(p=drop_rate)
+
+ # stochastic depth
+ dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule
+
+ # build layers
+ self.layers = nn.ModuleList()
+ for i_layer in range(self.num_layers):
+ layer = BasicLayer(
+ dim=int(embed_dim * 2 ** i_layer),
+ depth=depths[i_layer],
+ num_heads=num_heads[i_layer],
+ window_size=window_size,
+ mlp_ratio=mlp_ratio,
+ qkv_bias=qkv_bias,
+ qk_scale=qk_scale,
+ drop=drop_rate,
+ attn_drop=attn_drop_rate,
+ drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],
+ norm_layer=norm_layer,
+ downsample=PatchMerging if (i_layer < self.num_layers - 1) else None,
+ use_checkpoint=use_checkpoint)
+ self.layers.append(layer)
+
+ num_features = [int(embed_dim * 2 ** i) for i in range(self.num_layers)]
+ self.num_features = num_features
+
+ # add a norm layer for each output
+ for i_layer in out_indices:
+ layer = norm_layer(num_features[i_layer])
+ layer_name = f'norm{i_layer}'
+ self.add_module(layer_name, layer)
+
+ self._freeze_stages()
+
+ def _freeze_stages(self):
+ if self.frozen_stages >= 0:
+ self.patch_embed.eval()
+ for param in self.patch_embed.parameters():
+ param.requires_grad = False
+
+ if self.frozen_stages >= 1 and self.ape:
+ self.absolute_pos_embed.requires_grad = False
+
+ if self.frozen_stages >= 2:
+ self.pos_drop.eval()
+ for i in range(0, self.frozen_stages - 1):
+ m = self.layers[i]
+ m.eval()
+ for param in m.parameters():
+ param.requires_grad = False
+
+ def init_weights(self, pretrained=None):
+ """Initialize the weights in backbone.
+
+ Args:
+ pretrained (str, optional): Path to pre-trained weights.
+ Defaults to None.
+ """
+
+ def _init_weights(m):
+ if isinstance(m, nn.Linear):
+ trunc_normal_(m.weight, std=.02)
+ if isinstance(m, nn.Linear) and m.bias is not None:
+ nn.init.constant_(m.bias, 0)
+ elif isinstance(m, nn.LayerNorm):
+ nn.init.constant_(m.bias, 0)
+ nn.init.constant_(m.weight, 1.0)
+
+ if isinstance(pretrained, str):
+ self.apply(_init_weights)
+ logger = get_root_logger()
+ load_checkpoint(self, pretrained, strict=False, logger=logger)
+ elif pretrained is None:
+ self.apply(_init_weights)
+ else:
+ raise TypeError('pretrained must be a str or None')
+
+ def forward(self, x):
+ """Forward function."""
+ x = self.patch_embed(x)
+
+ Wh, Ww = x.size(2), x.size(3)
+ if self.ape:
+ # interpolate the position embedding to the corresponding size
+ absolute_pos_embed = F.interpolate(self.absolute_pos_embed, size=(Wh, Ww), mode='bicubic')
+ x = (x + absolute_pos_embed).flatten(2).transpose(1, 2) # B Wh*Ww C
+ else:
+ x = x.flatten(2).transpose(1, 2)
+ x = self.pos_drop(x)
+
+ outs = []
+ for i in range(self.num_layers):
+ layer = self.layers[i]
+ x_out, H, W, x, Wh, Ww = layer(x, Wh, Ww)
+
+ if i in self.out_indices:
+ norm_layer = getattr(self, f'norm{i}')
+ x_out = norm_layer(x_out)
+
+ out = x_out.view(-1, H, W, self.num_features[i]).permute(0, 3, 1, 2).contiguous()
+ outs.append(out)
+
+ return tuple(outs)
+
+ def train(self, mode=True):
+ """Convert the model into training mode while keep layers freezed."""
+ super(SwinTransformer, self).train(mode)
+ self._freeze_stages()
diff --git a/annotator/uniformer/mmdet_null/models/backbones/trident_resnet.py b/annotator/uniformer/mmdet_null/models/backbones/trident_resnet.py
new file mode 100644
index 0000000000000000000000000000000000000000..e6100132b0f4120585da8a309cba4488b4b0ea72
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/backbones/trident_resnet.py
@@ -0,0 +1,292 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+import torch.utils.checkpoint as cp
+from mmcv.cnn import build_conv_layer, build_norm_layer, kaiming_init
+from torch.nn.modules.utils import _pair
+
+from mmdet.models.backbones.resnet import Bottleneck, ResNet
+from mmdet.models.builder import BACKBONES
+
+
+class TridentConv(nn.Module):
+ """Trident Convolution Module.
+
+ Args:
+ in_channels (int): Number of channels in input.
+ out_channels (int): Number of channels in output.
+ kernel_size (int): Size of convolution kernel.
+ stride (int, optional): Convolution stride. Default: 1.
+ trident_dilations (tuple[int, int, int], optional): Dilations of the
+ different trident branches. Default: (1, 2, 3).
+ test_branch_idx (int, optional): In inference, all 3 branches will
+ be used if `test_branch_idx==-1`, otherwise only the branch with
+ index `test_branch_idx` will be used. Default: 1.
+ bias (bool, optional): Whether to use bias in convolution or not.
+ Default: False.
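+
+ Example:
+ Illustrative usage sketch (in training mode the module takes one input
+ per branch and returns one output per branch; sizes are arbitrary):
+
+ >>> import torch
+ >>> conv = TridentConv(16, 32, kernel_size=3, trident_dilations=(1, 2, 3))
+ >>> inputs = [torch.rand(1, 16, 32, 32) for _ in range(3)]
+ >>> [tuple(out.shape) for out in conv(inputs)]
+ [(1, 32, 32, 32), (1, 32, 32, 32), (1, 32, 32, 32)]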
+ """
+
+ def __init__(self,
+ in_channels,
+ out_channels,
+ kernel_size,
+ stride=1,
+ trident_dilations=(1, 2, 3),
+ test_branch_idx=1,
+ bias=False):
+ super(TridentConv, self).__init__()
+ self.num_branch = len(trident_dilations)
+ self.with_bias = bias
+ self.test_branch_idx = test_branch_idx
+ self.stride = _pair(stride)
+ self.kernel_size = _pair(kernel_size)
+ self.paddings = _pair(trident_dilations)
+ self.dilations = trident_dilations
+ self.in_channels = in_channels
+ self.out_channels = out_channels
+ self.bias = bias
+
+ self.weight = nn.Parameter(
+ torch.Tensor(out_channels, in_channels, *self.kernel_size))
+ if bias:
+ self.bias = nn.Parameter(torch.Tensor(out_channels))
+ else:
+ self.bias = None
+ self.init_weights()
+
+ def init_weights(self):
+ kaiming_init(self, distribution='uniform', mode='fan_in')
+
+ def extra_repr(self):
+ tmpstr = f'in_channels={self.in_channels}'
+ tmpstr += f', out_channels={self.out_channels}'
+ tmpstr += f', kernel_size={self.kernel_size}'
+ tmpstr += f', num_branch={self.num_branch}'
+ tmpstr += f', test_branch_idx={self.test_branch_idx}'
+ tmpstr += f', stride={self.stride}'
+ tmpstr += f', paddings={self.paddings}'
+ tmpstr += f', dilations={self.dilations}'
+ tmpstr += f', bias={self.bias}'
+ return tmpstr
+
+ def forward(self, inputs):
+ if self.training or self.test_branch_idx == -1:
+ outputs = [
+ F.conv2d(input, self.weight, self.bias, self.stride, padding,
+ dilation) for input, dilation, padding in zip(
+ inputs, self.dilations, self.paddings)
+ ]
+ else:
+ assert len(inputs) == 1
+ outputs = [
+ F.conv2d(inputs[0], self.weight, self.bias, self.stride,
+ self.paddings[self.test_branch_idx],
+ self.dilations[self.test_branch_idx])
+ ]
+
+ return outputs
+
+
+# Since TridentNet is defined over ResNet50 and ResNet101, here we
+# only support TridentBottleneckBlock.
+class TridentBottleneck(Bottleneck):
+ """BottleBlock for TridentResNet.
+
+ Args:
+ trident_dilations (tuple[int, int, int]): Dilations of the different
+ trident branches.
+ test_branch_idx (int): In inference, all 3 branches will be used
+ if `test_branch_idx==-1`, otherwise only the branch with index
+ `test_branch_idx` will be used.
+ concat_output (bool): Whether to concat the output list to a Tensor.
+ `True` only in the last Block.
+ """
+
+ def __init__(self, trident_dilations, test_branch_idx, concat_output,
+ **kwargs):
+
+ super(TridentBottleneck, self).__init__(**kwargs)
+ self.trident_dilations = trident_dilations
+ self.num_branch = len(trident_dilations)
+ self.concat_output = concat_output
+ self.test_branch_idx = test_branch_idx
+ self.conv2 = TridentConv(
+ self.planes,
+ self.planes,
+ kernel_size=3,
+ stride=self.conv2_stride,
+ bias=False,
+ trident_dilations=self.trident_dilations,
+ test_branch_idx=test_branch_idx)
+
+ def forward(self, x):
+
+ def _inner_forward(x):
+ num_branch = (
+ self.num_branch
+ if self.training or self.test_branch_idx == -1 else 1)
+ identity = x
+ if not isinstance(x, list):
+ x = (x, ) * num_branch
+ identity = x
+ if self.downsample is not None:
+ identity = [self.downsample(b) for b in x]
+
+ out = [self.conv1(b) for b in x]
+ out = [self.norm1(b) for b in out]
+ out = [self.relu(b) for b in out]
+
+ if self.with_plugins:
+ for k in range(len(out)):
+ out[k] = self.forward_plugin(out[k],
+ self.after_conv1_plugin_names)
+
+ out = self.conv2(out)
+ out = [self.norm2(b) for b in out]
+ out = [self.relu(b) for b in out]
+ if self.with_plugins:
+ for k in range(len(out)):
+ out[k] = self.forward_plugin(out[k],
+ self.after_conv2_plugin_names)
+
+ out = [self.conv3(b) for b in out]
+ out = [self.norm3(b) for b in out]
+
+ if self.with_plugins:
+ for k in range(len(out)):
+ out[k] = self.forward_plugin(out[k],
+ self.after_conv3_plugin_names)
+
+ out = [
+ out_b + identity_b for out_b, identity_b in zip(out, identity)
+ ]
+ return out
+
+ if self.with_cp and x.requires_grad:
+ out = cp.checkpoint(_inner_forward, x)
+ else:
+ out = _inner_forward(x)
+
+ out = [self.relu(b) for b in out]
+ if self.concat_output:
+ out = torch.cat(out, dim=0)
+ return out
+
+
+def make_trident_res_layer(block,
+ inplanes,
+ planes,
+ num_blocks,
+ stride=1,
+ trident_dilations=(1, 2, 3),
+ style='pytorch',
+ with_cp=False,
+ conv_cfg=None,
+ norm_cfg=dict(type='BN'),
+ dcn=None,
+ plugins=None,
+ test_branch_idx=-1):
+ """Build Trident Res Layers."""
+
+ downsample = None
+ if stride != 1 or inplanes != planes * block.expansion:
+ downsample = []
+ conv_stride = stride
+ downsample.extend([
+ build_conv_layer(
+ conv_cfg,
+ inplanes,
+ planes * block.expansion,
+ kernel_size=1,
+ stride=conv_stride,
+ bias=False),
+ build_norm_layer(norm_cfg, planes * block.expansion)[1]
+ ])
+ downsample = nn.Sequential(*downsample)
+
+ layers = []
+ for i in range(num_blocks):
+ layers.append(
+ block(
+ inplanes=inplanes,
+ planes=planes,
+ stride=stride if i == 0 else 1,
+ trident_dilations=trident_dilations,
+ downsample=downsample if i == 0 else None,
+ style=style,
+ with_cp=with_cp,
+ conv_cfg=conv_cfg,
+ norm_cfg=norm_cfg,
+ dcn=dcn,
+ plugins=plugins,
+ test_branch_idx=test_branch_idx,
+ concat_output=True if i == num_blocks - 1 else False))
+ inplanes = planes * block.expansion
+ return nn.Sequential(*layers)
+
+
+@BACKBONES.register_module()
+class TridentResNet(ResNet):
+    """The stem layer, stage 1 and stage 2 in Trident ResNet are identical to
+    those in ResNet, while in stage 3 the normal bottleneck blocks are replaced
+    with Trident bottleneck blocks to yield trident outputs. Different branches
+    share the convolution weights but use different dilations to achieve
+    multi-scale outputs.
+
+ / stage3(b0) \
+ x - stem - stage1 - stage2 - stage3(b1) - output
+ \ stage3(b2) /
+
+ Args:
+ depth (int): Depth of resnet, from {50, 101, 152}.
+ num_branch (int): Number of branches in TridentNet.
+ test_branch_idx (int): In inference, all 3 branches will be used
+ if `test_branch_idx==-1`, otherwise only branch with index
+ `test_branch_idx` will be used.
+ trident_dilations (tuple[int]): Dilations of different trident branch.
+ len(trident_dilations) should be equal to num_branch.
+ """ # noqa
+
+ def __init__(self, depth, num_branch, test_branch_idx, trident_dilations,
+ **kwargs):
+
+ assert num_branch == len(trident_dilations)
+ assert depth in (50, 101, 152)
+ super(TridentResNet, self).__init__(depth, **kwargs)
+ assert self.num_stages == 3
+ self.test_branch_idx = test_branch_idx
+ self.num_branch = num_branch
+
+ last_stage_idx = self.num_stages - 1
+ stride = self.strides[last_stage_idx]
+ dilation = trident_dilations
+ dcn = self.dcn if self.stage_with_dcn[last_stage_idx] else None
+ if self.plugins is not None:
+ stage_plugins = self.make_stage_plugins(self.plugins,
+ last_stage_idx)
+ else:
+ stage_plugins = None
+ planes = self.base_channels * 2**last_stage_idx
+ res_layer = make_trident_res_layer(
+ TridentBottleneck,
+ inplanes=(self.block.expansion * self.base_channels *
+ 2**(last_stage_idx - 1)),
+ planes=planes,
+ num_blocks=self.stage_blocks[last_stage_idx],
+ stride=stride,
+ trident_dilations=dilation,
+ style=self.style,
+ with_cp=self.with_cp,
+ conv_cfg=self.conv_cfg,
+ norm_cfg=self.norm_cfg,
+ dcn=dcn,
+ plugins=stage_plugins,
+ test_branch_idx=self.test_branch_idx)
+
+ layer_name = f'layer{last_stage_idx + 1}'
+
+ self.__setattr__(layer_name, res_layer)
+ self.res_layers.pop(last_stage_idx)
+ self.res_layers.insert(last_stage_idx, layer_name)
+
+ self._freeze_stages()
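+
+# Illustrative config sketch (hypothetical values, not part of upstream mmdet):
+# a TridentResNet backbone could be specified in an mmdet config roughly as
+#   backbone=dict(
+#       type='TridentResNet',
+#       depth=50,
+#       num_stages=3,
+#       num_branch=3,
+#       test_branch_idx=1,
+#       trident_dilations=(1, 2, 3))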
diff --git a/annotator/uniformer/mmdet_null/models/backbones/uniformer.py b/annotator/uniformer/mmdet_null/models/backbones/uniformer.py
new file mode 100644
index 0000000000000000000000000000000000000000..5705a6dd7019f51bc04e4a2c7ff42021821dbd49
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/backbones/uniformer.py
@@ -0,0 +1,422 @@
+# --------------------------------------------------------
+# UniFormer
+# Copyright (c) 2022 SenseTime X-Lab
+# Licensed under The MIT License [see LICENSE for details]
+# Written by Kunchang Li
+# --------------------------------------------------------
+
+from collections import OrderedDict
+import math
+
+from functools import partial
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+import torch.utils.checkpoint as checkpoint
+import numpy as np
+from timm.models.layers import DropPath, to_2tuple, trunc_normal_
+
+from mmcv_custom import load_checkpoint
+from mmdet.utils import get_root_logger
+from ..builder import BACKBONES
+
+
+class Mlp(nn.Module):
+ def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
+ super().__init__()
+ out_features = out_features or in_features
+ hidden_features = hidden_features or in_features
+ self.fc1 = nn.Linear(in_features, hidden_features)
+ self.act = act_layer()
+ self.fc2 = nn.Linear(hidden_features, out_features)
+ self.drop = nn.Dropout(drop)
+
+ def forward(self, x):
+ x = self.fc1(x)
+ x = self.act(x)
+ x = self.drop(x)
+ x = self.fc2(x)
+ x = self.drop(x)
+ return x
+
+
+class CMlp(nn.Module):
+ def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
+ super().__init__()
+ out_features = out_features or in_features
+ hidden_features = hidden_features or in_features
+ self.fc1 = nn.Conv2d(in_features, hidden_features, 1)
+ self.act = act_layer()
+ self.fc2 = nn.Conv2d(hidden_features, out_features, 1)
+ self.drop = nn.Dropout(drop)
+
+ def forward(self, x):
+ x = self.fc1(x)
+ x = self.act(x)
+ x = self.drop(x)
+ x = self.fc2(x)
+ x = self.drop(x)
+ return x
+
+
+class CBlock(nn.Module):
+ def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
+ drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):
+ super().__init__()
+ self.pos_embed = nn.Conv2d(dim, dim, 3, padding=1, groups=dim)
+ self.norm1 = nn.BatchNorm2d(dim)
+ self.conv1 = nn.Conv2d(dim, dim, 1)
+ self.conv2 = nn.Conv2d(dim, dim, 1)
+ self.attn = nn.Conv2d(dim, dim, 5, padding=2, groups=dim)
+ # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
+ self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
+ self.norm2 = nn.BatchNorm2d(dim)
+ mlp_hidden_dim = int(dim * mlp_ratio)
+ self.mlp = CMlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
+
+ def forward(self, x):
+ x = x + self.pos_embed(x)
+ x = x + self.drop_path(self.conv2(self.attn(self.conv1(self.norm1(x)))))
+ x = x + self.drop_path(self.mlp(self.norm2(x)))
+ return x
+
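+# Shape sketch (illustrative, not part of the upstream code): CBlock keeps the
+# channels-first layout, e.g. a (1, 64, 56, 56) input passed through
+# CBlock(dim=64, num_heads=1) comes out as (1, 64, 56, 56); its "attention" is
+# a 5x5 depthwise convolution rather than token self-attention.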
+
+class Attention(nn.Module):
+ def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):
+ super().__init__()
+ self.num_heads = num_heads
+ head_dim = dim // num_heads
+ # NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights
+ self.scale = qk_scale or head_dim ** -0.5
+
+ self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
+ self.attn_drop = nn.Dropout(attn_drop)
+ self.proj = nn.Linear(dim, dim)
+ self.proj_drop = nn.Dropout(proj_drop)
+
+ def forward(self, x):
+ B, N, C = x.shape
+ qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
+ q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)
+
+ attn = (q @ k.transpose(-2, -1)) * self.scale
+ attn = attn.softmax(dim=-1)
+ attn = self.attn_drop(attn)
+
+ x = (attn @ v).transpose(1, 2).reshape(B, N, C)
+ x = self.proj(x)
+ x = self.proj_drop(x)
+ return x
+
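+# Note (illustrative): Attention operates on flattened token sequences of shape
+# (B, N, C); SABlock below flattens the (B, C, H, W) feature map into tokens
+# before calling it and reshapes the result back afterwards.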
+
+class SABlock(nn.Module):
+ def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
+ drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):
+ super().__init__()
+ self.pos_embed = nn.Conv2d(dim, dim, 3, padding=1, groups=dim)
+ self.norm1 = norm_layer(dim)
+ self.attn = Attention(
+ dim,
+ num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale,
+ attn_drop=attn_drop, proj_drop=drop)
+ # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
+ self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
+ self.norm2 = norm_layer(dim)
+ mlp_hidden_dim = int(dim * mlp_ratio)
+ self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
+
+ def forward(self, x):
+ x = x + self.pos_embed(x)
+ B, N, H, W = x.shape
+ x = x.flatten(2).transpose(1, 2)
+ x = x + self.drop_path(self.attn(self.norm1(x)))
+ x = x + self.drop_path(self.mlp(self.norm2(x)))
+ x = x.transpose(1, 2).reshape(B, N, H, W)
+ return x
+
+
+def window_partition(x, window_size):
+ """
+ Args:
+ x: (B, H, W, C)
+ window_size (int): window size
+ Returns:
+ windows: (num_windows*B, window_size, window_size, C)
+ """
+ B, H, W, C = x.shape
+ x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
+ windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
+ return windows
+
+
+def window_reverse(windows, window_size, H, W):
+ """
+ Args:
+ windows: (num_windows*B, window_size, window_size, C)
+ window_size (int): Window size
+ H (int): Height of image
+ W (int): Width of image
+ Returns:
+ x: (B, H, W, C)
+ """
+ B = int(windows.shape[0] / (H * W / window_size / window_size))
+ x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)
+ x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
+ return x
+
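+# Round-trip sketch (illustrative, not part of the upstream code): when H and W
+# are multiples of window_size, window_reverse undoes window_partition exactly:
+#   >>> x = torch.rand(2, 28, 28, 96)
+#   >>> w = window_partition(x, 7)  # (2 * 16, 7, 7, 96)
+#   >>> assert torch.equal(window_reverse(w, 7, 28, 28), x)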
+
+class SABlock_Windows(nn.Module):
+ def __init__(self, dim, num_heads, window_size=14, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
+ drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):
+ super().__init__()
+        self.window_size = window_size
+ self.pos_embed = nn.Conv2d(dim, dim, 3, padding=1, groups=dim)
+ self.norm1 = norm_layer(dim)
+ self.attn = Attention(
+ dim,
+ num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale,
+ attn_drop=attn_drop, proj_drop=drop)
+ # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
+ self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
+ self.norm2 = norm_layer(dim)
+ mlp_hidden_dim = int(dim * mlp_ratio)
+ self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
+
+ def forward(self, x):
+ x = x + self.pos_embed(x)
+ x = x.permute(0, 2, 3, 1)
+ B, H, W, C = x.shape
+ shortcut = x
+ x = self.norm1(x)
+
+ pad_l = pad_t = 0
+ pad_r = (self.window_size - W % self.window_size) % self.window_size
+ pad_b = (self.window_size - H % self.window_size) % self.window_size
+ x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b))
+ _, Hp, Wp, _ = x.shape
+
+ x_windows = window_partition(x, self.window_size) # nW*B, window_size, window_size, C
+ x_windows = x_windows.view(-1, self.window_size * self.window_size, C) # nW*B, window_size*window_size, C
+
+        # window-based multi-head self-attention (W-MSA only; no shifted windows)
+ attn_windows = self.attn(x_windows) # nW*B, window_size*window_size, C
+
+ # merge windows
+ attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)
+ x = window_reverse(attn_windows, self.window_size, Hp, Wp) # B H' W' C
+
+        # remove padding (no cyclic shift is applied in this block)
+ if pad_r > 0 or pad_b > 0:
+ x = x[:, :H, :W, :].contiguous()
+
+ x = shortcut + self.drop_path(x)
+ x = x + self.drop_path(self.mlp(self.norm2(x)))
+ x = x.permute(0, 3, 1, 2).reshape(B, C, H, W)
+ return x
+
+
+class PatchEmbed(nn.Module):
+ """ Image to Patch Embedding
+ """
+ def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
+ super().__init__()
+ img_size = to_2tuple(img_size)
+ patch_size = to_2tuple(patch_size)
+ num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0])
+ self.img_size = img_size
+ self.patch_size = patch_size
+ self.num_patches = num_patches
+ self.norm = nn.LayerNorm(embed_dim)
+ self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
+
+ def forward(self, x):
+ B, _, H, W = x.shape
+ x = self.proj(x)
+ B, _, H, W = x.shape
+ x = x.flatten(2).transpose(1, 2)
+ x = self.norm(x)
+ x = x.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous()
+ return x
+
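+# Shape sketch (illustrative, not part of the upstream code): PatchEmbed
+# downsamples by `patch_size` and returns a channels-first feature map, e.g.
+# PatchEmbed(img_size=224, patch_size=4, in_chans=3, embed_dim=64) maps a
+# (1, 3, 224, 224) image to (1, 64, 56, 56).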
+
+@BACKBONES.register_module()
+class UniFormer(nn.Module):
+    """ UniFormer backbone.
+
+    A PyTorch implementation of the UniFormer architecture, which combines
+    convolutional blocks (stages 1-2) with self-attention blocks (stages 3-4).
+    """
+ def __init__(self, layers=[3, 4, 8, 3], img_size=224, in_chans=3, num_classes=80, embed_dim=[64, 128, 320, 512],
+ head_dim=64, mlp_ratio=4., qkv_bias=True, qk_scale=None, representation_size=None,
+ drop_rate=0., attn_drop_rate=0., drop_path_rate=0., norm_layer=partial(nn.LayerNorm, eps=1e-6),
+ pretrained_path=None, use_checkpoint=False, checkpoint_num=[0, 0, 0, 0],
+ windows=False, hybrid=False, window_size=14):
+ """
+ Args:
+            layers (list): number of blocks in each stage
+ img_size (int, tuple): input image size
+ in_chans (int): number of input channels
+ num_classes (int): number of classes for classification head
+ embed_dim (int): embedding dimension
+ head_dim (int): dimension of attention heads
+ mlp_ratio (int): ratio of mlp hidden dim to embedding dim
+ qkv_bias (bool): enable bias for qkv if True
+ qk_scale (float): override default qk scale of head_dim ** -0.5 if set
+ representation_size (Optional[int]): enable and set representation layer (pre-logits) to this value if set
+ drop_rate (float): dropout rate
+ attn_drop_rate (float): attention dropout rate
+ drop_path_rate (float): stochastic depth rate
+ norm_layer (nn.Module): normalization layer
+ pretrained_path (str): path of pretrained model
+            use_checkpoint (bool): whether to use gradient checkpointing
+            checkpoint_num (list): number of leading blocks to checkpoint in
+                each stage
+            windows (bool): whether to use window MHRA
+            hybrid (bool): whether to use hybrid MHRA
+ window_size (int): size of window (>14)
+ """
+ super().__init__()
+ self.num_classes = num_classes
+ self.use_checkpoint = use_checkpoint
+ self.checkpoint_num = checkpoint_num
+ self.windows = windows
+ print(f'Use Checkpoint: {self.use_checkpoint}')
+ print(f'Checkpoint Number: {self.checkpoint_num}')
+ self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models
+ norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)
+
+ self.patch_embed1 = PatchEmbed(
+ img_size=img_size, patch_size=4, in_chans=in_chans, embed_dim=embed_dim[0])
+ self.patch_embed2 = PatchEmbed(
+ img_size=img_size // 4, patch_size=2, in_chans=embed_dim[0], embed_dim=embed_dim[1])
+ self.patch_embed3 = PatchEmbed(
+ img_size=img_size // 8, patch_size=2, in_chans=embed_dim[1], embed_dim=embed_dim[2])
+ self.patch_embed4 = PatchEmbed(
+ img_size=img_size // 16, patch_size=2, in_chans=embed_dim[2], embed_dim=embed_dim[3])
+
+ self.pos_drop = nn.Dropout(p=drop_rate)
+ dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(layers))] # stochastic depth decay rule
+ num_heads = [dim // head_dim for dim in embed_dim]
+ self.blocks1 = nn.ModuleList([
+ CBlock(
+ dim=embed_dim[0], num_heads=num_heads[0], mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
+ drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer)
+ for i in range(layers[0])])
+        self.norm1 = norm_layer(embed_dim[0])
+ self.blocks2 = nn.ModuleList([
+ CBlock(
+ dim=embed_dim[1], num_heads=num_heads[1], mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
+ drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i+layers[0]], norm_layer=norm_layer)
+ for i in range(layers[1])])
+ self.norm2 = norm_layer(embed_dim[1])
+ if self.windows:
+ print('Use local window for all blocks in stage3')
+ self.blocks3 = nn.ModuleList([
+ SABlock_Windows(
+ dim=embed_dim[2], num_heads=num_heads[2], window_size=window_size, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
+ drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i+layers[0]+layers[1]], norm_layer=norm_layer)
+ for i in range(layers[2])])
+ elif hybrid:
+ print('Use hybrid window for blocks in stage3')
+ block3 = []
+ for i in range(layers[2]):
+ if (i + 1) % 4 == 0:
+ block3.append(SABlock(
+ dim=embed_dim[2], num_heads=num_heads[2], mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
+ drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i+layers[0]+layers[1]], norm_layer=norm_layer))
+ else:
+ block3.append(SABlock_Windows(
+ dim=embed_dim[2], num_heads=num_heads[2], window_size=window_size, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
+ drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i+layers[0]+layers[1]], norm_layer=norm_layer))
+ self.blocks3 = nn.ModuleList(block3)
+ else:
+ print('Use global window for all blocks in stage3')
+ self.blocks3 = nn.ModuleList([
+ SABlock(
+ dim=embed_dim[2], num_heads=num_heads[2], mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
+ drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i+layers[0]+layers[1]], norm_layer=norm_layer)
+ for i in range(layers[2])])
+ self.norm3 = norm_layer(embed_dim[2])
+ self.blocks4 = nn.ModuleList([
+ SABlock(
+ dim=embed_dim[3], num_heads=num_heads[3], mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
+ drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i+layers[0]+layers[1]+layers[2]], norm_layer=norm_layer)
+ for i in range(layers[3])])
+ self.norm4 = norm_layer(embed_dim[3])
+
+ # Representation layer
+ if representation_size:
+ self.num_features = representation_size
+ self.pre_logits = nn.Sequential(OrderedDict([
+ ('fc', nn.Linear(embed_dim, representation_size)),
+ ('act', nn.Tanh())
+ ]))
+ else:
+ self.pre_logits = nn.Identity()
+
+ self.apply(self._init_weights)
+ self.init_weights(pretrained=pretrained_path)
+
+ def init_weights(self, pretrained):
+ if isinstance(pretrained, str):
+ logger = get_root_logger()
+ load_checkpoint(self, pretrained, map_location='cpu', strict=False, logger=logger)
+            print(f'Loaded pretrained model from {pretrained}')
+
+    def _init_weights(self, m):
+ if isinstance(m, nn.Linear):
+ trunc_normal_(m.weight, std=.02)
+ if isinstance(m, nn.Linear) and m.bias is not None:
+ nn.init.constant_(m.bias, 0)
+ elif isinstance(m, nn.LayerNorm):
+ nn.init.constant_(m.bias, 0)
+ nn.init.constant_(m.weight, 1.0)
+
+ @torch.jit.ignore
+ def no_weight_decay(self):
+ return {'pos_embed', 'cls_token'}
+
+ def get_classifier(self):
+ return self.head
+
+ def reset_classifier(self, num_classes, global_pool=''):
+ self.num_classes = num_classes
+ self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
+
+ def forward_features(self, x):
+ out = []
+ x = self.patch_embed1(x)
+ x = self.pos_drop(x)
+ for i, blk in enumerate(self.blocks1):
+ if self.use_checkpoint and i < self.checkpoint_num[0]:
+ x = checkpoint.checkpoint(blk, x)
+ else:
+ x = blk(x)
+ x_out = self.norm1(x.permute(0, 2, 3, 1))
+ out.append(x_out.permute(0, 3, 1, 2).contiguous())
+ x = self.patch_embed2(x)
+ for i, blk in enumerate(self.blocks2):
+ if self.use_checkpoint and i < self.checkpoint_num[1]:
+ x = checkpoint.checkpoint(blk, x)
+ else:
+ x = blk(x)
+ x_out = self.norm2(x.permute(0, 2, 3, 1))
+ out.append(x_out.permute(0, 3, 1, 2).contiguous())
+ x = self.patch_embed3(x)
+ for i, blk in enumerate(self.blocks3):
+ if self.use_checkpoint and i < self.checkpoint_num[2]:
+ x = checkpoint.checkpoint(blk, x)
+ else:
+ x = blk(x)
+ x_out = self.norm3(x.permute(0, 2, 3, 1))
+ out.append(x_out.permute(0, 3, 1, 2).contiguous())
+ x = self.patch_embed4(x)
+ for i, blk in enumerate(self.blocks4):
+ if self.use_checkpoint and i < self.checkpoint_num[3]:
+ x = checkpoint.checkpoint(blk, x)
+ else:
+ x = blk(x)
+ x_out = self.norm4(x.permute(0, 2, 3, 1))
+ out.append(x_out.permute(0, 3, 1, 2).contiguous())
+ return tuple(out)
+
+ def forward(self, x):
+ x = self.forward_features(x)
+ return x
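+
+# Illustrative usage sketch (hypothetical sizes, not part of the upstream code):
+# with the default configuration the backbone returns a 4-level feature pyramid:
+#   >>> model = UniFormer(layers=[3, 4, 8, 3], embed_dim=[64, 128, 320, 512])
+#   >>> feats = model(torch.rand(1, 3, 224, 224))
+#   >>> [tuple(f.shape) for f in feats]
+#   [(1, 64, 56, 56), (1, 128, 28, 28), (1, 320, 14, 14), (1, 512, 7, 7)]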
diff --git a/annotator/uniformer/mmdet_null/models/builder.py b/annotator/uniformer/mmdet_null/models/builder.py
new file mode 100644
index 0000000000000000000000000000000000000000..81c927e507a7c1625ffb114de10e93c94927af25
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/builder.py
@@ -0,0 +1,77 @@
+import warnings
+
+from mmcv.utils import Registry, build_from_cfg
+from torch import nn
+
+BACKBONES = Registry('backbone')
+NECKS = Registry('neck')
+ROI_EXTRACTORS = Registry('roi_extractor')
+SHARED_HEADS = Registry('shared_head')
+HEADS = Registry('head')
+LOSSES = Registry('loss')
+DETECTORS = Registry('detector')
+
+
+def build(cfg, registry, default_args=None):
+ """Build a module.
+
+ Args:
+        cfg (dict, list[dict]): The config of modules; it is either a config
+            dict or a list of config dicts.
+ registry (:obj:`Registry`): A registry the module belongs to.
+ default_args (dict, optional): Default arguments to build the module.
+ Defaults to None.
+
+ Returns:
+ nn.Module: A built nn module.
+ """
+ if isinstance(cfg, list):
+ modules = [
+ build_from_cfg(cfg_, registry, default_args) for cfg_ in cfg
+ ]
+ return nn.Sequential(*modules)
+ else:
+ return build_from_cfg(cfg, registry, default_args)
+
+
+def build_backbone(cfg):
+ """Build backbone."""
+ return build(cfg, BACKBONES)
+
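+# Illustrative sketch (hypothetical config, not part of the upstream code): any
+# backbone registered with @BACKBONES.register_module() can be built from a
+# plain config dict, e.g.
+#   >>> cfg = dict(type='UniFormer', layers=[3, 4, 8, 3])
+#   >>> backbone = build_backbone(cfg)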
+
+def build_neck(cfg):
+ """Build neck."""
+ return build(cfg, NECKS)
+
+
+def build_roi_extractor(cfg):
+ """Build roi extractor."""
+ return build(cfg, ROI_EXTRACTORS)
+
+
+def build_shared_head(cfg):
+ """Build shared head."""
+ return build(cfg, SHARED_HEADS)
+
+
+def build_head(cfg):
+ """Build head."""
+ return build(cfg, HEADS)
+
+
+def build_loss(cfg):
+ """Build loss."""
+ return build(cfg, LOSSES)
+
+
+def build_detector(cfg, train_cfg=None, test_cfg=None):
+ """Build detector."""
+ if train_cfg is not None or test_cfg is not None:
+        warnings.warn(
+            'train_cfg and test_cfg are deprecated, '
+            'please specify them in model', UserWarning)
+ assert cfg.get('train_cfg') is None or train_cfg is None, \
+ 'train_cfg specified in both outer field and model field '
+ assert cfg.get('test_cfg') is None or test_cfg is None, \
+ 'test_cfg specified in both outer field and model field '
+ return build(cfg, DETECTORS, dict(train_cfg=train_cfg, test_cfg=test_cfg))
diff --git a/annotator/uniformer/mmdet_null/models/dense_heads/__init__.py b/annotator/uniformer/mmdet_null/models/dense_heads/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..f004dd95d97df16167f932587b3ce73b05b04a37
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/dense_heads/__init__.py
@@ -0,0 +1,41 @@
+from .anchor_free_head import AnchorFreeHead
+from .anchor_head import AnchorHead
+from .atss_head import ATSSHead
+from .cascade_rpn_head import CascadeRPNHead, StageCascadeRPNHead
+from .centripetal_head import CentripetalHead
+from .corner_head import CornerHead
+from .embedding_rpn_head import EmbeddingRPNHead
+from .fcos_head import FCOSHead
+from .fovea_head import FoveaHead
+from .free_anchor_retina_head import FreeAnchorRetinaHead
+from .fsaf_head import FSAFHead
+from .ga_retina_head import GARetinaHead
+from .ga_rpn_head import GARPNHead
+from .gfl_head import GFLHead
+from .guided_anchor_head import FeatureAdaption, GuidedAnchorHead
+from .ld_head import LDHead
+from .nasfcos_head import NASFCOSHead
+from .paa_head import PAAHead
+from .pisa_retinanet_head import PISARetinaHead
+from .pisa_ssd_head import PISASSDHead
+from .reppoints_head import RepPointsHead
+from .retina_head import RetinaHead
+from .retina_sepbn_head import RetinaSepBNHead
+from .rpn_head import RPNHead
+from .sabl_retina_head import SABLRetinaHead
+from .ssd_head import SSDHead
+from .transformer_head import TransformerHead
+from .vfnet_head import VFNetHead
+from .yolact_head import YOLACTHead, YOLACTProtonet, YOLACTSegmHead
+from .yolo_head import YOLOV3Head
+
+__all__ = [
+ 'AnchorFreeHead', 'AnchorHead', 'GuidedAnchorHead', 'FeatureAdaption',
+ 'RPNHead', 'GARPNHead', 'RetinaHead', 'RetinaSepBNHead', 'GARetinaHead',
+ 'SSDHead', 'FCOSHead', 'RepPointsHead', 'FoveaHead',
+ 'FreeAnchorRetinaHead', 'ATSSHead', 'FSAFHead', 'NASFCOSHead',
+ 'PISARetinaHead', 'PISASSDHead', 'GFLHead', 'CornerHead', 'YOLACTHead',
+ 'YOLACTSegmHead', 'YOLACTProtonet', 'YOLOV3Head', 'PAAHead',
+ 'SABLRetinaHead', 'CentripetalHead', 'VFNetHead', 'TransformerHead',
+ 'StageCascadeRPNHead', 'CascadeRPNHead', 'EmbeddingRPNHead', 'LDHead'
+]
diff --git a/annotator/uniformer/mmdet_null/models/dense_heads/anchor_free_head.py b/annotator/uniformer/mmdet_null/models/dense_heads/anchor_free_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..1814a0cc4f577f470f74f025440073a0aaa1ebd0
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/dense_heads/anchor_free_head.py
@@ -0,0 +1,340 @@
+from abc import abstractmethod
+
+import torch
+import torch.nn as nn
+from mmcv.cnn import ConvModule, bias_init_with_prob, normal_init
+from mmcv.runner import force_fp32
+
+from mmdet.core import multi_apply
+from ..builder import HEADS, build_loss
+from .base_dense_head import BaseDenseHead
+from .dense_test_mixins import BBoxTestMixin
+
+
+@HEADS.register_module()
+class AnchorFreeHead(BaseDenseHead, BBoxTestMixin):
+ """Anchor-free head (FCOS, Fovea, RepPoints, etc.).
+
+ Args:
+ num_classes (int): Number of categories excluding the background
+ category.
+ in_channels (int): Number of channels in the input feature map.
+ feat_channels (int): Number of hidden channels. Used in child classes.
+ stacked_convs (int): Number of stacking convs of the head.
+ strides (tuple): Downsample factor of each feature map.
+ dcn_on_last_conv (bool): If true, use dcn in the last layer of
+ towers. Default: False.
+ conv_bias (bool | str): If specified as `auto`, it will be decided by
+ the norm_cfg. Bias of conv will be set as True if `norm_cfg` is
+ None, otherwise False. Default: "auto".
+ loss_cls (dict): Config of classification loss.
+ loss_bbox (dict): Config of localization loss.
+ conv_cfg (dict): Config dict for convolution layer. Default: None.
+ norm_cfg (dict): Config dict for normalization layer. Default: None.
+ train_cfg (dict): Training config of anchor head.
+ test_cfg (dict): Testing config of anchor head.
+ """ # noqa: W605
+
+ _version = 1
+
+ def __init__(self,
+ num_classes,
+ in_channels,
+ feat_channels=256,
+ stacked_convs=4,
+ strides=(4, 8, 16, 32, 64),
+ dcn_on_last_conv=False,
+ conv_bias='auto',
+ loss_cls=dict(
+ type='FocalLoss',
+ use_sigmoid=True,
+ gamma=2.0,
+ alpha=0.25,
+ loss_weight=1.0),
+ loss_bbox=dict(type='IoULoss', loss_weight=1.0),
+ conv_cfg=None,
+ norm_cfg=None,
+ train_cfg=None,
+ test_cfg=None):
+ super(AnchorFreeHead, self).__init__()
+ self.num_classes = num_classes
+ self.cls_out_channels = num_classes
+ self.in_channels = in_channels
+ self.feat_channels = feat_channels
+ self.stacked_convs = stacked_convs
+ self.strides = strides
+ self.dcn_on_last_conv = dcn_on_last_conv
+ assert conv_bias == 'auto' or isinstance(conv_bias, bool)
+ self.conv_bias = conv_bias
+ self.loss_cls = build_loss(loss_cls)
+ self.loss_bbox = build_loss(loss_bbox)
+ self.train_cfg = train_cfg
+ self.test_cfg = test_cfg
+ self.conv_cfg = conv_cfg
+ self.norm_cfg = norm_cfg
+ self.fp16_enabled = False
+
+ self._init_layers()
+
+ def _init_layers(self):
+ """Initialize layers of the head."""
+ self._init_cls_convs()
+ self._init_reg_convs()
+ self._init_predictor()
+
+ def _init_cls_convs(self):
+ """Initialize classification conv layers of the head."""
+ self.cls_convs = nn.ModuleList()
+ for i in range(self.stacked_convs):
+ chn = self.in_channels if i == 0 else self.feat_channels
+ if self.dcn_on_last_conv and i == self.stacked_convs - 1:
+ conv_cfg = dict(type='DCNv2')
+ else:
+ conv_cfg = self.conv_cfg
+ self.cls_convs.append(
+ ConvModule(
+ chn,
+ self.feat_channels,
+ 3,
+ stride=1,
+ padding=1,
+ conv_cfg=conv_cfg,
+ norm_cfg=self.norm_cfg,
+ bias=self.conv_bias))
+
+ def _init_reg_convs(self):
+ """Initialize bbox regression conv layers of the head."""
+ self.reg_convs = nn.ModuleList()
+ for i in range(self.stacked_convs):
+ chn = self.in_channels if i == 0 else self.feat_channels
+ if self.dcn_on_last_conv and i == self.stacked_convs - 1:
+ conv_cfg = dict(type='DCNv2')
+ else:
+ conv_cfg = self.conv_cfg
+ self.reg_convs.append(
+ ConvModule(
+ chn,
+ self.feat_channels,
+ 3,
+ stride=1,
+ padding=1,
+ conv_cfg=conv_cfg,
+ norm_cfg=self.norm_cfg,
+ bias=self.conv_bias))
+
+ def _init_predictor(self):
+ """Initialize predictor layers of the head."""
+ self.conv_cls = nn.Conv2d(
+ self.feat_channels, self.cls_out_channels, 3, padding=1)
+ self.conv_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1)
+
+ def init_weights(self):
+ """Initialize weights of the head."""
+ for m in self.cls_convs:
+ if isinstance(m.conv, nn.Conv2d):
+ normal_init(m.conv, std=0.01)
+ for m in self.reg_convs:
+ if isinstance(m.conv, nn.Conv2d):
+ normal_init(m.conv, std=0.01)
+ bias_cls = bias_init_with_prob(0.01)
+ normal_init(self.conv_cls, std=0.01, bias=bias_cls)
+ normal_init(self.conv_reg, std=0.01)
+
+ def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
+ missing_keys, unexpected_keys, error_msgs):
+ """Hack some keys of the model state dict so that can load checkpoints
+ of previous version."""
+ version = local_metadata.get('version', None)
+ if version is None:
+ # the key is different in early versions
+ # for example, 'fcos_cls' become 'conv_cls' now
+ bbox_head_keys = [
+ k for k in state_dict.keys() if k.startswith(prefix)
+ ]
+ ori_predictor_keys = []
+ new_predictor_keys = []
+ # e.g. 'fcos_cls' or 'fcos_reg'
+ for key in bbox_head_keys:
+ ori_predictor_keys.append(key)
+ key = key.split('.')
+ conv_name = None
+ if key[1].endswith('cls'):
+ conv_name = 'conv_cls'
+ elif key[1].endswith('reg'):
+ conv_name = 'conv_reg'
+ elif key[1].endswith('centerness'):
+ conv_name = 'conv_centerness'
+ else:
+ assert NotImplementedError
+ if conv_name is not None:
+ key[1] = conv_name
+ new_predictor_keys.append('.'.join(key))
+ else:
+ ori_predictor_keys.pop(-1)
+ for i in range(len(new_predictor_keys)):
+ state_dict[new_predictor_keys[i]] = state_dict.pop(
+ ori_predictor_keys[i])
+ super()._load_from_state_dict(state_dict, prefix, local_metadata,
+ strict, missing_keys, unexpected_keys,
+ error_msgs)
+
+ def forward(self, feats):
+ """Forward features from the upstream network.
+
+ Args:
+ feats (tuple[Tensor]): Features from the upstream network, each is
+ a 4D-tensor.
+
+            tuple: Usually contains classification scores and bbox predictions.
+ tuple: Usually contain classification scores and bbox predictions.
+ cls_scores (list[Tensor]): Box scores for each scale level,
+ each is a 4D-tensor, the channel number is
+ num_points * num_classes.
+ bbox_preds (list[Tensor]): Box energies / deltas for each scale
+ level, each is a 4D-tensor, the channel number is
+ num_points * 4.
+ """
+ return multi_apply(self.forward_single, feats)[:2]
+
+ def forward_single(self, x):
+ """Forward features of a single scale level.
+
+ Args:
+ x (Tensor): FPN feature maps of the specified stride.
+
+ Returns:
+            tuple: Scores for each class, bbox predictions, and the features
+                after the classification and regression conv layers; some
+                models, such as FCOS, need these features.
+ """
+ cls_feat = x
+ reg_feat = x
+
+ for cls_layer in self.cls_convs:
+ cls_feat = cls_layer(cls_feat)
+ cls_score = self.conv_cls(cls_feat)
+
+ for reg_layer in self.reg_convs:
+ reg_feat = reg_layer(reg_feat)
+ bbox_pred = self.conv_reg(reg_feat)
+ return cls_score, bbox_pred, cls_feat, reg_feat
+
+ @abstractmethod
+ @force_fp32(apply_to=('cls_scores', 'bbox_preds'))
+ def loss(self,
+ cls_scores,
+ bbox_preds,
+ gt_bboxes,
+ gt_labels,
+ img_metas,
+ gt_bboxes_ignore=None):
+ """Compute loss of the head.
+
+ Args:
+ cls_scores (list[Tensor]): Box scores for each scale level,
+ each is a 4D-tensor, the channel number is
+ num_points * num_classes.
+ bbox_preds (list[Tensor]): Box energies / deltas for each scale
+ level, each is a 4D-tensor, the channel number is
+ num_points * 4.
+ gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
+ shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
+ gt_labels (list[Tensor]): class indices corresponding to each box
+ img_metas (list[dict]): Meta information of each image, e.g.,
+ image size, scaling factor, etc.
+ gt_bboxes_ignore (None | list[Tensor]): specify which bounding
+ boxes can be ignored when computing the loss.
+ """
+
+ raise NotImplementedError
+
+ @abstractmethod
+ @force_fp32(apply_to=('cls_scores', 'bbox_preds'))
+ def get_bboxes(self,
+ cls_scores,
+ bbox_preds,
+ img_metas,
+ cfg=None,
+ rescale=None):
+ """Transform network output for a batch into bbox predictions.
+
+ Args:
+ cls_scores (list[Tensor]): Box scores for each scale level
+ Has shape (N, num_points * num_classes, H, W)
+ bbox_preds (list[Tensor]): Box energies / deltas for each scale
+ level with shape (N, num_points * 4, H, W)
+ img_metas (list[dict]): Meta information of each image, e.g.,
+ image size, scaling factor, etc.
+ cfg (mmcv.Config): Test / postprocessing configuration,
+ if None, test_cfg would be used
+ rescale (bool): If True, return boxes in original image space
+ """
+
+ raise NotImplementedError
+
+ @abstractmethod
+ def get_targets(self, points, gt_bboxes_list, gt_labels_list):
+ """Compute regression, classification and centerness targets for points
+ in multiple images.
+
+ Args:
+ points (list[Tensor]): Points of each fpn level, each has shape
+ (num_points, 2).
+ gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image,
+ each has shape (num_gt, 4).
+ gt_labels_list (list[Tensor]): Ground truth labels of each box,
+ each has shape (num_gt,).
+ """
+ raise NotImplementedError
+
+ def _get_points_single(self,
+ featmap_size,
+ stride,
+ dtype,
+ device,
+ flatten=False):
+ """Get points of a single scale level."""
+ h, w = featmap_size
+ x_range = torch.arange(w, dtype=dtype, device=device)
+ y_range = torch.arange(h, dtype=dtype, device=device)
+ y, x = torch.meshgrid(y_range, x_range)
+ if flatten:
+ y = y.flatten()
+ x = x.flatten()
+ return y, x
+
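+    # Note (illustrative, not part of upstream mmdet): the base implementation
+    # of `_get_points_single` above ignores `stride` and returns raw grid
+    # indices; subclasses (e.g. FCOSHead) typically rescale these points by
+    # the stride of the corresponding feature level.
+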
+ def get_points(self, featmap_sizes, dtype, device, flatten=False):
+ """Get points according to feature map sizes.
+
+ Args:
+ featmap_sizes (list[tuple]): Multi-level feature map sizes.
+ dtype (torch.dtype): Type of points.
+ device (torch.device): Device of points.
+
+ Returns:
+            list: Points of each feature map level.
+ """
+ mlvl_points = []
+ for i in range(len(featmap_sizes)):
+ mlvl_points.append(
+ self._get_points_single(featmap_sizes[i], self.strides[i],
+ dtype, device, flatten))
+ return mlvl_points
+
+ def aug_test(self, feats, img_metas, rescale=False):
+ """Test function with test time augmentation.
+
+ Args:
+ feats (list[Tensor]): the outer list indicates test-time
+ augmentations and inner Tensor should have a shape NxCxHxW,
+ which contains features for all images in the batch.
+ img_metas (list[list[dict]]): the outer list indicates test-time
+ augs (multiscale, flip, etc.) and the inner list indicates
+                images in a batch. Each dict has image information.
+ rescale (bool, optional): Whether to rescale the results.
+ Defaults to False.
+
+ Returns:
+ list[ndarray]: bbox results of each class
+ """
+ return self.aug_test_bboxes(feats, img_metas, rescale=rescale)
diff --git a/annotator/uniformer/mmdet_null/models/dense_heads/anchor_head.py b/annotator/uniformer/mmdet_null/models/dense_heads/anchor_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..eea73520572725f547216ab639c1ebbdfb50834c
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/dense_heads/anchor_head.py
@@ -0,0 +1,751 @@
+import torch
+import torch.nn as nn
+from mmcv.cnn import normal_init
+from mmcv.runner import force_fp32
+
+from mmdet.core import (anchor_inside_flags, build_anchor_generator,
+ build_assigner, build_bbox_coder, build_sampler,
+ images_to_levels, multi_apply, multiclass_nms, unmap)
+from ..builder import HEADS, build_loss
+from .base_dense_head import BaseDenseHead
+from .dense_test_mixins import BBoxTestMixin
+
+
+@HEADS.register_module()
+class AnchorHead(BaseDenseHead, BBoxTestMixin):
+ """Anchor-based head (RPN, RetinaNet, SSD, etc.).
+
+ Args:
+ num_classes (int): Number of categories excluding the background
+ category.
+ in_channels (int): Number of channels in the input feature map.
+ feat_channels (int): Number of hidden channels. Used in child classes.
+ anchor_generator (dict): Config dict for anchor generator
+ bbox_coder (dict): Config of bounding box coder.
+ reg_decoded_bbox (bool): If true, the regression loss would be
+ applied directly on decoded bounding boxes, converting both
+ the predicted boxes and regression targets to absolute
+ coordinates format. Default False. It should be `True` when
+ using `IoULoss`, `GIoULoss`, or `DIoULoss` in the bbox head.
+ loss_cls (dict): Config of classification loss.
+ loss_bbox (dict): Config of localization loss.
+ train_cfg (dict): Training config of anchor head.
+ test_cfg (dict): Testing config of anchor head.
+ """ # noqa: W605
+
+ def __init__(self,
+ num_classes,
+ in_channels,
+ feat_channels=256,
+ anchor_generator=dict(
+ type='AnchorGenerator',
+ scales=[8, 16, 32],
+ ratios=[0.5, 1.0, 2.0],
+ strides=[4, 8, 16, 32, 64]),
+ bbox_coder=dict(
+ type='DeltaXYWHBBoxCoder',
+ clip_border=True,
+ target_means=(.0, .0, .0, .0),
+ target_stds=(1.0, 1.0, 1.0, 1.0)),
+ reg_decoded_bbox=False,
+ loss_cls=dict(
+ type='CrossEntropyLoss',
+ use_sigmoid=True,
+ loss_weight=1.0),
+ loss_bbox=dict(
+ type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0),
+ train_cfg=None,
+ test_cfg=None):
+ super(AnchorHead, self).__init__()
+ self.in_channels = in_channels
+ self.num_classes = num_classes
+ self.feat_channels = feat_channels
+ self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False)
+ # TODO better way to determine whether sample or not
+ self.sampling = loss_cls['type'] not in [
+ 'FocalLoss', 'GHMC', 'QualityFocalLoss'
+ ]
+ if self.use_sigmoid_cls:
+ self.cls_out_channels = num_classes
+ else:
+ self.cls_out_channels = num_classes + 1
+
+ if self.cls_out_channels <= 0:
+ raise ValueError(f'num_classes={num_classes} is too small')
+ self.reg_decoded_bbox = reg_decoded_bbox
+
+ self.bbox_coder = build_bbox_coder(bbox_coder)
+ self.loss_cls = build_loss(loss_cls)
+ self.loss_bbox = build_loss(loss_bbox)
+ self.train_cfg = train_cfg
+ self.test_cfg = test_cfg
+ if self.train_cfg:
+ self.assigner = build_assigner(self.train_cfg.assigner)
+ # use PseudoSampler when sampling is False
+ if self.sampling and hasattr(self.train_cfg, 'sampler'):
+ sampler_cfg = self.train_cfg.sampler
+ else:
+ sampler_cfg = dict(type='PseudoSampler')
+ self.sampler = build_sampler(sampler_cfg, context=self)
+ self.fp16_enabled = False
+
+ self.anchor_generator = build_anchor_generator(anchor_generator)
+ # usually the numbers of anchors for each level are the same
+ # except SSD detectors
+ self.num_anchors = self.anchor_generator.num_base_anchors[0]
+ self._init_layers()
+
+ def _init_layers(self):
+ """Initialize layers of the head."""
+ self.conv_cls = nn.Conv2d(self.in_channels,
+ self.num_anchors * self.cls_out_channels, 1)
+ self.conv_reg = nn.Conv2d(self.in_channels, self.num_anchors * 4, 1)
+
+ def init_weights(self):
+ """Initialize weights of the head."""
+ normal_init(self.conv_cls, std=0.01)
+ normal_init(self.conv_reg, std=0.01)
+
+ def forward_single(self, x):
+ """Forward feature of a single scale level.
+
+ Args:
+ x (Tensor): Features of a single scale level.
+
+ Returns:
+ tuple:
+ cls_score (Tensor): Cls scores for a single scale level \
+ the channels number is num_anchors * num_classes.
+ bbox_pred (Tensor): Box energies / deltas for a single scale \
+ level, the channels number is num_anchors * 4.
+ """
+ cls_score = self.conv_cls(x)
+ bbox_pred = self.conv_reg(x)
+ return cls_score, bbox_pred
+
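+    # Shape note on `forward_single` above (illustrative, not part of upstream
+    # mmdet): with the default anchor generator (3 scales x 3 ratios = 9 anchors
+    # per location) and, e.g., 80 classes with sigmoid classification, cls_score
+    # has 9 * 80 = 720 channels and bbox_pred has 9 * 4 = 36 channels.
+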
+ def forward(self, feats):
+ """Forward features from the upstream network.
+
+ Args:
+ feats (tuple[Tensor]): Features from the upstream network, each is
+ a 4D-tensor.
+
+ Returns:
+ tuple: A tuple of classification scores and bbox prediction.
+
+ - cls_scores (list[Tensor]): Classification scores for all \
+ scale levels, each is a 4D-tensor, the channels number \
+ is num_anchors * num_classes.
+ - bbox_preds (list[Tensor]): Box energies / deltas for all \
+ scale levels, each is a 4D-tensor, the channels number \
+ is num_anchors * 4.
+ """
+ return multi_apply(self.forward_single, feats)
+
+ def get_anchors(self, featmap_sizes, img_metas, device='cuda'):
+ """Get anchors according to feature map sizes.
+
+ Args:
+ featmap_sizes (list[tuple]): Multi-level feature map sizes.
+ img_metas (list[dict]): Image meta info.
+ device (torch.device | str): Device for returned tensors
+
+ Returns:
+ tuple:
+ anchor_list (list[Tensor]): Anchors of each image.
+ valid_flag_list (list[Tensor]): Valid flags of each image.
+ """
+ num_imgs = len(img_metas)
+
+ # since feature map sizes of all images are the same, we only compute
+ # anchors for one time
+ multi_level_anchors = self.anchor_generator.grid_anchors(
+ featmap_sizes, device)
+ anchor_list = [multi_level_anchors for _ in range(num_imgs)]
+
+ # for each image, we compute valid flags of multi level anchors
+ valid_flag_list = []
+ for img_id, img_meta in enumerate(img_metas):
+ multi_level_flags = self.anchor_generator.valid_flags(
+ featmap_sizes, img_meta['pad_shape'], device)
+ valid_flag_list.append(multi_level_flags)
+
+ return anchor_list, valid_flag_list
+
+ def _get_targets_single(self,
+ flat_anchors,
+ valid_flags,
+ gt_bboxes,
+ gt_bboxes_ignore,
+ gt_labels,
+ img_meta,
+ label_channels=1,
+ unmap_outputs=True):
+ """Compute regression and classification targets for anchors in a
+ single image.
+
+ Args:
+ flat_anchors (Tensor): Multi-level anchors of the image, which are
+ concatenated into a single tensor of shape (num_anchors ,4)
+ valid_flags (Tensor): Multi level valid flags of the image,
+ which are concatenated into a single tensor of
+ shape (num_anchors,).
+ gt_bboxes (Tensor): Ground truth bboxes of the image,
+ shape (num_gts, 4).
+ gt_bboxes_ignore (Tensor): Ground truth bboxes to be
+ ignored, shape (num_ignored_gts, 4).
+ img_meta (dict): Meta info of the image.
+ gt_labels (Tensor): Ground truth labels of each box,
+ shape (num_gts,).
+ label_channels (int): Channel of label.
+ unmap_outputs (bool): Whether to map outputs back to the original
+ set of anchors.
+
+ Returns:
+            tuple:
+                labels (Tensor): Labels of all anchors in the image.
+                label_weights (Tensor): Label weights of all anchors.
+                bbox_targets (Tensor): BBox regression targets of all anchors.
+                bbox_weights (Tensor): BBox regression weights of all anchors.
+                pos_inds (Tensor): Indices of positive anchors.
+                neg_inds (Tensor): Indices of negative anchors.
+                sampling_result (:obj:`SamplingResult`): Sampling result.
+ """
+ inside_flags = anchor_inside_flags(flat_anchors, valid_flags,
+ img_meta['img_shape'][:2],
+ self.train_cfg.allowed_border)
+ if not inside_flags.any():
+ return (None, ) * 7
+ # assign gt and sample anchors
+ anchors = flat_anchors[inside_flags, :]
+
+ assign_result = self.assigner.assign(
+ anchors, gt_bboxes, gt_bboxes_ignore,
+ None if self.sampling else gt_labels)
+ sampling_result = self.sampler.sample(assign_result, anchors,
+ gt_bboxes)
+
+ num_valid_anchors = anchors.shape[0]
+ bbox_targets = torch.zeros_like(anchors)
+ bbox_weights = torch.zeros_like(anchors)
+ labels = anchors.new_full((num_valid_anchors, ),
+ self.num_classes,
+ dtype=torch.long)
+ label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float)
+
+ pos_inds = sampling_result.pos_inds
+ neg_inds = sampling_result.neg_inds
+ if len(pos_inds) > 0:
+ if not self.reg_decoded_bbox:
+ pos_bbox_targets = self.bbox_coder.encode(
+ sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes)
+ else:
+ pos_bbox_targets = sampling_result.pos_gt_bboxes
+ bbox_targets[pos_inds, :] = pos_bbox_targets
+ bbox_weights[pos_inds, :] = 1.0
+ if gt_labels is None:
+ # Only rpn gives gt_labels as None
+ # Foreground is the first class since v2.5.0
+ labels[pos_inds] = 0
+ else:
+ labels[pos_inds] = gt_labels[
+ sampling_result.pos_assigned_gt_inds]
+ if self.train_cfg.pos_weight <= 0:
+ label_weights[pos_inds] = 1.0
+ else:
+ label_weights[pos_inds] = self.train_cfg.pos_weight
+ if len(neg_inds) > 0:
+ label_weights[neg_inds] = 1.0
+
+ # map up to original set of anchors
+ if unmap_outputs:
+ num_total_anchors = flat_anchors.size(0)
+ labels = unmap(
+ labels, num_total_anchors, inside_flags,
+ fill=self.num_classes) # fill bg label
+ label_weights = unmap(label_weights, num_total_anchors,
+ inside_flags)
+ bbox_targets = unmap(bbox_targets, num_total_anchors, inside_flags)
+ bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags)
+
+ return (labels, label_weights, bbox_targets, bbox_weights, pos_inds,
+ neg_inds, sampling_result)
+
+ def get_targets(self,
+ anchor_list,
+ valid_flag_list,
+ gt_bboxes_list,
+ img_metas,
+ gt_bboxes_ignore_list=None,
+ gt_labels_list=None,
+ label_channels=1,
+ unmap_outputs=True,
+ return_sampling_results=False):
+ """Compute regression and classification targets for anchors in
+ multiple images.
+
+ Args:
+ anchor_list (list[list[Tensor]]): Multi level anchors of each
+ image. The outer list indicates images, and the inner list
+ corresponds to feature levels of the image. Each element of
+ the inner list is a tensor of shape (num_anchors, 4).
+ valid_flag_list (list[list[Tensor]]): Multi level valid flags of
+ each image. The outer list indicates images, and the inner list
+ corresponds to feature levels of the image. Each element of
+ the inner list is a tensor of shape (num_anchors, )
+ gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image.
+ img_metas (list[dict]): Meta info of each image.
+ gt_bboxes_ignore_list (list[Tensor]): Ground truth bboxes to be
+ ignored.
+ gt_labels_list (list[Tensor]): Ground truth labels of each box.
+ label_channels (int): Channel of label.
+ unmap_outputs (bool): Whether to map outputs back to the original
+ set of anchors.
+
+ Returns:
+ tuple: Usually returns a tuple containing learning targets.
+
+ - labels_list (list[Tensor]): Labels of each level.
+ - label_weights_list (list[Tensor]): Label weights of each \
+ level.
+ - bbox_targets_list (list[Tensor]): BBox targets of each level.
+ - bbox_weights_list (list[Tensor]): BBox weights of each level.
+ - num_total_pos (int): Number of positive samples in all \
+ images.
+ - num_total_neg (int): Number of negative samples in all \
+ images.
+            additional_returns: This function enables user-defined returns from
+                `self._get_targets_single`. These returns are currently mapped
+                back to per-level properties of each feature map (i.e. having
+                an HxW dimension) and are concatenated at the end.
+ """
+ num_imgs = len(img_metas)
+ assert len(anchor_list) == len(valid_flag_list) == num_imgs
+
+ # anchor number of multi levels
+ num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]
+ # concat all level anchors to a single tensor
+ concat_anchor_list = []
+ concat_valid_flag_list = []
+ for i in range(num_imgs):
+ assert len(anchor_list[i]) == len(valid_flag_list[i])
+ concat_anchor_list.append(torch.cat(anchor_list[i]))
+ concat_valid_flag_list.append(torch.cat(valid_flag_list[i]))
+
+ # compute targets for each image
+ if gt_bboxes_ignore_list is None:
+ gt_bboxes_ignore_list = [None for _ in range(num_imgs)]
+ if gt_labels_list is None:
+ gt_labels_list = [None for _ in range(num_imgs)]
+ results = multi_apply(
+ self._get_targets_single,
+ concat_anchor_list,
+ concat_valid_flag_list,
+ gt_bboxes_list,
+ gt_bboxes_ignore_list,
+ gt_labels_list,
+ img_metas,
+ label_channels=label_channels,
+ unmap_outputs=unmap_outputs)
+ (all_labels, all_label_weights, all_bbox_targets, all_bbox_weights,
+ pos_inds_list, neg_inds_list, sampling_results_list) = results[:7]
+ rest_results = list(results[7:]) # user-added return values
+ # no valid anchors
+ if any([labels is None for labels in all_labels]):
+ return None
+ # sampled anchors of all images
+ num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list])
+ num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list])
+ # split targets to a list w.r.t. multiple levels
+ labels_list = images_to_levels(all_labels, num_level_anchors)
+ label_weights_list = images_to_levels(all_label_weights,
+ num_level_anchors)
+ bbox_targets_list = images_to_levels(all_bbox_targets,
+ num_level_anchors)
+ bbox_weights_list = images_to_levels(all_bbox_weights,
+ num_level_anchors)
+ res = (labels_list, label_weights_list, bbox_targets_list,
+ bbox_weights_list, num_total_pos, num_total_neg)
+ if return_sampling_results:
+ res = res + (sampling_results_list, )
+ for i, r in enumerate(rest_results): # user-added return values
+ rest_results[i] = images_to_levels(r, num_level_anchors)
+
+ return res + tuple(rest_results)
+
+ def loss_single(self, cls_score, bbox_pred, anchors, labels, label_weights,
+ bbox_targets, bbox_weights, num_total_samples):
+ """Compute loss of a single scale level.
+
+ Args:
+ cls_score (Tensor): Box scores for each scale level
+ Has shape (N, num_anchors * num_classes, H, W).
+ bbox_pred (Tensor): Box energies / deltas for each scale
+ level with shape (N, num_anchors * 4, H, W).
+ anchors (Tensor): Box reference for each scale level with shape
+ (N, num_total_anchors, 4).
+ labels (Tensor): Labels of each anchors with shape
+ (N, num_total_anchors).
+ label_weights (Tensor): Label weights of each anchor with shape
+ (N, num_total_anchors)
+            bbox_targets (Tensor): BBox regression targets of each anchor with
+ shape (N, num_total_anchors, 4).
+ bbox_weights (Tensor): BBox regression loss weights of each anchor
+ with shape (N, num_total_anchors, 4).
+            num_total_samples (int): If sampling, the number of total samples
+                equals the total number of anchors; otherwise, it is the number
+                of positive anchors.
+
+ Returns:
+ dict[str, Tensor]: A dictionary of loss components.
+ """
+ # classification loss
+ labels = labels.reshape(-1)
+ label_weights = label_weights.reshape(-1)
+ cls_score = cls_score.permute(0, 2, 3,
+ 1).reshape(-1, self.cls_out_channels)
+ loss_cls = self.loss_cls(
+ cls_score, labels, label_weights, avg_factor=num_total_samples)
+ # regression loss
+ bbox_targets = bbox_targets.reshape(-1, 4)
+ bbox_weights = bbox_weights.reshape(-1, 4)
+ bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4)
+ if self.reg_decoded_bbox:
+ # When the regression loss (e.g. `IouLoss`, `GIouLoss`)
+ # is applied directly on the decoded bounding boxes, it
+ # decodes the already encoded coordinates to absolute format.
+ anchors = anchors.reshape(-1, 4)
+ bbox_pred = self.bbox_coder.decode(anchors, bbox_pred)
+ loss_bbox = self.loss_bbox(
+ bbox_pred,
+ bbox_targets,
+ bbox_weights,
+ avg_factor=num_total_samples)
+ return loss_cls, loss_bbox
+
+ @force_fp32(apply_to=('cls_scores', 'bbox_preds'))
+ def loss(self,
+ cls_scores,
+ bbox_preds,
+ gt_bboxes,
+ gt_labels,
+ img_metas,
+ gt_bboxes_ignore=None):
+ """Compute losses of the head.
+
+ Args:
+ cls_scores (list[Tensor]): Box scores for each scale level
+ Has shape (N, num_anchors * num_classes, H, W)
+ bbox_preds (list[Tensor]): Box energies / deltas for each scale
+ level with shape (N, num_anchors * 4, H, W)
+ gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
+ shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
+ gt_labels (list[Tensor]): class indices corresponding to each box
+ img_metas (list[dict]): Meta information of each image, e.g.,
+ image size, scaling factor, etc.
+ gt_bboxes_ignore (None | list[Tensor]): specify which bounding
+ boxes can be ignored when computing the loss. Default: None
+
+ Returns:
+ dict[str, Tensor]: A dictionary of loss components.
+ """
+ featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
+ assert len(featmap_sizes) == self.anchor_generator.num_levels
+
+ device = cls_scores[0].device
+
+ anchor_list, valid_flag_list = self.get_anchors(
+ featmap_sizes, img_metas, device=device)
+ label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1
+ cls_reg_targets = self.get_targets(
+ anchor_list,
+ valid_flag_list,
+ gt_bboxes,
+ img_metas,
+ gt_bboxes_ignore_list=gt_bboxes_ignore,
+ gt_labels_list=gt_labels,
+ label_channels=label_channels)
+ if cls_reg_targets is None:
+ return None
+ (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,
+ num_total_pos, num_total_neg) = cls_reg_targets
+ num_total_samples = (
+ num_total_pos + num_total_neg if self.sampling else num_total_pos)
+
+ # anchor number of multi levels
+ num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]
+ # concat all level anchors and flags to a single tensor
+ concat_anchor_list = []
+ for i in range(len(anchor_list)):
+ concat_anchor_list.append(torch.cat(anchor_list[i]))
+ all_anchor_list = images_to_levels(concat_anchor_list,
+ num_level_anchors)
+
+ losses_cls, losses_bbox = multi_apply(
+ self.loss_single,
+ cls_scores,
+ bbox_preds,
+ all_anchor_list,
+ labels_list,
+ label_weights_list,
+ bbox_targets_list,
+ bbox_weights_list,
+ num_total_samples=num_total_samples)
+ return dict(loss_cls=losses_cls, loss_bbox=losses_bbox)
+
+ @force_fp32(apply_to=('cls_scores', 'bbox_preds'))
+ def get_bboxes(self,
+ cls_scores,
+ bbox_preds,
+ img_metas,
+ cfg=None,
+ rescale=False,
+ with_nms=True):
+ """Transform network output for a batch into bbox predictions.
+
+ Args:
+ cls_scores (list[Tensor]): Box scores for each level in the
+ feature pyramid, has shape
+ (N, num_anchors * num_classes, H, W).
+ bbox_preds (list[Tensor]): Box energies / deltas for each
+ level in the feature pyramid, has shape
+ (N, num_anchors * 4, H, W).
+ img_metas (list[dict]): Meta information of each image, e.g.,
+ image size, scaling factor, etc.
+ cfg (mmcv.Config | None): Test / postprocessing configuration,
+ if None, test_cfg would be used
+ rescale (bool): If True, return boxes in original image space.
+ Default: False.
+ with_nms (bool): If True, do nms before return boxes.
+ Default: True.
+
+ Returns:
+ list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple.
+ The first item is an (n, 5) tensor, where 5 represent
+ (tl_x, tl_y, br_x, br_y, score) and the score between 0 and 1.
+ The shape of the second tensor in the tuple is (n,), and
+ each element represents the class label of the corresponding
+ box.
+
+ Example:
+ >>> import mmcv
+ >>> self = AnchorHead(
+ >>> num_classes=9,
+ >>> in_channels=1,
+ >>> anchor_generator=dict(
+ >>> type='AnchorGenerator',
+ >>> scales=[8],
+ >>> ratios=[0.5, 1.0, 2.0],
+ >>> strides=[4,]))
+ >>> img_metas = [{'img_shape': (32, 32, 3), 'scale_factor': 1}]
+ >>> cfg = mmcv.Config(dict(
+ >>> score_thr=0.00,
+ >>> nms=dict(type='nms', iou_thr=1.0),
+ >>> max_per_img=10))
+ >>> feat = torch.rand(1, 1, 3, 3)
+ >>> cls_score, bbox_pred = self.forward_single(feat)
+ >>> # note the input lists are over different levels, not images
+ >>> cls_scores, bbox_preds = [cls_score], [bbox_pred]
+ >>> result_list = self.get_bboxes(cls_scores, bbox_preds,
+ >>> img_metas, cfg)
+ >>> det_bboxes, det_labels = result_list[0]
+ >>> assert len(result_list) == 1
+ >>> assert det_bboxes.shape[1] == 5
+ >>> assert len(det_bboxes) == len(det_labels) == cfg.max_per_img
+ """
+ assert len(cls_scores) == len(bbox_preds)
+ num_levels = len(cls_scores)
+
+ device = cls_scores[0].device
+ featmap_sizes = [cls_scores[i].shape[-2:] for i in range(num_levels)]
+ mlvl_anchors = self.anchor_generator.grid_anchors(
+ featmap_sizes, device=device)
+
+ mlvl_cls_scores = [cls_scores[i].detach() for i in range(num_levels)]
+ mlvl_bbox_preds = [bbox_preds[i].detach() for i in range(num_levels)]
+
+ if torch.onnx.is_in_onnx_export():
+ assert len(
+ img_metas
+            ) == 1, 'Only one input image is supported when exporting to ONNX'
+ img_shapes = img_metas[0]['img_shape_for_onnx']
+ else:
+ img_shapes = [
+ img_metas[i]['img_shape']
+ for i in range(cls_scores[0].shape[0])
+ ]
+ scale_factors = [
+ img_metas[i]['scale_factor'] for i in range(cls_scores[0].shape[0])
+ ]
+
+ if with_nms:
+ # some heads don't support with_nms argument
+ result_list = self._get_bboxes(mlvl_cls_scores, mlvl_bbox_preds,
+ mlvl_anchors, img_shapes,
+ scale_factors, cfg, rescale)
+ else:
+ result_list = self._get_bboxes(mlvl_cls_scores, mlvl_bbox_preds,
+ mlvl_anchors, img_shapes,
+ scale_factors, cfg, rescale,
+ with_nms)
+ return result_list
+
+ def _get_bboxes(self,
+ mlvl_cls_scores,
+ mlvl_bbox_preds,
+ mlvl_anchors,
+ img_shapes,
+ scale_factors,
+ cfg,
+ rescale=False,
+ with_nms=True):
+ """Transform outputs for a batch item into bbox predictions.
+
+ Args:
+ mlvl_cls_scores (list[Tensor]): Each element in the list is
+ the scores of bboxes of single level in the feature pyramid,
+ has shape (N, num_anchors * num_classes, H, W).
+ mlvl_bbox_preds (list[Tensor]): Each element in the list is the
+ bboxes predictions of single level in the feature pyramid,
+ has shape (N, num_anchors * 4, H, W).
+ mlvl_anchors (list[Tensor]): Each element in the list is
+ the anchors of single level in feature pyramid, has shape
+ (num_anchors, 4).
+            img_shapes (list[tuple[int]]): Each tuple in the list represents
+                the shape (height, width, 3) of a single image in the batch.
+            scale_factors (list[ndarray]): Scale factors of the images in the
+                batch, arranged as list[(w_scale, h_scale, w_scale, h_scale)].
+ cfg (mmcv.Config): Test / postprocessing configuration,
+ if None, test_cfg would be used.
+ rescale (bool): If True, return boxes in original image space.
+ Default: False.
+ with_nms (bool): If True, do nms before return boxes.
+ Default: True.
+
+ Returns:
+            list[tuple[Tensor, Tensor]]: Each item in result_list is a
+                2-tuple. The first item is an (n, 5) tensor, where the 5
+                columns are (tl_x, tl_y, br_x, br_y, score) and the score is
+                between 0 and 1. The second tensor has shape (n,), and each
+                element is the class label of the corresponding box.
+ """
+ cfg = self.test_cfg if cfg is None else cfg
+ assert len(mlvl_cls_scores) == len(mlvl_bbox_preds) == len(
+ mlvl_anchors)
+ batch_size = mlvl_cls_scores[0].shape[0]
+ # convert to tensor to keep tracing
+ nms_pre_tensor = torch.tensor(
+ cfg.get('nms_pre', -1),
+ device=mlvl_cls_scores[0].device,
+ dtype=torch.long)
+
+ mlvl_bboxes = []
+ mlvl_scores = []
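+        # Each iteration handles one FPN level: reshape the raw score map to
+        # (batch, num_anchors_of_level, num_classes), optionally keep only the
+        # top `nms_pre` scoring anchors per image, then decode the box deltas.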
+ for cls_score, bbox_pred, anchors in zip(mlvl_cls_scores,
+ mlvl_bbox_preds,
+ mlvl_anchors):
+ assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
+ cls_score = cls_score.permute(0, 2, 3,
+ 1).reshape(batch_size, -1,
+ self.cls_out_channels)
+ if self.use_sigmoid_cls:
+ scores = cls_score.sigmoid()
+ else:
+ scores = cls_score.softmax(-1)
+ bbox_pred = bbox_pred.permute(0, 2, 3,
+ 1).reshape(batch_size, -1, 4)
+ anchors = anchors.expand_as(bbox_pred)
+ # Always keep topk op for dynamic input in onnx
+ if nms_pre_tensor > 0 and (torch.onnx.is_in_onnx_export()
+ or scores.shape[-2] > nms_pre_tensor):
+ from torch import _shape_as_tensor
+ # keep shape as tensor and get k
+ num_anchor = _shape_as_tensor(scores)[-2].to(
+ nms_pre_tensor.device)
+ nms_pre = torch.where(nms_pre_tensor < num_anchor,
+ nms_pre_tensor, num_anchor)
+
+ # Get maximum scores for foreground classes.
+ if self.use_sigmoid_cls:
+ max_scores, _ = scores.max(-1)
+ else:
+ # remind that we set FG labels to [0, num_class-1]
+ # since mmdet v2.0
+ # BG cat_id: num_class
+ max_scores, _ = scores[..., :-1].max(-1)
+
+ _, topk_inds = max_scores.topk(nms_pre)
+ batch_inds = torch.arange(batch_size).view(
+ -1, 1).expand_as(topk_inds)
+ anchors = anchors[batch_inds, topk_inds, :]
+ bbox_pred = bbox_pred[batch_inds, topk_inds, :]
+ scores = scores[batch_inds, topk_inds, :]
+
+ bboxes = self.bbox_coder.decode(
+ anchors, bbox_pred, max_shape=img_shapes)
+ mlvl_bboxes.append(bboxes)
+ mlvl_scores.append(scores)
+
+ batch_mlvl_bboxes = torch.cat(mlvl_bboxes, dim=1)
+ if rescale:
+ batch_mlvl_bboxes /= batch_mlvl_bboxes.new_tensor(
+ scale_factors).unsqueeze(1)
+ batch_mlvl_scores = torch.cat(mlvl_scores, dim=1)
+
+        # Set max number of boxes to be fed into nms in deployment
+ deploy_nms_pre = cfg.get('deploy_nms_pre', -1)
+ if deploy_nms_pre > 0 and torch.onnx.is_in_onnx_export():
+ # Get maximum scores for foreground classes.
+ if self.use_sigmoid_cls:
+ max_scores, _ = batch_mlvl_scores.max(-1)
+ else:
+ # remind that we set FG labels to [0, num_class-1]
+ # since mmdet v2.0
+ # BG cat_id: num_class
+ max_scores, _ = batch_mlvl_scores[..., :-1].max(-1)
+ _, topk_inds = max_scores.topk(deploy_nms_pre)
+ batch_inds = torch.arange(batch_size).view(-1,
+ 1).expand_as(topk_inds)
+ batch_mlvl_scores = batch_mlvl_scores[batch_inds, topk_inds]
+ batch_mlvl_bboxes = batch_mlvl_bboxes[batch_inds, topk_inds]
+ if self.use_sigmoid_cls:
+ # Add a dummy background class to the backend when using sigmoid
+ # remind that we set FG labels to [0, num_class-1] since mmdet v2.0
+ # BG cat_id: num_class
+ padding = batch_mlvl_scores.new_zeros(batch_size,
+ batch_mlvl_scores.shape[1],
+ 1)
+ batch_mlvl_scores = torch.cat([batch_mlvl_scores, padding], dim=-1)
+
+ if with_nms:
+ det_results = []
+ for (mlvl_bboxes, mlvl_scores) in zip(batch_mlvl_bboxes,
+ batch_mlvl_scores):
+ det_bbox, det_label = multiclass_nms(mlvl_bboxes, mlvl_scores,
+ cfg.score_thr, cfg.nms,
+ cfg.max_per_img)
+ det_results.append(tuple([det_bbox, det_label]))
+ else:
+ det_results = [
+ tuple(mlvl_bs)
+ for mlvl_bs in zip(batch_mlvl_bboxes, batch_mlvl_scores)
+ ]
+ return det_results
+
+ def aug_test(self, feats, img_metas, rescale=False):
+ """Test function with test time augmentation.
+
+ Args:
+ feats (list[Tensor]): the outer list indicates test-time
+ augmentations and inner Tensor should have a shape NxCxHxW,
+ which contains features for all images in the batch.
+ img_metas (list[list[dict]]): the outer list indicates test-time
+ augs (multiscale, flip, etc.) and the inner list indicates
+ images in a batch. each dict has image information.
+ rescale (bool, optional): Whether to rescale the results.
+ Defaults to False.
+
+ Returns:
+ list[ndarray]: bbox results of each class
+ """
+ return self.aug_test_bboxes(feats, img_metas, rescale=rescale)
diff --git a/annotator/uniformer/mmdet_null/models/dense_heads/atss_head.py b/annotator/uniformer/mmdet_null/models/dense_heads/atss_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..ff55dfa1790ba270539fc9f623dbb2984fa1a99e
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/dense_heads/atss_head.py
@@ -0,0 +1,689 @@
+import torch
+import torch.nn as nn
+from mmcv.cnn import ConvModule, Scale, bias_init_with_prob, normal_init
+from mmcv.runner import force_fp32
+
+from mmdet.core import (anchor_inside_flags, build_assigner, build_sampler,
+ images_to_levels, multi_apply, multiclass_nms,
+ reduce_mean, unmap)
+from ..builder import HEADS, build_loss
+from .anchor_head import AnchorHead
+
+EPS = 1e-12
+
+
+@HEADS.register_module()
+class ATSSHead(AnchorHead):
+ """Bridging the Gap Between Anchor-based and Anchor-free Detection via
+ Adaptive Training Sample Selection.
+
+    The ATSS head structure is similar to FCOS, but ATSS uses anchor boxes and
+    assigns labels with Adaptive Training Sample Selection instead of max-IoU
+    matching.
+
+ https://arxiv.org/abs/1912.02424
+ """
+
+ def __init__(self,
+ num_classes,
+ in_channels,
+ stacked_convs=4,
+ conv_cfg=None,
+ norm_cfg=dict(type='GN', num_groups=32, requires_grad=True),
+ loss_centerness=dict(
+ type='CrossEntropyLoss',
+ use_sigmoid=True,
+ loss_weight=1.0),
+ **kwargs):
+ self.stacked_convs = stacked_convs
+ self.conv_cfg = conv_cfg
+ self.norm_cfg = norm_cfg
+ super(ATSSHead, self).__init__(num_classes, in_channels, **kwargs)
+
+ self.sampling = False
+ if self.train_cfg:
+ self.assigner = build_assigner(self.train_cfg.assigner)
+ # SSD sampling=False so use PseudoSampler
+ sampler_cfg = dict(type='PseudoSampler')
+ self.sampler = build_sampler(sampler_cfg, context=self)
+ self.loss_centerness = build_loss(loss_centerness)
+
+ def _init_layers(self):
+ """Initialize layers of the head."""
+ self.relu = nn.ReLU(inplace=True)
+ self.cls_convs = nn.ModuleList()
+ self.reg_convs = nn.ModuleList()
+ for i in range(self.stacked_convs):
+ chn = self.in_channels if i == 0 else self.feat_channels
+ self.cls_convs.append(
+ ConvModule(
+ chn,
+ self.feat_channels,
+ 3,
+ stride=1,
+ padding=1,
+ conv_cfg=self.conv_cfg,
+ norm_cfg=self.norm_cfg))
+ self.reg_convs.append(
+ ConvModule(
+ chn,
+ self.feat_channels,
+ 3,
+ stride=1,
+ padding=1,
+ conv_cfg=self.conv_cfg,
+ norm_cfg=self.norm_cfg))
+ self.atss_cls = nn.Conv2d(
+ self.feat_channels,
+ self.num_anchors * self.cls_out_channels,
+ 3,
+ padding=1)
+ self.atss_reg = nn.Conv2d(
+ self.feat_channels, self.num_anchors * 4, 3, padding=1)
+ self.atss_centerness = nn.Conv2d(
+ self.feat_channels, self.num_anchors * 1, 3, padding=1)
+ self.scales = nn.ModuleList(
+ [Scale(1.0) for _ in self.anchor_generator.strides])
+
+ def init_weights(self):
+ """Initialize weights of the head."""
+ for m in self.cls_convs:
+ normal_init(m.conv, std=0.01)
+ for m in self.reg_convs:
+ normal_init(m.conv, std=0.01)
+ bias_cls = bias_init_with_prob(0.01)
+ normal_init(self.atss_cls, std=0.01, bias=bias_cls)
+ normal_init(self.atss_reg, std=0.01)
+ normal_init(self.atss_centerness, std=0.01)
+
+ def forward(self, feats):
+ """Forward features from the upstream network.
+
+ Args:
+ feats (tuple[Tensor]): Features from the upstream network, each is
+ a 4D-tensor.
+
+ Returns:
+ tuple: Usually a tuple of classification scores and bbox prediction
+ cls_scores (list[Tensor]): Classification scores for all scale
+ levels, each is a 4D-tensor, the channels number is
+ num_anchors * num_classes.
+ bbox_preds (list[Tensor]): Box energies / deltas for all scale
+ levels, each is a 4D-tensor, the channels number is
+ num_anchors * 4.
+ """
+ return multi_apply(self.forward_single, feats, self.scales)
+
+ def forward_single(self, x, scale):
+ """Forward feature of a single scale level.
+
+ Args:
+ x (Tensor): Features of a single scale level.
+            scale (:obj:`mmcv.cnn.Scale`): Learnable scale module to resize
+ the bbox prediction.
+
+ Returns:
+ tuple:
+                cls_score (Tensor): Cls scores for a single scale level,
+                    the channels number is num_anchors * num_classes.
+                bbox_pred (Tensor): Box energies / deltas for a single scale
+                    level, the channels number is num_anchors * 4.
+                centerness (Tensor): Centerness for a single scale level
+                    with shape (N, num_anchors * 1, H, W).
+ """
+ cls_feat = x
+ reg_feat = x
+ for cls_conv in self.cls_convs:
+ cls_feat = cls_conv(cls_feat)
+ for reg_conv in self.reg_convs:
+ reg_feat = reg_conv(reg_feat)
+ cls_score = self.atss_cls(cls_feat)
+ # we just follow atss, not apply exp in bbox_pred
+ bbox_pred = scale(self.atss_reg(reg_feat)).float()
+ centerness = self.atss_centerness(reg_feat)
+ return cls_score, bbox_pred, centerness
+
+ def loss_single(self, anchors, cls_score, bbox_pred, centerness, labels,
+ label_weights, bbox_targets, num_total_samples):
+ """Compute loss of a single scale level.
+
+        Args:
+            anchors (Tensor): Box reference for each scale level with shape
+                (N, num_total_anchors, 4).
+            cls_score (Tensor): Box scores for each scale level
+                with shape (N, num_anchors * num_classes, H, W).
+            bbox_pred (Tensor): Box energies / deltas for each scale
+                level with shape (N, num_anchors * 4, H, W).
+            centerness (Tensor): Centerness predictions for each scale level
+                with shape (N, num_anchors * 1, H, W).
+            labels (Tensor): Labels of each anchor with shape
+                (N, num_total_anchors).
+            label_weights (Tensor): Label weights of each anchor with shape
+                (N, num_total_anchors).
+            bbox_targets (Tensor): BBox regression targets of each anchor with
+                shape (N, num_total_anchors, 4).
+            num_total_samples (int): Number of positive samples that is
+                reduced over all GPUs.
+
+        Returns:
+            tuple[Tensor]: loss_cls, loss_bbox, loss_centerness and the sum of
+                centerness targets (used later as the bbox averaging factor).
+        """
+
+ anchors = anchors.reshape(-1, 4)
+ cls_score = cls_score.permute(0, 2, 3, 1).reshape(
+ -1, self.cls_out_channels).contiguous()
+ bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4)
+ centerness = centerness.permute(0, 2, 3, 1).reshape(-1)
+ bbox_targets = bbox_targets.reshape(-1, 4)
+ labels = labels.reshape(-1)
+ label_weights = label_weights.reshape(-1)
+
+ # classification loss
+ loss_cls = self.loss_cls(
+ cls_score, labels, label_weights, avg_factor=num_total_samples)
+
+ # FG cat_id: [0, num_classes -1], BG cat_id: num_classes
+ bg_class_ind = self.num_classes
+ pos_inds = ((labels >= 0)
+ & (labels < bg_class_ind)).nonzero().squeeze(1)
+
+ if len(pos_inds) > 0:
+ pos_bbox_targets = bbox_targets[pos_inds]
+ pos_bbox_pred = bbox_pred[pos_inds]
+ pos_anchors = anchors[pos_inds]
+ pos_centerness = centerness[pos_inds]
+
+ centerness_targets = self.centerness_target(
+ pos_anchors, pos_bbox_targets)
+ pos_decode_bbox_pred = self.bbox_coder.decode(
+ pos_anchors, pos_bbox_pred)
+ pos_decode_bbox_targets = self.bbox_coder.decode(
+ pos_anchors, pos_bbox_targets)
+
+ # regression loss
+ loss_bbox = self.loss_bbox(
+ pos_decode_bbox_pred,
+ pos_decode_bbox_targets,
+ weight=centerness_targets,
+ avg_factor=1.0)
+
+ # centerness loss
+ loss_centerness = self.loss_centerness(
+ pos_centerness,
+ centerness_targets,
+ avg_factor=num_total_samples)
+
+ else:
+ loss_bbox = bbox_pred.sum() * 0
+ loss_centerness = centerness.sum() * 0
+ centerness_targets = bbox_targets.new_tensor(0.)
+
+ return loss_cls, loss_bbox, loss_centerness, centerness_targets.sum()
+
+ @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'centernesses'))
+ def loss(self,
+ cls_scores,
+ bbox_preds,
+ centernesses,
+ gt_bboxes,
+ gt_labels,
+ img_metas,
+ gt_bboxes_ignore=None):
+ """Compute losses of the head.
+
+ Args:
+ cls_scores (list[Tensor]): Box scores for each scale level
+ Has shape (N, num_anchors * num_classes, H, W)
+ bbox_preds (list[Tensor]): Box energies / deltas for each scale
+ level with shape (N, num_anchors * 4, H, W)
+ centernesses (list[Tensor]): Centerness for each scale
+ level with shape (N, num_anchors * 1, H, W)
+ gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
+ shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
+ gt_labels (list[Tensor]): class indices corresponding to each box
+ img_metas (list[dict]): Meta information of each image, e.g.,
+ image size, scaling factor, etc.
+ gt_bboxes_ignore (list[Tensor] | None): specify which bounding
+ boxes can be ignored when computing the loss.
+
+ Returns:
+ dict[str, Tensor]: A dictionary of loss components.
+ """
+ featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
+ assert len(featmap_sizes) == self.anchor_generator.num_levels
+
+ device = cls_scores[0].device
+ anchor_list, valid_flag_list = self.get_anchors(
+ featmap_sizes, img_metas, device=device)
+ label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1
+
+ cls_reg_targets = self.get_targets(
+ anchor_list,
+ valid_flag_list,
+ gt_bboxes,
+ img_metas,
+ gt_bboxes_ignore_list=gt_bboxes_ignore,
+ gt_labels_list=gt_labels,
+ label_channels=label_channels)
+ if cls_reg_targets is None:
+ return None
+
+ (anchor_list, labels_list, label_weights_list, bbox_targets_list,
+ bbox_weights_list, num_total_pos, num_total_neg) = cls_reg_targets
+
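+        # Average the count of positive samples across GPUs so the loss
+        # normalization stays consistent in distributed training.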
+ num_total_samples = reduce_mean(
+ torch.tensor(num_total_pos, dtype=torch.float,
+ device=device)).item()
+ num_total_samples = max(num_total_samples, 1.0)
+
+ losses_cls, losses_bbox, loss_centerness,\
+ bbox_avg_factor = multi_apply(
+ self.loss_single,
+ anchor_list,
+ cls_scores,
+ bbox_preds,
+ centernesses,
+ labels_list,
+ label_weights_list,
+ bbox_targets_list,
+ num_total_samples=num_total_samples)
+
+ bbox_avg_factor = sum(bbox_avg_factor)
+ bbox_avg_factor = reduce_mean(bbox_avg_factor).item()
+ if bbox_avg_factor < EPS:
+ bbox_avg_factor = 1
+ losses_bbox = list(map(lambda x: x / bbox_avg_factor, losses_bbox))
+ return dict(
+ loss_cls=losses_cls,
+ loss_bbox=losses_bbox,
+ loss_centerness=loss_centerness)
+
+ def centerness_target(self, anchors, bbox_targets):
+ # only calculate pos centerness targets, otherwise there may be nan
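+        # centerness = sqrt((min(l, r) / max(l, r)) * (min(t, b) / max(t, b))),
+        # where l, t, r, b are the distances from the anchor center to the
+        # left/top/right/bottom sides of the decoded ground-truth box.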
+ gts = self.bbox_coder.decode(anchors, bbox_targets)
+ anchors_cx = (anchors[:, 2] + anchors[:, 0]) / 2
+ anchors_cy = (anchors[:, 3] + anchors[:, 1]) / 2
+ l_ = anchors_cx - gts[:, 0]
+ t_ = anchors_cy - gts[:, 1]
+ r_ = gts[:, 2] - anchors_cx
+ b_ = gts[:, 3] - anchors_cy
+
+ left_right = torch.stack([l_, r_], dim=1)
+ top_bottom = torch.stack([t_, b_], dim=1)
+ centerness = torch.sqrt(
+ (left_right.min(dim=-1)[0] / left_right.max(dim=-1)[0]) *
+ (top_bottom.min(dim=-1)[0] / top_bottom.max(dim=-1)[0]))
+ assert not torch.isnan(centerness).any()
+ return centerness
+
+ @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'centernesses'))
+ def get_bboxes(self,
+ cls_scores,
+ bbox_preds,
+ centernesses,
+ img_metas,
+ cfg=None,
+ rescale=False,
+ with_nms=True):
+ """Transform network output for a batch into bbox predictions.
+
+ Args:
+ cls_scores (list[Tensor]): Box scores for each scale level
+ with shape (N, num_anchors * num_classes, H, W).
+ bbox_preds (list[Tensor]): Box energies / deltas for each scale
+ level with shape (N, num_anchors * 4, H, W).
+ centernesses (list[Tensor]): Centerness for each scale level with
+ shape (N, num_anchors * 1, H, W).
+ img_metas (list[dict]): Meta information of each image, e.g.,
+ image size, scaling factor, etc.
+ cfg (mmcv.Config | None): Test / postprocessing configuration,
+ if None, test_cfg would be used. Default: None.
+ rescale (bool): If True, return boxes in original image space.
+ Default: False.
+ with_nms (bool): If True, do nms before return boxes.
+ Default: True.
+
+ Returns:
+            list[tuple[Tensor, Tensor]]: Each item in result_list is a
+                2-tuple. The first item is an (n, 5) tensor, where the 5
+                columns are (tl_x, tl_y, br_x, br_y, score) and the score is
+                between 0 and 1. The second tensor has shape (n,), and each
+                element is the class label of the corresponding box.
+ """
+ cfg = self.test_cfg if cfg is None else cfg
+ assert len(cls_scores) == len(bbox_preds)
+ num_levels = len(cls_scores)
+ device = cls_scores[0].device
+ featmap_sizes = [cls_scores[i].shape[-2:] for i in range(num_levels)]
+ mlvl_anchors = self.anchor_generator.grid_anchors(
+ featmap_sizes, device=device)
+
+ cls_score_list = [cls_scores[i].detach() for i in range(num_levels)]
+ bbox_pred_list = [bbox_preds[i].detach() for i in range(num_levels)]
+ centerness_pred_list = [
+ centernesses[i].detach() for i in range(num_levels)
+ ]
+ img_shapes = [
+ img_metas[i]['img_shape'] for i in range(cls_scores[0].shape[0])
+ ]
+ scale_factors = [
+ img_metas[i]['scale_factor'] for i in range(cls_scores[0].shape[0])
+ ]
+ result_list = self._get_bboxes(cls_score_list, bbox_pred_list,
+ centerness_pred_list, mlvl_anchors,
+ img_shapes, scale_factors, cfg, rescale,
+ with_nms)
+ return result_list
+
+ def _get_bboxes(self,
+ cls_scores,
+ bbox_preds,
+ centernesses,
+ mlvl_anchors,
+ img_shapes,
+ scale_factors,
+ cfg,
+ rescale=False,
+ with_nms=True):
+ """Transform outputs for a single batch item into labeled boxes.
+
+ Args:
+ cls_scores (list[Tensor]): Box scores for a single scale level
+ with shape (N, num_anchors * num_classes, H, W).
+ bbox_preds (list[Tensor]): Box energies / deltas for a single
+ scale level with shape (N, num_anchors * 4, H, W).
+ centernesses (list[Tensor]): Centerness for a single scale level
+ with shape (N, num_anchors * 1, H, W).
+ mlvl_anchors (list[Tensor]): Box reference for a single scale level
+ with shape (num_total_anchors, 4).
+ img_shapes (list[tuple[int]]): Shape of the input image,
+ list[(height, width, 3)].
+            scale_factors (list[ndarray]): Scale factor of each image, arranged
+                as (w_scale, h_scale, w_scale, h_scale).
+ cfg (mmcv.Config | None): Test / postprocessing configuration,
+ if None, test_cfg would be used.
+ rescale (bool): If True, return boxes in original image space.
+ Default: False.
+ with_nms (bool): If True, do nms before return boxes.
+ Default: True.
+
+ Returns:
+            list[tuple[Tensor, Tensor]]: Each item in result_list is a
+                2-tuple. The first item is an (n, 5) tensor, where the 5
+                columns are (tl_x, tl_y, br_x, br_y, score) and the score is
+                between 0 and 1. The second tensor has shape (n,), and each
+                element is the class label of the corresponding box.
+ """
+ assert len(cls_scores) == len(bbox_preds) == len(mlvl_anchors)
+ device = cls_scores[0].device
+ batch_size = cls_scores[0].shape[0]
+ # convert to tensor to keep tracing
+ nms_pre_tensor = torch.tensor(
+ cfg.get('nms_pre', -1), device=device, dtype=torch.long)
+ mlvl_bboxes = []
+ mlvl_scores = []
+ mlvl_centerness = []
+ for cls_score, bbox_pred, centerness, anchors in zip(
+ cls_scores, bbox_preds, centernesses, mlvl_anchors):
+ assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
+ scores = cls_score.permute(0, 2, 3, 1).reshape(
+ batch_size, -1, self.cls_out_channels).sigmoid()
+ centerness = centerness.permute(0, 2, 3,
+ 1).reshape(batch_size,
+ -1).sigmoid()
+ bbox_pred = bbox_pred.permute(0, 2, 3,
+ 1).reshape(batch_size, -1, 4)
+
+ # Always keep topk op for dynamic input in onnx
+ if nms_pre_tensor > 0 and (torch.onnx.is_in_onnx_export()
+ or scores.shape[-2] > nms_pre_tensor):
+ from torch import _shape_as_tensor
+ # keep shape as tensor and get k
+ num_anchor = _shape_as_tensor(scores)[-2].to(device)
+ nms_pre = torch.where(nms_pre_tensor < num_anchor,
+ nms_pre_tensor, num_anchor)
+
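+                # Rank candidates by the classification score weighted by the
+                # predicted centerness when selecting the top-k kept for NMS.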
+ max_scores, _ = (scores * centerness[..., None]).max(-1)
+ _, topk_inds = max_scores.topk(nms_pre)
+ anchors = anchors[topk_inds, :]
+ batch_inds = torch.arange(batch_size).view(
+ -1, 1).expand_as(topk_inds).long()
+ bbox_pred = bbox_pred[batch_inds, topk_inds, :]
+ scores = scores[batch_inds, topk_inds, :]
+ centerness = centerness[batch_inds, topk_inds]
+ else:
+ anchors = anchors.expand_as(bbox_pred)
+
+ bboxes = self.bbox_coder.decode(
+ anchors, bbox_pred, max_shape=img_shapes)
+ mlvl_bboxes.append(bboxes)
+ mlvl_scores.append(scores)
+ mlvl_centerness.append(centerness)
+
+ batch_mlvl_bboxes = torch.cat(mlvl_bboxes, dim=1)
+ if rescale:
+ batch_mlvl_bboxes /= batch_mlvl_bboxes.new_tensor(
+ scale_factors).unsqueeze(1)
+ batch_mlvl_scores = torch.cat(mlvl_scores, dim=1)
+ batch_mlvl_centerness = torch.cat(mlvl_centerness, dim=1)
+
+        # Set max number of boxes to be fed into nms in deployment
+ deploy_nms_pre = cfg.get('deploy_nms_pre', -1)
+ if deploy_nms_pre > 0 and torch.onnx.is_in_onnx_export():
+ batch_mlvl_scores, _ = (
+ batch_mlvl_scores *
+ batch_mlvl_centerness.unsqueeze(2).expand_as(batch_mlvl_scores)
+ ).max(-1)
+ _, topk_inds = batch_mlvl_scores.topk(deploy_nms_pre)
+ batch_inds = torch.arange(batch_size).view(-1,
+ 1).expand_as(topk_inds)
+ batch_mlvl_scores = batch_mlvl_scores[batch_inds, topk_inds, :]
+ batch_mlvl_bboxes = batch_mlvl_bboxes[batch_inds, topk_inds, :]
+ batch_mlvl_centerness = batch_mlvl_centerness[batch_inds,
+ topk_inds]
+ # remind that we set FG labels to [0, num_class-1] since mmdet v2.0
+ # BG cat_id: num_class
+ padding = batch_mlvl_scores.new_zeros(batch_size,
+ batch_mlvl_scores.shape[1], 1)
+ batch_mlvl_scores = torch.cat([batch_mlvl_scores, padding], dim=-1)
+
+ if with_nms:
+ det_results = []
+ for (mlvl_bboxes, mlvl_scores,
+ mlvl_centerness) in zip(batch_mlvl_bboxes, batch_mlvl_scores,
+ batch_mlvl_centerness):
+ det_bbox, det_label = multiclass_nms(
+ mlvl_bboxes,
+ mlvl_scores,
+ cfg.score_thr,
+ cfg.nms,
+ cfg.max_per_img,
+ score_factors=mlvl_centerness)
+ det_results.append(tuple([det_bbox, det_label]))
+ else:
+ det_results = [
+ tuple(mlvl_bs)
+ for mlvl_bs in zip(batch_mlvl_bboxes, batch_mlvl_scores,
+ batch_mlvl_centerness)
+ ]
+ return det_results
+
+ def get_targets(self,
+ anchor_list,
+ valid_flag_list,
+ gt_bboxes_list,
+ img_metas,
+ gt_bboxes_ignore_list=None,
+ gt_labels_list=None,
+ label_channels=1,
+ unmap_outputs=True):
+ """Get targets for ATSS head.
+
+ This method is almost the same as `AnchorHead.get_targets()`. Besides
+ returning the targets as the parent method does, it also returns the
+ anchors as the first element of the returned tuple.
+ """
+ num_imgs = len(img_metas)
+ assert len(anchor_list) == len(valid_flag_list) == num_imgs
+
+ # anchor number of multi levels
+ num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]
+ num_level_anchors_list = [num_level_anchors] * num_imgs
+
+ # concat all level anchors and flags to a single tensor
+ for i in range(num_imgs):
+ assert len(anchor_list[i]) == len(valid_flag_list[i])
+ anchor_list[i] = torch.cat(anchor_list[i])
+ valid_flag_list[i] = torch.cat(valid_flag_list[i])
+
+ # compute targets for each image
+ if gt_bboxes_ignore_list is None:
+ gt_bboxes_ignore_list = [None for _ in range(num_imgs)]
+ if gt_labels_list is None:
+ gt_labels_list = [None for _ in range(num_imgs)]
+ (all_anchors, all_labels, all_label_weights, all_bbox_targets,
+ all_bbox_weights, pos_inds_list, neg_inds_list) = multi_apply(
+ self._get_target_single,
+ anchor_list,
+ valid_flag_list,
+ num_level_anchors_list,
+ gt_bboxes_list,
+ gt_bboxes_ignore_list,
+ gt_labels_list,
+ img_metas,
+ label_channels=label_channels,
+ unmap_outputs=unmap_outputs)
+ # no valid anchors
+ if any([labels is None for labels in all_labels]):
+ return None
+ # sampled anchors of all images
+ num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list])
+ num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list])
+ # split targets to a list w.r.t. multiple levels
+ anchors_list = images_to_levels(all_anchors, num_level_anchors)
+ labels_list = images_to_levels(all_labels, num_level_anchors)
+ label_weights_list = images_to_levels(all_label_weights,
+ num_level_anchors)
+ bbox_targets_list = images_to_levels(all_bbox_targets,
+ num_level_anchors)
+ bbox_weights_list = images_to_levels(all_bbox_weights,
+ num_level_anchors)
+ return (anchors_list, labels_list, label_weights_list,
+ bbox_targets_list, bbox_weights_list, num_total_pos,
+ num_total_neg)
+
+ def _get_target_single(self,
+ flat_anchors,
+ valid_flags,
+ num_level_anchors,
+ gt_bboxes,
+ gt_bboxes_ignore,
+ gt_labels,
+ img_meta,
+ label_channels=1,
+ unmap_outputs=True):
+ """Compute regression, classification targets for anchors in a single
+ image.
+
+ Args:
+            flat_anchors (Tensor): Multi-level anchors of the image, which are
+                concatenated into a single tensor of shape (num_anchors, 4).
+            valid_flags (Tensor): Multi level valid flags of the image,
+                which are concatenated into a single tensor of
+                    shape (num_anchors,).
+            num_level_anchors (list[int]): Number of anchors of each scale
+                level.
+ gt_bboxes (Tensor): Ground truth bboxes of the image,
+ shape (num_gts, 4).
+ gt_bboxes_ignore (Tensor): Ground truth bboxes to be
+ ignored, shape (num_ignored_gts, 4).
+ gt_labels (Tensor): Ground truth labels of each box,
+ shape (num_gts,).
+ img_meta (dict): Meta info of the image.
+ label_channels (int): Channel of label.
+ unmap_outputs (bool): Whether to map outputs back to the original
+ set of anchors.
+
+ Returns:
+            tuple: N is the number of total anchors in the image.
+                anchors (Tensor): All anchors in the image with shape (N, 4).
+ labels (Tensor): Labels of all anchors in the image with shape
+ (N,).
+ label_weights (Tensor): Label weights of all anchor in the
+ image with shape (N,).
+ bbox_targets (Tensor): BBox targets of all anchors in the
+ image with shape (N, 4).
+ bbox_weights (Tensor): BBox weights of all anchors in the
+ image with shape (N, 4)
+ pos_inds (Tensor): Indices of positive anchor with shape
+ (num_pos,).
+ neg_inds (Tensor): Indices of negative anchor with shape
+ (num_neg,).
+ """
+ inside_flags = anchor_inside_flags(flat_anchors, valid_flags,
+ img_meta['img_shape'][:2],
+ self.train_cfg.allowed_border)
+ if not inside_flags.any():
+ return (None, ) * 7
+ # assign gt and sample anchors
+ anchors = flat_anchors[inside_flags, :]
+
+ num_level_anchors_inside = self.get_num_level_anchors_inside(
+ num_level_anchors, inside_flags)
+ assign_result = self.assigner.assign(anchors, num_level_anchors_inside,
+ gt_bboxes, gt_bboxes_ignore,
+ gt_labels)
+
+ sampling_result = self.sampler.sample(assign_result, anchors,
+ gt_bboxes)
+
+ num_valid_anchors = anchors.shape[0]
+ bbox_targets = torch.zeros_like(anchors)
+ bbox_weights = torch.zeros_like(anchors)
+ labels = anchors.new_full((num_valid_anchors, ),
+ self.num_classes,
+ dtype=torch.long)
+ label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float)
+
+ pos_inds = sampling_result.pos_inds
+ neg_inds = sampling_result.neg_inds
+ if len(pos_inds) > 0:
+ if hasattr(self, 'bbox_coder'):
+ pos_bbox_targets = self.bbox_coder.encode(
+ sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes)
+ else:
+ # used in VFNetHead
+ pos_bbox_targets = sampling_result.pos_gt_bboxes
+ bbox_targets[pos_inds, :] = pos_bbox_targets
+ bbox_weights[pos_inds, :] = 1.0
+ if gt_labels is None:
+ # Only rpn gives gt_labels as None
+ # Foreground is the first class since v2.5.0
+ labels[pos_inds] = 0
+ else:
+ labels[pos_inds] = gt_labels[
+ sampling_result.pos_assigned_gt_inds]
+ if self.train_cfg.pos_weight <= 0:
+ label_weights[pos_inds] = 1.0
+ else:
+ label_weights[pos_inds] = self.train_cfg.pos_weight
+ if len(neg_inds) > 0:
+ label_weights[neg_inds] = 1.0
+
+ # map up to original set of anchors
+ if unmap_outputs:
+ num_total_anchors = flat_anchors.size(0)
+ anchors = unmap(anchors, num_total_anchors, inside_flags)
+ labels = unmap(
+ labels, num_total_anchors, inside_flags, fill=self.num_classes)
+ label_weights = unmap(label_weights, num_total_anchors,
+ inside_flags)
+ bbox_targets = unmap(bbox_targets, num_total_anchors, inside_flags)
+ bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags)
+
+ return (anchors, labels, label_weights, bbox_targets, bbox_weights,
+ pos_inds, neg_inds)
+
+ def get_num_level_anchors_inside(self, num_level_anchors, inside_flags):
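+        """Count, for each scale level, how many anchors of this image lie
+        inside the image border (according to the flattened ``inside_flags``);
+        the per-level counts are consumed by the ATSS assigner.
+        """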
+ split_inside_flags = torch.split(inside_flags, num_level_anchors)
+ num_level_anchors_inside = [
+ int(flags.sum()) for flags in split_inside_flags
+ ]
+ return num_level_anchors_inside
diff --git a/annotator/uniformer/mmdet_null/models/dense_heads/base_dense_head.py b/annotator/uniformer/mmdet_null/models/dense_heads/base_dense_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..de11e4a2197b1dfe241ce7a66daa1907a8fc5661
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/dense_heads/base_dense_head.py
@@ -0,0 +1,59 @@
+from abc import ABCMeta, abstractmethod
+
+import torch.nn as nn
+
+
+class BaseDenseHead(nn.Module, metaclass=ABCMeta):
+ """Base class for DenseHeads."""
+
+ def __init__(self):
+ super(BaseDenseHead, self).__init__()
+
+ @abstractmethod
+ def loss(self, **kwargs):
+ """Compute losses of the head."""
+ pass
+
+ @abstractmethod
+ def get_bboxes(self, **kwargs):
+ """Transform network output for a batch into bbox predictions."""
+ pass
+
+ def forward_train(self,
+ x,
+ img_metas,
+ gt_bboxes,
+ gt_labels=None,
+ gt_bboxes_ignore=None,
+ proposal_cfg=None,
+ **kwargs):
+ """
+ Args:
+ x (list[Tensor]): Features from FPN.
+ img_metas (list[dict]): Meta information of each image, e.g.,
+ image size, scaling factor, etc.
+ gt_bboxes (Tensor): Ground truth bboxes of the image,
+ shape (num_gts, 4).
+ gt_labels (Tensor): Ground truth labels of each box,
+ shape (num_gts,).
+ gt_bboxes_ignore (Tensor): Ground truth bboxes to be
+ ignored, shape (num_ignored_gts, 4).
+ proposal_cfg (mmcv.Config): Test / postprocessing configuration,
+ if None, test_cfg would be used
+
+ Returns:
+ tuple:
+ losses: (dict[str, Tensor]): A dictionary of loss components.
+ proposal_list (list[Tensor]): Proposals of each image.
+ """
+ outs = self(x)
+ if gt_labels is None:
+ loss_inputs = outs + (gt_bboxes, img_metas)
+ else:
+ loss_inputs = outs + (gt_bboxes, gt_labels, img_metas)
+ losses = self.loss(*loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore)
+ if proposal_cfg is None:
+ return losses
+ else:
+ proposal_list = self.get_bboxes(*outs, img_metas, cfg=proposal_cfg)
+ return losses, proposal_list
diff --git a/annotator/uniformer/mmdet_null/models/dense_heads/cascade_rpn_head.py b/annotator/uniformer/mmdet_null/models/dense_heads/cascade_rpn_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..e32ee461951e685fb44a461033293159e3439717
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/dense_heads/cascade_rpn_head.py
@@ -0,0 +1,784 @@
+from __future__ import division
+import copy
+import warnings
+
+import torch
+import torch.nn as nn
+from mmcv import ConfigDict
+from mmcv.cnn import normal_init
+from mmcv.ops import DeformConv2d, batched_nms
+
+from mmdet.core import (RegionAssigner, build_assigner, build_sampler,
+ images_to_levels, multi_apply)
+from ..builder import HEADS, build_head
+from .base_dense_head import BaseDenseHead
+from .rpn_head import RPNHead
+
+
+class AdaptiveConv(nn.Module):
+ """AdaptiveConv used to adapt the sampling location with the anchors.
+
+ Args:
+ in_channels (int): Number of channels in the input image
+ out_channels (int): Number of channels produced by the convolution
+ kernel_size (int or tuple): Size of the conv kernel. Default: 3
+ stride (int or tuple, optional): Stride of the convolution. Default: 1
+ padding (int or tuple, optional): Zero-padding added to both sides of
+ the input. Default: 1
+ dilation (int or tuple, optional): Spacing between kernel elements.
+ Default: 3
+ groups (int, optional): Number of blocked connections from input
+ channels to output channels. Default: 1
+ bias (bool, optional): If set True, adds a learnable bias to the
+ output. Default: False.
+ type (str, optional): Type of adaptive conv, can be either 'offset'
+ (arbitrary anchors) or 'dilation' (uniform anchor).
+ Default: 'dilation'.
+ """
+
+ def __init__(self,
+ in_channels,
+ out_channels,
+ kernel_size=3,
+ stride=1,
+ padding=1,
+ dilation=3,
+ groups=1,
+ bias=False,
+ type='dilation'):
+ super(AdaptiveConv, self).__init__()
+ assert type in ['offset', 'dilation']
+ self.adapt_type = type
+
+        assert kernel_size == 3, 'Adaptive conv only supports kernel_size 3'
+        if self.adapt_type == 'offset':
+            assert stride == 1 and padding == 1 and groups == 1, \
+                'Adaptive conv offset mode only supports stride 1, ' \
+                'padding 1 and groups 1'
+ self.conv = DeformConv2d(
+ in_channels,
+ out_channels,
+ kernel_size,
+ padding=padding,
+ stride=stride,
+ groups=groups,
+ bias=bias)
+ else:
+ self.conv = nn.Conv2d(
+ in_channels,
+ out_channels,
+ kernel_size,
+ padding=dilation,
+ dilation=dilation)
+
+ def init_weights(self):
+ """Init weights."""
+ normal_init(self.conv, std=0.01)
+
+ def forward(self, x, offset):
+ """Forward function."""
+ if self.adapt_type == 'offset':
+ N, _, H, W = x.shape
+ assert offset is not None
+ assert H * W == offset.shape[1]
+ # reshape [N, NA, 18] to (N, 18, H, W)
+ offset = offset.permute(0, 2, 1).reshape(N, -1, H, W)
+ offset = offset.contiguous()
+ x = self.conv(x, offset)
+ else:
+ assert offset is None
+ x = self.conv(x)
+ return x
+
+
+@HEADS.register_module()
+class StageCascadeRPNHead(RPNHead):
+ """Stage of CascadeRPNHead.
+
+ Args:
+ in_channels (int): Number of channels in the input feature map.
+ anchor_generator (dict): anchor generator config.
+ adapt_cfg (dict): adaptation config.
+        bridged_feature (bool, optional): whether to update the rpn feature.
+            Default: False.
+        with_cls (bool, optional): whether to use the classification branch.
+            Default: True.
+        sampling (bool, optional): whether to use sampling. Default: True.
+ """
+
+ def __init__(self,
+ in_channels,
+ anchor_generator=dict(
+ type='AnchorGenerator',
+ scales=[8],
+ ratios=[1.0],
+ strides=[4, 8, 16, 32, 64]),
+ adapt_cfg=dict(type='dilation', dilation=3),
+ bridged_feature=False,
+ with_cls=True,
+ sampling=True,
+ **kwargs):
+ self.with_cls = with_cls
+ self.anchor_strides = anchor_generator['strides']
+ self.anchor_scales = anchor_generator['scales']
+ self.bridged_feature = bridged_feature
+ self.adapt_cfg = adapt_cfg
+ super(StageCascadeRPNHead, self).__init__(
+ in_channels, anchor_generator=anchor_generator, **kwargs)
+
+ # override sampling and sampler
+ self.sampling = sampling
+ if self.train_cfg:
+ self.assigner = build_assigner(self.train_cfg.assigner)
+ # use PseudoSampler when sampling is False
+ if self.sampling and hasattr(self.train_cfg, 'sampler'):
+ sampler_cfg = self.train_cfg.sampler
+ else:
+ sampler_cfg = dict(type='PseudoSampler')
+ self.sampler = build_sampler(sampler_cfg, context=self)
+
+ def _init_layers(self):
+ """Init layers of a CascadeRPN stage."""
+ self.rpn_conv = AdaptiveConv(self.in_channels, self.feat_channels,
+ **self.adapt_cfg)
+ if self.with_cls:
+ self.rpn_cls = nn.Conv2d(self.feat_channels,
+ self.num_anchors * self.cls_out_channels,
+ 1)
+ self.rpn_reg = nn.Conv2d(self.feat_channels, self.num_anchors * 4, 1)
+ self.relu = nn.ReLU(inplace=True)
+
+ def init_weights(self):
+ """Init weights of a CascadeRPN stage."""
+ self.rpn_conv.init_weights()
+ normal_init(self.rpn_reg, std=0.01)
+ if self.with_cls:
+ normal_init(self.rpn_cls, std=0.01)
+
+ def forward_single(self, x, offset):
+ """Forward function of single scale."""
+ bridged_x = x
+ x = self.relu(self.rpn_conv(x, offset))
+ if self.bridged_feature:
+ bridged_x = x # update feature
+ cls_score = self.rpn_cls(x) if self.with_cls else None
+ bbox_pred = self.rpn_reg(x)
+ return bridged_x, cls_score, bbox_pred
+
+ def forward(self, feats, offset_list=None):
+ """Forward function."""
+ if offset_list is None:
+ offset_list = [None for _ in range(len(feats))]
+ return multi_apply(self.forward_single, feats, offset_list)
+
+ def _region_targets_single(self,
+ anchors,
+ valid_flags,
+ gt_bboxes,
+ gt_bboxes_ignore,
+ gt_labels,
+ img_meta,
+ featmap_sizes,
+ label_channels=1):
+ """Get anchor targets based on region for single level."""
+ assign_result = self.assigner.assign(
+ anchors,
+ valid_flags,
+ gt_bboxes,
+ img_meta,
+ featmap_sizes,
+ self.anchor_scales[0],
+ self.anchor_strides,
+ gt_bboxes_ignore=gt_bboxes_ignore,
+ gt_labels=None,
+ allowed_border=self.train_cfg.allowed_border)
+ flat_anchors = torch.cat(anchors)
+ sampling_result = self.sampler.sample(assign_result, flat_anchors,
+ gt_bboxes)
+
+ num_anchors = flat_anchors.shape[0]
+ bbox_targets = torch.zeros_like(flat_anchors)
+ bbox_weights = torch.zeros_like(flat_anchors)
+ labels = flat_anchors.new_zeros(num_anchors, dtype=torch.long)
+ label_weights = flat_anchors.new_zeros(num_anchors, dtype=torch.float)
+
+ pos_inds = sampling_result.pos_inds
+ neg_inds = sampling_result.neg_inds
+ if len(pos_inds) > 0:
+ if not self.reg_decoded_bbox:
+ pos_bbox_targets = self.bbox_coder.encode(
+ sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes)
+ else:
+ pos_bbox_targets = sampling_result.pos_gt_bboxes
+ bbox_targets[pos_inds, :] = pos_bbox_targets
+ bbox_weights[pos_inds, :] = 1.0
+ if gt_labels is None:
+ labels[pos_inds] = 1
+ else:
+ labels[pos_inds] = gt_labels[
+ sampling_result.pos_assigned_gt_inds]
+ if self.train_cfg.pos_weight <= 0:
+ label_weights[pos_inds] = 1.0
+ else:
+ label_weights[pos_inds] = self.train_cfg.pos_weight
+ if len(neg_inds) > 0:
+ label_weights[neg_inds] = 1.0
+
+ return (labels, label_weights, bbox_targets, bbox_weights, pos_inds,
+ neg_inds)
+
+ def region_targets(self,
+ anchor_list,
+ valid_flag_list,
+ gt_bboxes_list,
+ img_metas,
+ featmap_sizes,
+ gt_bboxes_ignore_list=None,
+ gt_labels_list=None,
+ label_channels=1,
+ unmap_outputs=True):
+ """See :func:`StageCascadeRPNHead.get_targets`."""
+ num_imgs = len(img_metas)
+ assert len(anchor_list) == len(valid_flag_list) == num_imgs
+
+ # anchor number of multi levels
+ num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]
+
+ # compute targets for each image
+ if gt_bboxes_ignore_list is None:
+ gt_bboxes_ignore_list = [None for _ in range(num_imgs)]
+ if gt_labels_list is None:
+ gt_labels_list = [None for _ in range(num_imgs)]
+ (all_labels, all_label_weights, all_bbox_targets, all_bbox_weights,
+ pos_inds_list, neg_inds_list) = multi_apply(
+ self._region_targets_single,
+ anchor_list,
+ valid_flag_list,
+ gt_bboxes_list,
+ gt_bboxes_ignore_list,
+ gt_labels_list,
+ img_metas,
+ featmap_sizes=featmap_sizes,
+ label_channels=label_channels)
+ # no valid anchors
+ if any([labels is None for labels in all_labels]):
+ return None
+ # sampled anchors of all images
+ num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list])
+ num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list])
+ # split targets to a list w.r.t. multiple levels
+ labels_list = images_to_levels(all_labels, num_level_anchors)
+ label_weights_list = images_to_levels(all_label_weights,
+ num_level_anchors)
+ bbox_targets_list = images_to_levels(all_bbox_targets,
+ num_level_anchors)
+ bbox_weights_list = images_to_levels(all_bbox_weights,
+ num_level_anchors)
+ return (labels_list, label_weights_list, bbox_targets_list,
+ bbox_weights_list, num_total_pos, num_total_neg)
+
+ def get_targets(self,
+ anchor_list,
+ valid_flag_list,
+ gt_bboxes,
+ img_metas,
+ featmap_sizes,
+ gt_bboxes_ignore=None,
+ label_channels=1):
+ """Compute regression and classification targets for anchors.
+
+ Args:
+ anchor_list (list[list]): Multi level anchors of each image.
+ valid_flag_list (list[list]): Multi level valid flags of each
+ image.
+ gt_bboxes (list[Tensor]): Ground truth bboxes of each image.
+ img_metas (list[dict]): Meta info of each image.
+            featmap_sizes (list[Tensor]): Feature map size of each level.
+            gt_bboxes_ignore (list[Tensor]): Ignore bboxes of each image.
+ label_channels (int): Channel of label.
+
+ Returns:
+ cls_reg_targets (tuple)
+ """
+ if isinstance(self.assigner, RegionAssigner):
+ cls_reg_targets = self.region_targets(
+ anchor_list,
+ valid_flag_list,
+ gt_bboxes,
+ img_metas,
+ featmap_sizes,
+ gt_bboxes_ignore_list=gt_bboxes_ignore,
+ label_channels=label_channels)
+ else:
+ cls_reg_targets = super(StageCascadeRPNHead, self).get_targets(
+ anchor_list,
+ valid_flag_list,
+ gt_bboxes,
+ img_metas,
+ gt_bboxes_ignore_list=gt_bboxes_ignore,
+ label_channels=label_channels)
+ return cls_reg_targets
+
+ def anchor_offset(self, anchor_list, anchor_strides, featmap_sizes):
+ """ Get offest for deformable conv based on anchor shape
+ NOTE: currently support deformable kernel_size=3 and dilation=1
+
+ Args:
+            anchor_list (list[list[Tensor]]): [NI, NLVL, NA, 4] list of
+                multi-level anchors
+            anchor_strides (list[int]): anchor stride of each level
+            featmap_sizes (list[tuple]): feature map size of each level
+
+        Returns:
+            offset_list (list[Tensor]): per-level offsets for the DeformConv
+                kernel, each of shape [NI, NA, 2 * ks**2] (18 for a 3x3
+                kernel).
+ """
+
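+        # The DeformConv offset moves each of the 3x3 kernel sampling points
+        # from its default grid position to a position implied by the anchor:
+        # the anchor center gives a global shift (_ctr_offset) and the anchor
+        # width/height spread the points over the box (_shape_offset).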
+ def _shape_offset(anchors, stride, ks=3, dilation=1):
+ # currently support kernel_size=3 and dilation=1
+ assert ks == 3 and dilation == 1
+ pad = (ks - 1) // 2
+ idx = torch.arange(-pad, pad + 1, dtype=dtype, device=device)
+ yy, xx = torch.meshgrid(idx, idx) # return order matters
+ xx = xx.reshape(-1)
+ yy = yy.reshape(-1)
+ w = (anchors[:, 2] - anchors[:, 0]) / stride
+ h = (anchors[:, 3] - anchors[:, 1]) / stride
+ w = w / (ks - 1) - dilation
+ h = h / (ks - 1) - dilation
+ offset_x = w[:, None] * xx # (NA, ks**2)
+ offset_y = h[:, None] * yy # (NA, ks**2)
+ return offset_x, offset_y
+
+ def _ctr_offset(anchors, stride, featmap_size):
+ feat_h, feat_w = featmap_size
+ assert len(anchors) == feat_h * feat_w
+
+ x = (anchors[:, 0] + anchors[:, 2]) * 0.5
+ y = (anchors[:, 1] + anchors[:, 3]) * 0.5
+ # compute centers on feature map
+ x = x / stride
+ y = y / stride
+ # compute predefine centers
+ xx = torch.arange(0, feat_w, device=anchors.device)
+ yy = torch.arange(0, feat_h, device=anchors.device)
+ yy, xx = torch.meshgrid(yy, xx)
+ xx = xx.reshape(-1).type_as(x)
+ yy = yy.reshape(-1).type_as(y)
+
+ offset_x = x - xx # (NA, )
+ offset_y = y - yy # (NA, )
+ return offset_x, offset_y
+
+ num_imgs = len(anchor_list)
+ num_lvls = len(anchor_list[0])
+ dtype = anchor_list[0][0].dtype
+ device = anchor_list[0][0].device
+ num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]
+
+ offset_list = []
+ for i in range(num_imgs):
+ mlvl_offset = []
+ for lvl in range(num_lvls):
+ c_offset_x, c_offset_y = _ctr_offset(anchor_list[i][lvl],
+ anchor_strides[lvl],
+ featmap_sizes[lvl])
+ s_offset_x, s_offset_y = _shape_offset(anchor_list[i][lvl],
+ anchor_strides[lvl])
+
+ # offset = ctr_offset + shape_offset
+ offset_x = s_offset_x + c_offset_x[:, None]
+ offset_y = s_offset_y + c_offset_y[:, None]
+
+                # offset order (y0, x0, y1, x1, ..., y8, x8)
+ offset = torch.stack([offset_y, offset_x], dim=-1)
+ offset = offset.reshape(offset.size(0), -1) # [NA, 2*ks**2]
+ mlvl_offset.append(offset)
+ offset_list.append(torch.cat(mlvl_offset)) # [totalNA, 2*ks**2]
+ offset_list = images_to_levels(offset_list, num_level_anchors)
+ return offset_list
+
+ def loss_single(self, cls_score, bbox_pred, anchors, labels, label_weights,
+ bbox_targets, bbox_weights, num_total_samples):
+ """Loss function on single scale."""
+ # classification loss
+ if self.with_cls:
+ labels = labels.reshape(-1)
+ label_weights = label_weights.reshape(-1)
+ cls_score = cls_score.permute(0, 2, 3,
+ 1).reshape(-1, self.cls_out_channels)
+ loss_cls = self.loss_cls(
+ cls_score, labels, label_weights, avg_factor=num_total_samples)
+ # regression loss
+ bbox_targets = bbox_targets.reshape(-1, 4)
+ bbox_weights = bbox_weights.reshape(-1, 4)
+ bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4)
+ if self.reg_decoded_bbox:
+ # When the regression loss (e.g. `IouLoss`, `GIouLoss`)
+ # is applied directly on the decoded bounding boxes, it
+ # decodes the already encoded coordinates to absolute format.
+ anchors = anchors.reshape(-1, 4)
+ bbox_pred = self.bbox_coder.decode(anchors, bbox_pred)
+ loss_reg = self.loss_bbox(
+ bbox_pred,
+ bbox_targets,
+ bbox_weights,
+ avg_factor=num_total_samples)
+ if self.with_cls:
+ return loss_cls, loss_reg
+ return None, loss_reg
+
+ def loss(self,
+ anchor_list,
+ valid_flag_list,
+ cls_scores,
+ bbox_preds,
+ gt_bboxes,
+ img_metas,
+ gt_bboxes_ignore=None):
+ """Compute losses of the head.
+
+ Args:
+ anchor_list (list[list]): Multi level anchors of each image.
+ cls_scores (list[Tensor]): Box scores for each scale level
+ Has shape (N, num_anchors * num_classes, H, W)
+ bbox_preds (list[Tensor]): Box energies / deltas for each scale
+ level with shape (N, num_anchors * 4, H, W)
+ gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
+ shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
+ img_metas (list[dict]): Meta information of each image, e.g.,
+ image size, scaling factor, etc.
+ gt_bboxes_ignore (None | list[Tensor]): specify which bounding
+ boxes can be ignored when computing the loss. Default: None
+
+ Returns:
+ dict[str, Tensor]: A dictionary of loss components.
+ """
+ featmap_sizes = [featmap.size()[-2:] for featmap in bbox_preds]
+ label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1
+ cls_reg_targets = self.get_targets(
+ anchor_list,
+ valid_flag_list,
+ gt_bboxes,
+ img_metas,
+ featmap_sizes,
+ gt_bboxes_ignore=gt_bboxes_ignore,
+ label_channels=label_channels)
+ if cls_reg_targets is None:
+ return None
+ (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,
+ num_total_pos, num_total_neg) = cls_reg_targets
+ if self.sampling:
+ num_total_samples = num_total_pos + num_total_neg
+ else:
+ # 200 is hard-coded average factor,
+ # which follows guided anchoring.
+ num_total_samples = sum([label.numel()
+ for label in labels_list]) / 200.0
+
+ # change per image, per level anchor_list to per_level, per_image
+ mlvl_anchor_list = list(zip(*anchor_list))
+ # concat mlvl_anchor_list
+ mlvl_anchor_list = [
+ torch.cat(anchors, dim=0) for anchors in mlvl_anchor_list
+ ]
+
+ losses = multi_apply(
+ self.loss_single,
+ cls_scores,
+ bbox_preds,
+ mlvl_anchor_list,
+ labels_list,
+ label_weights_list,
+ bbox_targets_list,
+ bbox_weights_list,
+ num_total_samples=num_total_samples)
+ if self.with_cls:
+ return dict(loss_rpn_cls=losses[0], loss_rpn_reg=losses[1])
+ return dict(loss_rpn_reg=losses[1])
+
+ def get_bboxes(self,
+ anchor_list,
+ cls_scores,
+ bbox_preds,
+ img_metas,
+ cfg,
+ rescale=False):
+ """Get proposal predict."""
+ assert len(cls_scores) == len(bbox_preds)
+ num_levels = len(cls_scores)
+
+ result_list = []
+ for img_id in range(len(img_metas)):
+ cls_score_list = [
+ cls_scores[i][img_id].detach() for i in range(num_levels)
+ ]
+ bbox_pred_list = [
+ bbox_preds[i][img_id].detach() for i in range(num_levels)
+ ]
+ img_shape = img_metas[img_id]['img_shape']
+ scale_factor = img_metas[img_id]['scale_factor']
+ proposals = self._get_bboxes_single(cls_score_list, bbox_pred_list,
+ anchor_list[img_id], img_shape,
+ scale_factor, cfg, rescale)
+ result_list.append(proposals)
+ return result_list
+
+ def refine_bboxes(self, anchor_list, bbox_preds, img_metas):
+ """Refine bboxes through stages."""
+ num_levels = len(bbox_preds)
+ new_anchor_list = []
+ for img_id in range(len(img_metas)):
+ mlvl_anchors = []
+ for i in range(num_levels):
+ bbox_pred = bbox_preds[i][img_id].detach()
+ bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4)
+ img_shape = img_metas[img_id]['img_shape']
+ bboxes = self.bbox_coder.decode(anchor_list[img_id][i],
+ bbox_pred, img_shape)
+ mlvl_anchors.append(bboxes)
+ new_anchor_list.append(mlvl_anchors)
+ return new_anchor_list
+
+ # TODO: temporary plan
+ def _get_bboxes_single(self,
+ cls_scores,
+ bbox_preds,
+ mlvl_anchors,
+ img_shape,
+ scale_factor,
+ cfg,
+ rescale=False):
+ """Transform outputs for a single batch item into bbox predictions.
+
+ Args:
+ cls_scores (list[Tensor]): Box scores for each scale level
+ Has shape (num_anchors * num_classes, H, W).
+ bbox_preds (list[Tensor]): Box energies / deltas for each scale
+ level with shape (num_anchors * 4, H, W).
+ mlvl_anchors (list[Tensor]): Box reference for each scale level
+ with shape (num_total_anchors, 4).
+ img_shape (tuple[int]): Shape of the input image,
+ (height, width, 3).
+ scale_factor (ndarray): Scale factor of the image arange as
+ (w_scale, h_scale, w_scale, h_scale).
+ cfg (mmcv.Config): Test / postprocessing configuration,
+ if None, test_cfg would be used.
+ rescale (bool): If True, return boxes in original image space.
+
+ Returns:
+ Tensor: Labeled boxes have the shape of (n,5), where the
+ first 4 columns are bounding box positions
+ (tl_x, tl_y, br_x, br_y) and the 5-th column is a score
+ between 0 and 1.
+ """
+ cfg = self.test_cfg if cfg is None else cfg
+ cfg = copy.deepcopy(cfg)
+ # bboxes from different level should be independent during NMS,
+ # level_ids are used as labels for batched NMS to separate them
+ level_ids = []
+ mlvl_scores = []
+ mlvl_bbox_preds = []
+ mlvl_valid_anchors = []
+ for idx in range(len(cls_scores)):
+ rpn_cls_score = cls_scores[idx]
+ rpn_bbox_pred = bbox_preds[idx]
+ assert rpn_cls_score.size()[-2:] == rpn_bbox_pred.size()[-2:]
+ rpn_cls_score = rpn_cls_score.permute(1, 2, 0)
+ if self.use_sigmoid_cls:
+ rpn_cls_score = rpn_cls_score.reshape(-1)
+ scores = rpn_cls_score.sigmoid()
+ else:
+ rpn_cls_score = rpn_cls_score.reshape(-1, 2)
+ # We set FG labels to [0, num_class-1] and BG label to
+ # num_class in RPN head since mmdet v2.5, which is unified to
+ # be consistent with other head since mmdet v2.0. In mmdet v2.0
+ # to v2.4 we keep BG label as 0 and FG label as 1 in rpn head.
+ scores = rpn_cls_score.softmax(dim=1)[:, 0]
+ rpn_bbox_pred = rpn_bbox_pred.permute(1, 2, 0).reshape(-1, 4)
+ anchors = mlvl_anchors[idx]
+ if cfg.nms_pre > 0 and scores.shape[0] > cfg.nms_pre:
+ # sort is faster than topk
+ # _, topk_inds = scores.topk(cfg.nms_pre)
+ if torch.onnx.is_in_onnx_export():
+ # sort op will be converted to TopK in onnx
+ # and k<=3480 in TensorRT
+ _, topk_inds = scores.topk(cfg.nms_pre)
+ scores = scores[topk_inds]
+ else:
+ ranked_scores, rank_inds = scores.sort(descending=True)
+ topk_inds = rank_inds[:cfg.nms_pre]
+ scores = ranked_scores[:cfg.nms_pre]
+ rpn_bbox_pred = rpn_bbox_pred[topk_inds, :]
+ anchors = anchors[topk_inds, :]
+ mlvl_scores.append(scores)
+ mlvl_bbox_preds.append(rpn_bbox_pred)
+ mlvl_valid_anchors.append(anchors)
+ level_ids.append(
+ scores.new_full((scores.size(0), ), idx, dtype=torch.long))
+
+ scores = torch.cat(mlvl_scores)
+ anchors = torch.cat(mlvl_valid_anchors)
+ rpn_bbox_pred = torch.cat(mlvl_bbox_preds)
+ proposals = self.bbox_coder.decode(
+ anchors, rpn_bbox_pred, max_shape=img_shape)
+ ids = torch.cat(level_ids)
+
+ # Skip nonzero op while exporting to ONNX
+ if cfg.min_bbox_size > 0 and (not torch.onnx.is_in_onnx_export()):
+ w = proposals[:, 2] - proposals[:, 0]
+ h = proposals[:, 3] - proposals[:, 1]
+ valid_inds = torch.nonzero(
+ (w >= cfg.min_bbox_size)
+ & (h >= cfg.min_bbox_size),
+ as_tuple=False).squeeze()
+ if valid_inds.sum().item() != len(proposals):
+ proposals = proposals[valid_inds, :]
+ scores = scores[valid_inds]
+ ids = ids[valid_inds]
+
+ # deprecate arguments warning
+ if 'nms' not in cfg or 'max_num' in cfg or 'nms_thr' in cfg:
+ warnings.warn(
+ 'In rpn_proposal or test_cfg, '
+ 'nms_thr has been moved to a dict named nms as '
+ 'iou_threshold, max_num has been renamed as max_per_img, '
+ 'name of original arguments and the way to specify '
+ 'iou_threshold of NMS will be deprecated.')
+ if 'nms' not in cfg:
+ cfg.nms = ConfigDict(dict(type='nms', iou_threshold=cfg.nms_thr))
+ if 'max_num' in cfg:
+ if 'max_per_img' in cfg:
+ assert cfg.max_num == cfg.max_per_img, f'You ' \
+ f'set max_num and ' \
+ f'max_per_img at the same time, but get {cfg.max_num} ' \
+                    f'and {cfg.max_per_img} respectively. ' \
+ 'Please delete max_num which will be deprecated.'
+ else:
+ cfg.max_per_img = cfg.max_num
+ if 'nms_thr' in cfg:
+ assert cfg.nms.iou_threshold == cfg.nms_thr, f'You set' \
+ f' iou_threshold in nms and ' \
+ f'nms_thr at the same time, but get' \
+ f' {cfg.nms.iou_threshold} and {cfg.nms_thr}' \
+ f' respectively. Please delete the nms_thr ' \
+ f'which will be deprecated.'
+
+ dets, keep = batched_nms(proposals, scores, ids, cfg.nms)
+ return dets[:cfg.max_per_img]
+
+
+@HEADS.register_module()
+class CascadeRPNHead(BaseDenseHead):
+ """The CascadeRPNHead will predict more accurate region proposals, which is
+ required for two-stage detectors (such as Fast/Faster R-CNN). CascadeRPN
+ consists of a sequence of RPNStage to progressively improve the accuracy of
+ the detected proposals.
+
+ More details can be found in ``https://arxiv.org/abs/1909.06720``.
+
+ Args:
+ num_stages (int): number of CascadeRPN stages.
+ stages (list[dict]): list of configs to build the stages.
+ train_cfg (list[dict]): list of configs at training time each stage.
+ test_cfg (dict): config at testing time.
+ """
+
+ def __init__(self, num_stages, stages, train_cfg, test_cfg):
+ super(CascadeRPNHead, self).__init__()
+ assert num_stages == len(stages)
+ self.num_stages = num_stages
+ self.stages = nn.ModuleList()
+ for i in range(len(stages)):
+ train_cfg_i = train_cfg[i] if train_cfg is not None else None
+ stages[i].update(train_cfg=train_cfg_i)
+ stages[i].update(test_cfg=test_cfg)
+ self.stages.append(build_head(stages[i]))
+ self.train_cfg = train_cfg
+ self.test_cfg = test_cfg
+
+ def init_weights(self):
+ """Init weight of CascadeRPN."""
+ for i in range(self.num_stages):
+ self.stages[i].init_weights()
+
+ def loss(self):
+ """loss() is implemented in StageCascadeRPNHead."""
+ pass
+
+ def get_bboxes(self):
+ """get_bboxes() is implemented in StageCascadeRPNHead."""
+ pass
+
+ def forward_train(self,
+ x,
+ img_metas,
+ gt_bboxes,
+ gt_labels=None,
+ gt_bboxes_ignore=None,
+ proposal_cfg=None):
+ """Forward train function."""
+ assert gt_labels is None, 'RPN does not require gt_labels'
+
+ featmap_sizes = [featmap.size()[-2:] for featmap in x]
+ device = x[0].device
+ anchor_list, valid_flag_list = self.stages[0].get_anchors(
+ featmap_sizes, img_metas, device=device)
+
+ losses = dict()
+
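+        # Run the stages sequentially: anchors refined by stage i are used as
+        # the anchors of stage i + 1.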
+ for i in range(self.num_stages):
+ stage = self.stages[i]
+
+ if stage.adapt_cfg['type'] == 'offset':
+ offset_list = stage.anchor_offset(anchor_list,
+ stage.anchor_strides,
+ featmap_sizes)
+ else:
+ offset_list = None
+ x, cls_score, bbox_pred = stage(x, offset_list)
+ rpn_loss_inputs = (anchor_list, valid_flag_list, cls_score,
+ bbox_pred, gt_bboxes, img_metas)
+ stage_loss = stage.loss(*rpn_loss_inputs)
+ for name, value in stage_loss.items():
+ losses['s{}.{}'.format(i, name)] = value
+
+ # refine boxes
+ if i < self.num_stages - 1:
+ anchor_list = stage.refine_bboxes(anchor_list, bbox_pred,
+ img_metas)
+ if proposal_cfg is None:
+ return losses
+ else:
+ proposal_list = self.stages[-1].get_bboxes(anchor_list, cls_score,
+ bbox_pred, img_metas,
+ self.test_cfg)
+ return losses, proposal_list
+
+ def simple_test_rpn(self, x, img_metas):
+ """Simple forward test function."""
+ featmap_sizes = [featmap.size()[-2:] for featmap in x]
+ device = x[0].device
+ anchor_list, _ = self.stages[0].get_anchors(
+ featmap_sizes, img_metas, device=device)
+
+ for i in range(self.num_stages):
+ stage = self.stages[i]
+ if stage.adapt_cfg['type'] == 'offset':
+ offset_list = stage.anchor_offset(anchor_list,
+ stage.anchor_strides,
+ featmap_sizes)
+ else:
+ offset_list = None
+ x, cls_score, bbox_pred = stage(x, offset_list)
+ if i < self.num_stages - 1:
+ anchor_list = stage.refine_bboxes(anchor_list, bbox_pred,
+ img_metas)
+
+ proposal_list = self.stages[-1].get_bboxes(anchor_list, cls_score,
+ bbox_pred, img_metas,
+ self.test_cfg)
+ return proposal_list
+
+ def aug_test_rpn(self, x, img_metas):
+ """Augmented forward test function."""
+ raise NotImplementedError
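+
+
+# Illustrative sketch (added for documentation; not part of upstream mmdet):
+# forward_train() above prefixes every stage's loss keys with "s{i}." so the
+# losses of all cascade stages can live in a single dict. A tiny stand-alone
+# demonstration of that naming scheme with made-up loss values:
+def _example_stage_loss_naming():
+    losses = dict()
+    for i in range(2):
+        stage_loss = dict(loss_rpn_cls=torch.tensor(0.3),
+                          loss_rpn_reg=torch.tensor(0.1))
+        for name, value in stage_loss.items():
+            losses['s{}.{}'.format(i, name)] = value
+    # keys: 's0.loss_rpn_cls', 's0.loss_rpn_reg',
+    #       's1.loss_rpn_cls', 's1.loss_rpn_reg'
+    return losses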
diff --git a/annotator/uniformer/mmdet_null/models/dense_heads/centripetal_head.py b/annotator/uniformer/mmdet_null/models/dense_heads/centripetal_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..6728218b60539a71f6353645635f741a1ad7263d
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/dense_heads/centripetal_head.py
@@ -0,0 +1,421 @@
+import torch.nn as nn
+from mmcv.cnn import ConvModule, normal_init
+from mmcv.ops import DeformConv2d
+
+from mmdet.core import multi_apply
+from ..builder import HEADS, build_loss
+from .corner_head import CornerHead
+
+
+@HEADS.register_module()
+class CentripetalHead(CornerHead):
+ """Head of CentripetalNet: Pursuing High-quality Keypoint Pairs for Object
+ Detection.
+
+ CentripetalHead inherits from :class:`CornerHead`. It removes the
+ embedding branch and adds guiding shift and centripetal shift branches.
+    More details can be found in the `paper
+    <https://arxiv.org/abs/2003.09119>`_ .
+
+ Args:
+ num_classes (int): Number of categories excluding the background
+ category.
+ in_channels (int): Number of channels in the input feature map.
+ num_feat_levels (int): Levels of feature from the previous module. 2
+ for HourglassNet-104 and 1 for HourglassNet-52. HourglassNet-104
+ outputs the final feature and intermediate supervision feature and
+ HourglassNet-52 only outputs the final feature. Default: 2.
+ corner_emb_channels (int): Channel of embedding vector. Default: 1.
+ train_cfg (dict | None): Training config. Useless in CornerHead,
+ but we keep this variable for SingleStageDetector. Default: None.
+ test_cfg (dict | None): Testing config of CornerHead. Default: None.
+ loss_heatmap (dict | None): Config of corner heatmap loss. Default:
+ GaussianFocalLoss.
+ loss_embedding (dict | None): Config of corner embedding loss. Default:
+ AssociativeEmbeddingLoss.
+ loss_offset (dict | None): Config of corner offset loss. Default:
+ SmoothL1Loss.
+ loss_guiding_shift (dict): Config of guiding shift loss. Default:
+ SmoothL1Loss.
+ loss_centripetal_shift (dict): Config of centripetal shift loss.
+ Default: SmoothL1Loss.
+ """
+
+ def __init__(self,
+ *args,
+ centripetal_shift_channels=2,
+ guiding_shift_channels=2,
+ feat_adaption_conv_kernel=3,
+ loss_guiding_shift=dict(
+ type='SmoothL1Loss', beta=1.0, loss_weight=0.05),
+ loss_centripetal_shift=dict(
+ type='SmoothL1Loss', beta=1.0, loss_weight=1),
+ **kwargs):
+ assert centripetal_shift_channels == 2, (
+ 'CentripetalHead only support centripetal_shift_channels == 2')
+ self.centripetal_shift_channels = centripetal_shift_channels
+ assert guiding_shift_channels == 2, (
+ 'CentripetalHead only support guiding_shift_channels == 2')
+ self.guiding_shift_channels = guiding_shift_channels
+ self.feat_adaption_conv_kernel = feat_adaption_conv_kernel
+ super(CentripetalHead, self).__init__(*args, **kwargs)
+ self.loss_guiding_shift = build_loss(loss_guiding_shift)
+ self.loss_centripetal_shift = build_loss(loss_centripetal_shift)
+
+ def _init_centripetal_layers(self):
+ """Initialize centripetal layers.
+
+ Including feature adaption deform convs (feat_adaption), deform offset
+        prediction convs (dcn_offset), guiding shift (guiding_shift) and
+        centripetal shift (centripetal_shift). Each branch has two parts:
+ prefix `tl_` for top-left and `br_` for bottom-right.
+ """
+ self.tl_feat_adaption = nn.ModuleList()
+ self.br_feat_adaption = nn.ModuleList()
+ self.tl_dcn_offset = nn.ModuleList()
+ self.br_dcn_offset = nn.ModuleList()
+ self.tl_guiding_shift = nn.ModuleList()
+ self.br_guiding_shift = nn.ModuleList()
+ self.tl_centripetal_shift = nn.ModuleList()
+ self.br_centripetal_shift = nn.ModuleList()
+
+ for _ in range(self.num_feat_levels):
+ self.tl_feat_adaption.append(
+ DeformConv2d(self.in_channels, self.in_channels,
+ self.feat_adaption_conv_kernel, 1, 1))
+ self.br_feat_adaption.append(
+ DeformConv2d(self.in_channels, self.in_channels,
+ self.feat_adaption_conv_kernel, 1, 1))
+
+ self.tl_guiding_shift.append(
+ self._make_layers(
+ out_channels=self.guiding_shift_channels,
+ in_channels=self.in_channels))
+ self.br_guiding_shift.append(
+ self._make_layers(
+ out_channels=self.guiding_shift_channels,
+ in_channels=self.in_channels))
+
+ self.tl_dcn_offset.append(
+ ConvModule(
+ self.guiding_shift_channels,
+ self.feat_adaption_conv_kernel**2 *
+ self.guiding_shift_channels,
+ 1,
+ bias=False,
+ act_cfg=None))
+ self.br_dcn_offset.append(
+ ConvModule(
+ self.guiding_shift_channels,
+ self.feat_adaption_conv_kernel**2 *
+ self.guiding_shift_channels,
+ 1,
+ bias=False,
+ act_cfg=None))
+
+ self.tl_centripetal_shift.append(
+ self._make_layers(
+ out_channels=self.centripetal_shift_channels,
+ in_channels=self.in_channels))
+ self.br_centripetal_shift.append(
+ self._make_layers(
+ out_channels=self.centripetal_shift_channels,
+ in_channels=self.in_channels))
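+
+        # Illustrative note (added for documentation): with the defaults
+        # feat_adaption_conv_kernel=3 and guiding_shift_channels=2, each
+        # dcn_offset conv above predicts 3 * 3 * 2 = 18 channels, i.e. an
+        # (x, y) offset for every sampling location of the 3x3 deformable
+        # kernel, which is the offset layout DeformConv2d expects.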
+
+ def _init_layers(self):
+ """Initialize layers for CentripetalHead.
+
+ Including two parts: CornerHead layers and CentripetalHead layers
+ """
+ super()._init_layers() # using _init_layers in CornerHead
+ self._init_centripetal_layers()
+
+ def init_weights(self):
+ """Initialize weights of the head."""
+ super().init_weights()
+ for i in range(self.num_feat_levels):
+ normal_init(self.tl_feat_adaption[i], std=0.01)
+ normal_init(self.br_feat_adaption[i], std=0.01)
+ normal_init(self.tl_dcn_offset[i].conv, std=0.1)
+ normal_init(self.br_dcn_offset[i].conv, std=0.1)
+ _ = [x.conv.reset_parameters() for x in self.tl_guiding_shift[i]]
+ _ = [x.conv.reset_parameters() for x in self.br_guiding_shift[i]]
+ _ = [
+ x.conv.reset_parameters() for x in self.tl_centripetal_shift[i]
+ ]
+ _ = [
+ x.conv.reset_parameters() for x in self.br_centripetal_shift[i]
+ ]
+
+ def forward_single(self, x, lvl_ind):
+ """Forward feature of a single level.
+
+ Args:
+ x (Tensor): Feature of a single level.
+ lvl_ind (int): Level index of current feature.
+
+ Returns:
+ tuple[Tensor]: A tuple of CentripetalHead's output for current
+ feature level. Containing the following Tensors:
+
+ - tl_heat (Tensor): Predicted top-left corner heatmap.
+ - br_heat (Tensor): Predicted bottom-right corner heatmap.
+ - tl_off (Tensor): Predicted top-left offset heatmap.
+ - br_off (Tensor): Predicted bottom-right offset heatmap.
+ - tl_guiding_shift (Tensor): Predicted top-left guiding shift
+ heatmap.
+ - br_guiding_shift (Tensor): Predicted bottom-right guiding
+ shift heatmap.
+ - tl_centripetal_shift (Tensor): Predicted top-left centripetal
+ shift heatmap.
+ - br_centripetal_shift (Tensor): Predicted bottom-right
+ centripetal shift heatmap.
+ """
+        tl_heat, br_heat, _, _, tl_off, br_off, tl_pool, br_pool = \
+            super().forward_single(x, lvl_ind, return_pool=True)
+
+ tl_guiding_shift = self.tl_guiding_shift[lvl_ind](tl_pool)
+ br_guiding_shift = self.br_guiding_shift[lvl_ind](br_pool)
+
+ tl_dcn_offset = self.tl_dcn_offset[lvl_ind](tl_guiding_shift.detach())
+ br_dcn_offset = self.br_dcn_offset[lvl_ind](br_guiding_shift.detach())
+
+ tl_feat_adaption = self.tl_feat_adaption[lvl_ind](tl_pool,
+ tl_dcn_offset)
+ br_feat_adaption = self.br_feat_adaption[lvl_ind](br_pool,
+ br_dcn_offset)
+
+ tl_centripetal_shift = self.tl_centripetal_shift[lvl_ind](
+ tl_feat_adaption)
+ br_centripetal_shift = self.br_centripetal_shift[lvl_ind](
+ br_feat_adaption)
+
+ result_list = [
+ tl_heat, br_heat, tl_off, br_off, tl_guiding_shift,
+ br_guiding_shift, tl_centripetal_shift, br_centripetal_shift
+ ]
+ return result_list
+
+ def loss(self,
+ tl_heats,
+ br_heats,
+ tl_offs,
+ br_offs,
+ tl_guiding_shifts,
+ br_guiding_shifts,
+ tl_centripetal_shifts,
+ br_centripetal_shifts,
+ gt_bboxes,
+ gt_labels,
+ img_metas,
+ gt_bboxes_ignore=None):
+ """Compute losses of the head.
+
+ Args:
+ tl_heats (list[Tensor]): Top-left corner heatmaps for each level
+ with shape (N, num_classes, H, W).
+ br_heats (list[Tensor]): Bottom-right corner heatmaps for each
+ level with shape (N, num_classes, H, W).
+ tl_offs (list[Tensor]): Top-left corner offsets for each level
+ with shape (N, corner_offset_channels, H, W).
+ br_offs (list[Tensor]): Bottom-right corner offsets for each level
+ with shape (N, corner_offset_channels, H, W).
+ tl_guiding_shifts (list[Tensor]): Top-left guiding shifts for each
+ level with shape (N, guiding_shift_channels, H, W).
+ br_guiding_shifts (list[Tensor]): Bottom-right guiding shifts for
+ each level with shape (N, guiding_shift_channels, H, W).
+ tl_centripetal_shifts (list[Tensor]): Top-left centripetal shifts
+ for each level with shape (N, centripetal_shift_channels, H,
+ W).
+ br_centripetal_shifts (list[Tensor]): Bottom-right centripetal
+ shifts for each level with shape (N,
+ centripetal_shift_channels, H, W).
+ gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
+ shape (num_gts, 4) in [left, top, right, bottom] format.
+ gt_labels (list[Tensor]): Class indices corresponding to each box.
+ img_metas (list[dict]): Meta information of each image, e.g.,
+ image size, scaling factor, etc.
+ gt_bboxes_ignore (list[Tensor] | None): Specify which bounding
+ boxes can be ignored when computing the loss.
+
+ Returns:
+ dict[str, Tensor]: A dictionary of loss components. Containing the
+ following losses:
+
+ - det_loss (list[Tensor]): Corner keypoint losses of all
+ feature levels.
+ - off_loss (list[Tensor]): Corner offset losses of all feature
+ levels.
+ - guiding_loss (list[Tensor]): Guiding shift losses of all
+ feature levels.
+ - centripetal_loss (list[Tensor]): Centripetal shift losses of
+ all feature levels.
+ """
+ targets = self.get_targets(
+ gt_bboxes,
+ gt_labels,
+ tl_heats[-1].shape,
+ img_metas[0]['pad_shape'],
+ with_corner_emb=self.with_corner_emb,
+ with_guiding_shift=True,
+ with_centripetal_shift=True)
+ mlvl_targets = [targets for _ in range(self.num_feat_levels)]
+        det_losses, off_losses, guiding_losses, centripetal_losses = \
+            multi_apply(self.loss_single, tl_heats, br_heats, tl_offs,
+                        br_offs, tl_guiding_shifts, br_guiding_shifts,
+                        tl_centripetal_shifts, br_centripetal_shifts,
+                        mlvl_targets)
+ loss_dict = dict(
+ det_loss=det_losses,
+ off_loss=off_losses,
+ guiding_loss=guiding_losses,
+ centripetal_loss=centripetal_losses)
+ return loss_dict
+
+ def loss_single(self, tl_hmp, br_hmp, tl_off, br_off, tl_guiding_shift,
+ br_guiding_shift, tl_centripetal_shift,
+ br_centripetal_shift, targets):
+ """Compute losses for single level.
+
+ Args:
+ tl_hmp (Tensor): Top-left corner heatmap for current level with
+ shape (N, num_classes, H, W).
+ br_hmp (Tensor): Bottom-right corner heatmap for current level with
+ shape (N, num_classes, H, W).
+ tl_off (Tensor): Top-left corner offset for current level with
+ shape (N, corner_offset_channels, H, W).
+ br_off (Tensor): Bottom-right corner offset for current level with
+ shape (N, corner_offset_channels, H, W).
+ tl_guiding_shift (Tensor): Top-left guiding shift for current level
+ with shape (N, guiding_shift_channels, H, W).
+ br_guiding_shift (Tensor): Bottom-right guiding shift for current
+ level with shape (N, guiding_shift_channels, H, W).
+ tl_centripetal_shift (Tensor): Top-left centripetal shift for
+ current level with shape (N, centripetal_shift_channels, H, W).
+ br_centripetal_shift (Tensor): Bottom-right centripetal shift for
+ current level with shape (N, centripetal_shift_channels, H, W).
+ targets (dict): Corner target generated by `get_targets`.
+
+ Returns:
+            tuple[torch.Tensor]: Losses of the head's different branches
+ containing the following losses:
+
+ - det_loss (Tensor): Corner keypoint loss.
+ - off_loss (Tensor): Corner offset loss.
+ - guiding_loss (Tensor): Guiding shift loss.
+ - centripetal_loss (Tensor): Centripetal shift loss.
+ """
+ targets['corner_embedding'] = None
+
+ det_loss, _, _, off_loss = super().loss_single(tl_hmp, br_hmp, None,
+ None, tl_off, br_off,
+ targets)
+
+ gt_tl_guiding_shift = targets['topleft_guiding_shift']
+ gt_br_guiding_shift = targets['bottomright_guiding_shift']
+ gt_tl_centripetal_shift = targets['topleft_centripetal_shift']
+ gt_br_centripetal_shift = targets['bottomright_centripetal_shift']
+
+ gt_tl_heatmap = targets['topleft_heatmap']
+ gt_br_heatmap = targets['bottomright_heatmap']
+        # The shift losses are only computed at real corner positions, whose
+        # value is 1 in the ground-truth heatmap. The mask is class-agnostic
+        # and has shape batch * 1 * height * width.
+ tl_mask = gt_tl_heatmap.eq(1).sum(1).gt(0).unsqueeze(1).type_as(
+ gt_tl_heatmap)
+ br_mask = gt_br_heatmap.eq(1).sum(1).gt(0).unsqueeze(1).type_as(
+ gt_br_heatmap)
+
+ # Guiding shift loss
+ tl_guiding_loss = self.loss_guiding_shift(
+ tl_guiding_shift,
+ gt_tl_guiding_shift,
+ tl_mask,
+ avg_factor=tl_mask.sum())
+ br_guiding_loss = self.loss_guiding_shift(
+ br_guiding_shift,
+ gt_br_guiding_shift,
+ br_mask,
+ avg_factor=br_mask.sum())
+ guiding_loss = (tl_guiding_loss + br_guiding_loss) / 2.0
+ # Centripetal shift loss
+ tl_centripetal_loss = self.loss_centripetal_shift(
+ tl_centripetal_shift,
+ gt_tl_centripetal_shift,
+ tl_mask,
+ avg_factor=tl_mask.sum())
+ br_centripetal_loss = self.loss_centripetal_shift(
+ br_centripetal_shift,
+ gt_br_centripetal_shift,
+ br_mask,
+ avg_factor=br_mask.sum())
+ centripetal_loss = (tl_centripetal_loss + br_centripetal_loss) / 2.0
+
+ return det_loss, off_loss, guiding_loss, centripetal_loss
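+
+    # Illustrative example added for documentation; not part of upstream mmdet.
+    @staticmethod
+    def _example_corner_mask():
+        """Illustrative sketch; not used by the detector.
+
+        Shows how the class-agnostic corner mask above is built: a position
+        is kept only if some class channel of the ground-truth heatmap
+        equals 1 there (made-up tensor sizes).
+        """
+        import torch
+        gt_heatmap = torch.zeros(1, 3, 4, 4)  # (N, num_classes, H, W)
+        gt_heatmap[0, 2, 1, 1] = 1.0          # one real corner of class 2
+        mask = gt_heatmap.eq(1).sum(1).gt(0).unsqueeze(1).type_as(gt_heatmap)
+        assert mask.shape == (1, 1, 4, 4)
+        assert mask[0, 0, 1, 1] == 1
+        return mask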
+
+ def get_bboxes(self,
+ tl_heats,
+ br_heats,
+ tl_offs,
+ br_offs,
+ tl_guiding_shifts,
+ br_guiding_shifts,
+ tl_centripetal_shifts,
+ br_centripetal_shifts,
+ img_metas,
+ rescale=False,
+ with_nms=True):
+ """Transform network output for a batch into bbox predictions.
+
+ Args:
+ tl_heats (list[Tensor]): Top-left corner heatmaps for each level
+ with shape (N, num_classes, H, W).
+ br_heats (list[Tensor]): Bottom-right corner heatmaps for each
+ level with shape (N, num_classes, H, W).
+ tl_offs (list[Tensor]): Top-left corner offsets for each level
+ with shape (N, corner_offset_channels, H, W).
+ br_offs (list[Tensor]): Bottom-right corner offsets for each level
+ with shape (N, corner_offset_channels, H, W).
+ tl_guiding_shifts (list[Tensor]): Top-left guiding shifts for each
+ level with shape (N, guiding_shift_channels, H, W). Useless in
+ this function, we keep this arg because it's the raw output
+ from CentripetalHead.
+ br_guiding_shifts (list[Tensor]): Bottom-right guiding shifts for
+ each level with shape (N, guiding_shift_channels, H, W).
+ Useless in this function, we keep this arg because it's the
+ raw output from CentripetalHead.
+ tl_centripetal_shifts (list[Tensor]): Top-left centripetal shifts
+ for each level with shape (N, centripetal_shift_channels, H,
+ W).
+ br_centripetal_shifts (list[Tensor]): Bottom-right centripetal
+ shifts for each level with shape (N,
+ centripetal_shift_channels, H, W).
+ img_metas (list[dict]): Meta information of each image, e.g.,
+ image size, scaling factor, etc.
+ rescale (bool): If True, return boxes in original image space.
+ Default: False.
+ with_nms (bool): If True, do nms before return boxes.
+ Default: True.
+ """
+ assert tl_heats[-1].shape[0] == br_heats[-1].shape[0] == len(img_metas)
+ result_list = []
+ for img_id in range(len(img_metas)):
+ result_list.append(
+ self._get_bboxes_single(
+ tl_heats[-1][img_id:img_id + 1, :],
+ br_heats[-1][img_id:img_id + 1, :],
+ tl_offs[-1][img_id:img_id + 1, :],
+ br_offs[-1][img_id:img_id + 1, :],
+ img_metas[img_id],
+ tl_emb=None,
+ br_emb=None,
+ tl_centripetal_shift=tl_centripetal_shifts[-1][
+ img_id:img_id + 1, :],
+ br_centripetal_shift=br_centripetal_shifts[-1][
+ img_id:img_id + 1, :],
+ rescale=rescale,
+ with_nms=with_nms))
+
+ return result_list
diff --git a/annotator/uniformer/mmdet_null/models/dense_heads/corner_head.py b/annotator/uniformer/mmdet_null/models/dense_heads/corner_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..50cdb49a29f2ced1a31a50e654a3bdc14f5f5004
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/dense_heads/corner_head.py
@@ -0,0 +1,1074 @@
+from logging import warning
+from math import ceil, log
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from mmcv.cnn import ConvModule, bias_init_with_prob
+from mmcv.ops import CornerPool, batched_nms
+
+from mmdet.core import multi_apply
+from ..builder import HEADS, build_loss
+from ..utils import gaussian_radius, gen_gaussian_target
+from .base_dense_head import BaseDenseHead
+
+
+class BiCornerPool(nn.Module):
+ """Bidirectional Corner Pooling Module (TopLeft, BottomRight, etc.)
+
+ Args:
+ in_channels (int): Input channels of module.
+ out_channels (int): Output channels of module.
+ feat_channels (int): Feature channels of module.
+ directions (list[str]): Directions of two CornerPools.
+ norm_cfg (dict): Dictionary to construct and config norm layer.
+ """
+
+ def __init__(self,
+ in_channels,
+ directions,
+ feat_channels=128,
+ out_channels=128,
+ norm_cfg=dict(type='BN', requires_grad=True)):
+ super(BiCornerPool, self).__init__()
+ self.direction1_conv = ConvModule(
+ in_channels, feat_channels, 3, padding=1, norm_cfg=norm_cfg)
+ self.direction2_conv = ConvModule(
+ in_channels, feat_channels, 3, padding=1, norm_cfg=norm_cfg)
+
+ self.aftpool_conv = ConvModule(
+ feat_channels,
+ out_channels,
+ 3,
+ padding=1,
+ norm_cfg=norm_cfg,
+ act_cfg=None)
+
+ self.conv1 = ConvModule(
+ in_channels, out_channels, 1, norm_cfg=norm_cfg, act_cfg=None)
+ self.conv2 = ConvModule(
+ in_channels, out_channels, 3, padding=1, norm_cfg=norm_cfg)
+
+ self.direction1_pool = CornerPool(directions[0])
+ self.direction2_pool = CornerPool(directions[1])
+ self.relu = nn.ReLU(inplace=True)
+
+ def forward(self, x):
+ """Forward features from the upstream network.
+
+ Args:
+ x (tensor): Input feature of BiCornerPool.
+
+ Returns:
+ conv2 (tensor): Output feature of BiCornerPool.
+ """
+ direction1_conv = self.direction1_conv(x)
+ direction2_conv = self.direction2_conv(x)
+ direction1_feat = self.direction1_pool(direction1_conv)
+ direction2_feat = self.direction2_pool(direction2_conv)
+ aftpool_conv = self.aftpool_conv(direction1_feat + direction2_feat)
+ conv1 = self.conv1(x)
+ relu = self.relu(aftpool_conv + conv1)
+ conv2 = self.conv2(relu)
+ return conv2
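+
+    # Illustrative note (added for documentation): CornerPool('left') takes,
+    # at every position, the maximum over all features to its right, so it
+    # can be emulated with a reversed cumulative max along the width axis:
+    #
+    #   x = torch.arange(4.).view(1, 1, 1, 4)          # [[0, 1, 2, 3]]
+    #   left = x.flip(-1).cummax(-1).values.flip(-1)   # [[3, 3, 3, 3]]
+    #
+    # Combining a 'top' and a 'left' pool (the ['top', 'left'] branch above)
+    # lets a top-left corner location aggregate evidence from the whole
+    # object extent below and to the right of it.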
+
+
+@HEADS.register_module()
+class CornerHead(BaseDenseHead):
+ """Head of CornerNet: Detecting Objects as Paired Keypoints.
+
+    Code is modified from the `official github repo
+    <https://github.com/princeton-vl/CornerNet>`_ .
+
+    More details can be found in the `paper
+    <https://arxiv.org/abs/1808.01244>`_ .
+
+ Args:
+ num_classes (int): Number of categories excluding the background
+ category.
+ in_channels (int): Number of channels in the input feature map.
+ num_feat_levels (int): Levels of feature from the previous module. 2
+ for HourglassNet-104 and 1 for HourglassNet-52. Because
+ HourglassNet-104 outputs the final feature and intermediate
+ supervision feature and HourglassNet-52 only outputs the final
+ feature. Default: 2.
+ corner_emb_channels (int): Channel of embedding vector. Default: 1.
+ train_cfg (dict | None): Training config. Useless in CornerHead,
+ but we keep this variable for SingleStageDetector. Default: None.
+ test_cfg (dict | None): Testing config of CornerHead. Default: None.
+ loss_heatmap (dict | None): Config of corner heatmap loss. Default:
+ GaussianFocalLoss.
+ loss_embedding (dict | None): Config of corner embedding loss. Default:
+ AssociativeEmbeddingLoss.
+ loss_offset (dict | None): Config of corner offset loss. Default:
+ SmoothL1Loss.
+ """
+
+ def __init__(self,
+ num_classes,
+ in_channels,
+ num_feat_levels=2,
+ corner_emb_channels=1,
+ train_cfg=None,
+ test_cfg=None,
+ loss_heatmap=dict(
+ type='GaussianFocalLoss',
+ alpha=2.0,
+ gamma=4.0,
+ loss_weight=1),
+ loss_embedding=dict(
+ type='AssociativeEmbeddingLoss',
+ pull_weight=0.25,
+ push_weight=0.25),
+ loss_offset=dict(
+ type='SmoothL1Loss', beta=1.0, loss_weight=1)):
+ super(CornerHead, self).__init__()
+ self.num_classes = num_classes
+ self.in_channels = in_channels
+ self.corner_emb_channels = corner_emb_channels
+ self.with_corner_emb = self.corner_emb_channels > 0
+ self.corner_offset_channels = 2
+ self.num_feat_levels = num_feat_levels
+ self.loss_heatmap = build_loss(
+ loss_heatmap) if loss_heatmap is not None else None
+ self.loss_embedding = build_loss(
+ loss_embedding) if loss_embedding is not None else None
+ self.loss_offset = build_loss(
+ loss_offset) if loss_offset is not None else None
+ self.train_cfg = train_cfg
+ self.test_cfg = test_cfg
+
+ self._init_layers()
+
+ def _make_layers(self, out_channels, in_channels=256, feat_channels=256):
+ """Initialize conv sequential for CornerHead."""
+ return nn.Sequential(
+ ConvModule(in_channels, feat_channels, 3, padding=1),
+ ConvModule(
+ feat_channels, out_channels, 1, norm_cfg=None, act_cfg=None))
+
+ def _init_corner_kpt_layers(self):
+ """Initialize corner keypoint layers.
+
+ Including corner heatmap branch and corner offset branch. Each branch
+ has two parts: prefix `tl_` for top-left and `br_` for bottom-right.
+ """
+ self.tl_pool, self.br_pool = nn.ModuleList(), nn.ModuleList()
+ self.tl_heat, self.br_heat = nn.ModuleList(), nn.ModuleList()
+ self.tl_off, self.br_off = nn.ModuleList(), nn.ModuleList()
+
+ for _ in range(self.num_feat_levels):
+ self.tl_pool.append(
+ BiCornerPool(
+ self.in_channels, ['top', 'left'],
+ out_channels=self.in_channels))
+ self.br_pool.append(
+ BiCornerPool(
+ self.in_channels, ['bottom', 'right'],
+ out_channels=self.in_channels))
+
+ self.tl_heat.append(
+ self._make_layers(
+ out_channels=self.num_classes,
+ in_channels=self.in_channels))
+ self.br_heat.append(
+ self._make_layers(
+ out_channels=self.num_classes,
+ in_channels=self.in_channels))
+
+ self.tl_off.append(
+ self._make_layers(
+ out_channels=self.corner_offset_channels,
+ in_channels=self.in_channels))
+ self.br_off.append(
+ self._make_layers(
+ out_channels=self.corner_offset_channels,
+ in_channels=self.in_channels))
+
+ def _init_corner_emb_layers(self):
+ """Initialize corner embedding layers.
+
+ Only include corner embedding branch with two parts: prefix `tl_` for
+ top-left and `br_` for bottom-right.
+ """
+ self.tl_emb, self.br_emb = nn.ModuleList(), nn.ModuleList()
+
+ for _ in range(self.num_feat_levels):
+ self.tl_emb.append(
+ self._make_layers(
+ out_channels=self.corner_emb_channels,
+ in_channels=self.in_channels))
+ self.br_emb.append(
+ self._make_layers(
+ out_channels=self.corner_emb_channels,
+ in_channels=self.in_channels))
+
+ def _init_layers(self):
+ """Initialize layers for CornerHead.
+
+ Including two parts: corner keypoint layers and corner embedding layers
+ """
+ self._init_corner_kpt_layers()
+ if self.with_corner_emb:
+ self._init_corner_emb_layers()
+
+ def init_weights(self):
+ """Initialize weights of the head."""
+ bias_init = bias_init_with_prob(0.1)
+ for i in range(self.num_feat_levels):
+ # The initialization of parameters are different between nn.Conv2d
+ # and ConvModule. Our experiments show that using the original
+ # initialization of nn.Conv2d increases the final mAP by about 0.2%
+ self.tl_heat[i][-1].conv.reset_parameters()
+ self.tl_heat[i][-1].conv.bias.data.fill_(bias_init)
+ self.br_heat[i][-1].conv.reset_parameters()
+ self.br_heat[i][-1].conv.bias.data.fill_(bias_init)
+ self.tl_off[i][-1].conv.reset_parameters()
+ self.br_off[i][-1].conv.reset_parameters()
+ if self.with_corner_emb:
+ self.tl_emb[i][-1].conv.reset_parameters()
+ self.br_emb[i][-1].conv.reset_parameters()
+
+ def forward(self, feats):
+ """Forward features from the upstream network.
+
+ Args:
+ feats (tuple[Tensor]): Features from the upstream network, each is
+ a 4D-tensor.
+
+ Returns:
+ tuple: Usually a tuple of corner heatmaps, offset heatmaps and
+ embedding heatmaps.
+ - tl_heats (list[Tensor]): Top-left corner heatmaps for all
+ levels, each is a 4D-tensor, the channels number is
+ num_classes.
+ - br_heats (list[Tensor]): Bottom-right corner heatmaps for all
+ levels, each is a 4D-tensor, the channels number is
+ num_classes.
+ - tl_embs (list[Tensor] | list[None]): Top-left embedding
+ heatmaps for all levels, each is a 4D-tensor or None.
+ If not None, the channels number is corner_emb_channels.
+ - br_embs (list[Tensor] | list[None]): Bottom-right embedding
+ heatmaps for all levels, each is a 4D-tensor or None.
+ If not None, the channels number is corner_emb_channels.
+ - tl_offs (list[Tensor]): Top-left offset heatmaps for all
+ levels, each is a 4D-tensor. The channels number is
+ corner_offset_channels.
+ - br_offs (list[Tensor]): Bottom-right offset heatmaps for all
+ levels, each is a 4D-tensor. The channels number is
+ corner_offset_channels.
+ """
+ lvl_ind = list(range(self.num_feat_levels))
+ return multi_apply(self.forward_single, feats, lvl_ind)
+
+ def forward_single(self, x, lvl_ind, return_pool=False):
+ """Forward feature of a single level.
+
+ Args:
+ x (Tensor): Feature of a single level.
+ lvl_ind (int): Level index of current feature.
+ return_pool (bool): Return corner pool feature or not.
+
+ Returns:
+ tuple[Tensor]: A tuple of CornerHead's output for current feature
+ level. Containing the following Tensors:
+
+ - tl_heat (Tensor): Predicted top-left corner heatmap.
+ - br_heat (Tensor): Predicted bottom-right corner heatmap.
+ - tl_emb (Tensor | None): Predicted top-left embedding heatmap.
+ None for `self.with_corner_emb == False`.
+ - br_emb (Tensor | None): Predicted bottom-right embedding
+ heatmap. None for `self.with_corner_emb == False`.
+ - tl_off (Tensor): Predicted top-left offset heatmap.
+ - br_off (Tensor): Predicted bottom-right offset heatmap.
+                - tl_pool (Tensor): Top-left corner pool feature. Only
+                  returned when ``return_pool`` is True.
+                - br_pool (Tensor): Bottom-right corner pool feature. Only
+                  returned when ``return_pool`` is True.
+ """
+ tl_pool = self.tl_pool[lvl_ind](x)
+ tl_heat = self.tl_heat[lvl_ind](tl_pool)
+ br_pool = self.br_pool[lvl_ind](x)
+ br_heat = self.br_heat[lvl_ind](br_pool)
+
+ tl_emb, br_emb = None, None
+ if self.with_corner_emb:
+ tl_emb = self.tl_emb[lvl_ind](tl_pool)
+ br_emb = self.br_emb[lvl_ind](br_pool)
+
+ tl_off = self.tl_off[lvl_ind](tl_pool)
+ br_off = self.br_off[lvl_ind](br_pool)
+
+ result_list = [tl_heat, br_heat, tl_emb, br_emb, tl_off, br_off]
+ if return_pool:
+ result_list.append(tl_pool)
+ result_list.append(br_pool)
+
+ return result_list
+
+ def get_targets(self,
+ gt_bboxes,
+ gt_labels,
+ feat_shape,
+ img_shape,
+ with_corner_emb=False,
+ with_guiding_shift=False,
+ with_centripetal_shift=False):
+ """Generate corner targets.
+
+ Including corner heatmap, corner offset.
+
+ Optional: corner embedding, corner guiding shift, centripetal shift.
+
+ For CornerNet, we generate corner heatmap, corner offset and corner
+ embedding from this function.
+
+ For CentripetalNet, we generate corner heatmap, corner offset, guiding
+ shift and centripetal shift from this function.
+
+ Args:
+ gt_bboxes (list[Tensor]): Ground truth bboxes of each image, each
+ has shape (num_gt, 4).
+ gt_labels (list[Tensor]): Ground truth labels of each box, each has
+ shape (num_gt,).
+ feat_shape (list[int]): Shape of output feature,
+ [batch, channel, height, width].
+ img_shape (list[int]): Shape of input image,
+ [height, width, channel].
+ with_corner_emb (bool): Generate corner embedding target or not.
+ Default: False.
+ with_guiding_shift (bool): Generate guiding shift target or not.
+ Default: False.
+ with_centripetal_shift (bool): Generate centripetal shift target or
+ not. Default: False.
+
+ Returns:
+ dict: Ground truth of corner heatmap, corner offset, corner
+ embedding, guiding shift and centripetal shift. Containing the
+ following keys:
+
+ - topleft_heatmap (Tensor): Ground truth top-left corner
+ heatmap.
+ - bottomright_heatmap (Tensor): Ground truth bottom-right
+ corner heatmap.
+ - topleft_offset (Tensor): Ground truth top-left corner offset.
+ - bottomright_offset (Tensor): Ground truth bottom-right corner
+ offset.
+                - corner_embedding (list[list[list[int]]]): Ground truth
+                  corner embedding. Only included when ``with_corner_emb``
+                  is True.
+                - topleft_guiding_shift (Tensor): Ground truth top-left
+                  corner guiding shift. Only included when
+                  ``with_guiding_shift`` is True.
+                - bottomright_guiding_shift (Tensor): Ground truth
+                  bottom-right corner guiding shift. Only included when
+                  ``with_guiding_shift`` is True.
+                - topleft_centripetal_shift (Tensor): Ground truth top-left
+                  corner centripetal shift. Only included when
+                  ``with_centripetal_shift`` is True.
+                - bottomright_centripetal_shift (Tensor): Ground truth
+                  bottom-right corner centripetal shift. Only included when
+                  ``with_centripetal_shift`` is True.
+ """
+ batch_size, _, height, width = feat_shape
+ img_h, img_w = img_shape[:2]
+
+ width_ratio = float(width / img_w)
+ height_ratio = float(height / img_h)
+
+ gt_tl_heatmap = gt_bboxes[-1].new_zeros(
+ [batch_size, self.num_classes, height, width])
+ gt_br_heatmap = gt_bboxes[-1].new_zeros(
+ [batch_size, self.num_classes, height, width])
+ gt_tl_offset = gt_bboxes[-1].new_zeros([batch_size, 2, height, width])
+ gt_br_offset = gt_bboxes[-1].new_zeros([batch_size, 2, height, width])
+
+ if with_corner_emb:
+ match = []
+
+ # Guiding shift is a kind of offset, from center to corner
+ if with_guiding_shift:
+ gt_tl_guiding_shift = gt_bboxes[-1].new_zeros(
+ [batch_size, 2, height, width])
+ gt_br_guiding_shift = gt_bboxes[-1].new_zeros(
+ [batch_size, 2, height, width])
+ # Centripetal shift is also a kind of offset, from center to corner
+ # and normalized by log.
+ if with_centripetal_shift:
+ gt_tl_centripetal_shift = gt_bboxes[-1].new_zeros(
+ [batch_size, 2, height, width])
+ gt_br_centripetal_shift = gt_bboxes[-1].new_zeros(
+ [batch_size, 2, height, width])
+
+ for batch_id in range(batch_size):
+ # Ground truth of corner embedding per image is a list of coord set
+ corner_match = []
+ for box_id in range(len(gt_labels[batch_id])):
+ left, top, right, bottom = gt_bboxes[batch_id][box_id]
+ center_x = (left + right) / 2.0
+ center_y = (top + bottom) / 2.0
+ label = gt_labels[batch_id][box_id]
+
+ # Use coords in the feature level to generate ground truth
+ scale_left = left * width_ratio
+ scale_right = right * width_ratio
+ scale_top = top * height_ratio
+ scale_bottom = bottom * height_ratio
+ scale_center_x = center_x * width_ratio
+ scale_center_y = center_y * height_ratio
+
+ # Int coords on feature map/ground truth tensor
+ left_idx = int(min(scale_left, width - 1))
+ right_idx = int(min(scale_right, width - 1))
+ top_idx = int(min(scale_top, height - 1))
+ bottom_idx = int(min(scale_bottom, height - 1))
+
+ # Generate gaussian heatmap
+ scale_box_width = ceil(scale_right - scale_left)
+ scale_box_height = ceil(scale_bottom - scale_top)
+ radius = gaussian_radius((scale_box_height, scale_box_width),
+ min_overlap=0.3)
+ radius = max(0, int(radius))
+ gt_tl_heatmap[batch_id, label] = gen_gaussian_target(
+ gt_tl_heatmap[batch_id, label], [left_idx, top_idx],
+ radius)
+ gt_br_heatmap[batch_id, label] = gen_gaussian_target(
+ gt_br_heatmap[batch_id, label], [right_idx, bottom_idx],
+ radius)
+
+ # Generate corner offset
+ left_offset = scale_left - left_idx
+ top_offset = scale_top - top_idx
+ right_offset = scale_right - right_idx
+ bottom_offset = scale_bottom - bottom_idx
+ gt_tl_offset[batch_id, 0, top_idx, left_idx] = left_offset
+ gt_tl_offset[batch_id, 1, top_idx, left_idx] = top_offset
+ gt_br_offset[batch_id, 0, bottom_idx, right_idx] = right_offset
+ gt_br_offset[batch_id, 1, bottom_idx,
+ right_idx] = bottom_offset
+
+ # Generate corner embedding
+ if with_corner_emb:
+ corner_match.append([[top_idx, left_idx],
+ [bottom_idx, right_idx]])
+ # Generate guiding shift
+ if with_guiding_shift:
+ gt_tl_guiding_shift[batch_id, 0, top_idx,
+ left_idx] = scale_center_x - left_idx
+ gt_tl_guiding_shift[batch_id, 1, top_idx,
+ left_idx] = scale_center_y - top_idx
+ gt_br_guiding_shift[batch_id, 0, bottom_idx,
+ right_idx] = right_idx - scale_center_x
+ gt_br_guiding_shift[
+ batch_id, 1, bottom_idx,
+ right_idx] = bottom_idx - scale_center_y
+ # Generate centripetal shift
+ if with_centripetal_shift:
+ gt_tl_centripetal_shift[batch_id, 0, top_idx,
+ left_idx] = log(scale_center_x -
+ scale_left)
+ gt_tl_centripetal_shift[batch_id, 1, top_idx,
+ left_idx] = log(scale_center_y -
+ scale_top)
+ gt_br_centripetal_shift[batch_id, 0, bottom_idx,
+ right_idx] = log(scale_right -
+ scale_center_x)
+ gt_br_centripetal_shift[batch_id, 1, bottom_idx,
+ right_idx] = log(scale_bottom -
+ scale_center_y)
+
+ if with_corner_emb:
+ match.append(corner_match)
+
+ target_result = dict(
+ topleft_heatmap=gt_tl_heatmap,
+ topleft_offset=gt_tl_offset,
+ bottomright_heatmap=gt_br_heatmap,
+ bottomright_offset=gt_br_offset)
+
+ if with_corner_emb:
+ target_result.update(corner_embedding=match)
+ if with_guiding_shift:
+ target_result.update(
+ topleft_guiding_shift=gt_tl_guiding_shift,
+ bottomright_guiding_shift=gt_br_guiding_shift)
+ if with_centripetal_shift:
+ target_result.update(
+ topleft_centripetal_shift=gt_tl_centripetal_shift,
+ bottomright_centripetal_shift=gt_br_centripetal_shift)
+
+ return target_result
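+
+    # Illustrative example added for documentation; not part of upstream mmdet.
+    @staticmethod
+    def _example_corner_target_coords():
+        """Illustrative sketch; not used by the detector.
+
+        Mirrors the coordinate mapping in ``get_targets`` above: a ground
+        truth corner is scaled from image space to the feature map and split
+        into an integer index plus a sub-pixel offset (made-up numbers).
+        """
+        img_w, img_h, width, height = 512, 512, 128, 128
+        left, top = 101.0, 61.0
+        width_ratio, height_ratio = width / img_w, height / img_h
+        scale_left, scale_top = left * width_ratio, top * height_ratio
+        left_idx = int(min(scale_left, width - 1))   # 25
+        top_idx = int(min(scale_top, height - 1))    # 15
+        left_offset = scale_left - left_idx          # 0.25
+        top_offset = scale_top - top_idx             # 0.25
+        return (left_idx, top_idx), (left_offset, top_offset)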
+
+ def loss(self,
+ tl_heats,
+ br_heats,
+ tl_embs,
+ br_embs,
+ tl_offs,
+ br_offs,
+ gt_bboxes,
+ gt_labels,
+ img_metas,
+ gt_bboxes_ignore=None):
+ """Compute losses of the head.
+
+ Args:
+ tl_heats (list[Tensor]): Top-left corner heatmaps for each level
+ with shape (N, num_classes, H, W).
+ br_heats (list[Tensor]): Bottom-right corner heatmaps for each
+ level with shape (N, num_classes, H, W).
+ tl_embs (list[Tensor]): Top-left corner embeddings for each level
+ with shape (N, corner_emb_channels, H, W).
+ br_embs (list[Tensor]): Bottom-right corner embeddings for each
+ level with shape (N, corner_emb_channels, H, W).
+ tl_offs (list[Tensor]): Top-left corner offsets for each level
+ with shape (N, corner_offset_channels, H, W).
+ br_offs (list[Tensor]): Bottom-right corner offsets for each level
+ with shape (N, corner_offset_channels, H, W).
+ gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
+ shape (num_gts, 4) in [left, top, right, bottom] format.
+ gt_labels (list[Tensor]): Class indices corresponding to each box.
+ img_metas (list[dict]): Meta information of each image, e.g.,
+ image size, scaling factor, etc.
+ gt_bboxes_ignore (list[Tensor] | None): Specify which bounding
+ boxes can be ignored when computing the loss.
+
+ Returns:
+ dict[str, Tensor]: A dictionary of loss components. Containing the
+ following losses:
+
+ - det_loss (list[Tensor]): Corner keypoint losses of all
+ feature levels.
+ - pull_loss (list[Tensor]): Part one of AssociativeEmbedding
+ losses of all feature levels.
+ - push_loss (list[Tensor]): Part two of AssociativeEmbedding
+ losses of all feature levels.
+ - off_loss (list[Tensor]): Corner offset losses of all feature
+ levels.
+ """
+ targets = self.get_targets(
+ gt_bboxes,
+ gt_labels,
+ tl_heats[-1].shape,
+ img_metas[0]['pad_shape'],
+ with_corner_emb=self.with_corner_emb)
+ mlvl_targets = [targets for _ in range(self.num_feat_levels)]
+ det_losses, pull_losses, push_losses, off_losses = multi_apply(
+ self.loss_single, tl_heats, br_heats, tl_embs, br_embs, tl_offs,
+ br_offs, mlvl_targets)
+ loss_dict = dict(det_loss=det_losses, off_loss=off_losses)
+ if self.with_corner_emb:
+ loss_dict.update(pull_loss=pull_losses, push_loss=push_losses)
+ return loss_dict
+
+ def loss_single(self, tl_hmp, br_hmp, tl_emb, br_emb, tl_off, br_off,
+ targets):
+ """Compute losses for single level.
+
+ Args:
+ tl_hmp (Tensor): Top-left corner heatmap for current level with
+ shape (N, num_classes, H, W).
+ br_hmp (Tensor): Bottom-right corner heatmap for current level with
+ shape (N, num_classes, H, W).
+ tl_emb (Tensor): Top-left corner embedding for current level with
+ shape (N, corner_emb_channels, H, W).
+ br_emb (Tensor): Bottom-right corner embedding for current level
+ with shape (N, corner_emb_channels, H, W).
+ tl_off (Tensor): Top-left corner offset for current level with
+ shape (N, corner_offset_channels, H, W).
+ br_off (Tensor): Bottom-right corner offset for current level with
+ shape (N, corner_offset_channels, H, W).
+ targets (dict): Corner target generated by `get_targets`.
+
+ Returns:
+            tuple[torch.Tensor]: Losses of the head's different branches
+ containing the following losses:
+
+ - det_loss (Tensor): Corner keypoint loss.
+ - pull_loss (Tensor): Part one of AssociativeEmbedding loss.
+ - push_loss (Tensor): Part two of AssociativeEmbedding loss.
+ - off_loss (Tensor): Corner offset loss.
+ """
+ gt_tl_hmp = targets['topleft_heatmap']
+ gt_br_hmp = targets['bottomright_heatmap']
+ gt_tl_off = targets['topleft_offset']
+ gt_br_off = targets['bottomright_offset']
+ gt_embedding = targets['corner_embedding']
+
+ # Detection loss
+        tl_det_loss = self.loss_heatmap(
+            tl_hmp.sigmoid(),
+            gt_tl_hmp,
+            avg_factor=max(1, gt_tl_hmp.eq(1).sum()))
+        br_det_loss = self.loss_heatmap(
+            br_hmp.sigmoid(),
+            gt_br_hmp,
+            avg_factor=max(1, gt_br_hmp.eq(1).sum()))
+ det_loss = (tl_det_loss + br_det_loss) / 2.0
+
+ # AssociativeEmbedding loss
+ if self.with_corner_emb and self.loss_embedding is not None:
+ pull_loss, push_loss = self.loss_embedding(tl_emb, br_emb,
+ gt_embedding)
+ else:
+ pull_loss, push_loss = None, None
+
+ # Offset loss
+        # The offset loss is only computed at real corner positions, whose
+        # value is 1 in the ground-truth heatmap. The mask is class-agnostic
+        # and has shape batch * 1 * height * width.
+ tl_off_mask = gt_tl_hmp.eq(1).sum(1).gt(0).unsqueeze(1).type_as(
+ gt_tl_hmp)
+ br_off_mask = gt_br_hmp.eq(1).sum(1).gt(0).unsqueeze(1).type_as(
+ gt_br_hmp)
+ tl_off_loss = self.loss_offset(
+ tl_off,
+ gt_tl_off,
+ tl_off_mask,
+ avg_factor=max(1, tl_off_mask.sum()))
+ br_off_loss = self.loss_offset(
+ br_off,
+ gt_br_off,
+ br_off_mask,
+ avg_factor=max(1, br_off_mask.sum()))
+
+ off_loss = (tl_off_loss + br_off_loss) / 2.0
+
+ return det_loss, pull_loss, push_loss, off_loss
+
+ def get_bboxes(self,
+ tl_heats,
+ br_heats,
+ tl_embs,
+ br_embs,
+ tl_offs,
+ br_offs,
+ img_metas,
+ rescale=False,
+ with_nms=True):
+ """Transform network output for a batch into bbox predictions.
+
+ Args:
+ tl_heats (list[Tensor]): Top-left corner heatmaps for each level
+ with shape (N, num_classes, H, W).
+ br_heats (list[Tensor]): Bottom-right corner heatmaps for each
+ level with shape (N, num_classes, H, W).
+ tl_embs (list[Tensor]): Top-left corner embeddings for each level
+ with shape (N, corner_emb_channels, H, W).
+ br_embs (list[Tensor]): Bottom-right corner embeddings for each
+ level with shape (N, corner_emb_channels, H, W).
+ tl_offs (list[Tensor]): Top-left corner offsets for each level
+ with shape (N, corner_offset_channels, H, W).
+ br_offs (list[Tensor]): Bottom-right corner offsets for each level
+ with shape (N, corner_offset_channels, H, W).
+ img_metas (list[dict]): Meta information of each image, e.g.,
+ image size, scaling factor, etc.
+ rescale (bool): If True, return boxes in original image space.
+ Default: False.
+ with_nms (bool): If True, do nms before return boxes.
+ Default: True.
+ """
+ assert tl_heats[-1].shape[0] == br_heats[-1].shape[0] == len(img_metas)
+ result_list = []
+ for img_id in range(len(img_metas)):
+ result_list.append(
+ self._get_bboxes_single(
+ tl_heats[-1][img_id:img_id + 1, :],
+ br_heats[-1][img_id:img_id + 1, :],
+ tl_offs[-1][img_id:img_id + 1, :],
+ br_offs[-1][img_id:img_id + 1, :],
+ img_metas[img_id],
+ tl_emb=tl_embs[-1][img_id:img_id + 1, :],
+ br_emb=br_embs[-1][img_id:img_id + 1, :],
+ rescale=rescale,
+ with_nms=with_nms))
+
+ return result_list
+
+ def _get_bboxes_single(self,
+ tl_heat,
+ br_heat,
+ tl_off,
+ br_off,
+ img_meta,
+ tl_emb=None,
+ br_emb=None,
+ tl_centripetal_shift=None,
+ br_centripetal_shift=None,
+ rescale=False,
+ with_nms=True):
+ """Transform outputs for a single batch item into bbox predictions.
+
+ Args:
+ tl_heat (Tensor): Top-left corner heatmap for current level with
+ shape (N, num_classes, H, W).
+ br_heat (Tensor): Bottom-right corner heatmap for current level
+ with shape (N, num_classes, H, W).
+ tl_off (Tensor): Top-left corner offset for current level with
+ shape (N, corner_offset_channels, H, W).
+ br_off (Tensor): Bottom-right corner offset for current level with
+ shape (N, corner_offset_channels, H, W).
+ img_meta (dict): Meta information of current image, e.g.,
+ image size, scaling factor, etc.
+ tl_emb (Tensor): Top-left corner embedding for current level with
+ shape (N, corner_emb_channels, H, W).
+ br_emb (Tensor): Bottom-right corner embedding for current level
+ with shape (N, corner_emb_channels, H, W).
+ tl_centripetal_shift: Top-left corner's centripetal shift for
+ current level with shape (N, 2, H, W).
+ br_centripetal_shift: Bottom-right corner's centripetal shift for
+ current level with shape (N, 2, H, W).
+ rescale (bool): If True, return boxes in original image space.
+ Default: False.
+ with_nms (bool): If True, do nms before return boxes.
+ Default: True.
+ """
+ if isinstance(img_meta, (list, tuple)):
+ img_meta = img_meta[0]
+
+ batch_bboxes, batch_scores, batch_clses = self.decode_heatmap(
+ tl_heat=tl_heat.sigmoid(),
+ br_heat=br_heat.sigmoid(),
+ tl_off=tl_off,
+ br_off=br_off,
+ tl_emb=tl_emb,
+ br_emb=br_emb,
+ tl_centripetal_shift=tl_centripetal_shift,
+ br_centripetal_shift=br_centripetal_shift,
+ img_meta=img_meta,
+ k=self.test_cfg.corner_topk,
+ kernel=self.test_cfg.local_maximum_kernel,
+ distance_threshold=self.test_cfg.distance_threshold)
+
+ if rescale:
+ batch_bboxes /= batch_bboxes.new_tensor(img_meta['scale_factor'])
+
+ bboxes = batch_bboxes.view([-1, 4])
+ scores = batch_scores.view([-1, 1])
+ clses = batch_clses.view([-1, 1])
+
+ idx = scores.argsort(dim=0, descending=True)
+ bboxes = bboxes[idx].view([-1, 4])
+ scores = scores[idx].view(-1)
+ clses = clses[idx].view(-1)
+
+ detections = torch.cat([bboxes, scores.unsqueeze(-1)], -1)
+ keepinds = (detections[:, -1] > -0.1)
+ detections = detections[keepinds]
+ labels = clses[keepinds]
+
+ if with_nms:
+ detections, labels = self._bboxes_nms(detections, labels,
+ self.test_cfg)
+
+ return detections, labels
+
+ def _bboxes_nms(self, bboxes, labels, cfg):
+ if labels.numel() == 0:
+ return bboxes, labels
+
+ if 'nms_cfg' in cfg:
+            warning('nms_cfg in test_cfg will be deprecated. '
+                    'Please rename it as nms')
+ if 'nms' not in cfg:
+ cfg.nms = cfg.nms_cfg
+
+ out_bboxes, keep = batched_nms(bboxes[:, :4], bboxes[:, -1], labels,
+ cfg.nms)
+ out_labels = labels[keep]
+
+ if len(out_bboxes) > 0:
+ idx = torch.argsort(out_bboxes[:, -1], descending=True)
+ idx = idx[:cfg.max_per_img]
+ out_bboxes = out_bboxes[idx]
+ out_labels = out_labels[idx]
+
+ return out_bboxes, out_labels
+
+ def _gather_feat(self, feat, ind, mask=None):
+ """Gather feature according to index.
+
+ Args:
+ feat (Tensor): Target feature map.
+ ind (Tensor): Target coord index.
+ mask (Tensor | None): Mask of featuremap. Default: None.
+
+ Returns:
+ feat (Tensor): Gathered feature.
+ """
+ dim = feat.size(2)
+ ind = ind.unsqueeze(2).repeat(1, 1, dim)
+ feat = feat.gather(1, ind)
+ if mask is not None:
+ mask = mask.unsqueeze(2).expand_as(feat)
+ feat = feat[mask]
+ feat = feat.view(-1, dim)
+ return feat
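+
+    # Illustrative example added for documentation; not part of upstream mmdet.
+    @staticmethod
+    def _example_gather_feat():
+        """Illustrative sketch; not used by the detector.
+
+        Demonstrates the gather semantics above: picking K positions out of
+        a flattened (N, H*W, C) feature map with an (N, K) index tensor.
+        """
+        feat = torch.arange(2 * 6 * 3, dtype=torch.float32).view(2, 6, 3)
+        ind = torch.tensor([[0, 5], [2, 3]])  # (N, K) = (2, 2)
+        dim = feat.size(2)
+        gathered = feat.gather(1, ind.unsqueeze(2).repeat(1, 1, dim))
+        assert gathered.shape == (2, 2, 3)
+        assert torch.equal(gathered[0, 1], feat[0, 5])
+        return gathered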
+
+ def _local_maximum(self, heat, kernel=3):
+ """Extract local maximum pixel with given kernel.
+
+ Args:
+ heat (Tensor): Target heatmap.
+ kernel (int): Kernel size of max pooling. Default: 3.
+
+ Returns:
+ heat (Tensor): A heatmap where local maximum pixels maintain its
+ own value and other positions are 0.
+ """
+ pad = (kernel - 1) // 2
+ hmax = F.max_pool2d(heat, kernel, stride=1, padding=pad)
+ keep = (hmax == heat).float()
+ return heat * keep
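+
+    # Illustrative example added for documentation; not part of upstream mmdet.
+    @staticmethod
+    def _example_local_maximum():
+        """Illustrative sketch; not used by the detector.
+
+        Shows the max-pool based peak picking above: only positions equal to
+        the maximum of their 3x3 neighbourhood keep their score.
+        """
+        heat = torch.tensor([[[[0.125, 0.875, 0.25],
+                               [0.375, 0.5, 0.75],
+                               [0.0, 0.625, 0.5]]]])
+        hmax = F.max_pool2d(heat, 3, stride=1, padding=1)
+        peaks = heat * (hmax == heat).float()
+        # only the 0.875 peak survives; every other entry becomes 0
+        assert peaks.max() == 0.875 and peaks.sum() == 0.875
+        return peaks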
+
+ def _transpose_and_gather_feat(self, feat, ind):
+ """Transpose and gather feature according to index.
+
+ Args:
+ feat (Tensor): Target feature map.
+ ind (Tensor): Target coord index.
+
+ Returns:
+ feat (Tensor): Transposed and gathered feature.
+ """
+ feat = feat.permute(0, 2, 3, 1).contiguous()
+ feat = feat.view(feat.size(0), -1, feat.size(3))
+ feat = self._gather_feat(feat, ind)
+ return feat
+
+ def _topk(self, scores, k=20):
+ """Get top k positions from heatmap.
+
+ Args:
+ scores (Tensor): Target heatmap with shape
+ [batch, num_classes, height, width].
+ k (int): Target number. Default: 20.
+
+ Returns:
+ tuple[torch.Tensor]: Scores, indexes, categories and coords of
+ topk keypoint. Containing following Tensors:
+
+ - topk_scores (Tensor): Max scores of each topk keypoint.
+ - topk_inds (Tensor): Indexes of each topk keypoint.
+ - topk_clses (Tensor): Categories of each topk keypoint.
+ - topk_ys (Tensor): Y-coord of each topk keypoint.
+ - topk_xs (Tensor): X-coord of each topk keypoint.
+ """
+ batch, _, height, width = scores.size()
+ topk_scores, topk_inds = torch.topk(scores.view(batch, -1), k)
+ topk_clses = topk_inds // (height * width)
+ topk_inds = topk_inds % (height * width)
+ topk_ys = topk_inds // width
+ topk_xs = (topk_inds % width).int().float()
+ return topk_scores, topk_inds, topk_clses, topk_ys, topk_xs
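+
+    # Illustrative example added for documentation; not part of upstream mmdet.
+    @staticmethod
+    def _example_topk_index_decode():
+        """Illustrative sketch; not used by the detector.
+
+        Shows how a flat top-k index over (num_classes, H, W) is split into
+        a class id and (y, x) feature-map coordinates, as in ``_topk``.
+        """
+        num_classes, height, width = 3, 4, 5
+        flat_ind = 1 * height * width + 2 * width + 3  # class 1, y=2, x=3
+        cls_id = flat_ind // (height * width)
+        ind = flat_ind % (height * width)
+        y, x = ind // width, ind % width
+        assert cls_id < num_classes and (cls_id, y, x) == (1, 2, 3)
+        return cls_id, y, x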
+
+ def decode_heatmap(self,
+ tl_heat,
+ br_heat,
+ tl_off,
+ br_off,
+ tl_emb=None,
+ br_emb=None,
+ tl_centripetal_shift=None,
+ br_centripetal_shift=None,
+ img_meta=None,
+ k=100,
+ kernel=3,
+ distance_threshold=0.5,
+ num_dets=1000):
+ """Transform outputs for a single batch item into raw bbox predictions.
+
+ Args:
+ tl_heat (Tensor): Top-left corner heatmap for current level with
+ shape (N, num_classes, H, W).
+ br_heat (Tensor): Bottom-right corner heatmap for current level
+ with shape (N, num_classes, H, W).
+ tl_off (Tensor): Top-left corner offset for current level with
+ shape (N, corner_offset_channels, H, W).
+ br_off (Tensor): Bottom-right corner offset for current level with
+ shape (N, corner_offset_channels, H, W).
+ tl_emb (Tensor | None): Top-left corner embedding for current
+ level with shape (N, corner_emb_channels, H, W).
+ br_emb (Tensor | None): Bottom-right corner embedding for current
+ level with shape (N, corner_emb_channels, H, W).
+ tl_centripetal_shift (Tensor | None): Top-left centripetal shift
+ for current level with shape (N, 2, H, W).
+ br_centripetal_shift (Tensor | None): Bottom-right centripetal
+ shift for current level with shape (N, 2, H, W).
+ img_meta (dict): Meta information of current image, e.g.,
+ image size, scaling factor, etc.
+ k (int): Get top k corner keypoints from heatmap.
+ kernel (int): Max pooling kernel for extract local maximum pixels.
+ distance_threshold (float): Distance threshold. Top-left and
+ bottom-right corner keypoints with feature distance less than
+ the threshold will be regarded as keypoints from same object.
+ num_dets (int): Num of raw boxes before doing nms.
+
+ Returns:
+ tuple[torch.Tensor]: Decoded output of CornerHead, containing the
+ following Tensors:
+
+ - bboxes (Tensor): Coords of each box.
+ - scores (Tensor): Scores of each box.
+ - clses (Tensor): Categories of each box.
+ """
+ with_embedding = tl_emb is not None and br_emb is not None
+ with_centripetal_shift = (
+ tl_centripetal_shift is not None
+ and br_centripetal_shift is not None)
+ assert with_embedding + with_centripetal_shift == 1
+ batch, _, height, width = tl_heat.size()
+ inp_h, inp_w, _ = img_meta['pad_shape']
+
+ # perform nms on heatmaps
+ tl_heat = self._local_maximum(tl_heat, kernel=kernel)
+ br_heat = self._local_maximum(br_heat, kernel=kernel)
+
+ tl_scores, tl_inds, tl_clses, tl_ys, tl_xs = self._topk(tl_heat, k=k)
+ br_scores, br_inds, br_clses, br_ys, br_xs = self._topk(br_heat, k=k)
+
+        # We use repeat instead of expand here because expand is a
+        # shallow-copy function and can therefore cause unexpected results at
+        # test time. Using expand decreases mAP by about 10% during testing
+        # compared to repeat.
+ tl_ys = tl_ys.view(batch, k, 1).repeat(1, 1, k)
+ tl_xs = tl_xs.view(batch, k, 1).repeat(1, 1, k)
+ br_ys = br_ys.view(batch, 1, k).repeat(1, k, 1)
+ br_xs = br_xs.view(batch, 1, k).repeat(1, k, 1)
+
+ tl_off = self._transpose_and_gather_feat(tl_off, tl_inds)
+ tl_off = tl_off.view(batch, k, 1, 2)
+ br_off = self._transpose_and_gather_feat(br_off, br_inds)
+ br_off = br_off.view(batch, 1, k, 2)
+
+ tl_xs = tl_xs + tl_off[..., 0]
+ tl_ys = tl_ys + tl_off[..., 1]
+ br_xs = br_xs + br_off[..., 0]
+ br_ys = br_ys + br_off[..., 1]
+
+ if with_centripetal_shift:
+ tl_centripetal_shift = self._transpose_and_gather_feat(
+ tl_centripetal_shift, tl_inds).view(batch, k, 1, 2).exp()
+ br_centripetal_shift = self._transpose_and_gather_feat(
+ br_centripetal_shift, br_inds).view(batch, 1, k, 2).exp()
+
+ tl_ctxs = tl_xs + tl_centripetal_shift[..., 0]
+ tl_ctys = tl_ys + tl_centripetal_shift[..., 1]
+ br_ctxs = br_xs - br_centripetal_shift[..., 0]
+ br_ctys = br_ys - br_centripetal_shift[..., 1]
+
+ # all possible boxes based on top k corners (ignoring class)
+ tl_xs *= (inp_w / width)
+ tl_ys *= (inp_h / height)
+ br_xs *= (inp_w / width)
+ br_ys *= (inp_h / height)
+
+ if with_centripetal_shift:
+ tl_ctxs *= (inp_w / width)
+ tl_ctys *= (inp_h / height)
+ br_ctxs *= (inp_w / width)
+ br_ctys *= (inp_h / height)
+
+ x_off = img_meta['border'][2]
+ y_off = img_meta['border'][0]
+
+ tl_xs -= x_off
+ tl_ys -= y_off
+ br_xs -= x_off
+ br_ys -= y_off
+
+ tl_xs *= tl_xs.gt(0.0).type_as(tl_xs)
+ tl_ys *= tl_ys.gt(0.0).type_as(tl_ys)
+ br_xs *= br_xs.gt(0.0).type_as(br_xs)
+ br_ys *= br_ys.gt(0.0).type_as(br_ys)
+
+ bboxes = torch.stack((tl_xs, tl_ys, br_xs, br_ys), dim=3)
+ area_bboxes = ((br_xs - tl_xs) * (br_ys - tl_ys)).abs()
+
+ if with_centripetal_shift:
+ tl_ctxs -= x_off
+ tl_ctys -= y_off
+ br_ctxs -= x_off
+ br_ctys -= y_off
+
+ tl_ctxs *= tl_ctxs.gt(0.0).type_as(tl_ctxs)
+ tl_ctys *= tl_ctys.gt(0.0).type_as(tl_ctys)
+ br_ctxs *= br_ctxs.gt(0.0).type_as(br_ctxs)
+ br_ctys *= br_ctys.gt(0.0).type_as(br_ctys)
+
+ ct_bboxes = torch.stack((tl_ctxs, tl_ctys, br_ctxs, br_ctys),
+ dim=3)
+ area_ct_bboxes = ((br_ctxs - tl_ctxs) * (br_ctys - tl_ctys)).abs()
+
+ rcentral = torch.zeros_like(ct_bboxes)
+            # magic numbers from Section 4.1 of the CentripetalNet paper
+            mu = torch.ones_like(area_bboxes) / 2.4
+            mu[area_bboxes > 3500] = 1 / 2.1  # large bboxes get a smaller mu
+
+ bboxes_center_x = (bboxes[..., 0] + bboxes[..., 2]) / 2
+ bboxes_center_y = (bboxes[..., 1] + bboxes[..., 3]) / 2
+ rcentral[..., 0] = bboxes_center_x - mu * (bboxes[..., 2] -
+ bboxes[..., 0]) / 2
+ rcentral[..., 1] = bboxes_center_y - mu * (bboxes[..., 3] -
+ bboxes[..., 1]) / 2
+ rcentral[..., 2] = bboxes_center_x + mu * (bboxes[..., 2] -
+ bboxes[..., 0]) / 2
+ rcentral[..., 3] = bboxes_center_y + mu * (bboxes[..., 3] -
+ bboxes[..., 1]) / 2
+ area_rcentral = ((rcentral[..., 2] - rcentral[..., 0]) *
+ (rcentral[..., 3] - rcentral[..., 1])).abs()
+ dists = area_ct_bboxes / area_rcentral
+
+ tl_ctx_inds = (ct_bboxes[..., 0] <= rcentral[..., 0]) | (
+ ct_bboxes[..., 0] >= rcentral[..., 2])
+ tl_cty_inds = (ct_bboxes[..., 1] <= rcentral[..., 1]) | (
+ ct_bboxes[..., 1] >= rcentral[..., 3])
+ br_ctx_inds = (ct_bboxes[..., 2] <= rcentral[..., 0]) | (
+ ct_bboxes[..., 2] >= rcentral[..., 2])
+ br_cty_inds = (ct_bboxes[..., 3] <= rcentral[..., 1]) | (
+ ct_bboxes[..., 3] >= rcentral[..., 3])
+
+ if with_embedding:
+ tl_emb = self._transpose_and_gather_feat(tl_emb, tl_inds)
+ tl_emb = tl_emb.view(batch, k, 1)
+ br_emb = self._transpose_and_gather_feat(br_emb, br_inds)
+ br_emb = br_emb.view(batch, 1, k)
+ dists = torch.abs(tl_emb - br_emb)
+
+ tl_scores = tl_scores.view(batch, k, 1).repeat(1, 1, k)
+ br_scores = br_scores.view(batch, 1, k).repeat(1, k, 1)
+
+ scores = (tl_scores + br_scores) / 2 # scores for all possible boxes
+
+ # tl and br should have same class
+ tl_clses = tl_clses.view(batch, k, 1).repeat(1, 1, k)
+ br_clses = br_clses.view(batch, 1, k).repeat(1, k, 1)
+ cls_inds = (tl_clses != br_clses)
+
+ # reject boxes based on distances
+ dist_inds = dists > distance_threshold
+
+ # reject boxes based on widths and heights
+ width_inds = (br_xs <= tl_xs)
+ height_inds = (br_ys <= tl_ys)
+
+ scores[cls_inds] = -1
+ scores[width_inds] = -1
+ scores[height_inds] = -1
+ scores[dist_inds] = -1
+ if with_centripetal_shift:
+ scores[tl_ctx_inds] = -1
+ scores[tl_cty_inds] = -1
+ scores[br_ctx_inds] = -1
+ scores[br_cty_inds] = -1
+
+ scores = scores.view(batch, -1)
+ scores, inds = torch.topk(scores, num_dets)
+ scores = scores.unsqueeze(2)
+
+ bboxes = bboxes.view(batch, -1, 4)
+ bboxes = self._gather_feat(bboxes, inds)
+
+ clses = tl_clses.contiguous().view(batch, -1, 1)
+ clses = self._gather_feat(clses, inds).float()
+
+ return bboxes, scores, clses
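+
+
+# Illustrative sketch (added for documentation; not part of upstream mmdet):
+# decode_heatmap() above pairs every top-left corner with every bottom-right
+# corner and rejects pairs whose classes differ, whose geometry is inverted
+# or whose embedding distance is too large by setting their score to -1.
+# A tiny stand-alone version of the class-mismatch rejection (made-up values):
+def _example_corner_pair_rejection():
+    k = 2
+    tl_scores = torch.tensor([[0.875, 0.625]]).view(1, k, 1).repeat(1, 1, k)
+    br_scores = torch.tensor([[0.75, 0.5]]).view(1, 1, k).repeat(1, k, 1)
+    scores = (tl_scores + br_scores) / 2
+    tl_clses = torch.tensor([[0, 1]]).view(1, k, 1).repeat(1, 1, k)
+    br_clses = torch.tensor([[0, 0]]).view(1, 1, k).repeat(1, k, 1)
+    scores[tl_clses != br_clses] = -1
+    # the second top-left corner has class 1 while both bottom-right corners
+    # have class 0, so its whole row of the (k, k) pair matrix is rejected
+    assert (scores[0, 1] == -1).all()
+    return scores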
diff --git a/annotator/uniformer/mmdet_null/models/dense_heads/dense_test_mixins.py b/annotator/uniformer/mmdet_null/models/dense_heads/dense_test_mixins.py
new file mode 100644
index 0000000000000000000000000000000000000000..dd81364dec90e97c30a6e2220a5e0fe96373c5bd
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/dense_heads/dense_test_mixins.py
@@ -0,0 +1,100 @@
+from inspect import signature
+
+import torch
+
+from mmdet.core import bbox2result, bbox_mapping_back, multiclass_nms
+
+
+class BBoxTestMixin(object):
+ """Mixin class for test time augmentation of bboxes."""
+
+ def merge_aug_bboxes(self, aug_bboxes, aug_scores, img_metas):
+ """Merge augmented detection bboxes and scores.
+
+ Args:
+ aug_bboxes (list[Tensor]): shape (n, 4*#class)
+ aug_scores (list[Tensor] or None): shape (n, #class)
+            img_metas (list[list[dict]]): Meta information of each augmented
+                image, e.g. image shape, scale factor and flip flags.
+
+ Returns:
+ tuple: (bboxes, scores)
+ """
+ recovered_bboxes = []
+ for bboxes, img_info in zip(aug_bboxes, img_metas):
+ img_shape = img_info[0]['img_shape']
+ scale_factor = img_info[0]['scale_factor']
+ flip = img_info[0]['flip']
+ flip_direction = img_info[0]['flip_direction']
+ bboxes = bbox_mapping_back(bboxes, img_shape, scale_factor, flip,
+ flip_direction)
+ recovered_bboxes.append(bboxes)
+ bboxes = torch.cat(recovered_bboxes, dim=0)
+ if aug_scores is None:
+ return bboxes
+ else:
+ scores = torch.cat(aug_scores, dim=0)
+ return bboxes, scores
+
+ def aug_test_bboxes(self, feats, img_metas, rescale=False):
+ """Test det bboxes with test time augmentation.
+
+ Args:
+ feats (list[Tensor]): the outer list indicates test-time
+ augmentations and inner Tensor should have a shape NxCxHxW,
+ which contains features for all images in the batch.
+ img_metas (list[list[dict]]): the outer list indicates test-time
+ augs (multiscale, flip, etc.) and the inner list indicates
+ images in a batch. each dict has image information.
+ rescale (bool, optional): Whether to rescale the results.
+ Defaults to False.
+
+ Returns:
+ list[ndarray]: bbox results of each class
+ """
+ # check with_nms argument
+ gb_sig = signature(self.get_bboxes)
+ gb_args = [p.name for p in gb_sig.parameters.values()]
+ if hasattr(self, '_get_bboxes'):
+ gbs_sig = signature(self._get_bboxes)
+ else:
+ gbs_sig = signature(self._get_bboxes_single)
+ gbs_args = [p.name for p in gbs_sig.parameters.values()]
+ assert ('with_nms' in gb_args) and ('with_nms' in gbs_args), \
+ f'{self.__class__.__name__}' \
+ ' does not support test-time augmentation'
+
+ aug_bboxes = []
+ aug_scores = []
+ aug_factors = [] # score_factors for NMS
+ for x, img_meta in zip(feats, img_metas):
+ # only one image in the batch
+ outs = self.forward(x)
+ bbox_inputs = outs + (img_meta, self.test_cfg, False, False)
+ bbox_outputs = self.get_bboxes(*bbox_inputs)[0]
+ aug_bboxes.append(bbox_outputs[0])
+ aug_scores.append(bbox_outputs[1])
+ # bbox_outputs of some detectors (e.g., ATSS, FCOS, YOLOv3)
+ # contains additional element to adjust scores before NMS
+ if len(bbox_outputs) >= 3:
+ aug_factors.append(bbox_outputs[2])
+
+ # after merging, bboxes will be rescaled to the original image size
+ merged_bboxes, merged_scores = self.merge_aug_bboxes(
+ aug_bboxes, aug_scores, img_metas)
+ merged_factors = torch.cat(aug_factors, dim=0) if aug_factors else None
+ det_bboxes, det_labels = multiclass_nms(
+ merged_bboxes,
+ merged_scores,
+ self.test_cfg.score_thr,
+ self.test_cfg.nms,
+ self.test_cfg.max_per_img,
+ score_factors=merged_factors)
+
+ if rescale:
+ _det_bboxes = det_bboxes
+ else:
+ _det_bboxes = det_bboxes.clone()
+ _det_bboxes[:, :4] *= det_bboxes.new_tensor(
+ img_metas[0][0]['scale_factor'])
+ bbox_results = bbox2result(_det_bboxes, det_labels, self.num_classes)
+ return bbox_results
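+
+    # Rough usage sketch (assumption, not the exact mmdet call chain): a
+    # detector would build one feature list per test-time augmentation and
+    # let the head merge the per-augmentation predictions, e.g.
+    #
+    #   feats = [detector.extract_feat(img) for img in aug_imgs]
+    #   bbox_results = detector.bbox_head.aug_test_bboxes(
+    #       feats, img_metas, rescale=True)
+    #
+    # `detector` and `aug_imgs` are placeholder names for illustration only.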
diff --git a/annotator/uniformer/mmdet_null/models/dense_heads/embedding_rpn_head.py b/annotator/uniformer/mmdet_null/models/dense_heads/embedding_rpn_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..200ce8d20c5503f98c5c21f30bb9d00437e25f34
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/dense_heads/embedding_rpn_head.py
@@ -0,0 +1,100 @@
+import torch
+import torch.nn as nn
+
+from mmdet.models.builder import HEADS
+from ...core import bbox_cxcywh_to_xyxy
+
+
+@HEADS.register_module()
+class EmbeddingRPNHead(nn.Module):
+    """RPNHead in the `Sparse R-CNN <https://arxiv.org/abs/2011.12450>`_ .
+
+ Unlike traditional RPNHead, this module does not need FPN input, but just
+ decode `init_proposal_bboxes` and expand the first dimension of
+ `init_proposal_bboxes` and `init_proposal_features` to the batch_size.
+
+ Args:
+ num_proposals (int): Number of init_proposals. Default 100.
+ proposal_feature_channel (int): Channel number of
+ init_proposal_feature. Defaults to 256.
+ """
+
+ def __init__(self,
+ num_proposals=100,
+ proposal_feature_channel=256,
+ **kwargs):
+ super(EmbeddingRPNHead, self).__init__()
+ self.num_proposals = num_proposals
+ self.proposal_feature_channel = proposal_feature_channel
+ self._init_layers()
+
+ def _init_layers(self):
+ """Initialize a sparse set of proposal boxes and proposal features."""
+ self.init_proposal_bboxes = nn.Embedding(self.num_proposals, 4)
+ self.init_proposal_features = nn.Embedding(
+ self.num_proposals, self.proposal_feature_channel)
+
+ def init_weights(self):
+        """Initialize the init_proposal_bboxes as normalized [c_x, c_y, w, h],
+        covering the entire image.
+ """
+ nn.init.constant_(self.init_proposal_bboxes.weight[:, :2], 0.5)
+ nn.init.constant_(self.init_proposal_bboxes.weight[:, 2:], 1)
+
+ def _decode_init_proposals(self, imgs, img_metas):
+ """Decode init_proposal_bboxes according to the size of images and
+ expand dimension of init_proposal_features to batch_size.
+
+ Args:
+ imgs (list[Tensor]): List of FPN features.
+ img_metas (list[dict]): List of meta-information of
+ images. Need the img_shape to decode the init_proposals.
+
+ Returns:
+ Tuple(Tensor):
+
+ - proposals (Tensor): Decoded proposal bboxes,
+ has shape (batch_size, num_proposals, 4).
+ - init_proposal_features (Tensor): Expanded proposal
+ features, has shape
+ (batch_size, num_proposals, proposal_feature_channel).
+ - imgs_whwh (Tensor): Tensor with shape
+                  (batch_size, 1, 4), the last dimension means
+ [img_width, img_height, img_width, img_height].
+ """
+ proposals = self.init_proposal_bboxes.weight.clone()
+ proposals = bbox_cxcywh_to_xyxy(proposals)
+ num_imgs = len(imgs[0])
+ imgs_whwh = []
+ for meta in img_metas:
+ h, w, _ = meta['img_shape']
+ imgs_whwh.append(imgs[0].new_tensor([[w, h, w, h]]))
+ imgs_whwh = torch.cat(imgs_whwh, dim=0)
+ imgs_whwh = imgs_whwh[:, None, :]
+
+ # imgs_whwh has shape (batch_size, 1, 4)
+ # The shape of proposals change from (num_proposals, 4)
+ # to (batch_size ,num_proposals, 4)
+ proposals = proposals * imgs_whwh
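+        # With the default init_weights (cxcywh = [0.5, 0.5, 1, 1]), every
+        # decoded proposal is [0, 0, w, h], i.e. the whole image.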
+
+ init_proposal_features = self.init_proposal_features.weight.clone()
+ init_proposal_features = init_proposal_features[None].expand(
+ num_imgs, *init_proposal_features.size())
+ return proposals, init_proposal_features, imgs_whwh
+
+ def forward_dummy(self, img, img_metas):
+ """Dummy forward function.
+
+ Used in flops calculation.
+ """
+ return self._decode_init_proposals(img, img_metas)
+
+ def forward_train(self, img, img_metas):
+ """Forward function in training stage."""
+ return self._decode_init_proposals(img, img_metas)
+
+ def simple_test_rpn(self, img, img_metas):
+ """Forward function in testing stage."""
+ return self._decode_init_proposals(img, img_metas)
diff --git a/annotator/uniformer/mmdet_null/models/dense_heads/fcos_head.py b/annotator/uniformer/mmdet_null/models/dense_heads/fcos_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..905a703507f279ac8d34cff23c99af33c0d5f973
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/dense_heads/fcos_head.py
@@ -0,0 +1,629 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from mmcv.cnn import Scale, normal_init
+from mmcv.runner import force_fp32
+
+from mmdet.core import distance2bbox, multi_apply, multiclass_nms, reduce_mean
+from ..builder import HEADS, build_loss
+from .anchor_free_head import AnchorFreeHead
+
+INF = 1e8
+
+
+@HEADS.register_module()
+class FCOSHead(AnchorFreeHead):
+    """Anchor-free head used in `FCOS <https://arxiv.org/abs/1904.01355>`_.
+
+ The FCOS head does not use anchor boxes. Instead bounding boxes are
+ predicted at each pixel and a centerness measure is used to suppress
+ low-quality predictions.
+ Here norm_on_bbox, centerness_on_reg, dcn_on_last_conv are training
+    tricks used in the official repo, which can bring remarkable mAP gains
+    of up to 4.9 points. Please see https://github.com/tianzhi0549/FCOS for
+ more detail.
+
+ Args:
+ num_classes (int): Number of categories excluding the background
+ category.
+ in_channels (int): Number of channels in the input feature map.
+ strides (list[int] | list[tuple[int, int]]): Strides of points
+ in multiple feature levels. Default: (4, 8, 16, 32, 64).
+ regress_ranges (tuple[tuple[int, int]]): Regress range of multiple
+ level points.
+ center_sampling (bool): If true, use center sampling. Default: False.
+ center_sample_radius (float): Radius of center sampling. Default: 1.5.
+ norm_on_bbox (bool): If true, normalize the regression targets
+ with FPN strides. Default: False.
+ centerness_on_reg (bool): If true, position centerness on the
+ regress branch. Please refer to https://github.com/tianzhi0549/FCOS/issues/89#issuecomment-516877042.
+ Default: False.
+ conv_bias (bool | str): If specified as `auto`, it will be decided by the
+ norm_cfg. Bias of conv will be set as True if `norm_cfg` is None, otherwise
+ False. Default: "auto".
+ loss_cls (dict): Config of classification loss.
+ loss_bbox (dict): Config of localization loss.
+ loss_centerness (dict): Config of centerness loss.
+ norm_cfg (dict): dictionary to construct and config norm layer.
+ Default: norm_cfg=dict(type='GN', num_groups=32, requires_grad=True).
+
+ Example:
+ >>> self = FCOSHead(11, 7)
+ >>> feats = [torch.rand(1, 7, s, s) for s in [4, 8, 16, 32, 64]]
+ >>> cls_score, bbox_pred, centerness = self.forward(feats)
+ >>> assert len(cls_score) == len(self.scales)
+ """ # noqa: E501
+
+ def __init__(self,
+ num_classes,
+ in_channels,
+ regress_ranges=((-1, 64), (64, 128), (128, 256), (256, 512),
+ (512, INF)),
+ center_sampling=False,
+ center_sample_radius=1.5,
+ norm_on_bbox=False,
+ centerness_on_reg=False,
+ loss_cls=dict(
+ type='FocalLoss',
+ use_sigmoid=True,
+ gamma=2.0,
+ alpha=0.25,
+ loss_weight=1.0),
+ loss_bbox=dict(type='IoULoss', loss_weight=1.0),
+ loss_centerness=dict(
+ type='CrossEntropyLoss',
+ use_sigmoid=True,
+ loss_weight=1.0),
+ norm_cfg=dict(type='GN', num_groups=32, requires_grad=True),
+ **kwargs):
+ self.regress_ranges = regress_ranges
+ self.center_sampling = center_sampling
+ self.center_sample_radius = center_sample_radius
+ self.norm_on_bbox = norm_on_bbox
+ self.centerness_on_reg = centerness_on_reg
+ super().__init__(
+ num_classes,
+ in_channels,
+ loss_cls=loss_cls,
+ loss_bbox=loss_bbox,
+ norm_cfg=norm_cfg,
+ **kwargs)
+ self.loss_centerness = build_loss(loss_centerness)
+
+ def _init_layers(self):
+ """Initialize layers of the head."""
+ super()._init_layers()
+ self.conv_centerness = nn.Conv2d(self.feat_channels, 1, 3, padding=1)
+ self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides])
+
+ def init_weights(self):
+ """Initialize weights of the head."""
+ super().init_weights()
+ normal_init(self.conv_centerness, std=0.01)
+
+ def forward(self, feats):
+ """Forward features from the upstream network.
+
+ Args:
+ feats (tuple[Tensor]): Features from the upstream network, each is
+ a 4D-tensor.
+
+ Returns:
+ tuple:
+ cls_scores (list[Tensor]): Box scores for each scale level, \
+ each is a 4D-tensor, the channel number is \
+ num_points * num_classes.
+ bbox_preds (list[Tensor]): Box energies / deltas for each \
+ scale level, each is a 4D-tensor, the channel number is \
+ num_points * 4.
+ centernesses (list[Tensor]): centerness for each scale level, \
+ each is a 4D-tensor, the channel number is num_points * 1.
+ """
+ return multi_apply(self.forward_single, feats, self.scales,
+ self.strides)
+
+ def forward_single(self, x, scale, stride):
+ """Forward features of a single scale level.
+
+ Args:
+ x (Tensor): FPN feature maps of the specified stride.
+ scale (:obj: `mmcv.cnn.Scale`): Learnable scale module to resize
+ the bbox prediction.
+ stride (int): The corresponding stride for feature maps, only
+ used to normalize the bbox prediction when self.norm_on_bbox
+ is True.
+
+ Returns:
+ tuple: scores for each class, bbox predictions and centerness \
+ predictions of input feature maps.
+ """
+ cls_score, bbox_pred, cls_feat, reg_feat = super().forward_single(x)
+ if self.centerness_on_reg:
+ centerness = self.conv_centerness(reg_feat)
+ else:
+ centerness = self.conv_centerness(cls_feat)
+ # scale the bbox_pred of different level
+ # float to avoid overflow when enabling FP16
+ bbox_pred = scale(bbox_pred).float()
+ if self.norm_on_bbox:
+ bbox_pred = F.relu(bbox_pred)
+ if not self.training:
+ bbox_pred *= stride
+ else:
+ bbox_pred = bbox_pred.exp()
+ return cls_score, bbox_pred, centerness
+
+ @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'centernesses'))
+ def loss(self,
+ cls_scores,
+ bbox_preds,
+ centernesses,
+ gt_bboxes,
+ gt_labels,
+ img_metas,
+ gt_bboxes_ignore=None):
+ """Compute loss of the head.
+
+ Args:
+ cls_scores (list[Tensor]): Box scores for each scale level,
+ each is a 4D-tensor, the channel number is
+ num_points * num_classes.
+ bbox_preds (list[Tensor]): Box energies / deltas for each scale
+ level, each is a 4D-tensor, the channel number is
+ num_points * 4.
+ centernesses (list[Tensor]): centerness for each scale level, each
+ is a 4D-tensor, the channel number is num_points * 1.
+ gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
+ shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
+ gt_labels (list[Tensor]): class indices corresponding to each box
+ img_metas (list[dict]): Meta information of each image, e.g.,
+ image size, scaling factor, etc.
+ gt_bboxes_ignore (None | list[Tensor]): specify which bounding
+ boxes can be ignored when computing the loss.
+
+ Returns:
+ dict[str, Tensor]: A dictionary of loss components.
+ """
+ assert len(cls_scores) == len(bbox_preds) == len(centernesses)
+ featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
+ all_level_points = self.get_points(featmap_sizes, bbox_preds[0].dtype,
+ bbox_preds[0].device)
+ labels, bbox_targets = self.get_targets(all_level_points, gt_bboxes,
+ gt_labels)
+
+ num_imgs = cls_scores[0].size(0)
+ # flatten cls_scores, bbox_preds and centerness
+ flatten_cls_scores = [
+ cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels)
+ for cls_score in cls_scores
+ ]
+ flatten_bbox_preds = [
+ bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4)
+ for bbox_pred in bbox_preds
+ ]
+ flatten_centerness = [
+ centerness.permute(0, 2, 3, 1).reshape(-1)
+ for centerness in centernesses
+ ]
+ flatten_cls_scores = torch.cat(flatten_cls_scores)
+ flatten_bbox_preds = torch.cat(flatten_bbox_preds)
+ flatten_centerness = torch.cat(flatten_centerness)
+ flatten_labels = torch.cat(labels)
+ flatten_bbox_targets = torch.cat(bbox_targets)
+ # repeat points to align with bbox_preds
+ flatten_points = torch.cat(
+ [points.repeat(num_imgs, 1) for points in all_level_points])
+
+ # FG cat_id: [0, num_classes -1], BG cat_id: num_classes
+ bg_class_ind = self.num_classes
+ pos_inds = ((flatten_labels >= 0)
+ & (flatten_labels < bg_class_ind)).nonzero().reshape(-1)
+ num_pos = torch.tensor(
+ len(pos_inds), dtype=torch.float, device=bbox_preds[0].device)
+ num_pos = max(reduce_mean(num_pos), 1.0)
+ loss_cls = self.loss_cls(
+ flatten_cls_scores, flatten_labels, avg_factor=num_pos)
+
+ pos_bbox_preds = flatten_bbox_preds[pos_inds]
+ pos_centerness = flatten_centerness[pos_inds]
+
+ if len(pos_inds) > 0:
+ pos_bbox_targets = flatten_bbox_targets[pos_inds]
+ pos_centerness_targets = self.centerness_target(pos_bbox_targets)
+ pos_points = flatten_points[pos_inds]
+ pos_decoded_bbox_preds = distance2bbox(pos_points, pos_bbox_preds)
+ pos_decoded_target_preds = distance2bbox(pos_points,
+ pos_bbox_targets)
+ # centerness weighted iou loss
+ centerness_denorm = max(
+ reduce_mean(pos_centerness_targets.sum().detach()), 1e-6)
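+            # dividing by the summed centerness targets (rather than num_pos)
+            # keeps the bbox loss on a consistent scale when each positive
+            # sample is weighted by its centerness target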
+ loss_bbox = self.loss_bbox(
+ pos_decoded_bbox_preds,
+ pos_decoded_target_preds,
+ weight=pos_centerness_targets,
+ avg_factor=centerness_denorm)
+ loss_centerness = self.loss_centerness(
+ pos_centerness, pos_centerness_targets, avg_factor=num_pos)
+ else:
+ loss_bbox = pos_bbox_preds.sum()
+ loss_centerness = pos_centerness.sum()
+
+ return dict(
+ loss_cls=loss_cls,
+ loss_bbox=loss_bbox,
+ loss_centerness=loss_centerness)
+
+ @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'centernesses'))
+ def get_bboxes(self,
+ cls_scores,
+ bbox_preds,
+ centernesses,
+ img_metas,
+ cfg=None,
+ rescale=False,
+ with_nms=True):
+ """Transform network output for a batch into bbox predictions.
+
+ Args:
+ cls_scores (list[Tensor]): Box scores for each scale level
+ with shape (N, num_points * num_classes, H, W).
+ bbox_preds (list[Tensor]): Box energies / deltas for each scale
+ level with shape (N, num_points * 4, H, W).
+ centernesses (list[Tensor]): Centerness for each scale level with
+ shape (N, num_points * 1, H, W).
+ img_metas (list[dict]): Meta information of each image, e.g.,
+ image size, scaling factor, etc.
+ cfg (mmcv.Config | None): Test / postprocessing configuration,
+ if None, test_cfg would be used. Default: None.
+ rescale (bool): If True, return boxes in original image space.
+ Default: False.
+ with_nms (bool): If True, do nms before return boxes.
+ Default: True.
+
+ Returns:
+ list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple.
+ The first item is an (n, 5) tensor, where 5 represent
+ (tl_x, tl_y, br_x, br_y, score) and the score between 0 and 1.
+ The shape of the second tensor in the tuple is (n,), and
+ each element represents the class label of the corresponding
+ box.
+ """
+ assert len(cls_scores) == len(bbox_preds)
+ num_levels = len(cls_scores)
+
+ featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
+ mlvl_points = self.get_points(featmap_sizes, bbox_preds[0].dtype,
+ bbox_preds[0].device)
+
+ cls_score_list = [cls_scores[i].detach() for i in range(num_levels)]
+ bbox_pred_list = [bbox_preds[i].detach() for i in range(num_levels)]
+ centerness_pred_list = [
+ centernesses[i].detach() for i in range(num_levels)
+ ]
+ if torch.onnx.is_in_onnx_export():
+ assert len(
+ img_metas
+ ) == 1, 'Only support one input image while in exporting to ONNX'
+ img_shapes = img_metas[0]['img_shape_for_onnx']
+ else:
+ img_shapes = [
+ img_metas[i]['img_shape']
+ for i in range(cls_scores[0].shape[0])
+ ]
+ scale_factors = [
+ img_metas[i]['scale_factor'] for i in range(cls_scores[0].shape[0])
+ ]
+ result_list = self._get_bboxes(cls_score_list, bbox_pred_list,
+ centerness_pred_list, mlvl_points,
+ img_shapes, scale_factors, cfg, rescale,
+ with_nms)
+ return result_list
+
+ def _get_bboxes(self,
+ cls_scores,
+ bbox_preds,
+ centernesses,
+ mlvl_points,
+ img_shapes,
+ scale_factors,
+ cfg,
+ rescale=False,
+ with_nms=True):
+ """Transform outputs for a single batch item into bbox predictions.
+
+ Args:
+ cls_scores (list[Tensor]): Box scores for a single scale level
+ with shape (N, num_points * num_classes, H, W).
+ bbox_preds (list[Tensor]): Box energies / deltas for a single scale
+ level with shape (N, num_points * 4, H, W).
+ centernesses (list[Tensor]): Centerness for a single scale level
+                with shape (N, num_points * 1, H, W).
+ mlvl_points (list[Tensor]): Box reference for a single scale level
+ with shape (num_total_points, 4).
+ img_shapes (list[tuple[int]]): Shape of the input image,
+ list[(height, width, 3)].
+ scale_factors (list[ndarray]): Scale factor of the image arrange as
+ (w_scale, h_scale, w_scale, h_scale).
+ cfg (mmcv.Config | None): Test / postprocessing configuration,
+ if None, test_cfg would be used.
+ rescale (bool): If True, return boxes in original image space.
+ Default: False.
+ with_nms (bool): If True, do nms before return boxes.
+ Default: True.
+
+ Returns:
+ tuple(Tensor):
+ det_bboxes (Tensor): BBox predictions in shape (n, 5), where
+ the first 4 columns are bounding box positions
+ (tl_x, tl_y, br_x, br_y) and the 5-th column is a score
+ between 0 and 1.
+ det_labels (Tensor): A (n,) tensor where each item is the
+ predicted class label of the corresponding box.
+ """
+ cfg = self.test_cfg if cfg is None else cfg
+ assert len(cls_scores) == len(bbox_preds) == len(mlvl_points)
+ device = cls_scores[0].device
+ batch_size = cls_scores[0].shape[0]
+ # convert to tensor to keep tracing
+ nms_pre_tensor = torch.tensor(
+ cfg.get('nms_pre', -1), device=device, dtype=torch.long)
+ mlvl_bboxes = []
+ mlvl_scores = []
+ mlvl_centerness = []
+ for cls_score, bbox_pred, centerness, points in zip(
+ cls_scores, bbox_preds, centernesses, mlvl_points):
+ assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
+ scores = cls_score.permute(0, 2, 3, 1).reshape(
+ batch_size, -1, self.cls_out_channels).sigmoid()
+ centerness = centerness.permute(0, 2, 3,
+ 1).reshape(batch_size,
+ -1).sigmoid()
+
+ bbox_pred = bbox_pred.permute(0, 2, 3,
+ 1).reshape(batch_size, -1, 4)
+ # Always keep topk op for dynamic input in onnx
+ if nms_pre_tensor > 0 and (torch.onnx.is_in_onnx_export()
+ or scores.shape[-2] > nms_pre_tensor):
+ from torch import _shape_as_tensor
+ # keep shape as tensor and get k
+ num_anchor = _shape_as_tensor(scores)[-2].to(device)
+ nms_pre = torch.where(nms_pre_tensor < num_anchor,
+ nms_pre_tensor, num_anchor)
+
+ max_scores, _ = (scores * centerness[..., None]).max(-1)
+ _, topk_inds = max_scores.topk(nms_pre)
+ points = points[topk_inds, :]
+ batch_inds = torch.arange(batch_size).view(
+ -1, 1).expand_as(topk_inds).long()
+ bbox_pred = bbox_pred[batch_inds, topk_inds, :]
+ scores = scores[batch_inds, topk_inds, :]
+ centerness = centerness[batch_inds, topk_inds]
+
+ bboxes = distance2bbox(points, bbox_pred, max_shape=img_shapes)
+ mlvl_bboxes.append(bboxes)
+ mlvl_scores.append(scores)
+ mlvl_centerness.append(centerness)
+
+ batch_mlvl_bboxes = torch.cat(mlvl_bboxes, dim=1)
+ if rescale:
+ batch_mlvl_bboxes /= batch_mlvl_bboxes.new_tensor(
+ scale_factors).unsqueeze(1)
+ batch_mlvl_scores = torch.cat(mlvl_scores, dim=1)
+ batch_mlvl_centerness = torch.cat(mlvl_centerness, dim=1)
+
+ # Set max number of box to be feed into nms in deployment
+ deploy_nms_pre = cfg.get('deploy_nms_pre', -1)
+ if deploy_nms_pre > 0 and torch.onnx.is_in_onnx_export():
+ batch_mlvl_scores, _ = (
+ batch_mlvl_scores *
+ batch_mlvl_centerness.unsqueeze(2).expand_as(batch_mlvl_scores)
+ ).max(-1)
+ _, topk_inds = batch_mlvl_scores.topk(deploy_nms_pre)
+ batch_inds = torch.arange(batch_mlvl_scores.shape[0]).view(
+ -1, 1).expand_as(topk_inds)
+ batch_mlvl_scores = batch_mlvl_scores[batch_inds, topk_inds, :]
+ batch_mlvl_bboxes = batch_mlvl_bboxes[batch_inds, topk_inds, :]
+ batch_mlvl_centerness = batch_mlvl_centerness[batch_inds,
+ topk_inds]
+
+        # note that we set FG labels to [0, num_class-1] since mmdet v2.0
+ # BG cat_id: num_class
+ padding = batch_mlvl_scores.new_zeros(batch_size,
+ batch_mlvl_scores.shape[1], 1)
+ batch_mlvl_scores = torch.cat([batch_mlvl_scores, padding], dim=-1)
+
+ if with_nms:
+ det_results = []
+ for (mlvl_bboxes, mlvl_scores,
+ mlvl_centerness) in zip(batch_mlvl_bboxes, batch_mlvl_scores,
+ batch_mlvl_centerness):
+ det_bbox, det_label = multiclass_nms(
+ mlvl_bboxes,
+ mlvl_scores,
+ cfg.score_thr,
+ cfg.nms,
+ cfg.max_per_img,
+ score_factors=mlvl_centerness)
+ det_results.append(tuple([det_bbox, det_label]))
+ else:
+ det_results = [
+ tuple(mlvl_bs)
+ for mlvl_bs in zip(batch_mlvl_bboxes, batch_mlvl_scores,
+ batch_mlvl_centerness)
+ ]
+ return det_results
+
+ def _get_points_single(self,
+ featmap_size,
+ stride,
+ dtype,
+ device,
+ flatten=False):
+ """Get points according to feature map sizes."""
+ y, x = super()._get_points_single(featmap_size, stride, dtype, device)
+ points = torch.stack((x.reshape(-1) * stride, y.reshape(-1) * stride),
+ dim=-1) + stride // 2
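+        # shift by stride // 2 so each point lies at the centre of its
+        # stride x stride cell on the input image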
+ return points
+
+ def get_targets(self, points, gt_bboxes_list, gt_labels_list):
+ """Compute regression, classification and centerness targets for points
+ in multiple images.
+
+ Args:
+ points (list[Tensor]): Points of each fpn level, each has shape
+ (num_points, 2).
+ gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image,
+ each has shape (num_gt, 4).
+ gt_labels_list (list[Tensor]): Ground truth labels of each box,
+ each has shape (num_gt,).
+
+ Returns:
+ tuple:
+ concat_lvl_labels (list[Tensor]): Labels of each level. \
+ concat_lvl_bbox_targets (list[Tensor]): BBox targets of each \
+ level.
+ """
+ assert len(points) == len(self.regress_ranges)
+ num_levels = len(points)
+ # expand regress ranges to align with points
+ expanded_regress_ranges = [
+ points[i].new_tensor(self.regress_ranges[i])[None].expand_as(
+ points[i]) for i in range(num_levels)
+ ]
+ # concat all levels points and regress ranges
+ concat_regress_ranges = torch.cat(expanded_regress_ranges, dim=0)
+ concat_points = torch.cat(points, dim=0)
+
+ # the number of points per img, per lvl
+ num_points = [center.size(0) for center in points]
+
+ # get labels and bbox_targets of each image
+ labels_list, bbox_targets_list = multi_apply(
+ self._get_target_single,
+ gt_bboxes_list,
+ gt_labels_list,
+ points=concat_points,
+ regress_ranges=concat_regress_ranges,
+ num_points_per_lvl=num_points)
+
+ # split to per img, per level
+ labels_list = [labels.split(num_points, 0) for labels in labels_list]
+ bbox_targets_list = [
+ bbox_targets.split(num_points, 0)
+ for bbox_targets in bbox_targets_list
+ ]
+
+ # concat per level image
+ concat_lvl_labels = []
+ concat_lvl_bbox_targets = []
+ for i in range(num_levels):
+ concat_lvl_labels.append(
+ torch.cat([labels[i] for labels in labels_list]))
+ bbox_targets = torch.cat(
+ [bbox_targets[i] for bbox_targets in bbox_targets_list])
+ if self.norm_on_bbox:
+ bbox_targets = bbox_targets / self.strides[i]
+ concat_lvl_bbox_targets.append(bbox_targets)
+ return concat_lvl_labels, concat_lvl_bbox_targets
+
+ def _get_target_single(self, gt_bboxes, gt_labels, points, regress_ranges,
+ num_points_per_lvl):
+ """Compute regression and classification targets for a single image."""
+ num_points = points.size(0)
+ num_gts = gt_labels.size(0)
+ if num_gts == 0:
+ return gt_labels.new_full((num_points,), self.num_classes), \
+ gt_bboxes.new_zeros((num_points, 4))
+
+ areas = (gt_bboxes[:, 2] - gt_bboxes[:, 0]) * (
+ gt_bboxes[:, 3] - gt_bboxes[:, 1])
+ # TODO: figure out why these two are different
+ # areas = areas[None].expand(num_points, num_gts)
+ areas = areas[None].repeat(num_points, 1)
+ regress_ranges = regress_ranges[:, None, :].expand(
+ num_points, num_gts, 2)
+ gt_bboxes = gt_bboxes[None].expand(num_points, num_gts, 4)
+ xs, ys = points[:, 0], points[:, 1]
+ xs = xs[:, None].expand(num_points, num_gts)
+ ys = ys[:, None].expand(num_points, num_gts)
+
+ left = xs - gt_bboxes[..., 0]
+ right = gt_bboxes[..., 2] - xs
+ top = ys - gt_bboxes[..., 1]
+ bottom = gt_bboxes[..., 3] - ys
+ bbox_targets = torch.stack((left, top, right, bottom), -1)
+
+ if self.center_sampling:
+ # condition1: inside a `center bbox`
+ radius = self.center_sample_radius
+ center_xs = (gt_bboxes[..., 0] + gt_bboxes[..., 2]) / 2
+ center_ys = (gt_bboxes[..., 1] + gt_bboxes[..., 3]) / 2
+ center_gts = torch.zeros_like(gt_bboxes)
+ stride = center_xs.new_zeros(center_xs.shape)
+
+ # project the points on current lvl back to the `original` sizes
+ lvl_begin = 0
+ for lvl_idx, num_points_lvl in enumerate(num_points_per_lvl):
+ lvl_end = lvl_begin + num_points_lvl
+ stride[lvl_begin:lvl_end] = self.strides[lvl_idx] * radius
+ lvl_begin = lvl_end
+
+ x_mins = center_xs - stride
+ y_mins = center_ys - stride
+ x_maxs = center_xs + stride
+ y_maxs = center_ys + stride
+ center_gts[..., 0] = torch.where(x_mins > gt_bboxes[..., 0],
+ x_mins, gt_bboxes[..., 0])
+ center_gts[..., 1] = torch.where(y_mins > gt_bboxes[..., 1],
+ y_mins, gt_bboxes[..., 1])
+ center_gts[..., 2] = torch.where(x_maxs > gt_bboxes[..., 2],
+ gt_bboxes[..., 2], x_maxs)
+ center_gts[..., 3] = torch.where(y_maxs > gt_bboxes[..., 3],
+ gt_bboxes[..., 3], y_maxs)
+
+ cb_dist_left = xs - center_gts[..., 0]
+ cb_dist_right = center_gts[..., 2] - xs
+ cb_dist_top = ys - center_gts[..., 1]
+ cb_dist_bottom = center_gts[..., 3] - ys
+ center_bbox = torch.stack(
+ (cb_dist_left, cb_dist_top, cb_dist_right, cb_dist_bottom), -1)
+ inside_gt_bbox_mask = center_bbox.min(-1)[0] > 0
+ else:
+ # condition1: inside a gt bbox
+ inside_gt_bbox_mask = bbox_targets.min(-1)[0] > 0
+
+ # condition2: limit the regression range for each location
+ max_regress_distance = bbox_targets.max(-1)[0]
+ inside_regress_range = (
+ (max_regress_distance >= regress_ranges[..., 0])
+ & (max_regress_distance <= regress_ranges[..., 1]))
+
+        # if there is still more than one object for a location,
+ # we choose the one with minimal area
+ areas[inside_gt_bbox_mask == 0] = INF
+ areas[inside_regress_range == 0] = INF
+ min_area, min_area_inds = areas.min(dim=1)
+
+ labels = gt_labels[min_area_inds]
+ labels[min_area == INF] = self.num_classes # set as BG
+ bbox_targets = bbox_targets[range(num_points), min_area_inds]
+
+ return labels, bbox_targets
+
+ def centerness_target(self, pos_bbox_targets):
+ """Compute centerness targets.
+
+ Args:
+ pos_bbox_targets (Tensor): BBox targets of positive bboxes in shape
+ (num_pos, 4)
+
+ Returns:
+ Tensor: Centerness target.
+ """
+ # only calculate pos centerness targets, otherwise there may be nan
+ left_right = pos_bbox_targets[:, [0, 2]]
+ top_bottom = pos_bbox_targets[:, [1, 3]]
+ centerness_targets = (
+ left_right.min(dim=-1)[0] / left_right.max(dim=-1)[0]) * (
+ top_bottom.min(dim=-1)[0] / top_bottom.max(dim=-1)[0])
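+        # e.g. targets (l, t, r, b) = (2, 1, 2, 4) give
+        # sqrt((2 / 2) * (1 / 4)) = 0.5, while a point at the exact centre of
+        # its box gets a centerness of 1.0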
+ return torch.sqrt(centerness_targets)
diff --git a/annotator/uniformer/mmdet_null/models/dense_heads/fovea_head.py b/annotator/uniformer/mmdet_null/models/dense_heads/fovea_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..c8ccea787cba3d092284d4a5e209adaf6521c86a
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/dense_heads/fovea_head.py
@@ -0,0 +1,341 @@
+import torch
+import torch.nn as nn
+from mmcv.cnn import ConvModule, normal_init
+from mmcv.ops import DeformConv2d
+
+from mmdet.core import multi_apply, multiclass_nms
+from ..builder import HEADS
+from .anchor_free_head import AnchorFreeHead
+
+INF = 1e8
+
+
+class FeatureAlign(nn.Module):
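+    """Align features with the predicted bbox shapes (used in this module by
+    FoveaHead when ``with_deform=True``).
+
+    A 1x1 conv predicts deformable-conv offsets from the (exponentiated) bbox
+    prediction and a DeformConv2d then resamples the classification features
+    accordingly; see ``FoveaHead.forward_single`` below.
+    """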
+
+ def __init__(self,
+ in_channels,
+ out_channels,
+ kernel_size=3,
+ deform_groups=4):
+ super(FeatureAlign, self).__init__()
+ offset_channels = kernel_size * kernel_size * 2
+ self.conv_offset = nn.Conv2d(
+ 4, deform_groups * offset_channels, 1, bias=False)
+ self.conv_adaption = DeformConv2d(
+ in_channels,
+ out_channels,
+ kernel_size=kernel_size,
+ padding=(kernel_size - 1) // 2,
+ deform_groups=deform_groups)
+ self.relu = nn.ReLU(inplace=True)
+
+ def init_weights(self):
+ normal_init(self.conv_offset, std=0.1)
+ normal_init(self.conv_adaption, std=0.01)
+
+ def forward(self, x, shape):
+ offset = self.conv_offset(shape)
+ x = self.relu(self.conv_adaption(x, offset))
+ return x
+
+
+@HEADS.register_module()
+class FoveaHead(AnchorFreeHead):
+ """FoveaBox: Beyond Anchor-based Object Detector
+ https://arxiv.org/abs/1904.03797
+ """
+
+ def __init__(self,
+ num_classes,
+ in_channels,
+ base_edge_list=(16, 32, 64, 128, 256),
+ scale_ranges=((8, 32), (16, 64), (32, 128), (64, 256), (128,
+ 512)),
+ sigma=0.4,
+ with_deform=False,
+ deform_groups=4,
+ **kwargs):
+ self.base_edge_list = base_edge_list
+ self.scale_ranges = scale_ranges
+ self.sigma = sigma
+ self.with_deform = with_deform
+ self.deform_groups = deform_groups
+ super().__init__(num_classes, in_channels, **kwargs)
+
+ def _init_layers(self):
+ # box branch
+ super()._init_reg_convs()
+ self.conv_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1)
+
+ # cls branch
+ if not self.with_deform:
+ super()._init_cls_convs()
+ self.conv_cls = nn.Conv2d(
+ self.feat_channels, self.cls_out_channels, 3, padding=1)
+ else:
+ self.cls_convs = nn.ModuleList()
+ self.cls_convs.append(
+ ConvModule(
+ self.feat_channels, (self.feat_channels * 4),
+ 3,
+ stride=1,
+ padding=1,
+ conv_cfg=self.conv_cfg,
+ norm_cfg=self.norm_cfg,
+ bias=self.norm_cfg is None))
+ self.cls_convs.append(
+ ConvModule((self.feat_channels * 4), (self.feat_channels * 4),
+ 1,
+ stride=1,
+ padding=0,
+ conv_cfg=self.conv_cfg,
+ norm_cfg=self.norm_cfg,
+ bias=self.norm_cfg is None))
+ self.feature_adaption = FeatureAlign(
+ self.feat_channels,
+ self.feat_channels,
+ kernel_size=3,
+ deform_groups=self.deform_groups)
+ self.conv_cls = nn.Conv2d(
+ int(self.feat_channels * 4),
+ self.cls_out_channels,
+ 3,
+ padding=1)
+
+ def init_weights(self):
+ super().init_weights()
+ if self.with_deform:
+ self.feature_adaption.init_weights()
+
+ def forward_single(self, x):
+ cls_feat = x
+ reg_feat = x
+ for reg_layer in self.reg_convs:
+ reg_feat = reg_layer(reg_feat)
+ bbox_pred = self.conv_reg(reg_feat)
+ if self.with_deform:
+ cls_feat = self.feature_adaption(cls_feat, bbox_pred.exp())
+ for cls_layer in self.cls_convs:
+ cls_feat = cls_layer(cls_feat)
+ cls_score = self.conv_cls(cls_feat)
+ return cls_score, bbox_pred
+
+ def _get_points_single(self, *args, **kwargs):
+ y, x = super()._get_points_single(*args, **kwargs)
+ return y + 0.5, x + 0.5
+
+ def loss(self,
+ cls_scores,
+ bbox_preds,
+ gt_bbox_list,
+ gt_label_list,
+ img_metas,
+ gt_bboxes_ignore=None):
+ assert len(cls_scores) == len(bbox_preds)
+
+ featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
+ points = self.get_points(featmap_sizes, bbox_preds[0].dtype,
+ bbox_preds[0].device)
+ num_imgs = cls_scores[0].size(0)
+ flatten_cls_scores = [
+ cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels)
+ for cls_score in cls_scores
+ ]
+ flatten_bbox_preds = [
+ bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4)
+ for bbox_pred in bbox_preds
+ ]
+ flatten_cls_scores = torch.cat(flatten_cls_scores)
+ flatten_bbox_preds = torch.cat(flatten_bbox_preds)
+ flatten_labels, flatten_bbox_targets = self.get_targets(
+ gt_bbox_list, gt_label_list, featmap_sizes, points)
+
+ # FG cat_id: [0, num_classes -1], BG cat_id: num_classes
+ pos_inds = ((flatten_labels >= 0)
+ & (flatten_labels < self.num_classes)).nonzero().view(-1)
+ num_pos = len(pos_inds)
+
+ loss_cls = self.loss_cls(
+ flatten_cls_scores, flatten_labels, avg_factor=num_pos + num_imgs)
+ if num_pos > 0:
+ pos_bbox_preds = flatten_bbox_preds[pos_inds]
+ pos_bbox_targets = flatten_bbox_targets[pos_inds]
+ pos_weights = pos_bbox_targets.new_zeros(
+ pos_bbox_targets.size()) + 1.0
+ loss_bbox = self.loss_bbox(
+ pos_bbox_preds,
+ pos_bbox_targets,
+ pos_weights,
+ avg_factor=num_pos)
+ else:
+ loss_bbox = torch.tensor(
+ 0,
+ dtype=flatten_bbox_preds.dtype,
+ device=flatten_bbox_preds.device)
+ return dict(loss_cls=loss_cls, loss_bbox=loss_bbox)
+
+ def get_targets(self, gt_bbox_list, gt_label_list, featmap_sizes, points):
+ label_list, bbox_target_list = multi_apply(
+ self._get_target_single,
+ gt_bbox_list,
+ gt_label_list,
+ featmap_size_list=featmap_sizes,
+ point_list=points)
+ flatten_labels = [
+ torch.cat([
+ labels_level_img.flatten() for labels_level_img in labels_level
+ ]) for labels_level in zip(*label_list)
+ ]
+ flatten_bbox_targets = [
+ torch.cat([
+ bbox_targets_level_img.reshape(-1, 4)
+ for bbox_targets_level_img in bbox_targets_level
+ ]) for bbox_targets_level in zip(*bbox_target_list)
+ ]
+ flatten_labels = torch.cat(flatten_labels)
+ flatten_bbox_targets = torch.cat(flatten_bbox_targets)
+ return flatten_labels, flatten_bbox_targets
+
+ def _get_target_single(self,
+ gt_bboxes_raw,
+ gt_labels_raw,
+ featmap_size_list=None,
+ point_list=None):
+
+ gt_areas = torch.sqrt((gt_bboxes_raw[:, 2] - gt_bboxes_raw[:, 0]) *
+ (gt_bboxes_raw[:, 3] - gt_bboxes_raw[:, 1]))
+ label_list = []
+ bbox_target_list = []
+ # for each pyramid, find the cls and box target
+ for base_len, (lower_bound, upper_bound), stride, featmap_size, \
+ (y, x) in zip(self.base_edge_list, self.scale_ranges,
+ self.strides, featmap_size_list, point_list):
+ # FG cat_id: [0, num_classes -1], BG cat_id: num_classes
+ labels = gt_labels_raw.new_zeros(featmap_size) + self.num_classes
+ bbox_targets = gt_bboxes_raw.new(featmap_size[0], featmap_size[1],
+ 4) + 1
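+            # background locations keep the default target of 1, which becomes
+            # log(1) = 0 after the torch.log applied below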
+ # scale assignment
+ hit_indices = ((gt_areas >= lower_bound) &
+ (gt_areas <= upper_bound)).nonzero().flatten()
+ if len(hit_indices) == 0:
+ label_list.append(labels)
+ bbox_target_list.append(torch.log(bbox_targets))
+ continue
+ _, hit_index_order = torch.sort(-gt_areas[hit_indices])
+ hit_indices = hit_indices[hit_index_order]
+ gt_bboxes = gt_bboxes_raw[hit_indices, :] / stride
+ gt_labels = gt_labels_raw[hit_indices]
+ half_w = 0.5 * (gt_bboxes[:, 2] - gt_bboxes[:, 0])
+ half_h = 0.5 * (gt_bboxes[:, 3] - gt_bboxes[:, 1])
+ # valid fovea area: left, right, top, down
+ pos_left = torch.ceil(
+ gt_bboxes[:, 0] + (1 - self.sigma) * half_w - 0.5).long().\
+ clamp(0, featmap_size[1] - 1)
+ pos_right = torch.floor(
+ gt_bboxes[:, 0] + (1 + self.sigma) * half_w - 0.5).long().\
+ clamp(0, featmap_size[1] - 1)
+ pos_top = torch.ceil(
+ gt_bboxes[:, 1] + (1 - self.sigma) * half_h - 0.5).long().\
+ clamp(0, featmap_size[0] - 1)
+ pos_down = torch.floor(
+ gt_bboxes[:, 1] + (1 + self.sigma) * half_h - 0.5).long().\
+ clamp(0, featmap_size[0] - 1)
+ for px1, py1, px2, py2, label, (gt_x1, gt_y1, gt_x2, gt_y2) in \
+ zip(pos_left, pos_top, pos_right, pos_down, gt_labels,
+ gt_bboxes_raw[hit_indices, :]):
+ labels[py1:py2 + 1, px1:px2 + 1] = label
+ bbox_targets[py1:py2 + 1, px1:px2 + 1, 0] = \
+ (stride * x[py1:py2 + 1, px1:px2 + 1] - gt_x1) / base_len
+ bbox_targets[py1:py2 + 1, px1:px2 + 1, 1] = \
+ (stride * y[py1:py2 + 1, px1:px2 + 1] - gt_y1) / base_len
+ bbox_targets[py1:py2 + 1, px1:px2 + 1, 2] = \
+ (gt_x2 - stride * x[py1:py2 + 1, px1:px2 + 1]) / base_len
+ bbox_targets[py1:py2 + 1, px1:px2 + 1, 3] = \
+ (gt_y2 - stride * y[py1:py2 + 1, px1:px2 + 1]) / base_len
+ bbox_targets = bbox_targets.clamp(min=1. / 16, max=16.)
+ label_list.append(labels)
+ bbox_target_list.append(torch.log(bbox_targets))
+ return label_list, bbox_target_list
+
+ def get_bboxes(self,
+ cls_scores,
+ bbox_preds,
+ img_metas,
+ cfg=None,
+ rescale=None):
+ assert len(cls_scores) == len(bbox_preds)
+ num_levels = len(cls_scores)
+ featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
+ points = self.get_points(
+ featmap_sizes,
+ bbox_preds[0].dtype,
+ bbox_preds[0].device,
+ flatten=True)
+ result_list = []
+ for img_id in range(len(img_metas)):
+ cls_score_list = [
+ cls_scores[i][img_id].detach() for i in range(num_levels)
+ ]
+ bbox_pred_list = [
+ bbox_preds[i][img_id].detach() for i in range(num_levels)
+ ]
+ img_shape = img_metas[img_id]['img_shape']
+ scale_factor = img_metas[img_id]['scale_factor']
+ det_bboxes = self._get_bboxes_single(cls_score_list,
+ bbox_pred_list, featmap_sizes,
+ points, img_shape,
+ scale_factor, cfg, rescale)
+ result_list.append(det_bboxes)
+ return result_list
+
+ def _get_bboxes_single(self,
+ cls_scores,
+ bbox_preds,
+ featmap_sizes,
+ point_list,
+ img_shape,
+ scale_factor,
+ cfg,
+ rescale=False):
+ cfg = self.test_cfg if cfg is None else cfg
+ assert len(cls_scores) == len(bbox_preds) == len(point_list)
+ det_bboxes = []
+ det_scores = []
+ for cls_score, bbox_pred, featmap_size, stride, base_len, (y, x) \
+ in zip(cls_scores, bbox_preds, featmap_sizes, self.strides,
+ self.base_edge_list, point_list):
+ assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
+ scores = cls_score.permute(1, 2, 0).reshape(
+ -1, self.cls_out_channels).sigmoid()
+ bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4).exp()
+ nms_pre = cfg.get('nms_pre', -1)
+ if (nms_pre > 0) and (scores.shape[0] > nms_pre):
+ max_scores, _ = scores.max(dim=1)
+ _, topk_inds = max_scores.topk(nms_pre)
+ bbox_pred = bbox_pred[topk_inds, :]
+ scores = scores[topk_inds, :]
+ y = y[topk_inds]
+ x = x[topk_inds]
+ x1 = (stride * x - base_len * bbox_pred[:, 0]).\
+ clamp(min=0, max=img_shape[1] - 1)
+ y1 = (stride * y - base_len * bbox_pred[:, 1]).\
+ clamp(min=0, max=img_shape[0] - 1)
+ x2 = (stride * x + base_len * bbox_pred[:, 2]).\
+ clamp(min=0, max=img_shape[1] - 1)
+ y2 = (stride * y + base_len * bbox_pred[:, 3]).\
+ clamp(min=0, max=img_shape[0] - 1)
+ bboxes = torch.stack([x1, y1, x2, y2], -1)
+ det_bboxes.append(bboxes)
+ det_scores.append(scores)
+ det_bboxes = torch.cat(det_bboxes)
+ if rescale:
+ det_bboxes /= det_bboxes.new_tensor(scale_factor)
+ det_scores = torch.cat(det_scores)
+ padding = det_scores.new_zeros(det_scores.shape[0], 1)
+        # note that we set FG labels to [0, num_class-1] since mmdet v2.0
+ # BG cat_id: num_class
+ det_scores = torch.cat([det_scores, padding], dim=1)
+ det_bboxes, det_labels = multiclass_nms(det_bboxes, det_scores,
+ cfg.score_thr, cfg.nms,
+ cfg.max_per_img)
+ return det_bboxes, det_labels
diff --git a/annotator/uniformer/mmdet_null/models/dense_heads/free_anchor_retina_head.py b/annotator/uniformer/mmdet_null/models/dense_heads/free_anchor_retina_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..79879fdc3171b8e34b606b27eb1ceb67f4473e3e
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/dense_heads/free_anchor_retina_head.py
@@ -0,0 +1,270 @@
+import torch
+import torch.nn.functional as F
+
+from mmdet.core import bbox_overlaps
+from ..builder import HEADS
+from .retina_head import RetinaHead
+
+EPS = 1e-12
+
+
+@HEADS.register_module()
+class FreeAnchorRetinaHead(RetinaHead):
+ """FreeAnchor RetinaHead used in https://arxiv.org/abs/1909.02466.
+
+ Args:
+ num_classes (int): Number of categories excluding the background
+ category.
+ in_channels (int): Number of channels in the input feature map.
+ stacked_convs (int): Number of conv layers in cls and reg tower.
+ Default: 4.
+ conv_cfg (dict): dictionary to construct and config conv layer.
+ Default: None.
+ norm_cfg (dict): dictionary to construct and config norm layer.
+ Default: norm_cfg=dict(type='GN', num_groups=32,
+ requires_grad=True).
+        pre_anchor_topk (int): Number of boxes to be taken in each bag.
+ bbox_thr (float): The threshold of the saturated linear function. It is
+            usually the same as the IoU threshold used in NMS.
+ gamma (float): Gamma parameter in focal loss.
+ alpha (float): Alpha parameter in focal loss.
+ """ # noqa: W605
+
+ def __init__(self,
+ num_classes,
+ in_channels,
+ stacked_convs=4,
+ conv_cfg=None,
+ norm_cfg=None,
+ pre_anchor_topk=50,
+ bbox_thr=0.6,
+ gamma=2.0,
+ alpha=0.5,
+ **kwargs):
+ super(FreeAnchorRetinaHead,
+ self).__init__(num_classes, in_channels, stacked_convs, conv_cfg,
+ norm_cfg, **kwargs)
+
+ self.pre_anchor_topk = pre_anchor_topk
+ self.bbox_thr = bbox_thr
+ self.gamma = gamma
+ self.alpha = alpha
+
+ def loss(self,
+ cls_scores,
+ bbox_preds,
+ gt_bboxes,
+ gt_labels,
+ img_metas,
+ gt_bboxes_ignore=None):
+ """Compute losses of the head.
+
+ Args:
+ cls_scores (list[Tensor]): Box scores for each scale level
+ Has shape (N, num_anchors * num_classes, H, W)
+ bbox_preds (list[Tensor]): Box energies / deltas for each scale
+ level with shape (N, num_anchors * 4, H, W)
+ gt_bboxes (list[Tensor]): each item are the truth boxes for each
+ image in [tl_x, tl_y, br_x, br_y] format.
+ gt_labels (list[Tensor]): class indices corresponding to each box
+ img_metas (list[dict]): Meta information of each image, e.g.,
+ image size, scaling factor, etc.
+ gt_bboxes_ignore (None | list[Tensor]): specify which bounding
+ boxes can be ignored when computing the loss.
+
+ Returns:
+ dict[str, Tensor]: A dictionary of loss components.
+ """
+ featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
+ assert len(featmap_sizes) == len(self.anchor_generator.base_anchors)
+
+ anchor_list, _ = self.get_anchors(featmap_sizes, img_metas)
+ anchors = [torch.cat(anchor) for anchor in anchor_list]
+
+ # concatenate each level
+ cls_scores = [
+ cls.permute(0, 2, 3,
+ 1).reshape(cls.size(0), -1, self.cls_out_channels)
+ for cls in cls_scores
+ ]
+ bbox_preds = [
+ bbox_pred.permute(0, 2, 3, 1).reshape(bbox_pred.size(0), -1, 4)
+ for bbox_pred in bbox_preds
+ ]
+ cls_scores = torch.cat(cls_scores, dim=1)
+ bbox_preds = torch.cat(bbox_preds, dim=1)
+
+ cls_prob = torch.sigmoid(cls_scores)
+ box_prob = []
+ num_pos = 0
+ positive_losses = []
+ for _, (anchors_, gt_labels_, gt_bboxes_, cls_prob_,
+ bbox_preds_) in enumerate(
+ zip(anchors, gt_labels, gt_bboxes, cls_prob, bbox_preds)):
+
+ with torch.no_grad():
+ if len(gt_bboxes_) == 0:
+ image_box_prob = torch.zeros(
+ anchors_.size(0),
+ self.cls_out_channels).type_as(bbox_preds_)
+ else:
+ # box_localization: a_{j}^{loc}, shape: [j, 4]
+ pred_boxes = self.bbox_coder.decode(anchors_, bbox_preds_)
+
+ # object_box_iou: IoU_{ij}^{loc}, shape: [i, j]
+ object_box_iou = bbox_overlaps(gt_bboxes_, pred_boxes)
+
+ # object_box_prob: P{a_{j} -> b_{i}}, shape: [i, j]
+ t1 = self.bbox_thr
+ t2 = object_box_iou.max(
+ dim=1, keepdim=True).values.clamp(min=t1 + 1e-12)
+ object_box_prob = ((object_box_iou - t1) /
+ (t2 - t1)).clamp(
+ min=0, max=1)
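+                    # saturated linear function: anchors with IoU <= bbox_thr
+                    # get probability 0, while the highest-IoU anchor of each
+                    # gt saturates to 1 (when its IoU exceeds the threshold)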
+
+ # object_cls_box_prob: P{a_{j} -> b_{i}}, shape: [i, c, j]
+ num_obj = gt_labels_.size(0)
+ indices = torch.stack([
+ torch.arange(num_obj).type_as(gt_labels_), gt_labels_
+ ],
+ dim=0)
+ object_cls_box_prob = torch.sparse_coo_tensor(
+ indices, object_box_prob)
+
+ # image_box_iou: P{a_{j} \in A_{+}}, shape: [c, j]
+ """
+ from "start" to "end" implement:
+ image_box_iou = torch.sparse.max(object_cls_box_prob,
+ dim=0).t()
+
+ """
+ # start
+ box_cls_prob = torch.sparse.sum(
+ object_cls_box_prob, dim=0).to_dense()
+
+ indices = torch.nonzero(box_cls_prob, as_tuple=False).t_()
+ if indices.numel() == 0:
+ image_box_prob = torch.zeros(
+ anchors_.size(0),
+ self.cls_out_channels).type_as(object_box_prob)
+ else:
+ nonzero_box_prob = torch.where(
+ (gt_labels_.unsqueeze(dim=-1) == indices[0]),
+ object_box_prob[:, indices[1]],
+ torch.tensor([
+ 0
+ ]).type_as(object_box_prob)).max(dim=0).values
+
+                        # map up to shape [j, c]
+ image_box_prob = torch.sparse_coo_tensor(
+ indices.flip([0]),
+ nonzero_box_prob,
+ size=(anchors_.size(0),
+ self.cls_out_channels)).to_dense()
+ # end
+
+ box_prob.append(image_box_prob)
+
+ # construct bags for objects
+ match_quality_matrix = bbox_overlaps(gt_bboxes_, anchors_)
+ _, matched = torch.topk(
+ match_quality_matrix,
+ self.pre_anchor_topk,
+ dim=1,
+ sorted=False)
+ del match_quality_matrix
+
+ # matched_cls_prob: P_{ij}^{cls}
+ matched_cls_prob = torch.gather(
+ cls_prob_[matched], 2,
+ gt_labels_.view(-1, 1, 1).repeat(1, self.pre_anchor_topk,
+ 1)).squeeze(2)
+
+ # matched_box_prob: P_{ij}^{loc}
+ matched_anchors = anchors_[matched]
+ matched_object_targets = self.bbox_coder.encode(
+ matched_anchors,
+ gt_bboxes_.unsqueeze(dim=1).expand_as(matched_anchors))
+ loss_bbox = self.loss_bbox(
+ bbox_preds_[matched],
+ matched_object_targets,
+ reduction_override='none').sum(-1)
+ matched_box_prob = torch.exp(-loss_bbox)
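+            # P_{ij}^{loc} = exp(-localisation loss), so a perfectly regressed
+            # box gets probability 1 and poorly regressed boxes decay towards 0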
+
+ # positive_losses: {-log( Mean-max(P_{ij}^{cls} * P_{ij}^{loc}) )}
+ num_pos += len(gt_bboxes_)
+ positive_losses.append(
+ self.positive_bag_loss(matched_cls_prob, matched_box_prob))
+ positive_loss = torch.cat(positive_losses).sum() / max(1, num_pos)
+
+ # box_prob: P{a_{j} \in A_{+}}
+ box_prob = torch.stack(box_prob, dim=0)
+
+ # negative_loss:
+ # \sum_{j}{ FL((1 - P{a_{j} \in A_{+}}) * (1 - P_{j}^{bg})) } / n||B||
+ negative_loss = self.negative_bag_loss(cls_prob, box_prob).sum() / max(
+ 1, num_pos * self.pre_anchor_topk)
+
+ # avoid the absence of gradients in regression subnet
+ # when no ground-truth in a batch
+ if num_pos == 0:
+ positive_loss = bbox_preds.sum() * 0
+
+ losses = {
+ 'positive_bag_loss': positive_loss,
+ 'negative_bag_loss': negative_loss
+ }
+ return losses
+
+ def positive_bag_loss(self, matched_cls_prob, matched_box_prob):
+ """Compute positive bag loss.
+
+ :math:`-log( Mean-max(P_{ij}^{cls} * P_{ij}^{loc}) )`.
+
+ :math:`P_{ij}^{cls}`: matched_cls_prob, classification probability of matched samples.
+
+ :math:`P_{ij}^{loc}`: matched_box_prob, box probability of matched samples.
+
+ Args:
+ matched_cls_prob (Tensor): Classification probabilty of matched
+ samples in shape (num_gt, pre_anchor_topk).
+ matched_box_prob (Tensor): BBox probability of matched samples,
+ in shape (num_gt, pre_anchor_topk).
+
+ Returns:
+ Tensor: Positive bag loss in shape (num_gt,).
+ """ # noqa: E501, W605
+ # bag_prob = Mean-max(matched_prob)
+ matched_prob = matched_cls_prob * matched_box_prob
+ weight = 1 / torch.clamp(1 - matched_prob, 1e-12, None)
+ weight /= weight.sum(dim=1).unsqueeze(dim=-1)
+ bag_prob = (weight * matched_prob).sum(dim=1)
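+        # Mean-max: behaves like a plain mean when all matched_prob are small
+        # (early in training) and approaches the max as some probabilities
+        # saturate towards 1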
+ # positive_bag_loss = -self.alpha * log(bag_prob)
+ return self.alpha * F.binary_cross_entropy(
+ bag_prob, torch.ones_like(bag_prob), reduction='none')
+
+ def negative_bag_loss(self, cls_prob, box_prob):
+ """Compute negative bag loss.
+
+ :math:`FL((1 - P_{a_{j} \in A_{+}}) * (1 - P_{j}^{bg}))`.
+
+ :math:`P_{a_{j} \in A_{+}}`: Box_probability of matched samples.
+
+ :math:`P_{j}^{bg}`: Classification probability of negative samples.
+
+ Args:
+ cls_prob (Tensor): Classification probability, in shape
+ (num_img, num_anchors, num_classes).
+ box_prob (Tensor): Box probability, in shape
+ (num_img, num_anchors, num_classes).
+
+ Returns:
+ Tensor: Negative bag loss in shape (num_img, num_anchors, num_classes).
+ """ # noqa: E501, W605
+ prob = cls_prob * (1 - box_prob)
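+        # anchors that are likely matched to some gt (box_prob close to 1) are
+        # down-weighted in this background term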
+ # There are some cases when neg_prob = 0.
+ # This will cause the neg_prob.log() to be inf without clamp.
+ prob = prob.clamp(min=EPS, max=1 - EPS)
+ negative_bag_loss = prob**self.gamma * F.binary_cross_entropy(
+ prob, torch.zeros_like(prob), reduction='none')
+ return (1 - self.alpha) * negative_bag_loss
diff --git a/annotator/uniformer/mmdet_null/models/dense_heads/fsaf_head.py b/annotator/uniformer/mmdet_null/models/dense_heads/fsaf_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..7183efce28596ba106411250f508aec5995fbf60
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/dense_heads/fsaf_head.py
@@ -0,0 +1,422 @@
+import numpy as np
+import torch
+from mmcv.cnn import normal_init
+from mmcv.runner import force_fp32
+
+from mmdet.core import (anchor_inside_flags, images_to_levels, multi_apply,
+ unmap)
+from ..builder import HEADS
+from ..losses.accuracy import accuracy
+from ..losses.utils import weight_reduce_loss
+from .retina_head import RetinaHead
+
+
+@HEADS.register_module()
+class FSAFHead(RetinaHead):
+    """Anchor-free head used in `FSAF <https://arxiv.org/abs/1903.00621>`_.
+
+ The head contains two subnetworks. The first classifies anchor boxes and
+ the second regresses deltas for the anchors (num_anchors is 1 for anchor-
+ free methods)
+
+ Args:
+ *args: Same as its base class in :class:`RetinaHead`
+ score_threshold (float, optional): The score_threshold to calculate
+            positive recall. If given, prediction scores lower than this value
+            are counted as incorrect predictions. Defaults to None.
+ **kwargs: Same as its base class in :class:`RetinaHead`
+
+ Example:
+ >>> import torch
+ >>> self = FSAFHead(11, 7)
+ >>> x = torch.rand(1, 7, 32, 32)
+ >>> cls_score, bbox_pred = self.forward_single(x)
+ >>> # Each anchor predicts a score for each class except background
+ >>> cls_per_anchor = cls_score.shape[1] / self.num_anchors
+ >>> box_per_anchor = bbox_pred.shape[1] / self.num_anchors
+ >>> assert cls_per_anchor == self.num_classes
+ >>> assert box_per_anchor == 4
+ """
+
+ def __init__(self, *args, score_threshold=None, **kwargs):
+ super().__init__(*args, **kwargs)
+ self.score_threshold = score_threshold
+
+ def forward_single(self, x):
+ """Forward feature map of a single scale level.
+
+ Args:
+ x (Tensor): Feature map of a single scale level.
+
+ Returns:
+ tuple (Tensor):
+ cls_score (Tensor): Box scores for each scale level
+ Has shape (N, num_points * num_classes, H, W).
+ bbox_pred (Tensor): Box energies / deltas for each scale
+ level with shape (N, num_points * 4, H, W).
+ """
+ cls_score, bbox_pred = super().forward_single(x)
+ # relu: TBLR encoder only accepts positive bbox_pred
+ return cls_score, self.relu(bbox_pred)
+
+ def init_weights(self):
+ """Initialize weights of the head."""
+ super(FSAFHead, self).init_weights()
+        # The positive bias in self.retina_reg conv is to prevent predicted
+        # bbox with 0 area
+ normal_init(self.retina_reg, std=0.01, bias=0.25)
+
+ def _get_targets_single(self,
+ flat_anchors,
+ valid_flags,
+ gt_bboxes,
+ gt_bboxes_ignore,
+ gt_labels,
+ img_meta,
+ label_channels=1,
+ unmap_outputs=True):
+ """Compute regression and classification targets for anchors in a
+ single image.
+
+ Most of the codes are the same with the base class
+ :obj: `AnchorHead`, except that it also collects and returns
+ the matched gt index in the image (from 0 to num_gt-1). If the
+ anchor bbox is not matched to any gt, the corresponding value in
+ pos_gt_inds is -1.
+ """
+ inside_flags = anchor_inside_flags(flat_anchors, valid_flags,
+ img_meta['img_shape'][:2],
+ self.train_cfg.allowed_border)
+ if not inside_flags.any():
+ return (None, ) * 7
+ # Assign gt and sample anchors
+ anchors = flat_anchors[inside_flags.type(torch.bool), :]
+ assign_result = self.assigner.assign(
+ anchors, gt_bboxes, gt_bboxes_ignore,
+ None if self.sampling else gt_labels)
+
+ sampling_result = self.sampler.sample(assign_result, anchors,
+ gt_bboxes)
+
+ num_valid_anchors = anchors.shape[0]
+ bbox_targets = torch.zeros_like(anchors)
+ bbox_weights = torch.zeros_like(anchors)
+ labels = anchors.new_full((num_valid_anchors, ),
+ self.num_classes,
+ dtype=torch.long)
+ label_weights = anchors.new_zeros((num_valid_anchors, label_channels),
+ dtype=torch.float)
+ pos_gt_inds = anchors.new_full((num_valid_anchors, ),
+ -1,
+ dtype=torch.long)
+
+ pos_inds = sampling_result.pos_inds
+ neg_inds = sampling_result.neg_inds
+
+ if len(pos_inds) > 0:
+ if not self.reg_decoded_bbox:
+ pos_bbox_targets = self.bbox_coder.encode(
+ sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes)
+ else:
+ # When the regression loss (e.g. `IouLoss`, `GIouLoss`)
+ # is applied directly on the decoded bounding boxes, both
+ # the predicted boxes and regression targets should be with
+ # absolute coordinate format.
+ pos_bbox_targets = sampling_result.pos_gt_bboxes
+ bbox_targets[pos_inds, :] = pos_bbox_targets
+ bbox_weights[pos_inds, :] = 1.0
+ # The assigned gt_index for each anchor. (0-based)
+ pos_gt_inds[pos_inds] = sampling_result.pos_assigned_gt_inds
+ if gt_labels is None:
+ # Only rpn gives gt_labels as None
+ # Foreground is the first class
+ labels[pos_inds] = 0
+ else:
+ labels[pos_inds] = gt_labels[
+ sampling_result.pos_assigned_gt_inds]
+ if self.train_cfg.pos_weight <= 0:
+ label_weights[pos_inds] = 1.0
+ else:
+ label_weights[pos_inds] = self.train_cfg.pos_weight
+
+ if len(neg_inds) > 0:
+ label_weights[neg_inds] = 1.0
+
+ # shadowed_labels is a tensor composed of tuples
+ # (anchor_inds, class_label) that indicate those anchors lying in the
+ # outer region of a gt or overlapped by another gt with a smaller
+ # area.
+ #
+ # Therefore, only the shadowed labels are ignored for loss calculation.
+ # the key `shadowed_labels` is defined in :obj:`CenterRegionAssigner`
+ shadowed_labels = assign_result.get_extra_property('shadowed_labels')
+ if shadowed_labels is not None and shadowed_labels.numel():
+ if len(shadowed_labels.shape) == 2:
+ idx_, label_ = shadowed_labels[:, 0], shadowed_labels[:, 1]
+ assert (labels[idx_] != label_).all(), \
+ 'One label cannot be both positive and ignored'
+ label_weights[idx_, label_] = 0
+ else:
+ label_weights[shadowed_labels] = 0
+
+ # map up to original set of anchors
+ if unmap_outputs:
+ num_total_anchors = flat_anchors.size(0)
+ labels = unmap(labels, num_total_anchors, inside_flags)
+ label_weights = unmap(label_weights, num_total_anchors,
+ inside_flags)
+ bbox_targets = unmap(bbox_targets, num_total_anchors, inside_flags)
+ bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags)
+ pos_gt_inds = unmap(
+ pos_gt_inds, num_total_anchors, inside_flags, fill=-1)
+
+ return (labels, label_weights, bbox_targets, bbox_weights, pos_inds,
+ neg_inds, sampling_result, pos_gt_inds)
+
+ @force_fp32(apply_to=('cls_scores', 'bbox_preds'))
+ def loss(self,
+ cls_scores,
+ bbox_preds,
+ gt_bboxes,
+ gt_labels,
+ img_metas,
+ gt_bboxes_ignore=None):
+ """Compute loss of the head.
+
+ Args:
+ cls_scores (list[Tensor]): Box scores for each scale level
+ Has shape (N, num_points * num_classes, H, W).
+ bbox_preds (list[Tensor]): Box energies / deltas for each scale
+ level with shape (N, num_points * 4, H, W).
+ gt_bboxes (list[Tensor]): each item are the truth boxes for each
+ image in [tl_x, tl_y, br_x, br_y] format.
+ gt_labels (list[Tensor]): class indices corresponding to each box
+ img_metas (list[dict]): Meta information of each image, e.g.,
+ image size, scaling factor, etc.
+ gt_bboxes_ignore (None | list[Tensor]): specify which bounding
+ boxes can be ignored when computing the loss.
+
+ Returns:
+ dict[str, Tensor]: A dictionary of loss components.
+ """
+ for i in range(len(bbox_preds)): # loop over fpn level
+ # avoid 0 area of the predicted bbox
+ bbox_preds[i] = bbox_preds[i].clamp(min=1e-4)
+ # TODO: It may directly use the base-class loss function.
+ featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
+ assert len(featmap_sizes) == self.anchor_generator.num_levels
+ batch_size = len(gt_bboxes)
+ device = cls_scores[0].device
+ anchor_list, valid_flag_list = self.get_anchors(
+ featmap_sizes, img_metas, device=device)
+ label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1
+ cls_reg_targets = self.get_targets(
+ anchor_list,
+ valid_flag_list,
+ gt_bboxes,
+ img_metas,
+ gt_bboxes_ignore_list=gt_bboxes_ignore,
+ gt_labels_list=gt_labels,
+ label_channels=label_channels)
+ if cls_reg_targets is None:
+ return None
+ (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,
+ num_total_pos, num_total_neg,
+ pos_assigned_gt_inds_list) = cls_reg_targets
+
+ num_gts = np.array(list(map(len, gt_labels)))
+ num_total_samples = (
+ num_total_pos + num_total_neg if self.sampling else num_total_pos)
+ # anchor number of multi levels
+ num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]
+ # concat all level anchors and flags to a single tensor
+ concat_anchor_list = []
+ for i in range(len(anchor_list)):
+ concat_anchor_list.append(torch.cat(anchor_list[i]))
+ all_anchor_list = images_to_levels(concat_anchor_list,
+ num_level_anchors)
+ losses_cls, losses_bbox = multi_apply(
+ self.loss_single,
+ cls_scores,
+ bbox_preds,
+ all_anchor_list,
+ labels_list,
+ label_weights_list,
+ bbox_targets_list,
+ bbox_weights_list,
+ num_total_samples=num_total_samples)
+
+ # `pos_assigned_gt_inds_list` (length: fpn_levels) stores the assigned
+ # gt index of each anchor bbox in each fpn level.
+ cum_num_gts = list(np.cumsum(num_gts)) # length of batch_size
+ for i, assign in enumerate(pos_assigned_gt_inds_list):
+ # loop over fpn levels
+ for j in range(1, batch_size):
+ # loop over batch size
+ # Convert gt indices in each img to those in the batch
+ assign[j][assign[j] >= 0] += int(cum_num_gts[j - 1])
+ pos_assigned_gt_inds_list[i] = assign.flatten()
+ labels_list[i] = labels_list[i].flatten()
+ num_gts = sum(map(len, gt_labels)) # total number of gt in the batch
+ # The unique label index of each gt in the batch
+ label_sequence = torch.arange(num_gts, device=device)
+ # Collect the average loss of each gt in each level
+ with torch.no_grad():
+ loss_levels, = multi_apply(
+ self.collect_loss_level_single,
+ losses_cls,
+ losses_bbox,
+ pos_assigned_gt_inds_list,
+ labels_seq=label_sequence)
+ # Shape: (fpn_levels, num_gts). Loss of each gt at each fpn level
+ loss_levels = torch.stack(loss_levels, dim=0)
+ # Locate the best fpn level for loss back-propagation
+ if loss_levels.numel() == 0: # zero gt
+ argmin = loss_levels.new_empty((num_gts, ), dtype=torch.long)
+ else:
+ _, argmin = loss_levels.min(dim=0)
+
+ # Reweight the loss of each (anchor, label) pair, so that only those
+ # at the best gt level are back-propagated.
+ losses_cls, losses_bbox, pos_inds = multi_apply(
+ self.reweight_loss_single,
+ losses_cls,
+ losses_bbox,
+ pos_assigned_gt_inds_list,
+ labels_list,
+ list(range(len(losses_cls))),
+ min_levels=argmin)
+ num_pos = torch.cat(pos_inds, 0).sum().float()
+ pos_recall = self.calculate_pos_recall(cls_scores, labels_list,
+ pos_inds)
+
+ if num_pos == 0: # No gt
+ avg_factor = num_pos + float(num_total_neg)
+ else:
+ avg_factor = num_pos
+ for i in range(len(losses_cls)):
+ losses_cls[i] /= avg_factor
+ losses_bbox[i] /= avg_factor
+ return dict(
+ loss_cls=losses_cls,
+ loss_bbox=losses_bbox,
+ num_pos=num_pos / batch_size,
+ pos_recall=pos_recall)
+
+ def calculate_pos_recall(self, cls_scores, labels_list, pos_inds):
+ """Calculate positive recall with score threshold.
+
+ Args:
+ cls_scores (list[Tensor]): Classification scores at all fpn levels.
+ Each tensor is in shape (N, num_classes * num_anchors, H, W)
+ labels_list (list[Tensor]): The label that each anchor is assigned
+ to. Shape (N * H * W * num_anchors, )
+ pos_inds (list[Tensor]): List of bool tensors indicating whether
+ the anchor is assigned to a positive label.
+ Shape (N * H * W * num_anchors, )
+
+ Returns:
+ Tensor: A single float number indicating the positive recall.
+ """
+ with torch.no_grad():
+ num_class = self.num_classes
+ scores = [
+ cls.permute(0, 2, 3, 1).reshape(-1, num_class)[pos]
+ for cls, pos in zip(cls_scores, pos_inds)
+ ]
+ labels = [
+ label.reshape(-1)[pos]
+ for label, pos in zip(labels_list, pos_inds)
+ ]
+ scores = torch.cat(scores, dim=0)
+ labels = torch.cat(labels, dim=0)
+ if self.use_sigmoid_cls:
+ scores = scores.sigmoid()
+ else:
+ scores = scores.softmax(dim=1)
+
+ return accuracy(scores, labels, thresh=self.score_threshold)
+
+ def collect_loss_level_single(self, cls_loss, reg_loss, assigned_gt_inds,
+ labels_seq):
+ """Get the average loss in each FPN level w.r.t. each gt label.
+
+ Args:
+ cls_loss (Tensor): Classification loss of each feature map pixel,
+ shape (num_anchor, num_class)
+ reg_loss (Tensor): Regression loss of each feature map pixel,
+ shape (num_anchor, 4)
+            assigned_gt_inds (Tensor): Index of the gt that each prior is
+                assigned to (0-based; -1 means no assignment).
+                Shape (num_anchor,).
+            labels_seq (Tensor): The rank of labels. Shape (num_gt,).
+
+        Returns:
+            Tensor: Average loss of each gt at this level, shape (num_gt,).
+ """
+ if len(reg_loss.shape) == 2: # iou loss has shape (num_prior, 4)
+ reg_loss = reg_loss.sum(dim=-1) # sum loss in tblr dims
+ if len(cls_loss.shape) == 2:
+ cls_loss = cls_loss.sum(dim=-1) # sum loss in class dims
+ loss = cls_loss + reg_loss
+ assert loss.size(0) == assigned_gt_inds.size(0)
+ # Default loss value is 1e6 for a layer where no anchor is positive
+ # to ensure it will not be chosen to back-propagate gradient
+ losses_ = loss.new_full(labels_seq.shape, 1e6)
+ for i, l in enumerate(labels_seq):
+ match = assigned_gt_inds == l
+ if match.any():
+ losses_[i] = loss[match].mean()
+ return losses_,
+
+ def reweight_loss_single(self, cls_loss, reg_loss, assigned_gt_inds,
+ labels, level, min_levels):
+ """Reweight loss values at each level.
+
+        Reassign loss values at each level by zeroing out the anchors whose
+        matched gt attains a lower loss at another level, then return the
+        reduced losses.
+
+ Args:
+ cls_loss (Tensor): Element-wise classification loss.
+ Shape: (num_anchors, num_classes)
+ reg_loss (Tensor): Element-wise regression loss.
+ Shape: (num_anchors, 4)
+            assigned_gt_inds (Tensor): The gt indices that each anchor bbox
+                is assigned to. -1 denotes a negative anchor, otherwise it is
+                the gt index (0-based). Shape: (num_anchors, ).
+ labels (Tensor): Label assigned to anchors. Shape: (num_anchors, ).
+ level (int): The current level index in the pyramid
+ (0-4 for RetinaNet)
+            min_levels (Tensor): The best-matching level for each gt.
+                Shape: (num_gts, ).
+
+ Returns:
+ tuple:
+ - cls_loss: Reduced corrected classification loss. Scalar.
+ - reg_loss: Reduced corrected regression loss. Scalar.
+ - pos_flags (Tensor): Corrected bool tensor indicating the
+ final positive anchors. Shape: (num_anchors, ).
+ """
+ loc_weight = torch.ones_like(reg_loss)
+ cls_weight = torch.ones_like(cls_loss)
+ pos_flags = assigned_gt_inds >= 0 # positive pixel flag
+ pos_indices = torch.nonzero(pos_flags, as_tuple=False).flatten()
+
+ if pos_flags.any(): # pos pixels exist
+ pos_assigned_gt_inds = assigned_gt_inds[pos_flags]
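+            # An anchor stays positive only if the current level is the best
+            # (lowest-loss) level for its matched gt; anchors matched at other
+            # levels are demoted to negatives below.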
+ zeroing_indices = (min_levels[pos_assigned_gt_inds] != level)
+ neg_indices = pos_indices[zeroing_indices]
+
+ if neg_indices.numel():
+ pos_flags[neg_indices] = 0
+ loc_weight[neg_indices] = 0
+ # Only the weight corresponding to the label is
+ # zeroed out if not selected
+ zeroing_labels = labels[neg_indices]
+ assert (zeroing_labels >= 0).all()
+ cls_weight[neg_indices, zeroing_labels] = 0
+
+ # Weighted loss for both cls and reg loss
+ cls_loss = weight_reduce_loss(cls_loss, cls_weight, reduction='sum')
+ reg_loss = weight_reduce_loss(reg_loss, loc_weight, reduction='sum')
+
+ return cls_loss, reg_loss, pos_flags
diff --git a/annotator/uniformer/mmdet_null/models/dense_heads/ga_retina_head.py b/annotator/uniformer/mmdet_null/models/dense_heads/ga_retina_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..8822d1ca78ee2fa2f304a0649e81274830383533
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/dense_heads/ga_retina_head.py
@@ -0,0 +1,109 @@
+import torch.nn as nn
+from mmcv.cnn import ConvModule, bias_init_with_prob, normal_init
+from mmcv.ops import MaskedConv2d
+
+from ..builder import HEADS
+from .guided_anchor_head import FeatureAdaption, GuidedAnchorHead
+
+
+@HEADS.register_module()
+class GARetinaHead(GuidedAnchorHead):
+ """Guided-Anchor-based RetinaNet head."""
+
+ def __init__(self,
+ num_classes,
+ in_channels,
+ stacked_convs=4,
+ conv_cfg=None,
+ norm_cfg=None,
+ **kwargs):
+ self.stacked_convs = stacked_convs
+ self.conv_cfg = conv_cfg
+ self.norm_cfg = norm_cfg
+ super(GARetinaHead, self).__init__(num_classes, in_channels, **kwargs)
+
+ def _init_layers(self):
+ """Initialize layers of the head."""
+ self.relu = nn.ReLU(inplace=True)
+ self.cls_convs = nn.ModuleList()
+ self.reg_convs = nn.ModuleList()
+ for i in range(self.stacked_convs):
+ chn = self.in_channels if i == 0 else self.feat_channels
+ self.cls_convs.append(
+ ConvModule(
+ chn,
+ self.feat_channels,
+ 3,
+ stride=1,
+ padding=1,
+ conv_cfg=self.conv_cfg,
+ norm_cfg=self.norm_cfg))
+ self.reg_convs.append(
+ ConvModule(
+ chn,
+ self.feat_channels,
+ 3,
+ stride=1,
+ padding=1,
+ conv_cfg=self.conv_cfg,
+ norm_cfg=self.norm_cfg))
+
+ self.conv_loc = nn.Conv2d(self.feat_channels, 1, 1)
+ self.conv_shape = nn.Conv2d(self.feat_channels, self.num_anchors * 2,
+ 1)
+ self.feature_adaption_cls = FeatureAdaption(
+ self.feat_channels,
+ self.feat_channels,
+ kernel_size=3,
+ deform_groups=self.deform_groups)
+ self.feature_adaption_reg = FeatureAdaption(
+ self.feat_channels,
+ self.feat_channels,
+ kernel_size=3,
+ deform_groups=self.deform_groups)
+ self.retina_cls = MaskedConv2d(
+ self.feat_channels,
+ self.num_anchors * self.cls_out_channels,
+ 3,
+ padding=1)
+ self.retina_reg = MaskedConv2d(
+ self.feat_channels, self.num_anchors * 4, 3, padding=1)
+
+ def init_weights(self):
+ """Initialize weights of the layer."""
+ for m in self.cls_convs:
+ normal_init(m.conv, std=0.01)
+ for m in self.reg_convs:
+ normal_init(m.conv, std=0.01)
+
+ self.feature_adaption_cls.init_weights()
+ self.feature_adaption_reg.init_weights()
+
+ bias_cls = bias_init_with_prob(0.01)
+ normal_init(self.conv_loc, std=0.01, bias=bias_cls)
+ normal_init(self.conv_shape, std=0.01)
+ normal_init(self.retina_cls, std=0.01, bias=bias_cls)
+ normal_init(self.retina_reg, std=0.01)
+
+ def forward_single(self, x):
+ """Forward feature map of a single scale level."""
+ cls_feat = x
+ reg_feat = x
+ for cls_conv in self.cls_convs:
+ cls_feat = cls_conv(cls_feat)
+ for reg_conv in self.reg_convs:
+ reg_feat = reg_conv(reg_feat)
+
+ loc_pred = self.conv_loc(cls_feat)
+ shape_pred = self.conv_shape(reg_feat)
+
+ cls_feat = self.feature_adaption_cls(cls_feat, shape_pred)
+ reg_feat = self.feature_adaption_reg(reg_feat, shape_pred)
+
+ if not self.training:
+ mask = loc_pred.sigmoid()[0] >= self.loc_filter_thr
+ else:
+ mask = None
+ cls_score = self.retina_cls(cls_feat, mask)
+ bbox_pred = self.retina_reg(reg_feat, mask)
+ return cls_score, bbox_pred, shape_pred, loc_pred
diff --git a/annotator/uniformer/mmdet_null/models/dense_heads/ga_rpn_head.py b/annotator/uniformer/mmdet_null/models/dense_heads/ga_rpn_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..2ec0d4fdd3475bfbd2e541a6e8130b1df9ad861a
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/dense_heads/ga_rpn_head.py
@@ -0,0 +1,171 @@
+import copy
+import warnings
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from mmcv import ConfigDict
+from mmcv.cnn import normal_init
+from mmcv.ops import nms
+
+from ..builder import HEADS
+from .guided_anchor_head import GuidedAnchorHead
+from .rpn_test_mixin import RPNTestMixin
+
+
+@HEADS.register_module()
+class GARPNHead(RPNTestMixin, GuidedAnchorHead):
+ """Guided-Anchor-based RPN head."""
+
+ def __init__(self, in_channels, **kwargs):
+ super(GARPNHead, self).__init__(1, in_channels, **kwargs)
+
+ def _init_layers(self):
+ """Initialize layers of the head."""
+ self.rpn_conv = nn.Conv2d(
+ self.in_channels, self.feat_channels, 3, padding=1)
+ super(GARPNHead, self)._init_layers()
+
+ def init_weights(self):
+ """Initialize weights of the head."""
+ normal_init(self.rpn_conv, std=0.01)
+ super(GARPNHead, self).init_weights()
+
+ def forward_single(self, x):
+ """Forward feature of a single scale level."""
+
+ x = self.rpn_conv(x)
+ x = F.relu(x, inplace=True)
+ (cls_score, bbox_pred, shape_pred,
+ loc_pred) = super(GARPNHead, self).forward_single(x)
+ return cls_score, bbox_pred, shape_pred, loc_pred
+
+ def loss(self,
+ cls_scores,
+ bbox_preds,
+ shape_preds,
+ loc_preds,
+ gt_bboxes,
+ img_metas,
+ gt_bboxes_ignore=None):
+ losses = super(GARPNHead, self).loss(
+ cls_scores,
+ bbox_preds,
+ shape_preds,
+ loc_preds,
+ gt_bboxes,
+ None,
+ img_metas,
+ gt_bboxes_ignore=gt_bboxes_ignore)
+ return dict(
+ loss_rpn_cls=losses['loss_cls'],
+ loss_rpn_bbox=losses['loss_bbox'],
+ loss_anchor_shape=losses['loss_shape'],
+ loss_anchor_loc=losses['loss_loc'])
+
+ def _get_bboxes_single(self,
+ cls_scores,
+ bbox_preds,
+ mlvl_anchors,
+ mlvl_masks,
+ img_shape,
+ scale_factor,
+ cfg,
+ rescale=False):
+ cfg = self.test_cfg if cfg is None else cfg
+
+ cfg = copy.deepcopy(cfg)
+
+ # deprecate arguments warning
+ if 'nms' not in cfg or 'max_num' in cfg or 'nms_thr' in cfg:
+            warnings.warn(
+                'In rpn_proposal or test_cfg, nms_thr has been moved into a '
+                'dict named nms as iou_threshold, and max_num has been '
+                'renamed max_per_img. The original argument names and the '
+                'old way of specifying the NMS iou_threshold are deprecated.')
+ if 'nms' not in cfg:
+ cfg.nms = ConfigDict(dict(type='nms', iou_threshold=cfg.nms_thr))
+ if 'max_num' in cfg:
+ if 'max_per_img' in cfg:
+                assert cfg.max_num == cfg.max_per_img, f'You ' \
+                    f'set max_num and max_per_img at the same time, ' \
+                    f'but get {cfg.max_num} ' \
+                    f'and {cfg.max_per_img} respectively. ' \
+                    'Please delete max_num, which will be deprecated.'
+ else:
+ cfg.max_per_img = cfg.max_num
+ if 'nms_thr' in cfg:
+ assert cfg.nms.iou_threshold == cfg.nms_thr, f'You set ' \
+ f'iou_threshold in nms and ' \
+ f'nms_thr at the same time, but get ' \
+ f'{cfg.nms.iou_threshold} and {cfg.nms_thr}' \
+ f' respectively. Please delete the ' \
+ f'nms_thr which will be deprecated.'
+
+ assert cfg.nms.get('type', 'nms') == 'nms', 'GARPNHead only support ' \
+ 'naive nms.'
+
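+        # Per-level pipeline: filter by the loc mask, keep the top nms_pre
+        # scores, decode deltas into proposals, drop tiny boxes, then run NMS
+        # within the level.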
+ mlvl_proposals = []
+ for idx in range(len(cls_scores)):
+ rpn_cls_score = cls_scores[idx]
+ rpn_bbox_pred = bbox_preds[idx]
+ anchors = mlvl_anchors[idx]
+ mask = mlvl_masks[idx]
+ assert rpn_cls_score.size()[-2:] == rpn_bbox_pred.size()[-2:]
+            # if no location is kept at this level, skip it.
+ if mask.sum() == 0:
+ continue
+ rpn_cls_score = rpn_cls_score.permute(1, 2, 0)
+ if self.use_sigmoid_cls:
+ rpn_cls_score = rpn_cls_score.reshape(-1)
+ scores = rpn_cls_score.sigmoid()
+ else:
+ rpn_cls_score = rpn_cls_score.reshape(-1, 2)
+                # Note: FG labels are set to [0, num_class-1] since
+                # mmdet v2.0, and the BG cat_id is num_class.
+ scores = rpn_cls_score.softmax(dim=1)[:, :-1]
+ # filter scores, bbox_pred w.r.t. mask.
+ # anchors are filtered in get_anchors() beforehand.
+ scores = scores[mask]
+ rpn_bbox_pred = rpn_bbox_pred.permute(1, 2, 0).reshape(-1,
+ 4)[mask, :]
+ if scores.dim() == 0:
+ rpn_bbox_pred = rpn_bbox_pred.unsqueeze(0)
+ anchors = anchors.unsqueeze(0)
+ scores = scores.unsqueeze(0)
+ # filter anchors, bbox_pred, scores w.r.t. scores
+ if cfg.nms_pre > 0 and scores.shape[0] > cfg.nms_pre:
+ _, topk_inds = scores.topk(cfg.nms_pre)
+ rpn_bbox_pred = rpn_bbox_pred[topk_inds, :]
+ anchors = anchors[topk_inds, :]
+ scores = scores[topk_inds]
+ # get proposals w.r.t. anchors and rpn_bbox_pred
+ proposals = self.bbox_coder.decode(
+ anchors, rpn_bbox_pred, max_shape=img_shape)
+ # filter out too small bboxes
+ if cfg.min_bbox_size > 0:
+ w = proposals[:, 2] - proposals[:, 0]
+ h = proposals[:, 3] - proposals[:, 1]
+ valid_inds = torch.nonzero(
+ (w >= cfg.min_bbox_size) & (h >= cfg.min_bbox_size),
+ as_tuple=False).squeeze()
+ proposals = proposals[valid_inds, :]
+ scores = scores[valid_inds]
+ # NMS in current level
+ proposals, _ = nms(proposals, scores, cfg.nms.iou_threshold)
+ proposals = proposals[:cfg.nms_post, :]
+ mlvl_proposals.append(proposals)
+ proposals = torch.cat(mlvl_proposals, 0)
+ if cfg.get('nms_across_levels', False):
+ # NMS across multi levels
+ proposals, _ = nms(proposals[:, :4], proposals[:, -1],
+ cfg.nms.iou_threshold)
+ proposals = proposals[:cfg.max_per_img, :]
+ else:
+ scores = proposals[:, 4]
+ num = min(cfg.max_per_img, proposals.shape[0])
+ _, topk_inds = scores.topk(num)
+ proposals = proposals[topk_inds, :]
+ return proposals
diff --git a/annotator/uniformer/mmdet_null/models/dense_heads/gfl_head.py b/annotator/uniformer/mmdet_null/models/dense_heads/gfl_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..961bc92237663ad5343d3d08eb9c0e4e811ada05
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/dense_heads/gfl_head.py
@@ -0,0 +1,647 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from mmcv.cnn import ConvModule, Scale, bias_init_with_prob, normal_init
+from mmcv.runner import force_fp32
+
+from mmdet.core import (anchor_inside_flags, bbox2distance, bbox_overlaps,
+ build_assigner, build_sampler, distance2bbox,
+ images_to_levels, multi_apply, multiclass_nms,
+ reduce_mean, unmap)
+from ..builder import HEADS, build_loss
+from .anchor_head import AnchorHead
+
+
+class Integral(nn.Module):
+ """A fixed layer for calculating integral result from distribution.
+
+    This layer calculates the target location by :math:`\sum{P(y_i) * y_i}`,
+    where :math:`P(y_i)` denotes the softmax vector that represents the
+    discrete distribution, and :math:`y_i` denotes the discrete set,
+    usually {0, 1, 2, ..., reg_max}.
+
+ Args:
+ reg_max (int): The maximal value of the discrete set. Default: 16. You
+ may want to reset it according to your new dataset or related
+ settings.
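+
+    Example:
+        >>> # illustrative shapes: reg_max=16 gives 4 * (16 + 1) = 68 channels
+        >>> layer = Integral(reg_max=16)
+        >>> out = layer(torch.rand(8, 68))
+        >>> assert out.shape == (8, 4)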
+ """
+
+ def __init__(self, reg_max=16):
+ super(Integral, self).__init__()
+ self.reg_max = reg_max
+ self.register_buffer('project',
+ torch.linspace(0, self.reg_max, self.reg_max + 1))
+
+ def forward(self, x):
+ """Forward feature from the regression head to get integral result of
+ bounding box location.
+
+ Args:
+ x (Tensor): Features of the regression head, shape (N, 4*(n+1)),
+ n is self.reg_max.
+
+ Returns:
+ x (Tensor): Integral result of box locations, i.e., distance
+ offsets from the box center in four directions, shape (N, 4).
+ """
+ x = F.softmax(x.reshape(-1, self.reg_max + 1), dim=1)
+ x = F.linear(x, self.project.type_as(x)).reshape(-1, 4)
+ return x
+
+
+@HEADS.register_module()
+class GFLHead(AnchorHead):
+ """Generalized Focal Loss: Learning Qualified and Distributed Bounding
+ Boxes for Dense Object Detection.
+
+    The GFL head structure is similar to that of ATSS; however, GFL uses
+    1) a joint representation for classification and localization quality, and
+    2) a flexible General distribution for bounding box locations,
+    which are supervised by Quality Focal Loss (QFL) and
+    Distribution Focal Loss (DFL), respectively.
+
+ https://arxiv.org/abs/2006.04388
+
+ Args:
+ num_classes (int): Number of categories excluding the background
+ category.
+ in_channels (int): Number of channels in the input feature map.
+ stacked_convs (int): Number of conv layers in cls and reg tower.
+ Default: 4.
+ conv_cfg (dict): dictionary to construct and config conv layer.
+ Default: None.
+ norm_cfg (dict): dictionary to construct and config norm layer.
+ Default: dict(type='GN', num_groups=32, requires_grad=True).
+        loss_dfl (dict): Config of Distribution Focal Loss (DFL).
+ reg_max (int): Max value of integral set :math: `{0, ..., reg_max}`
+ in QFL setting. Default: 16.
+ Example:
+ >>> self = GFLHead(11, 7)
+ >>> feats = [torch.rand(1, 7, s, s) for s in [4, 8, 16, 32, 64]]
+ >>> cls_quality_score, bbox_pred = self.forward(feats)
+ >>> assert len(cls_quality_score) == len(self.scales)
+ """
+
+ def __init__(self,
+ num_classes,
+ in_channels,
+ stacked_convs=4,
+ conv_cfg=None,
+ norm_cfg=dict(type='GN', num_groups=32, requires_grad=True),
+ loss_dfl=dict(type='DistributionFocalLoss', loss_weight=0.25),
+ reg_max=16,
+ **kwargs):
+ self.stacked_convs = stacked_convs
+ self.conv_cfg = conv_cfg
+ self.norm_cfg = norm_cfg
+ self.reg_max = reg_max
+ super(GFLHead, self).__init__(num_classes, in_channels, **kwargs)
+
+ self.sampling = False
+ if self.train_cfg:
+ self.assigner = build_assigner(self.train_cfg.assigner)
+            # sampling is False here, so use PseudoSampler
+ sampler_cfg = dict(type='PseudoSampler')
+ self.sampler = build_sampler(sampler_cfg, context=self)
+
+ self.integral = Integral(self.reg_max)
+ self.loss_dfl = build_loss(loss_dfl)
+
+ def _init_layers(self):
+ """Initialize layers of the head."""
+ self.relu = nn.ReLU(inplace=True)
+ self.cls_convs = nn.ModuleList()
+ self.reg_convs = nn.ModuleList()
+ for i in range(self.stacked_convs):
+ chn = self.in_channels if i == 0 else self.feat_channels
+ self.cls_convs.append(
+ ConvModule(
+ chn,
+ self.feat_channels,
+ 3,
+ stride=1,
+ padding=1,
+ conv_cfg=self.conv_cfg,
+ norm_cfg=self.norm_cfg))
+ self.reg_convs.append(
+ ConvModule(
+ chn,
+ self.feat_channels,
+ 3,
+ stride=1,
+ padding=1,
+ conv_cfg=self.conv_cfg,
+ norm_cfg=self.norm_cfg))
+ assert self.num_anchors == 1, 'anchor free version'
+ self.gfl_cls = nn.Conv2d(
+ self.feat_channels, self.cls_out_channels, 3, padding=1)
+ self.gfl_reg = nn.Conv2d(
+ self.feat_channels, 4 * (self.reg_max + 1), 3, padding=1)
+ self.scales = nn.ModuleList(
+ [Scale(1.0) for _ in self.anchor_generator.strides])
+
+ def init_weights(self):
+ """Initialize weights of the head."""
+ for m in self.cls_convs:
+ normal_init(m.conv, std=0.01)
+ for m in self.reg_convs:
+ normal_init(m.conv, std=0.01)
+ bias_cls = bias_init_with_prob(0.01)
+ normal_init(self.gfl_cls, std=0.01, bias=bias_cls)
+ normal_init(self.gfl_reg, std=0.01)
+
+ def forward(self, feats):
+ """Forward features from the upstream network.
+
+ Args:
+ feats (tuple[Tensor]): Features from the upstream network, each is
+ a 4D-tensor.
+
+ Returns:
+ tuple: Usually a tuple of classification scores and bbox prediction
+ cls_scores (list[Tensor]): Classification and quality (IoU)
+ joint scores for all scale levels, each is a 4D-tensor,
+ the channel number is num_classes.
+ bbox_preds (list[Tensor]): Box distribution logits for all
+ scale levels, each is a 4D-tensor, the channel number is
+ 4*(n+1), n is max value of integral set.
+ """
+ return multi_apply(self.forward_single, feats, self.scales)
+
+ def forward_single(self, x, scale):
+ """Forward feature of a single scale level.
+
+ Args:
+ x (Tensor): Features of a single scale level.
+            scale (:obj:`mmcv.cnn.Scale`): Learnable scale module to resize
+ the bbox prediction.
+
+ Returns:
+ tuple:
+ cls_score (Tensor): Cls and quality joint scores for a single
+                scale level, the channel number is num_classes.
+ bbox_pred (Tensor): Box distribution logits for a single scale
+ level, the channel number is 4*(n+1), n is max value of
+ integral set.
+ """
+ cls_feat = x
+ reg_feat = x
+ for cls_conv in self.cls_convs:
+ cls_feat = cls_conv(cls_feat)
+ for reg_conv in self.reg_convs:
+ reg_feat = reg_conv(reg_feat)
+ cls_score = self.gfl_cls(cls_feat)
+ bbox_pred = scale(self.gfl_reg(reg_feat)).float()
+ return cls_score, bbox_pred
+
+ def anchor_center(self, anchors):
+ """Get anchor centers from anchors.
+
+ Args:
+ anchors (Tensor): Anchor list with shape (N, 4), "xyxy" format.
+
+ Returns:
+ Tensor: Anchor centers with shape (N, 2), "xy" format.
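+
+        Example (illustrative):
+            >>> anchors = torch.tensor([[0., 0., 4., 4.], [2., 2., 6., 10.]])
+            >>> self.anchor_center(anchors)  # given a constructed head `self`
+            tensor([[2., 2.],
+                    [4., 6.]])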
+ """
+ anchors_cx = (anchors[..., 2] + anchors[..., 0]) / 2
+ anchors_cy = (anchors[..., 3] + anchors[..., 1]) / 2
+ return torch.stack([anchors_cx, anchors_cy], dim=-1)
+
+ def loss_single(self, anchors, cls_score, bbox_pred, labels, label_weights,
+ bbox_targets, stride, num_total_samples):
+ """Compute loss of a single scale level.
+
+ Args:
+ anchors (Tensor): Box reference for each scale level with shape
+ (N, num_total_anchors, 4).
+ cls_score (Tensor): Cls and quality joint scores for each scale
+                level, with shape (N, num_classes, H, W).
+ bbox_pred (Tensor): Box distribution logits for each scale
+ level with shape (N, 4*(n+1), H, W), n is max value of integral
+ set.
+ labels (Tensor): Labels of each anchors with shape
+ (N, num_total_anchors).
+ label_weights (Tensor): Label weights of each anchor with shape
+ (N, num_total_anchors)
+            bbox_targets (Tensor): BBox regression targets of each anchor with
+ shape (N, num_total_anchors, 4).
+ stride (tuple): Stride in this scale level.
+ num_total_samples (int): Number of positive samples that is
+ reduced over all GPUs.
+
+ Returns:
+ dict[str, Tensor]: A dictionary of loss components.
+ """
+ assert stride[0] == stride[1], 'h stride is not equal to w stride!'
+ anchors = anchors.reshape(-1, 4)
+ cls_score = cls_score.permute(0, 2, 3,
+ 1).reshape(-1, self.cls_out_channels)
+ bbox_pred = bbox_pred.permute(0, 2, 3,
+ 1).reshape(-1, 4 * (self.reg_max + 1))
+ bbox_targets = bbox_targets.reshape(-1, 4)
+ labels = labels.reshape(-1)
+ label_weights = label_weights.reshape(-1)
+
+ # FG cat_id: [0, num_classes -1], BG cat_id: num_classes
+ bg_class_ind = self.num_classes
+ pos_inds = ((labels >= 0)
+ & (labels < bg_class_ind)).nonzero().squeeze(1)
+ score = label_weights.new_zeros(labels.shape)
+
+ if len(pos_inds) > 0:
+ pos_bbox_targets = bbox_targets[pos_inds]
+ pos_bbox_pred = bbox_pred[pos_inds]
+ pos_anchors = anchors[pos_inds]
+ pos_anchor_centers = self.anchor_center(pos_anchors) / stride[0]
+
+ weight_targets = cls_score.detach().sigmoid()
+ weight_targets = weight_targets.max(dim=1)[0][pos_inds]
+ pos_bbox_pred_corners = self.integral(pos_bbox_pred)
+ pos_decode_bbox_pred = distance2bbox(pos_anchor_centers,
+ pos_bbox_pred_corners)
+ pos_decode_bbox_targets = pos_bbox_targets / stride[0]
+ score[pos_inds] = bbox_overlaps(
+ pos_decode_bbox_pred.detach(),
+ pos_decode_bbox_targets,
+ is_aligned=True)
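+            # DFL targets: per-side distances (left, top, right, bottom) from
+            # the anchor center to the gt box in stride units, clamped to
+            # [0, reg_max].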
+ pred_corners = pos_bbox_pred.reshape(-1, self.reg_max + 1)
+ target_corners = bbox2distance(pos_anchor_centers,
+ pos_decode_bbox_targets,
+ self.reg_max).reshape(-1)
+
+ # regression loss
+ loss_bbox = self.loss_bbox(
+ pos_decode_bbox_pred,
+ pos_decode_bbox_targets,
+ weight=weight_targets,
+ avg_factor=1.0)
+
+ # dfl loss
+ loss_dfl = self.loss_dfl(
+ pred_corners,
+ target_corners,
+ weight=weight_targets[:, None].expand(-1, 4).reshape(-1),
+ avg_factor=4.0)
+ else:
+ loss_bbox = bbox_pred.sum() * 0
+ loss_dfl = bbox_pred.sum() * 0
+ weight_targets = bbox_pred.new_tensor(0)
+
+ # cls (qfl) loss
+ loss_cls = self.loss_cls(
+ cls_score, (labels, score),
+ weight=label_weights,
+ avg_factor=num_total_samples)
+
+ return loss_cls, loss_bbox, loss_dfl, weight_targets.sum()
+
+ @force_fp32(apply_to=('cls_scores', 'bbox_preds'))
+ def loss(self,
+ cls_scores,
+ bbox_preds,
+ gt_bboxes,
+ gt_labels,
+ img_metas,
+ gt_bboxes_ignore=None):
+ """Compute losses of the head.
+
+ Args:
+ cls_scores (list[Tensor]): Cls and quality scores for each scale
+                level, with shape (N, num_classes, H, W).
+ bbox_preds (list[Tensor]): Box distribution logits for each scale
+ level with shape (N, 4*(n+1), H, W), n is max value of integral
+ set.
+ gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
+ shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
+ gt_labels (list[Tensor]): class indices corresponding to each box
+ img_metas (list[dict]): Meta information of each image, e.g.,
+ image size, scaling factor, etc.
+ gt_bboxes_ignore (list[Tensor] | None): specify which bounding
+ boxes can be ignored when computing the loss.
+
+ Returns:
+ dict[str, Tensor]: A dictionary of loss components.
+ """
+
+ featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
+ assert len(featmap_sizes) == self.anchor_generator.num_levels
+
+ device = cls_scores[0].device
+ anchor_list, valid_flag_list = self.get_anchors(
+ featmap_sizes, img_metas, device=device)
+ label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1
+
+ cls_reg_targets = self.get_targets(
+ anchor_list,
+ valid_flag_list,
+ gt_bboxes,
+ img_metas,
+ gt_bboxes_ignore_list=gt_bboxes_ignore,
+ gt_labels_list=gt_labels,
+ label_channels=label_channels)
+ if cls_reg_targets is None:
+ return None
+
+ (anchor_list, labels_list, label_weights_list, bbox_targets_list,
+ bbox_weights_list, num_total_pos, num_total_neg) = cls_reg_targets
+
+ num_total_samples = reduce_mean(
+ torch.tensor(num_total_pos, dtype=torch.float,
+ device=device)).item()
+ num_total_samples = max(num_total_samples, 1.0)
+
+ losses_cls, losses_bbox, losses_dfl,\
+ avg_factor = multi_apply(
+ self.loss_single,
+ anchor_list,
+ cls_scores,
+ bbox_preds,
+ labels_list,
+ label_weights_list,
+ bbox_targets_list,
+ self.anchor_generator.strides,
+ num_total_samples=num_total_samples)
+
+ avg_factor = sum(avg_factor)
+ avg_factor = reduce_mean(avg_factor).item()
+ losses_bbox = list(map(lambda x: x / avg_factor, losses_bbox))
+ losses_dfl = list(map(lambda x: x / avg_factor, losses_dfl))
+ return dict(
+ loss_cls=losses_cls, loss_bbox=losses_bbox, loss_dfl=losses_dfl)
+
+ def _get_bboxes(self,
+ cls_scores,
+ bbox_preds,
+ mlvl_anchors,
+ img_shapes,
+ scale_factors,
+ cfg,
+ rescale=False,
+ with_nms=True):
+ """Transform outputs for a single batch item into labeled boxes.
+
+ Args:
+ cls_scores (list[Tensor]): Box scores for a single scale level
+ has shape (N, num_classes, H, W).
+ bbox_preds (list[Tensor]): Box distribution logits for a single
+ scale level with shape (N, 4*(n+1), H, W), n is max value of
+ integral set.
+ mlvl_anchors (list[Tensor]): Box reference for a single scale level
+ with shape (num_total_anchors, 4).
+ img_shapes (list[tuple[int]]): Shape of the input image,
+ list[(height, width, 3)].
+            scale_factors (list[ndarray]): Scale factor of the image, arranged as
+ (w_scale, h_scale, w_scale, h_scale).
+ cfg (mmcv.Config | None): Test / postprocessing configuration,
+ if None, test_cfg would be used.
+ rescale (bool): If True, return boxes in original image space.
+ Default: False.
+ with_nms (bool): If True, do nms before return boxes.
+ Default: True.
+
+ Returns:
+ list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple.
+                The first item is an (n, 5) tensor, where the 5 columns are
+                (tl_x, tl_y, br_x, br_y, score) and the score is between
+                0 and 1.
+ The shape of the second tensor in the tuple is (n,), and
+ each element represents the class label of the corresponding
+ box.
+ """
+ cfg = self.test_cfg if cfg is None else cfg
+ assert len(cls_scores) == len(bbox_preds) == len(mlvl_anchors)
+ batch_size = cls_scores[0].shape[0]
+
+ mlvl_bboxes = []
+ mlvl_scores = []
+ for cls_score, bbox_pred, stride, anchors in zip(
+ cls_scores, bbox_preds, self.anchor_generator.strides,
+ mlvl_anchors):
+ assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
+ assert stride[0] == stride[1]
+ scores = cls_score.permute(0, 2, 3, 1).reshape(
+ batch_size, -1, self.cls_out_channels).sigmoid()
+ bbox_pred = bbox_pred.permute(0, 2, 3, 1)
+
+ bbox_pred = self.integral(bbox_pred) * stride[0]
+ bbox_pred = bbox_pred.reshape(batch_size, -1, 4)
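+            # bbox_pred now holds per-anchor (left, top, right, bottom)
+            # distances in input-image pixels for this level.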
+
+ nms_pre = cfg.get('nms_pre', -1)
+ if nms_pre > 0 and scores.shape[1] > nms_pre:
+ max_scores, _ = scores.max(-1)
+ _, topk_inds = max_scores.topk(nms_pre)
+ batch_inds = torch.arange(batch_size).view(
+ -1, 1).expand_as(topk_inds).long()
+ anchors = anchors[topk_inds, :]
+ bbox_pred = bbox_pred[batch_inds, topk_inds, :]
+ scores = scores[batch_inds, topk_inds, :]
+ else:
+ anchors = anchors.expand_as(bbox_pred)
+
+ bboxes = distance2bbox(
+ self.anchor_center(anchors), bbox_pred, max_shape=img_shapes)
+ mlvl_bboxes.append(bboxes)
+ mlvl_scores.append(scores)
+
+ batch_mlvl_bboxes = torch.cat(mlvl_bboxes, dim=1)
+ if rescale:
+ batch_mlvl_bboxes /= batch_mlvl_bboxes.new_tensor(
+ scale_factors).unsqueeze(1)
+
+ batch_mlvl_scores = torch.cat(mlvl_scores, dim=1)
+        # Append a dummy background-class column when using sigmoid.
+        # Note: FG labels are set to [0, num_class-1] since mmdet v2.0,
+        # and the BG cat_id is num_class.
+ padding = batch_mlvl_scores.new_zeros(batch_size,
+ batch_mlvl_scores.shape[1], 1)
+ batch_mlvl_scores = torch.cat([batch_mlvl_scores, padding], dim=-1)
+
+ if with_nms:
+ det_results = []
+ for (mlvl_bboxes, mlvl_scores) in zip(batch_mlvl_bboxes,
+ batch_mlvl_scores):
+ det_bbox, det_label = multiclass_nms(mlvl_bboxes, mlvl_scores,
+ cfg.score_thr, cfg.nms,
+ cfg.max_per_img)
+ det_results.append(tuple([det_bbox, det_label]))
+ else:
+ det_results = [
+ tuple(mlvl_bs)
+ for mlvl_bs in zip(batch_mlvl_bboxes, batch_mlvl_scores)
+ ]
+ return det_results
+
+ def get_targets(self,
+ anchor_list,
+ valid_flag_list,
+ gt_bboxes_list,
+ img_metas,
+ gt_bboxes_ignore_list=None,
+ gt_labels_list=None,
+ label_channels=1,
+ unmap_outputs=True):
+ """Get targets for GFL head.
+
+ This method is almost the same as `AnchorHead.get_targets()`. Besides
+ returning the targets as the parent method does, it also returns the
+ anchors as the first element of the returned tuple.
+ """
+ num_imgs = len(img_metas)
+ assert len(anchor_list) == len(valid_flag_list) == num_imgs
+
+ # anchor number of multi levels
+ num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]
+ num_level_anchors_list = [num_level_anchors] * num_imgs
+
+ # concat all level anchors and flags to a single tensor
+ for i in range(num_imgs):
+ assert len(anchor_list[i]) == len(valid_flag_list[i])
+ anchor_list[i] = torch.cat(anchor_list[i])
+ valid_flag_list[i] = torch.cat(valid_flag_list[i])
+
+ # compute targets for each image
+ if gt_bboxes_ignore_list is None:
+ gt_bboxes_ignore_list = [None for _ in range(num_imgs)]
+ if gt_labels_list is None:
+ gt_labels_list = [None for _ in range(num_imgs)]
+ (all_anchors, all_labels, all_label_weights, all_bbox_targets,
+ all_bbox_weights, pos_inds_list, neg_inds_list) = multi_apply(
+ self._get_target_single,
+ anchor_list,
+ valid_flag_list,
+ num_level_anchors_list,
+ gt_bboxes_list,
+ gt_bboxes_ignore_list,
+ gt_labels_list,
+ img_metas,
+ label_channels=label_channels,
+ unmap_outputs=unmap_outputs)
+ # no valid anchors
+ if any([labels is None for labels in all_labels]):
+ return None
+ # sampled anchors of all images
+ num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list])
+ num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list])
+ # split targets to a list w.r.t. multiple levels
+ anchors_list = images_to_levels(all_anchors, num_level_anchors)
+ labels_list = images_to_levels(all_labels, num_level_anchors)
+ label_weights_list = images_to_levels(all_label_weights,
+ num_level_anchors)
+ bbox_targets_list = images_to_levels(all_bbox_targets,
+ num_level_anchors)
+ bbox_weights_list = images_to_levels(all_bbox_weights,
+ num_level_anchors)
+ return (anchors_list, labels_list, label_weights_list,
+ bbox_targets_list, bbox_weights_list, num_total_pos,
+ num_total_neg)
+
+ def _get_target_single(self,
+ flat_anchors,
+ valid_flags,
+ num_level_anchors,
+ gt_bboxes,
+ gt_bboxes_ignore,
+ gt_labels,
+ img_meta,
+ label_channels=1,
+ unmap_outputs=True):
+ """Compute regression, classification targets for anchors in a single
+ image.
+
+ Args:
+ flat_anchors (Tensor): Multi-level anchors of the image, which are
+ concatenated into a single tensor of shape (num_anchors, 4)
+ valid_flags (Tensor): Multi level valid flags of the image,
+ which are concatenated into a single tensor of
+ shape (num_anchors,).
+            num_level_anchors (list[int]): Number of anchors of each scale level.
+ gt_bboxes (Tensor): Ground truth bboxes of the image,
+ shape (num_gts, 4).
+ gt_bboxes_ignore (Tensor): Ground truth bboxes to be
+ ignored, shape (num_ignored_gts, 4).
+ gt_labels (Tensor): Ground truth labels of each box,
+ shape (num_gts,).
+ img_meta (dict): Meta info of the image.
+ label_channels (int): Channel of label.
+ unmap_outputs (bool): Whether to map outputs back to the original
+ set of anchors.
+
+ Returns:
+ tuple: N is the number of total anchors in the image.
+ anchors (Tensor): All anchors in the image with shape (N, 4).
+ labels (Tensor): Labels of all anchors in the image with shape
+ (N,).
+                label_weights (Tensor): Label weights of all anchors in the
+ image with shape (N,).
+ bbox_targets (Tensor): BBox targets of all anchors in the
+ image with shape (N, 4).
+ bbox_weights (Tensor): BBox weights of all anchors in the
+ image with shape (N, 4).
+ pos_inds (Tensor): Indices of positive anchor with shape
+ (num_pos,).
+ neg_inds (Tensor): Indices of negative anchor with shape
+ (num_neg,).
+ """
+ inside_flags = anchor_inside_flags(flat_anchors, valid_flags,
+ img_meta['img_shape'][:2],
+ self.train_cfg.allowed_border)
+ if not inside_flags.any():
+ return (None, ) * 7
+ # assign gt and sample anchors
+ anchors = flat_anchors[inside_flags, :]
+
+ num_level_anchors_inside = self.get_num_level_anchors_inside(
+ num_level_anchors, inside_flags)
+ assign_result = self.assigner.assign(anchors, num_level_anchors_inside,
+ gt_bboxes, gt_bboxes_ignore,
+ gt_labels)
+
+ sampling_result = self.sampler.sample(assign_result, anchors,
+ gt_bboxes)
+
+ num_valid_anchors = anchors.shape[0]
+ bbox_targets = torch.zeros_like(anchors)
+ bbox_weights = torch.zeros_like(anchors)
+ labels = anchors.new_full((num_valid_anchors, ),
+ self.num_classes,
+ dtype=torch.long)
+ label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float)
+
+ pos_inds = sampling_result.pos_inds
+ neg_inds = sampling_result.neg_inds
+ if len(pos_inds) > 0:
+ pos_bbox_targets = sampling_result.pos_gt_bboxes
+ bbox_targets[pos_inds, :] = pos_bbox_targets
+ bbox_weights[pos_inds, :] = 1.0
+ if gt_labels is None:
+ # Only rpn gives gt_labels as None
+ # Foreground is the first class
+ labels[pos_inds] = 0
+ else:
+ labels[pos_inds] = gt_labels[
+ sampling_result.pos_assigned_gt_inds]
+ if self.train_cfg.pos_weight <= 0:
+ label_weights[pos_inds] = 1.0
+ else:
+ label_weights[pos_inds] = self.train_cfg.pos_weight
+ if len(neg_inds) > 0:
+ label_weights[neg_inds] = 1.0
+
+ # map up to original set of anchors
+ if unmap_outputs:
+ num_total_anchors = flat_anchors.size(0)
+ anchors = unmap(anchors, num_total_anchors, inside_flags)
+ labels = unmap(
+ labels, num_total_anchors, inside_flags, fill=self.num_classes)
+ label_weights = unmap(label_weights, num_total_anchors,
+ inside_flags)
+ bbox_targets = unmap(bbox_targets, num_total_anchors, inside_flags)
+ bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags)
+
+ return (anchors, labels, label_weights, bbox_targets, bbox_weights,
+ pos_inds, neg_inds)
+
+ def get_num_level_anchors_inside(self, num_level_anchors, inside_flags):
+ split_inside_flags = torch.split(inside_flags, num_level_anchors)
+ num_level_anchors_inside = [
+ int(flags.sum()) for flags in split_inside_flags
+ ]
+ return num_level_anchors_inside
diff --git a/annotator/uniformer/mmdet_null/models/dense_heads/guided_anchor_head.py b/annotator/uniformer/mmdet_null/models/dense_heads/guided_anchor_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..997ebb751ade2ebae3fce335a08c46f596c60913
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/dense_heads/guided_anchor_head.py
@@ -0,0 +1,860 @@
+import torch
+import torch.nn as nn
+from mmcv.cnn import bias_init_with_prob, normal_init
+from mmcv.ops import DeformConv2d, MaskedConv2d
+from mmcv.runner import force_fp32
+
+from mmdet.core import (anchor_inside_flags, build_anchor_generator,
+ build_assigner, build_bbox_coder, build_sampler,
+ calc_region, images_to_levels, multi_apply,
+ multiclass_nms, unmap)
+from ..builder import HEADS, build_loss
+from .anchor_head import AnchorHead
+
+
+class FeatureAdaption(nn.Module):
+ """Feature Adaption Module.
+
+ Feature Adaption Module is implemented based on DCN v1.
+ It uses anchor shape prediction rather than feature map to
+ predict offsets of deform conv layer.
+
+ Args:
+ in_channels (int): Number of channels in the input feature map.
+ out_channels (int): Number of channels in the output feature map.
+ kernel_size (int): Deformable conv kernel size.
+ deform_groups (int): Deformable conv group size.
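+
+    Example:
+        >>> # illustrative sketch; DeformConv2d may require a CUDA build of mmcv
+        >>> fa = FeatureAdaption(256, 256, kernel_size=3, deform_groups=4)
+        >>> feat = torch.rand(1, 256, 32, 32)
+        >>> shape_pred = torch.rand(1, 2, 32, 32)
+        >>> fa(feat, shape_pred).shape
+        torch.Size([1, 256, 32, 32])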
+ """
+
+ def __init__(self,
+ in_channels,
+ out_channels,
+ kernel_size=3,
+ deform_groups=4):
+ super(FeatureAdaption, self).__init__()
+ offset_channels = kernel_size * kernel_size * 2
+ self.conv_offset = nn.Conv2d(
+ 2, deform_groups * offset_channels, 1, bias=False)
+ self.conv_adaption = DeformConv2d(
+ in_channels,
+ out_channels,
+ kernel_size=kernel_size,
+ padding=(kernel_size - 1) // 2,
+ deform_groups=deform_groups)
+ self.relu = nn.ReLU(inplace=True)
+
+ def init_weights(self):
+ normal_init(self.conv_offset, std=0.1)
+ normal_init(self.conv_adaption, std=0.01)
+
+ def forward(self, x, shape):
+ offset = self.conv_offset(shape.detach())
+ x = self.relu(self.conv_adaption(x, offset))
+ return x
+
+
+@HEADS.register_module()
+class GuidedAnchorHead(AnchorHead):
+ """Guided-Anchor-based head (GA-RPN, GA-RetinaNet, etc.).
+
+ This GuidedAnchorHead will predict high-quality feature guided
+ anchors and locations where anchors will be kept in inference.
+ There are mainly 3 categories of bounding-boxes.
+
+    - Sampled candidate anchors used for target assignment (approxes),
+      9 per location with the default generator settings.
+ - The square boxes where the predicted anchors are based on. (squares)
+ - Guided anchors.
+
+ Please refer to https://arxiv.org/abs/1901.03278 for more details.
+
+ Args:
+ num_classes (int): Number of classes.
+ in_channels (int): Number of channels in the input feature map.
+ feat_channels (int): Number of hidden channels.
+ approx_anchor_generator (dict): Config dict for approx generator
+ square_anchor_generator (dict): Config dict for square generator
+ anchor_coder (dict): Config dict for anchor coder
+ bbox_coder (dict): Config dict for bbox coder
+ reg_decoded_bbox (bool): If true, the regression loss would be
+ applied directly on decoded bounding boxes, converting both
+ the predicted boxes and regression targets to absolute
+ coordinates format. Default False. It should be `True` when
+ using `IoULoss`, `GIoULoss`, or `DIoULoss` in the bbox head.
+        deform_groups (int): Group number of DCN in
+ FeatureAdaption module.
+ loc_filter_thr (float): Threshold to filter out unconcerned regions.
+ loss_loc (dict): Config of location loss.
+ loss_shape (dict): Config of anchor shape loss.
+ loss_cls (dict): Config of classification loss.
+ loss_bbox (dict): Config of bbox regression loss.
+ """
+
+ def __init__(
+ self,
+ num_classes,
+ in_channels,
+ feat_channels=256,
+ approx_anchor_generator=dict(
+ type='AnchorGenerator',
+ octave_base_scale=8,
+ scales_per_octave=3,
+ ratios=[0.5, 1.0, 2.0],
+ strides=[4, 8, 16, 32, 64]),
+ square_anchor_generator=dict(
+ type='AnchorGenerator',
+ ratios=[1.0],
+ scales=[8],
+ strides=[4, 8, 16, 32, 64]),
+ anchor_coder=dict(
+ type='DeltaXYWHBBoxCoder',
+ target_means=[.0, .0, .0, .0],
+ target_stds=[1.0, 1.0, 1.0, 1.0]
+ ),
+ bbox_coder=dict(
+ type='DeltaXYWHBBoxCoder',
+ target_means=[.0, .0, .0, .0],
+ target_stds=[1.0, 1.0, 1.0, 1.0]
+ ),
+ reg_decoded_bbox=False,
+ deform_groups=4,
+ loc_filter_thr=0.01,
+ train_cfg=None,
+ test_cfg=None,
+ loss_loc=dict(
+ type='FocalLoss',
+ use_sigmoid=True,
+ gamma=2.0,
+ alpha=0.25,
+ loss_weight=1.0),
+ loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0),
+ loss_cls=dict(
+ type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
+ loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
+ loss_weight=1.0)): # yapf: disable
+ super(AnchorHead, self).__init__()
+ self.in_channels = in_channels
+ self.num_classes = num_classes
+ self.feat_channels = feat_channels
+ self.deform_groups = deform_groups
+ self.loc_filter_thr = loc_filter_thr
+
+ # build approx_anchor_generator and square_anchor_generator
+ assert (approx_anchor_generator['octave_base_scale'] ==
+ square_anchor_generator['scales'][0])
+ assert (approx_anchor_generator['strides'] ==
+ square_anchor_generator['strides'])
+ self.approx_anchor_generator = build_anchor_generator(
+ approx_anchor_generator)
+ self.square_anchor_generator = build_anchor_generator(
+ square_anchor_generator)
+ self.approxs_per_octave = self.approx_anchor_generator \
+ .num_base_anchors[0]
+
+ self.reg_decoded_bbox = reg_decoded_bbox
+
+ # one anchor per location
+ self.num_anchors = 1
+ self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False)
+ self.loc_focal_loss = loss_loc['type'] in ['FocalLoss']
+ self.sampling = loss_cls['type'] not in ['FocalLoss']
+ self.ga_sampling = train_cfg is not None and hasattr(
+ train_cfg, 'ga_sampler')
+ if self.use_sigmoid_cls:
+ self.cls_out_channels = self.num_classes
+ else:
+ self.cls_out_channels = self.num_classes + 1
+
+ # build bbox_coder
+ self.anchor_coder = build_bbox_coder(anchor_coder)
+ self.bbox_coder = build_bbox_coder(bbox_coder)
+
+ # build losses
+ self.loss_loc = build_loss(loss_loc)
+ self.loss_shape = build_loss(loss_shape)
+ self.loss_cls = build_loss(loss_cls)
+ self.loss_bbox = build_loss(loss_bbox)
+
+ self.train_cfg = train_cfg
+ self.test_cfg = test_cfg
+
+ if self.train_cfg:
+ self.assigner = build_assigner(self.train_cfg.assigner)
+ # use PseudoSampler when sampling is False
+ if self.sampling and hasattr(self.train_cfg, 'sampler'):
+ sampler_cfg = self.train_cfg.sampler
+ else:
+ sampler_cfg = dict(type='PseudoSampler')
+ self.sampler = build_sampler(sampler_cfg, context=self)
+
+ self.ga_assigner = build_assigner(self.train_cfg.ga_assigner)
+ if self.ga_sampling:
+ ga_sampler_cfg = self.train_cfg.ga_sampler
+ else:
+ ga_sampler_cfg = dict(type='PseudoSampler')
+ self.ga_sampler = build_sampler(ga_sampler_cfg, context=self)
+
+ self.fp16_enabled = False
+
+ self._init_layers()
+
+ def _init_layers(self):
+ self.relu = nn.ReLU(inplace=True)
+ self.conv_loc = nn.Conv2d(self.in_channels, 1, 1)
+ self.conv_shape = nn.Conv2d(self.in_channels, self.num_anchors * 2, 1)
+ self.feature_adaption = FeatureAdaption(
+ self.in_channels,
+ self.feat_channels,
+ kernel_size=3,
+ deform_groups=self.deform_groups)
+ self.conv_cls = MaskedConv2d(self.feat_channels,
+ self.num_anchors * self.cls_out_channels,
+ 1)
+ self.conv_reg = MaskedConv2d(self.feat_channels, self.num_anchors * 4,
+ 1)
+
+ def init_weights(self):
+ normal_init(self.conv_cls, std=0.01)
+ normal_init(self.conv_reg, std=0.01)
+
+ bias_cls = bias_init_with_prob(0.01)
+ normal_init(self.conv_loc, std=0.01, bias=bias_cls)
+ normal_init(self.conv_shape, std=0.01)
+
+ self.feature_adaption.init_weights()
+
+ def forward_single(self, x):
+ loc_pred = self.conv_loc(x)
+ shape_pred = self.conv_shape(x)
+ x = self.feature_adaption(x, shape_pred)
+ # masked conv is only used during inference for speed-up
+ if not self.training:
+ mask = loc_pred.sigmoid()[0] >= self.loc_filter_thr
+ else:
+ mask = None
+ cls_score = self.conv_cls(x, mask)
+ bbox_pred = self.conv_reg(x, mask)
+ return cls_score, bbox_pred, shape_pred, loc_pred
+
+ def forward(self, feats):
+ return multi_apply(self.forward_single, feats)
+
+ def get_sampled_approxs(self, featmap_sizes, img_metas, device='cuda'):
+ """Get sampled approxs and inside flags according to feature map sizes.
+
+ Args:
+ featmap_sizes (list[tuple]): Multi-level feature map sizes.
+ img_metas (list[dict]): Image meta info.
+ device (torch.device | str): device for returned tensors
+
+ Returns:
+ tuple: approxes of each image, inside flags of each image
+ """
+ num_imgs = len(img_metas)
+
+        # since feature map sizes of all images are the same, we only compute
+        # approxes once
+ multi_level_approxs = self.approx_anchor_generator.grid_anchors(
+ featmap_sizes, device=device)
+ approxs_list = [multi_level_approxs for _ in range(num_imgs)]
+
+ # for each image, we compute inside flags of multi level approxes
+ inside_flag_list = []
+ for img_id, img_meta in enumerate(img_metas):
+ multi_level_flags = []
+ multi_level_approxs = approxs_list[img_id]
+
+ # obtain valid flags for each approx first
+ multi_level_approx_flags = self.approx_anchor_generator \
+ .valid_flags(featmap_sizes,
+ img_meta['pad_shape'],
+ device=device)
+
+ for i, flags in enumerate(multi_level_approx_flags):
+ approxs = multi_level_approxs[i]
+ inside_flags_list = []
+ for i in range(self.approxs_per_octave):
+ split_valid_flags = flags[i::self.approxs_per_octave]
+ split_approxs = approxs[i::self.approxs_per_octave, :]
+ inside_flags = anchor_inside_flags(
+ split_approxs, split_valid_flags,
+ img_meta['img_shape'][:2],
+ self.train_cfg.allowed_border)
+ inside_flags_list.append(inside_flags)
+ # inside_flag for a position is true if any anchor in this
+ # position is true
+ inside_flags = (
+ torch.stack(inside_flags_list, 0).sum(dim=0) > 0)
+ multi_level_flags.append(inside_flags)
+ inside_flag_list.append(multi_level_flags)
+ return approxs_list, inside_flag_list
+
+ def get_anchors(self,
+ featmap_sizes,
+ shape_preds,
+ loc_preds,
+ img_metas,
+ use_loc_filter=False,
+ device='cuda'):
+ """Get squares according to feature map sizes and guided anchors.
+
+ Args:
+ featmap_sizes (list[tuple]): Multi-level feature map sizes.
+ shape_preds (list[tensor]): Multi-level shape predictions.
+ loc_preds (list[tensor]): Multi-level location predictions.
+ img_metas (list[dict]): Image meta info.
+ use_loc_filter (bool): Use loc filter or not.
+ device (torch.device | str): device for returned tensors
+
+ Returns:
+ tuple: square approxs of each image, guided anchors of each image,
+ loc masks of each image
+ """
+ num_imgs = len(img_metas)
+ num_levels = len(featmap_sizes)
+
+        # since feature map sizes of all images are the same, we only compute
+        # squares once
+ multi_level_squares = self.square_anchor_generator.grid_anchors(
+ featmap_sizes, device=device)
+ squares_list = [multi_level_squares for _ in range(num_imgs)]
+
+ # for each image, we compute multi level guided anchors
+ guided_anchors_list = []
+ loc_mask_list = []
+ for img_id, img_meta in enumerate(img_metas):
+ multi_level_guided_anchors = []
+ multi_level_loc_mask = []
+ for i in range(num_levels):
+ squares = squares_list[img_id][i]
+ shape_pred = shape_preds[i][img_id]
+ loc_pred = loc_preds[i][img_id]
+ guided_anchors, loc_mask = self._get_guided_anchors_single(
+ squares,
+ shape_pred,
+ loc_pred,
+ use_loc_filter=use_loc_filter)
+ multi_level_guided_anchors.append(guided_anchors)
+ multi_level_loc_mask.append(loc_mask)
+ guided_anchors_list.append(multi_level_guided_anchors)
+ loc_mask_list.append(multi_level_loc_mask)
+ return squares_list, guided_anchors_list, loc_mask_list
+
+ def _get_guided_anchors_single(self,
+ squares,
+ shape_pred,
+ loc_pred,
+ use_loc_filter=False):
+ """Get guided anchors and loc masks for a single level.
+
+ Args:
+            squares (Tensor): Squares of a single level.
+            shape_pred (Tensor): Shape predictions of a single level.
+            loc_pred (Tensor): Loc predictions of a single level.
+            use_loc_filter (bool): Whether to use the loc filter.
+
+ Returns:
+ tuple: guided anchors, location masks
+ """
+ # calculate location filtering mask
+ loc_pred = loc_pred.sigmoid().detach()
+ if use_loc_filter:
+ loc_mask = loc_pred >= self.loc_filter_thr
+ else:
+ loc_mask = loc_pred >= 0.0
+ mask = loc_mask.permute(1, 2, 0).expand(-1, -1, self.num_anchors)
+ mask = mask.contiguous().view(-1)
+ # calculate guided anchors
+ squares = squares[mask]
+ anchor_deltas = shape_pred.permute(1, 2, 0).contiguous().view(
+ -1, 2).detach()[mask]
+ bbox_deltas = anchor_deltas.new_full(squares.size(), 0)
+ bbox_deltas[:, 2:] = anchor_deltas
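+        # Only dw/dh are predicted; dx/dy stay zero, so the guided anchors
+        # keep the square centers and only their width/height are adapted.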
+ guided_anchors = self.anchor_coder.decode(
+ squares, bbox_deltas, wh_ratio_clip=1e-6)
+ return guided_anchors, mask
+
+ def ga_loc_targets(self, gt_bboxes_list, featmap_sizes):
+ """Compute location targets for guided anchoring.
+
+ Each feature map is divided into positive, negative and ignore regions.
+ - positive regions: target 1, weight 1
+ - ignore regions: target 0, weight 0
+ - negative regions: target 0, weight 0.1
+
+ Args:
+ gt_bboxes_list (list[Tensor]): Gt bboxes of each image.
+ featmap_sizes (list[tuple]): Multi level sizes of each feature
+ maps.
+
+ Returns:
+ tuple
+ """
+ anchor_scale = self.approx_anchor_generator.octave_base_scale
+ anchor_strides = self.approx_anchor_generator.strides
+ # Currently only supports same stride in x and y direction.
+ for stride in anchor_strides:
+ assert (stride[0] == stride[1])
+ anchor_strides = [stride[0] for stride in anchor_strides]
+
+ center_ratio = self.train_cfg.center_ratio
+ ignore_ratio = self.train_cfg.ignore_ratio
+ img_per_gpu = len(gt_bboxes_list)
+ num_lvls = len(featmap_sizes)
+ r1 = (1 - center_ratio) / 2
+ r2 = (1 - ignore_ratio) / 2
+ all_loc_targets = []
+ all_loc_weights = []
+ all_ignore_map = []
+ for lvl_id in range(num_lvls):
+ h, w = featmap_sizes[lvl_id]
+ loc_targets = torch.zeros(
+ img_per_gpu,
+ 1,
+ h,
+ w,
+ device=gt_bboxes_list[0].device,
+ dtype=torch.float32)
+ loc_weights = torch.full_like(loc_targets, -1)
+ ignore_map = torch.zeros_like(loc_targets)
+ all_loc_targets.append(loc_targets)
+ all_loc_weights.append(loc_weights)
+ all_ignore_map.append(ignore_map)
+ for img_id in range(img_per_gpu):
+ gt_bboxes = gt_bboxes_list[img_id]
+ scale = torch.sqrt((gt_bboxes[:, 2] - gt_bboxes[:, 0]) *
+ (gt_bboxes[:, 3] - gt_bboxes[:, 1]))
+ min_anchor_size = scale.new_full(
+ (1, ), float(anchor_scale * anchor_strides[0]))
+ # assign gt bboxes to different feature levels w.r.t. their scales
+ target_lvls = torch.floor(
+ torch.log2(scale) - torch.log2(min_anchor_size) + 0.5)
+ target_lvls = target_lvls.clamp(min=0, max=num_lvls - 1).long()
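+            # i.e. round(log2(scale / min_anchor_size)): each doubling of the
+            # gt scale moves the target one pyramid level up, clamped to the
+            # available levels.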
+ for gt_id in range(gt_bboxes.size(0)):
+ lvl = target_lvls[gt_id].item()
+ # rescaled to corresponding feature map
+ gt_ = gt_bboxes[gt_id, :4] / anchor_strides[lvl]
+ # calculate ignore regions
+ ignore_x1, ignore_y1, ignore_x2, ignore_y2 = calc_region(
+ gt_, r2, featmap_sizes[lvl])
+ # calculate positive (center) regions
+ ctr_x1, ctr_y1, ctr_x2, ctr_y2 = calc_region(
+ gt_, r1, featmap_sizes[lvl])
+ all_loc_targets[lvl][img_id, 0, ctr_y1:ctr_y2 + 1,
+ ctr_x1:ctr_x2 + 1] = 1
+ all_loc_weights[lvl][img_id, 0, ignore_y1:ignore_y2 + 1,
+ ignore_x1:ignore_x2 + 1] = 0
+ all_loc_weights[lvl][img_id, 0, ctr_y1:ctr_y2 + 1,
+ ctr_x1:ctr_x2 + 1] = 1
+ # calculate ignore map on nearby low level feature
+ if lvl > 0:
+ d_lvl = lvl - 1
+ # rescaled to corresponding feature map
+ gt_ = gt_bboxes[gt_id, :4] / anchor_strides[d_lvl]
+ ignore_x1, ignore_y1, ignore_x2, ignore_y2 = calc_region(
+ gt_, r2, featmap_sizes[d_lvl])
+ all_ignore_map[d_lvl][img_id, 0, ignore_y1:ignore_y2 + 1,
+ ignore_x1:ignore_x2 + 1] = 1
+ # calculate ignore map on nearby high level feature
+ if lvl < num_lvls - 1:
+ u_lvl = lvl + 1
+ # rescaled to corresponding feature map
+ gt_ = gt_bboxes[gt_id, :4] / anchor_strides[u_lvl]
+ ignore_x1, ignore_y1, ignore_x2, ignore_y2 = calc_region(
+ gt_, r2, featmap_sizes[u_lvl])
+ all_ignore_map[u_lvl][img_id, 0, ignore_y1:ignore_y2 + 1,
+ ignore_x1:ignore_x2 + 1] = 1
+ for lvl_id in range(num_lvls):
+ # ignore negative regions w.r.t. ignore map
+ all_loc_weights[lvl_id][(all_loc_weights[lvl_id] < 0)
+ & (all_ignore_map[lvl_id] > 0)] = 0
+ # set negative regions with weight 0.1
+ all_loc_weights[lvl_id][all_loc_weights[lvl_id] < 0] = 0.1
+ # loc average factor to balance loss
+ loc_avg_factor = sum(
+ [t.size(0) * t.size(-1) * t.size(-2)
+ for t in all_loc_targets]) / 200
+ return all_loc_targets, all_loc_weights, loc_avg_factor
+
+ def _ga_shape_target_single(self,
+ flat_approxs,
+ inside_flags,
+ flat_squares,
+ gt_bboxes,
+ gt_bboxes_ignore,
+ img_meta,
+ unmap_outputs=True):
+ """Compute guided anchoring targets.
+
+ This function returns sampled anchors and gt bboxes directly
+ rather than calculates regression targets.
+
+ Args:
+            flat_approxs (Tensor): flat approxs of a single image,
+                shape (approxs_per_octave * n, 4)
+            inside_flags (Tensor): inside flags of a single image,
+                shape (n, ).
+            flat_squares (Tensor): flat squares of a single image,
+                shape (n, 4)
+            gt_bboxes (Tensor): Ground truth bboxes of a single image.
+            gt_bboxes_ignore (Tensor): Ground truth bboxes to be ignored.
+            img_meta (dict): Meta info of a single image.
+            unmap_outputs (bool): unmap outputs or not.
+
+ Returns:
+ tuple
+ """
+ if not inside_flags.any():
+ return (None, ) * 5
+ # assign gt and sample anchors
+ expand_inside_flags = inside_flags[:, None].expand(
+ -1, self.approxs_per_octave).reshape(-1)
+ approxs = flat_approxs[expand_inside_flags, :]
+ squares = flat_squares[inside_flags, :]
+
+ assign_result = self.ga_assigner.assign(approxs, squares,
+ self.approxs_per_octave,
+ gt_bboxes, gt_bboxes_ignore)
+ sampling_result = self.ga_sampler.sample(assign_result, squares,
+ gt_bboxes)
+
+ bbox_anchors = torch.zeros_like(squares)
+ bbox_gts = torch.zeros_like(squares)
+ bbox_weights = torch.zeros_like(squares)
+
+ pos_inds = sampling_result.pos_inds
+ neg_inds = sampling_result.neg_inds
+ if len(pos_inds) > 0:
+ bbox_anchors[pos_inds, :] = sampling_result.pos_bboxes
+ bbox_gts[pos_inds, :] = sampling_result.pos_gt_bboxes
+ bbox_weights[pos_inds, :] = 1.0
+
+ # map up to original set of anchors
+ if unmap_outputs:
+ num_total_anchors = flat_squares.size(0)
+ bbox_anchors = unmap(bbox_anchors, num_total_anchors, inside_flags)
+ bbox_gts = unmap(bbox_gts, num_total_anchors, inside_flags)
+ bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags)
+
+ return (bbox_anchors, bbox_gts, bbox_weights, pos_inds, neg_inds)
+
+ def ga_shape_targets(self,
+ approx_list,
+ inside_flag_list,
+ square_list,
+ gt_bboxes_list,
+ img_metas,
+ gt_bboxes_ignore_list=None,
+ unmap_outputs=True):
+ """Compute guided anchoring targets.
+
+ Args:
+ approx_list (list[list]): Multi level approxs of each image.
+ inside_flag_list (list[list]): Multi level inside flags of each
+ image.
+ square_list (list[list]): Multi level squares of each image.
+ gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image.
+ img_metas (list[dict]): Meta info of each image.
+ gt_bboxes_ignore_list (list[Tensor]): ignore list of gt bboxes.
+ unmap_outputs (bool): unmap outputs or not.
+
+ Returns:
+ tuple
+ """
+ num_imgs = len(img_metas)
+ assert len(approx_list) == len(inside_flag_list) == len(
+ square_list) == num_imgs
+ # anchor number of multi levels
+ num_level_squares = [squares.size(0) for squares in square_list[0]]
+ # concat all level anchors and flags to a single tensor
+ inside_flag_flat_list = []
+ approx_flat_list = []
+ square_flat_list = []
+ for i in range(num_imgs):
+ assert len(square_list[i]) == len(inside_flag_list[i])
+ inside_flag_flat_list.append(torch.cat(inside_flag_list[i]))
+ approx_flat_list.append(torch.cat(approx_list[i]))
+ square_flat_list.append(torch.cat(square_list[i]))
+
+ # compute targets for each image
+ if gt_bboxes_ignore_list is None:
+ gt_bboxes_ignore_list = [None for _ in range(num_imgs)]
+ (all_bbox_anchors, all_bbox_gts, all_bbox_weights, pos_inds_list,
+ neg_inds_list) = multi_apply(
+ self._ga_shape_target_single,
+ approx_flat_list,
+ inside_flag_flat_list,
+ square_flat_list,
+ gt_bboxes_list,
+ gt_bboxes_ignore_list,
+ img_metas,
+ unmap_outputs=unmap_outputs)
+ # no valid anchors
+ if any([bbox_anchors is None for bbox_anchors in all_bbox_anchors]):
+ return None
+ # sampled anchors of all images
+ num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list])
+ num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list])
+ # split targets to a list w.r.t. multiple levels
+ bbox_anchors_list = images_to_levels(all_bbox_anchors,
+ num_level_squares)
+ bbox_gts_list = images_to_levels(all_bbox_gts, num_level_squares)
+ bbox_weights_list = images_to_levels(all_bbox_weights,
+ num_level_squares)
+ return (bbox_anchors_list, bbox_gts_list, bbox_weights_list,
+ num_total_pos, num_total_neg)
+
+ def loss_shape_single(self, shape_pred, bbox_anchors, bbox_gts,
+ anchor_weights, anchor_total_num):
+ shape_pred = shape_pred.permute(0, 2, 3, 1).contiguous().view(-1, 2)
+ bbox_anchors = bbox_anchors.contiguous().view(-1, 4)
+ bbox_gts = bbox_gts.contiguous().view(-1, 4)
+ anchor_weights = anchor_weights.contiguous().view(-1, 4)
+ bbox_deltas = bbox_anchors.new_full(bbox_anchors.size(), 0)
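+        # Note: only the (dw, dh) entries receive the shape prediction; the
+        # centre deltas stay zero, so decoding keeps each square anchor's
+        # centre and only adapts its width and height.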
+ bbox_deltas[:, 2:] += shape_pred
+ # filter out negative samples to speed-up weighted_bounded_iou_loss
+ inds = torch.nonzero(
+ anchor_weights[:, 0] > 0, as_tuple=False).squeeze(1)
+ bbox_deltas_ = bbox_deltas[inds]
+ bbox_anchors_ = bbox_anchors[inds]
+ bbox_gts_ = bbox_gts[inds]
+ anchor_weights_ = anchor_weights[inds]
+ pred_anchors_ = self.anchor_coder.decode(
+ bbox_anchors_, bbox_deltas_, wh_ratio_clip=1e-6)
+ loss_shape = self.loss_shape(
+ pred_anchors_,
+ bbox_gts_,
+ anchor_weights_,
+ avg_factor=anchor_total_num)
+ return loss_shape
+
+ def loss_loc_single(self, loc_pred, loc_target, loc_weight,
+ loc_avg_factor):
+ loss_loc = self.loss_loc(
+ loc_pred.reshape(-1, 1),
+ loc_target.reshape(-1).long(),
+ loc_weight.reshape(-1),
+ avg_factor=loc_avg_factor)
+ return loss_loc
+
+ @force_fp32(
+ apply_to=('cls_scores', 'bbox_preds', 'shape_preds', 'loc_preds'))
+ def loss(self,
+ cls_scores,
+ bbox_preds,
+ shape_preds,
+ loc_preds,
+ gt_bboxes,
+ gt_labels,
+ img_metas,
+ gt_bboxes_ignore=None):
+ featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
+ assert len(featmap_sizes) == self.approx_anchor_generator.num_levels
+
+ device = cls_scores[0].device
+
+ # get loc targets
+ loc_targets, loc_weights, loc_avg_factor = self.ga_loc_targets(
+ gt_bboxes, featmap_sizes)
+
+ # get sampled approxes
+ approxs_list, inside_flag_list = self.get_sampled_approxs(
+ featmap_sizes, img_metas, device=device)
+ # get squares and guided anchors
+ squares_list, guided_anchors_list, _ = self.get_anchors(
+ featmap_sizes, shape_preds, loc_preds, img_metas, device=device)
+
+ # get shape targets
+ shape_targets = self.ga_shape_targets(approxs_list, inside_flag_list,
+ squares_list, gt_bboxes,
+ img_metas)
+ if shape_targets is None:
+ return None
+ (bbox_anchors_list, bbox_gts_list, anchor_weights_list, anchor_fg_num,
+ anchor_bg_num) = shape_targets
+ anchor_total_num = (
+ anchor_fg_num if not self.ga_sampling else anchor_fg_num +
+ anchor_bg_num)
+
+ # get anchor targets
+ label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1
+ cls_reg_targets = self.get_targets(
+ guided_anchors_list,
+ inside_flag_list,
+ gt_bboxes,
+ img_metas,
+ gt_bboxes_ignore_list=gt_bboxes_ignore,
+ gt_labels_list=gt_labels,
+ label_channels=label_channels)
+ if cls_reg_targets is None:
+ return None
+ (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,
+ num_total_pos, num_total_neg) = cls_reg_targets
+ num_total_samples = (
+ num_total_pos + num_total_neg if self.sampling else num_total_pos)
+
+ # anchor number of multi levels
+ num_level_anchors = [
+ anchors.size(0) for anchors in guided_anchors_list[0]
+ ]
+ # concat all level anchors to a single tensor
+ concat_anchor_list = []
+ for i in range(len(guided_anchors_list)):
+ concat_anchor_list.append(torch.cat(guided_anchors_list[i]))
+ all_anchor_list = images_to_levels(concat_anchor_list,
+ num_level_anchors)
+
+ # get classification and bbox regression losses
+ losses_cls, losses_bbox = multi_apply(
+ self.loss_single,
+ cls_scores,
+ bbox_preds,
+ all_anchor_list,
+ labels_list,
+ label_weights_list,
+ bbox_targets_list,
+ bbox_weights_list,
+ num_total_samples=num_total_samples)
+
+ # get anchor location loss
+ losses_loc = []
+ for i in range(len(loc_preds)):
+ loss_loc = self.loss_loc_single(
+ loc_preds[i],
+ loc_targets[i],
+ loc_weights[i],
+ loc_avg_factor=loc_avg_factor)
+ losses_loc.append(loss_loc)
+
+ # get anchor shape loss
+ losses_shape = []
+ for i in range(len(shape_preds)):
+ loss_shape = self.loss_shape_single(
+ shape_preds[i],
+ bbox_anchors_list[i],
+ bbox_gts_list[i],
+ anchor_weights_list[i],
+ anchor_total_num=anchor_total_num)
+ losses_shape.append(loss_shape)
+
+ return dict(
+ loss_cls=losses_cls,
+ loss_bbox=losses_bbox,
+ loss_shape=losses_shape,
+ loss_loc=losses_loc)
+
+ @force_fp32(
+ apply_to=('cls_scores', 'bbox_preds', 'shape_preds', 'loc_preds'))
+ def get_bboxes(self,
+ cls_scores,
+ bbox_preds,
+ shape_preds,
+ loc_preds,
+ img_metas,
+ cfg=None,
+ rescale=False):
+ assert len(cls_scores) == len(bbox_preds) == len(shape_preds) == len(
+ loc_preds)
+ num_levels = len(cls_scores)
+ featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
+ device = cls_scores[0].device
+ # get guided anchors
+ _, guided_anchors, loc_masks = self.get_anchors(
+ featmap_sizes,
+ shape_preds,
+ loc_preds,
+ img_metas,
+ use_loc_filter=not self.training,
+ device=device)
+ result_list = []
+ for img_id in range(len(img_metas)):
+ cls_score_list = [
+ cls_scores[i][img_id].detach() for i in range(num_levels)
+ ]
+ bbox_pred_list = [
+ bbox_preds[i][img_id].detach() for i in range(num_levels)
+ ]
+ guided_anchor_list = [
+ guided_anchors[img_id][i].detach() for i in range(num_levels)
+ ]
+ loc_mask_list = [
+ loc_masks[img_id][i].detach() for i in range(num_levels)
+ ]
+ img_shape = img_metas[img_id]['img_shape']
+ scale_factor = img_metas[img_id]['scale_factor']
+ proposals = self._get_bboxes_single(cls_score_list, bbox_pred_list,
+ guided_anchor_list,
+ loc_mask_list, img_shape,
+ scale_factor, cfg, rescale)
+ result_list.append(proposals)
+ return result_list
+
+ def _get_bboxes_single(self,
+ cls_scores,
+ bbox_preds,
+ mlvl_anchors,
+ mlvl_masks,
+ img_shape,
+ scale_factor,
+ cfg,
+ rescale=False):
+ cfg = self.test_cfg if cfg is None else cfg
+ assert len(cls_scores) == len(bbox_preds) == len(mlvl_anchors)
+ mlvl_bboxes = []
+ mlvl_scores = []
+ for cls_score, bbox_pred, anchors, mask in zip(cls_scores, bbox_preds,
+ mlvl_anchors,
+ mlvl_masks):
+ assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
+ # if no location is kept, end.
+ if mask.sum() == 0:
+ continue
+ # reshape scores and bbox_pred
+ cls_score = cls_score.permute(1, 2,
+ 0).reshape(-1, self.cls_out_channels)
+ if self.use_sigmoid_cls:
+ scores = cls_score.sigmoid()
+ else:
+ scores = cls_score.softmax(-1)
+ bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4)
+ # filter scores, bbox_pred w.r.t. mask.
+ # anchors are filtered in get_anchors() beforehand.
+ scores = scores[mask, :]
+ bbox_pred = bbox_pred[mask, :]
+ if scores.dim() == 0:
+ anchors = anchors.unsqueeze(0)
+ scores = scores.unsqueeze(0)
+ bbox_pred = bbox_pred.unsqueeze(0)
+ # filter anchors, bbox_pred, scores w.r.t. scores
+ nms_pre = cfg.get('nms_pre', -1)
+ if nms_pre > 0 and scores.shape[0] > nms_pre:
+ if self.use_sigmoid_cls:
+ max_scores, _ = scores.max(dim=1)
+ else:
+                    # note that we set FG labels to [0, num_class-1]
+ # since mmdet v2.0
+ # BG cat_id: num_class
+ max_scores, _ = scores[:, :-1].max(dim=1)
+ _, topk_inds = max_scores.topk(nms_pre)
+ anchors = anchors[topk_inds, :]
+ bbox_pred = bbox_pred[topk_inds, :]
+ scores = scores[topk_inds, :]
+ bboxes = self.bbox_coder.decode(
+ anchors, bbox_pred, max_shape=img_shape)
+ mlvl_bboxes.append(bboxes)
+ mlvl_scores.append(scores)
+ mlvl_bboxes = torch.cat(mlvl_bboxes)
+ if rescale:
+ mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor)
+ mlvl_scores = torch.cat(mlvl_scores)
+ if self.use_sigmoid_cls:
+ # Add a dummy background class to the backend when using sigmoid
+            # note that we set FG labels to [0, num_class-1] since mmdet v2.0
+ # BG cat_id: num_class
+ padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1)
+ mlvl_scores = torch.cat([mlvl_scores, padding], dim=1)
+ # multi class NMS
+ det_bboxes, det_labels = multiclass_nms(mlvl_bboxes, mlvl_scores,
+ cfg.score_thr, cfg.nms,
+ cfg.max_per_img)
+ return det_bboxes, det_labels
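+
+
+if __name__ == '__main__':
+    # Illustrative sketch only (not part of mmdet): the "unmap" pattern used
+    # in _ga_shape_target_single above, which scatters targets computed on the
+    # valid (inside-image) anchors back to the full anchor set. The helper
+    # below is a hypothetical stand-in for mmdet.core.unmap.
+    import torch
+
+    def _unmap_sketch(data, count, inside_flags, fill=0):
+        out = data.new_full((count, ) + data.size()[1:], fill)
+        out[inside_flags] = data  # boolean-mask scatter of the valid rows
+        return out
+
+    inside = torch.tensor([True, False, True, True, False])
+    valid_targets = torch.arange(12, dtype=torch.float).view(3, 4)
+    full_targets = _unmap_sketch(valid_targets, inside.numel(), inside)
+    print(full_targets.shape)  # torch.Size([5, 4]); rows 1 and 4 remain 0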
diff --git a/annotator/uniformer/mmdet_null/models/dense_heads/ld_head.py b/annotator/uniformer/mmdet_null/models/dense_heads/ld_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..501e1f7befa086f0b2f818531807411fc383d7bd
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/dense_heads/ld_head.py
@@ -0,0 +1,261 @@
+import torch
+from mmcv.runner import force_fp32
+
+from mmdet.core import (bbox2distance, bbox_overlaps, distance2bbox,
+ multi_apply, reduce_mean)
+from ..builder import HEADS, build_loss
+from .gfl_head import GFLHead
+
+
+@HEADS.register_module()
+class LDHead(GFLHead):
+ """Localization distillation Head. (Short description)
+
+ It utilizes the learned bbox distributions to transfer the localization
+ dark knowledge from teacher to student. Original paper: `Localization
+ Distillation for Object Detection. `_
+
+ Args:
+ num_classes (int): Number of categories excluding the background
+ category.
+ in_channels (int): Number of channels in the input feature map.
+ loss_ld (dict): Config of Localization Distillation Loss (LD),
+ T is the temperature for distillation.
+ """
+
+ def __init__(self,
+ num_classes,
+ in_channels,
+ loss_ld=dict(
+ type='LocalizationDistillationLoss',
+ loss_weight=0.25,
+ T=10),
+ **kwargs):
+
+ super(LDHead, self).__init__(num_classes, in_channels, **kwargs)
+ self.loss_ld = build_loss(loss_ld)
+
+ def loss_single(self, anchors, cls_score, bbox_pred, labels, label_weights,
+ bbox_targets, stride, soft_targets, num_total_samples):
+ """Compute loss of a single scale level.
+
+ Args:
+ anchors (Tensor): Box reference for each scale level with shape
+ (N, num_total_anchors, 4).
+ cls_score (Tensor): Cls and quality joint scores for each scale
+ level has shape (N, num_classes, H, W).
+ bbox_pred (Tensor): Box distribution logits for each scale
+ level with shape (N, 4*(n+1), H, W), n is max value of integral
+ set.
+ labels (Tensor): Labels of each anchors with shape
+ (N, num_total_anchors).
+ label_weights (Tensor): Label weights of each anchor with shape
+ (N, num_total_anchors)
+            bbox_targets (Tensor): BBox regression targets of each anchor with
+                shape (N, num_total_anchors, 4).
+            stride (tuple): Stride in this scale level.
+            soft_targets (Tensor): Teacher's box distribution logits for this
+                scale level, with the same shape as ``bbox_pred``.
+            num_total_samples (int): Number of positive samples that is
+                reduced over all GPUs.
+
+ Returns:
+            tuple[Tensor]: Loss components and the sum of weight targets.
+ """
+ assert stride[0] == stride[1], 'h stride is not equal to w stride!'
+ anchors = anchors.reshape(-1, 4)
+ cls_score = cls_score.permute(0, 2, 3,
+ 1).reshape(-1, self.cls_out_channels)
+ bbox_pred = bbox_pred.permute(0, 2, 3,
+ 1).reshape(-1, 4 * (self.reg_max + 1))
+ soft_targets = soft_targets.permute(0, 2, 3,
+ 1).reshape(-1,
+ 4 * (self.reg_max + 1))
+
+ bbox_targets = bbox_targets.reshape(-1, 4)
+ labels = labels.reshape(-1)
+ label_weights = label_weights.reshape(-1)
+
+ # FG cat_id: [0, num_classes -1], BG cat_id: num_classes
+ bg_class_ind = self.num_classes
+ pos_inds = ((labels >= 0)
+ & (labels < bg_class_ind)).nonzero().squeeze(1)
+ score = label_weights.new_zeros(labels.shape)
+
+ if len(pos_inds) > 0:
+ pos_bbox_targets = bbox_targets[pos_inds]
+ pos_bbox_pred = bbox_pred[pos_inds]
+ pos_anchors = anchors[pos_inds]
+ pos_anchor_centers = self.anchor_center(pos_anchors) / stride[0]
+
+ weight_targets = cls_score.detach().sigmoid()
+ weight_targets = weight_targets.max(dim=1)[0][pos_inds]
+ pos_bbox_pred_corners = self.integral(pos_bbox_pred)
+ pos_decode_bbox_pred = distance2bbox(pos_anchor_centers,
+ pos_bbox_pred_corners)
+ pos_decode_bbox_targets = pos_bbox_targets / stride[0]
+ score[pos_inds] = bbox_overlaps(
+ pos_decode_bbox_pred.detach(),
+ pos_decode_bbox_targets,
+ is_aligned=True)
+ pred_corners = pos_bbox_pred.reshape(-1, self.reg_max + 1)
+ pos_soft_targets = soft_targets[pos_inds]
+ soft_corners = pos_soft_targets.reshape(-1, self.reg_max + 1)
+
+ target_corners = bbox2distance(pos_anchor_centers,
+ pos_decode_bbox_targets,
+ self.reg_max).reshape(-1)
+
+ # regression loss
+ loss_bbox = self.loss_bbox(
+ pos_decode_bbox_pred,
+ pos_decode_bbox_targets,
+ weight=weight_targets,
+ avg_factor=1.0)
+
+ # dfl loss
+ loss_dfl = self.loss_dfl(
+ pred_corners,
+ target_corners,
+ weight=weight_targets[:, None].expand(-1, 4).reshape(-1),
+ avg_factor=4.0)
+
+ # ld loss
+ loss_ld = self.loss_ld(
+ pred_corners,
+ soft_corners,
+ weight=weight_targets[:, None].expand(-1, 4).reshape(-1),
+ avg_factor=4.0)
+
+ else:
+ loss_ld = bbox_pred.sum() * 0
+ loss_bbox = bbox_pred.sum() * 0
+ loss_dfl = bbox_pred.sum() * 0
+ weight_targets = bbox_pred.new_tensor(0)
+
+ # cls (qfl) loss
+ loss_cls = self.loss_cls(
+ cls_score, (labels, score),
+ weight=label_weights,
+ avg_factor=num_total_samples)
+
+ return loss_cls, loss_bbox, loss_dfl, loss_ld, weight_targets.sum()
+
+ def forward_train(self,
+ x,
+ out_teacher,
+ img_metas,
+ gt_bboxes,
+ gt_labels=None,
+ gt_bboxes_ignore=None,
+ proposal_cfg=None,
+ **kwargs):
+ """
+ Args:
+ x (list[Tensor]): Features from FPN.
+ img_metas (list[dict]): Meta information of each image, e.g.,
+ image size, scaling factor, etc.
+ gt_bboxes (Tensor): Ground truth bboxes of the image,
+ shape (num_gts, 4).
+ gt_labels (Tensor): Ground truth labels of each box,
+ shape (num_gts,).
+ gt_bboxes_ignore (Tensor): Ground truth bboxes to be
+ ignored, shape (num_ignored_gts, 4).
+            proposal_cfg (mmcv.Config): Test / postprocessing configuration;
+                if None, test_cfg is used.
+
+ Returns:
+ tuple[dict, list]: The loss components and proposals of each image.
+
+ - losses (dict[str, Tensor]): A dictionary of loss components.
+ - proposal_list (list[Tensor]): Proposals of each image.
+ """
+ outs = self(x)
+ soft_target = out_teacher[1]
+ if gt_labels is None:
+ loss_inputs = outs + (gt_bboxes, soft_target, img_metas)
+ else:
+ loss_inputs = outs + (gt_bboxes, gt_labels, soft_target, img_metas)
+ losses = self.loss(*loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore)
+ if proposal_cfg is None:
+ return losses
+ else:
+ proposal_list = self.get_bboxes(*outs, img_metas, cfg=proposal_cfg)
+ return losses, proposal_list
+
+ @force_fp32(apply_to=('cls_scores', 'bbox_preds'))
+ def loss(self,
+ cls_scores,
+ bbox_preds,
+ gt_bboxes,
+ gt_labels,
+ soft_target,
+ img_metas,
+ gt_bboxes_ignore=None):
+ """Compute losses of the head.
+
+ Args:
+ cls_scores (list[Tensor]): Cls and quality scores for each scale
+ level has shape (N, num_classes, H, W).
+ bbox_preds (list[Tensor]): Box distribution logits for each scale
+ level with shape (N, 4*(n+1), H, W), n is max value of integral
+ set.
+ gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
+ shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
+ gt_labels (list[Tensor]): class indices corresponding to each box
+ img_metas (list[dict]): Meta information of each image, e.g.,
+ image size, scaling factor, etc.
+ gt_bboxes_ignore (list[Tensor] | None): specify which bounding
+ boxes can be ignored when computing the loss.
+
+ Returns:
+ dict[str, Tensor]: A dictionary of loss components.
+ """
+
+ featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
+ assert len(featmap_sizes) == self.anchor_generator.num_levels
+
+ device = cls_scores[0].device
+ anchor_list, valid_flag_list = self.get_anchors(
+ featmap_sizes, img_metas, device=device)
+ label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1
+
+ cls_reg_targets = self.get_targets(
+ anchor_list,
+ valid_flag_list,
+ gt_bboxes,
+ img_metas,
+ gt_bboxes_ignore_list=gt_bboxes_ignore,
+ gt_labels_list=gt_labels,
+ label_channels=label_channels)
+ if cls_reg_targets is None:
+ return None
+
+ (anchor_list, labels_list, label_weights_list, bbox_targets_list,
+ bbox_weights_list, num_total_pos, num_total_neg) = cls_reg_targets
+
+ num_total_samples = reduce_mean(
+ torch.tensor(num_total_pos, dtype=torch.float,
+ device=device)).item()
+ num_total_samples = max(num_total_samples, 1.0)
+
+ losses_cls, losses_bbox, losses_dfl, losses_ld, \
+ avg_factor = multi_apply(
+ self.loss_single,
+ anchor_list,
+ cls_scores,
+ bbox_preds,
+ labels_list,
+ label_weights_list,
+ bbox_targets_list,
+ self.anchor_generator.strides,
+ soft_target,
+ num_total_samples=num_total_samples)
+
+ avg_factor = sum(avg_factor) + 1e-6
+ avg_factor = reduce_mean(avg_factor).item()
+ losses_bbox = [x / avg_factor for x in losses_bbox]
+ losses_dfl = [x / avg_factor for x in losses_dfl]
+ return dict(
+ loss_cls=losses_cls,
+ loss_bbox=losses_bbox,
+ loss_dfl=losses_dfl,
+ loss_ld=losses_ld)
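+
+
+if __name__ == '__main__':
+    # Minimal, illustrative sketch (an assumption, not mmdet's
+    # LocalizationDistillationLoss): conceptually, LD distills the per-edge
+    # box distributions (reg_max + 1 bins per edge) from teacher to student
+    # with a temperature-scaled KL term, matching how pred_corners and
+    # soft_corners are paired in loss_single above.
+    import torch
+    import torch.nn.functional as F
+
+    T = 10.0          # distillation temperature, as in the default loss_ld cfg
+    reg_max = 16      # assumed size of the integral set
+    student = torch.randn(8, reg_max + 1)   # e.g. 2 positives x 4 edges
+    teacher = torch.randn(8, reg_max + 1)
+    kd = F.kl_div(
+        F.log_softmax(student / T, dim=1),
+        F.softmax(teacher / T, dim=1),
+        reduction='batchmean') * (T * T)
+    print(float(kd))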
diff --git a/annotator/uniformer/mmdet_null/models/dense_heads/nasfcos_head.py b/annotator/uniformer/mmdet_null/models/dense_heads/nasfcos_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..994ce0455e1982110f237b3958a81394c319bb47
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/dense_heads/nasfcos_head.py
@@ -0,0 +1,75 @@
+import copy
+
+import torch.nn as nn
+from mmcv.cnn import (ConvModule, Scale, bias_init_with_prob,
+ caffe2_xavier_init, normal_init)
+
+from mmdet.models.dense_heads.fcos_head import FCOSHead
+from ..builder import HEADS
+
+
+@HEADS.register_module()
+class NASFCOSHead(FCOSHead):
+ """Anchor-free head used in `NASFCOS `_.
+
+ It is quite similar with FCOS head, except for the searched structure of
+ classification branch and bbox regression branch, where a structure of
+ "dconv3x3, conv3x3, dconv3x3, conv1x1" is utilized instead.
+ """
+
+ def _init_layers(self):
+ """Initialize layers of the head."""
+ dconv3x3_config = dict(
+ type='DCNv2',
+ kernel_size=3,
+ use_bias=True,
+ deform_groups=2,
+ padding=1)
+ conv3x3_config = dict(type='Conv', kernel_size=3, padding=1)
+ conv1x1_config = dict(type='Conv', kernel_size=1)
+
+ self.arch_config = [
+ dconv3x3_config, conv3x3_config, dconv3x3_config, conv1x1_config
+ ]
+ self.cls_convs = nn.ModuleList()
+ self.reg_convs = nn.ModuleList()
+ for i, op_ in enumerate(self.arch_config):
+ op = copy.deepcopy(op_)
+ chn = self.in_channels if i == 0 else self.feat_channels
+ assert isinstance(op, dict)
+ use_bias = op.pop('use_bias', False)
+ padding = op.pop('padding', 0)
+ kernel_size = op.pop('kernel_size')
+ module = ConvModule(
+ chn,
+ self.feat_channels,
+ kernel_size,
+ stride=1,
+ padding=padding,
+ norm_cfg=self.norm_cfg,
+ bias=use_bias,
+ conv_cfg=op)
+
+ self.cls_convs.append(copy.deepcopy(module))
+ self.reg_convs.append(copy.deepcopy(module))
+
+ self.conv_cls = nn.Conv2d(
+ self.feat_channels, self.cls_out_channels, 3, padding=1)
+ self.conv_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1)
+ self.conv_centerness = nn.Conv2d(self.feat_channels, 1, 3, padding=1)
+
+ self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides])
+
+ def init_weights(self):
+ """Initialize weights of the head."""
+ # retinanet_bias_init
+ bias_cls = bias_init_with_prob(0.01)
+ normal_init(self.conv_reg, std=0.01)
+ normal_init(self.conv_centerness, std=0.01)
+ normal_init(self.conv_cls, std=0.01, bias=bias_cls)
+
+ for branch in [self.cls_convs, self.reg_convs]:
+ for module in branch.modules():
+ if isinstance(module, ConvModule) \
+ and isinstance(module.conv, nn.Conv2d):
+ caffe2_xavier_init(module.conv)
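+
+
+if __name__ == '__main__':
+    # Illustrative sketch only: a plain-PyTorch stand-in for the searched
+    # branch "dconv3x3, conv3x3, dconv3x3, conv1x1", with ordinary Conv2d
+    # layers substituted for DCNv2 so the channel/shape flow can be checked
+    # without mmcv ops. feat_channels=256 is an assumed value.
+    import torch
+    import torch.nn as nn
+
+    feat_channels = 256
+    branch = nn.Sequential(
+        nn.Conv2d(feat_channels, feat_channels, 3, padding=1),  # ~dconv3x3
+        nn.Conv2d(feat_channels, feat_channels, 3, padding=1),  # conv3x3
+        nn.Conv2d(feat_channels, feat_channels, 3, padding=1),  # ~dconv3x3
+        nn.Conv2d(feat_channels, feat_channels, 1))             # conv1x1
+    x = torch.randn(1, feat_channels, 32, 32)
+    print(branch(x).shape)  # torch.Size([1, 256, 32, 32])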
diff --git a/annotator/uniformer/mmdet_null/models/dense_heads/paa_head.py b/annotator/uniformer/mmdet_null/models/dense_heads/paa_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..e067b0121cf8b8230c0c9c6b8cfd41f56be4e298
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/dense_heads/paa_head.py
@@ -0,0 +1,671 @@
+import numpy as np
+import torch
+from mmcv.runner import force_fp32
+
+from mmdet.core import multi_apply, multiclass_nms
+from mmdet.core.bbox.iou_calculators import bbox_overlaps
+from mmdet.models import HEADS
+from mmdet.models.dense_heads import ATSSHead
+
+EPS = 1e-12
+try:
+ import sklearn.mixture as skm
+except ImportError:
+ skm = None
+
+
+def levels_to_images(mlvl_tensor):
+ """Concat multi-level feature maps by image.
+
+ [feature_level0, feature_level1...] -> [feature_image0, feature_image1...]
+    Convert the shape of each element in mlvl_tensor from (N, C, H, W) to
+    (N, H*W, C), then split each element into N tensors of shape (H*W, C), and
+    concatenate the tensors belonging to the same image across all levels
+    along the first dimension.
+
+ Args:
+        mlvl_tensor (list[torch.Tensor]): list of Tensors collected from the
+            corresponding levels. Each element is of shape (N, C, H, W).
+
+ Returns:
+ list[torch.Tensor]: A list that contains N tensors and each tensor is
+ of shape (num_elements, C)
+ """
+ batch_size = mlvl_tensor[0].size(0)
+ batch_list = [[] for _ in range(batch_size)]
+ channels = mlvl_tensor[0].size(1)
+ for t in mlvl_tensor:
+ t = t.permute(0, 2, 3, 1)
+ t = t.view(batch_size, -1, channels).contiguous()
+ for img in range(batch_size):
+ batch_list[img].append(t[img])
+ return [torch.cat(item, 0) for item in batch_list]
+
+
+@HEADS.register_module()
+class PAAHead(ATSSHead):
+ """Head of PAAAssignment: Probabilistic Anchor Assignment with IoU
+ Prediction for Object Detection.
+
+ Code is modified from the `official github repo
+ `_.
+
+ More details can be found in the `paper
+ `_ .
+
+ Args:
+ topk (int): Select topk samples with smallest loss in
+ each level.
+ score_voting (bool): Whether to use score voting in post-process.
+        covariance_type (str): String describing the type of covariance
+            parameters to be used in :class:`sklearn.mixture.GaussianMixture`.
+ It must be one of:
+
+ - 'full': each component has its own general covariance matrix
+ - 'tied': all components share the same general covariance matrix
+ - 'diag': each component has its own diagonal covariance matrix
+ - 'spherical': each component has its own single variance
+ Default: 'diag'. From 'full' to 'spherical', the gmm fitting
+ process is faster yet the performance could be influenced. For most
+ cases, 'diag' should be a good choice.
+ """
+
+ def __init__(self,
+ *args,
+ topk=9,
+ score_voting=True,
+ covariance_type='diag',
+ **kwargs):
+ # topk used in paa reassign process
+ self.topk = topk
+ self.with_score_voting = score_voting
+ self.covariance_type = covariance_type
+ super(PAAHead, self).__init__(*args, **kwargs)
+
+ @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'iou_preds'))
+ def loss(self,
+ cls_scores,
+ bbox_preds,
+ iou_preds,
+ gt_bboxes,
+ gt_labels,
+ img_metas,
+ gt_bboxes_ignore=None):
+ """Compute losses of the head.
+
+ Args:
+ cls_scores (list[Tensor]): Box scores for each scale level
+ Has shape (N, num_anchors * num_classes, H, W)
+ bbox_preds (list[Tensor]): Box energies / deltas for each scale
+ level with shape (N, num_anchors * 4, H, W)
+ iou_preds (list[Tensor]): iou_preds for each scale
+ level with shape (N, num_anchors * 1, H, W)
+ gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
+ shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
+ gt_labels (list[Tensor]): class indices corresponding to each box
+ img_metas (list[dict]): Meta information of each image, e.g.,
+ image size, scaling factor, etc.
+ gt_bboxes_ignore (list[Tensor] | None): Specify which bounding
+                boxes can be ignored when computing the loss.
+
+ Returns:
+            dict[str, Tensor]: A dictionary of loss components.
+ """
+
+ featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
+ assert len(featmap_sizes) == self.anchor_generator.num_levels
+
+ device = cls_scores[0].device
+ anchor_list, valid_flag_list = self.get_anchors(
+ featmap_sizes, img_metas, device=device)
+ label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1
+ cls_reg_targets = self.get_targets(
+ anchor_list,
+ valid_flag_list,
+ gt_bboxes,
+ img_metas,
+ gt_bboxes_ignore_list=gt_bboxes_ignore,
+ gt_labels_list=gt_labels,
+ label_channels=label_channels,
+ )
+ (labels, labels_weight, bboxes_target, bboxes_weight, pos_inds,
+ pos_gt_index) = cls_reg_targets
+ cls_scores = levels_to_images(cls_scores)
+ cls_scores = [
+ item.reshape(-1, self.cls_out_channels) for item in cls_scores
+ ]
+ bbox_preds = levels_to_images(bbox_preds)
+ bbox_preds = [item.reshape(-1, 4) for item in bbox_preds]
+ iou_preds = levels_to_images(iou_preds)
+ iou_preds = [item.reshape(-1, 1) for item in iou_preds]
+ pos_losses_list, = multi_apply(self.get_pos_loss, anchor_list,
+ cls_scores, bbox_preds, labels,
+ labels_weight, bboxes_target,
+ bboxes_weight, pos_inds)
+
+ with torch.no_grad():
+ reassign_labels, reassign_label_weight, \
+ reassign_bbox_weights, num_pos = multi_apply(
+ self.paa_reassign,
+ pos_losses_list,
+ labels,
+ labels_weight,
+ bboxes_weight,
+ pos_inds,
+ pos_gt_index,
+ anchor_list)
+ num_pos = sum(num_pos)
+ # convert all tensor list to a flatten tensor
+ cls_scores = torch.cat(cls_scores, 0).view(-1, cls_scores[0].size(-1))
+ bbox_preds = torch.cat(bbox_preds, 0).view(-1, bbox_preds[0].size(-1))
+ iou_preds = torch.cat(iou_preds, 0).view(-1, iou_preds[0].size(-1))
+ labels = torch.cat(reassign_labels, 0).view(-1)
+ flatten_anchors = torch.cat(
+ [torch.cat(item, 0) for item in anchor_list])
+ labels_weight = torch.cat(reassign_label_weight, 0).view(-1)
+ bboxes_target = torch.cat(bboxes_target,
+ 0).view(-1, bboxes_target[0].size(-1))
+
+ pos_inds_flatten = ((labels >= 0)
+ &
+ (labels < self.num_classes)).nonzero().reshape(-1)
+
+ losses_cls = self.loss_cls(
+ cls_scores,
+ labels,
+ labels_weight,
+ avg_factor=max(num_pos, len(img_metas))) # avoid num_pos=0
+ if num_pos:
+ pos_bbox_pred = self.bbox_coder.decode(
+ flatten_anchors[pos_inds_flatten],
+ bbox_preds[pos_inds_flatten])
+ pos_bbox_target = bboxes_target[pos_inds_flatten]
+ iou_target = bbox_overlaps(
+ pos_bbox_pred.detach(), pos_bbox_target, is_aligned=True)
+ losses_iou = self.loss_centerness(
+ iou_preds[pos_inds_flatten],
+ iou_target.unsqueeze(-1),
+ avg_factor=num_pos)
+ losses_bbox = self.loss_bbox(
+ pos_bbox_pred,
+ pos_bbox_target,
+ iou_target.clamp(min=EPS),
+ avg_factor=iou_target.sum())
+ else:
+ losses_iou = iou_preds.sum() * 0
+ losses_bbox = bbox_preds.sum() * 0
+
+ return dict(
+ loss_cls=losses_cls, loss_bbox=losses_bbox, loss_iou=losses_iou)
+
+ def get_pos_loss(self, anchors, cls_score, bbox_pred, label, label_weight,
+ bbox_target, bbox_weight, pos_inds):
+ """Calculate loss of all potential positive samples obtained from first
+ match process.
+
+ Args:
+ anchors (list[Tensor]): Anchors of each scale.
+ cls_score (Tensor): Box scores of single image with shape
+ (num_anchors, num_classes)
+ bbox_pred (Tensor): Box energies / deltas of single image
+ with shape (num_anchors, 4)
+ label (Tensor): classification target of each anchor with
+ shape (num_anchors,)
+ label_weight (Tensor): Classification loss weight of each
+ anchor with shape (num_anchors).
+            bbox_target (Tensor): Regression target of each anchor with
+ shape (num_anchors, 4).
+ bbox_weight (Tensor): Bbox weight of each anchor with shape
+ (num_anchors, 4).
+            pos_inds (Tensor): Indices of all positive samples obtained from
+                the first assignment process.
+
+ Returns:
+ Tensor: Losses of all positive samples in single image.
+ """
+ if not len(pos_inds):
+ return cls_score.new([]),
+ anchors_all_level = torch.cat(anchors, 0)
+ pos_scores = cls_score[pos_inds]
+ pos_bbox_pred = bbox_pred[pos_inds]
+ pos_label = label[pos_inds]
+ pos_label_weight = label_weight[pos_inds]
+ pos_bbox_target = bbox_target[pos_inds]
+ pos_bbox_weight = bbox_weight[pos_inds]
+ pos_anchors = anchors_all_level[pos_inds]
+ pos_bbox_pred = self.bbox_coder.decode(pos_anchors, pos_bbox_pred)
+
+ # to keep loss dimension
+ loss_cls = self.loss_cls(
+ pos_scores,
+ pos_label,
+ pos_label_weight,
+ avg_factor=self.loss_cls.loss_weight,
+ reduction_override='none')
+
+ loss_bbox = self.loss_bbox(
+ pos_bbox_pred,
+ pos_bbox_target,
+ pos_bbox_weight,
+ avg_factor=self.loss_cls.loss_weight,
+ reduction_override='none')
+
+ loss_cls = loss_cls.sum(-1)
+ pos_loss = loss_bbox + loss_cls
+ return pos_loss,
+
+ def paa_reassign(self, pos_losses, label, label_weight, bbox_weight,
+ pos_inds, pos_gt_inds, anchors):
+ """Fit loss to GMM distribution and separate positive, ignore, negative
+ samples again with GMM model.
+
+ Args:
+ pos_losses (Tensor): Losses of all positive samples in
+ single image.
+ label (Tensor): classification target of each anchor with
+ shape (num_anchors,)
+ label_weight (Tensor): Classification loss weight of each
+ anchor with shape (num_anchors).
+ bbox_weight (Tensor): Bbox weight of each anchor with shape
+ (num_anchors, 4).
+            pos_inds (Tensor): Indices of all positive samples obtained from
+                the first assignment process.
+            pos_gt_inds (Tensor): Gt indices of all positive samples obtained
+                from the first assignment process.
+ anchors (list[Tensor]): Anchors of each scale.
+
+ Returns:
+ tuple: Usually returns a tuple containing learning targets.
+
+ - label (Tensor): classification target of each anchor after
+ paa assign, with shape (num_anchors,)
+ - label_weight (Tensor): Classification loss weight of each
+ anchor after paa assign, with shape (num_anchors).
+ - bbox_weight (Tensor): Bbox weight of each anchor with shape
+ (num_anchors, 4).
+ - num_pos (int): The number of positive samples after paa
+ assign.
+ """
+ if not len(pos_inds):
+ return label, label_weight, bbox_weight, 0
+ label = label.clone()
+ label_weight = label_weight.clone()
+ bbox_weight = bbox_weight.clone()
+ num_gt = pos_gt_inds.max() + 1
+ num_level = len(anchors)
+ num_anchors_each_level = [item.size(0) for item in anchors]
+ num_anchors_each_level.insert(0, 0)
+ inds_level_interval = np.cumsum(num_anchors_each_level)
+ pos_level_mask = []
+ for i in range(num_level):
+ mask = (pos_inds >= inds_level_interval[i]) & (
+ pos_inds < inds_level_interval[i + 1])
+ pos_level_mask.append(mask)
+ pos_inds_after_paa = [label.new_tensor([])]
+ ignore_inds_after_paa = [label.new_tensor([])]
+ for gt_ind in range(num_gt):
+ pos_inds_gmm = []
+ pos_loss_gmm = []
+ gt_mask = pos_gt_inds == gt_ind
+ for level in range(num_level):
+ level_mask = pos_level_mask[level]
+ level_gt_mask = level_mask & gt_mask
+ value, topk_inds = pos_losses[level_gt_mask].topk(
+ min(level_gt_mask.sum(), self.topk), largest=False)
+ pos_inds_gmm.append(pos_inds[level_gt_mask][topk_inds])
+ pos_loss_gmm.append(value)
+ pos_inds_gmm = torch.cat(pos_inds_gmm)
+ pos_loss_gmm = torch.cat(pos_loss_gmm)
+ # fix gmm need at least two sample
+ if len(pos_inds_gmm) < 2:
+ continue
+ device = pos_inds_gmm.device
+ pos_loss_gmm, sort_inds = pos_loss_gmm.sort()
+ pos_inds_gmm = pos_inds_gmm[sort_inds]
+ pos_loss_gmm = pos_loss_gmm.view(-1, 1).cpu().numpy()
+ min_loss, max_loss = pos_loss_gmm.min(), pos_loss_gmm.max()
+ means_init = np.array([min_loss, max_loss]).reshape(2, 1)
+ weights_init = np.array([0.5, 0.5])
+ precisions_init = np.array([1.0, 1.0]).reshape(2, 1, 1) # full
+ if self.covariance_type == 'spherical':
+ precisions_init = precisions_init.reshape(2)
+ elif self.covariance_type == 'diag':
+ precisions_init = precisions_init.reshape(2, 1)
+ elif self.covariance_type == 'tied':
+ precisions_init = np.array([[1.0]])
+ if skm is None:
+ raise ImportError('Please run "pip install sklearn" '
+ 'to install sklearn first.')
+ gmm = skm.GaussianMixture(
+ 2,
+ weights_init=weights_init,
+ means_init=means_init,
+ precisions_init=precisions_init,
+ covariance_type=self.covariance_type)
+ gmm.fit(pos_loss_gmm)
+ gmm_assignment = gmm.predict(pos_loss_gmm)
+ scores = gmm.score_samples(pos_loss_gmm)
+ gmm_assignment = torch.from_numpy(gmm_assignment).to(device)
+ scores = torch.from_numpy(scores).to(device)
+
+ pos_inds_temp, ignore_inds_temp = self.gmm_separation_scheme(
+ gmm_assignment, scores, pos_inds_gmm)
+ pos_inds_after_paa.append(pos_inds_temp)
+ ignore_inds_after_paa.append(ignore_inds_temp)
+
+ pos_inds_after_paa = torch.cat(pos_inds_after_paa)
+ ignore_inds_after_paa = torch.cat(ignore_inds_after_paa)
+ reassign_mask = (pos_inds.unsqueeze(1) != pos_inds_after_paa).all(1)
+ reassign_ids = pos_inds[reassign_mask]
+ label[reassign_ids] = self.num_classes
+ label_weight[ignore_inds_after_paa] = 0
+ bbox_weight[reassign_ids] = 0
+ num_pos = len(pos_inds_after_paa)
+ return label, label_weight, bbox_weight, num_pos
+
+ def gmm_separation_scheme(self, gmm_assignment, scores, pos_inds_gmm):
+ """A general separation scheme for gmm model.
+
+        It separates a GMM distribution of candidate samples into three
+        parts, 0, 1 and uncertain areas, and you can implement other
+        separation schemes by rewriting this function.
+
+ Args:
+ gmm_assignment (Tensor): The prediction of GMM which is of shape
+ (num_samples,). The 0/1 value indicates the distribution
+ that each sample comes from.
+ scores (Tensor): The probability of sample coming from the
+ fit GMM distribution. The tensor is of shape (num_samples,).
+ pos_inds_gmm (Tensor): All the indexes of samples which are used
+ to fit GMM model. The tensor is of shape (num_samples,)
+
+ Returns:
+ tuple[Tensor]: The indices of positive and ignored samples.
+
+ - pos_inds_temp (Tensor): Indices of positive samples.
+ - ignore_inds_temp (Tensor): Indices of ignore samples.
+ """
+        # The implementation is (c) in Fig.3 of the original paper instead of (b).
+ # You can refer to issues such as
+ # https://github.com/kkhoot/PAA/issues/8 and
+ # https://github.com/kkhoot/PAA/issues/9.
+ fgs = gmm_assignment == 0
+ pos_inds_temp = fgs.new_tensor([], dtype=torch.long)
+ ignore_inds_temp = fgs.new_tensor([], dtype=torch.long)
+ if fgs.nonzero().numel():
+ _, pos_thr_ind = scores[fgs].topk(1)
+ pos_inds_temp = pos_inds_gmm[fgs][:pos_thr_ind + 1]
+ ignore_inds_temp = pos_inds_gmm.new_tensor([])
+ return pos_inds_temp, ignore_inds_temp
+
+ def get_targets(
+ self,
+ anchor_list,
+ valid_flag_list,
+ gt_bboxes_list,
+ img_metas,
+ gt_bboxes_ignore_list=None,
+ gt_labels_list=None,
+ label_channels=1,
+ unmap_outputs=True,
+ ):
+ """Get targets for PAA head.
+
+        This method is almost the same as `AnchorHead.get_targets()`. We
+        directly return the results from `_get_targets_single` instead of
+        mapping them to levels with the `images_to_levels` function.
+
+ Args:
+ anchor_list (list[list[Tensor]]): Multi level anchors of each
+ image. The outer list indicates images, and the inner list
+ corresponds to feature levels of the image. Each element of
+ the inner list is a tensor of shape (num_anchors, 4).
+ valid_flag_list (list[list[Tensor]]): Multi level valid flags of
+ each image. The outer list indicates images, and the inner list
+ corresponds to feature levels of the image. Each element of
+ the inner list is a tensor of shape (num_anchors, )
+ gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image.
+ img_metas (list[dict]): Meta info of each image.
+ gt_bboxes_ignore_list (list[Tensor]): Ground truth bboxes to be
+ ignored.
+ gt_labels_list (list[Tensor]): Ground truth labels of each box.
+ label_channels (int): Channel of label.
+ unmap_outputs (bool): Whether to map outputs back to the original
+ set of anchors.
+
+ Returns:
+ tuple: Usually returns a tuple containing learning targets.
+
+ - labels (list[Tensor]): Labels of all anchors, each with
+ shape (num_anchors,).
+ - label_weights (list[Tensor]): Label weights of all anchor.
+ each with shape (num_anchors,).
+ - bbox_targets (list[Tensor]): BBox targets of all anchors.
+ each with shape (num_anchors, 4).
+ - bbox_weights (list[Tensor]): BBox weights of all anchors.
+ each with shape (num_anchors, 4).
+ - pos_inds (list[Tensor]): Contains all index of positive
+ sample in all anchor.
+ - gt_inds (list[Tensor]): Contains all gt_index of positive
+ sample in all anchor.
+ """
+
+ num_imgs = len(img_metas)
+ assert len(anchor_list) == len(valid_flag_list) == num_imgs
+ concat_anchor_list = []
+ concat_valid_flag_list = []
+ for i in range(num_imgs):
+ assert len(anchor_list[i]) == len(valid_flag_list[i])
+ concat_anchor_list.append(torch.cat(anchor_list[i]))
+ concat_valid_flag_list.append(torch.cat(valid_flag_list[i]))
+
+ # compute targets for each image
+ if gt_bboxes_ignore_list is None:
+ gt_bboxes_ignore_list = [None for _ in range(num_imgs)]
+ if gt_labels_list is None:
+ gt_labels_list = [None for _ in range(num_imgs)]
+ results = multi_apply(
+ self._get_targets_single,
+ concat_anchor_list,
+ concat_valid_flag_list,
+ gt_bboxes_list,
+ gt_bboxes_ignore_list,
+ gt_labels_list,
+ img_metas,
+ label_channels=label_channels,
+ unmap_outputs=unmap_outputs)
+
+ (labels, label_weights, bbox_targets, bbox_weights, valid_pos_inds,
+ valid_neg_inds, sampling_result) = results
+
+ # Due to valid flag of anchors, we have to calculate the real pos_inds
+ # in origin anchor set.
+ pos_inds = []
+ for i, single_labels in enumerate(labels):
+ pos_mask = (0 <= single_labels) & (
+ single_labels < self.num_classes)
+ pos_inds.append(pos_mask.nonzero().view(-1))
+
+ gt_inds = [item.pos_assigned_gt_inds for item in sampling_result]
+ return (labels, label_weights, bbox_targets, bbox_weights, pos_inds,
+ gt_inds)
+
+ def _get_targets_single(self,
+ flat_anchors,
+ valid_flags,
+ gt_bboxes,
+ gt_bboxes_ignore,
+ gt_labels,
+ img_meta,
+ label_channels=1,
+ unmap_outputs=True):
+ """Compute regression and classification targets for anchors in a
+ single image.
+
+        This method is the same as `AnchorHead._get_targets_single()`.
+ """
+        assert unmap_outputs, 'We must map outputs back to the original ' \
+            'set of anchors in PAAhead'
+ return super(ATSSHead, self)._get_targets_single(
+ flat_anchors,
+ valid_flags,
+ gt_bboxes,
+ gt_bboxes_ignore,
+ gt_labels,
+ img_meta,
+ label_channels=1,
+ unmap_outputs=True)
+
+ def _get_bboxes(self,
+ cls_scores,
+ bbox_preds,
+ iou_preds,
+ mlvl_anchors,
+ img_shapes,
+ scale_factors,
+ cfg,
+ rescale=False,
+ with_nms=True):
+ """Transform outputs for a single batch item into labeled boxes.
+
+        This method is almost the same as `ATSSHead._get_bboxes()`.
+        We use sqrt(iou_preds * cls_scores) in the NMS process instead of just
+        cls_scores. Besides, score voting is used when ``score_voting``
+        is set to True.
+ """
+ assert with_nms, 'PAA only supports "with_nms=True" now'
+ assert len(cls_scores) == len(bbox_preds) == len(mlvl_anchors)
+ batch_size = cls_scores[0].shape[0]
+
+ mlvl_bboxes = []
+ mlvl_scores = []
+ mlvl_iou_preds = []
+ for cls_score, bbox_pred, iou_preds, anchors in zip(
+ cls_scores, bbox_preds, iou_preds, mlvl_anchors):
+ assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
+
+ scores = cls_score.permute(0, 2, 3, 1).reshape(
+ batch_size, -1, self.cls_out_channels).sigmoid()
+ bbox_pred = bbox_pred.permute(0, 2, 3,
+ 1).reshape(batch_size, -1, 4)
+ iou_preds = iou_preds.permute(0, 2, 3, 1).reshape(batch_size,
+ -1).sigmoid()
+
+ nms_pre = cfg.get('nms_pre', -1)
+ if nms_pre > 0 and scores.shape[1] > nms_pre:
+ max_scores, _ = (scores * iou_preds[..., None]).sqrt().max(-1)
+ _, topk_inds = max_scores.topk(nms_pre)
+ batch_inds = torch.arange(batch_size).view(
+ -1, 1).expand_as(topk_inds).long()
+ anchors = anchors[topk_inds, :]
+ bbox_pred = bbox_pred[batch_inds, topk_inds, :]
+ scores = scores[batch_inds, topk_inds, :]
+ iou_preds = iou_preds[batch_inds, topk_inds]
+ else:
+ anchors = anchors.expand_as(bbox_pred)
+
+ bboxes = self.bbox_coder.decode(
+ anchors, bbox_pred, max_shape=img_shapes)
+ mlvl_bboxes.append(bboxes)
+ mlvl_scores.append(scores)
+ mlvl_iou_preds.append(iou_preds)
+
+ batch_mlvl_bboxes = torch.cat(mlvl_bboxes, dim=1)
+ if rescale:
+ batch_mlvl_bboxes /= batch_mlvl_bboxes.new_tensor(
+ scale_factors).unsqueeze(1)
+ batch_mlvl_scores = torch.cat(mlvl_scores, dim=1)
+ # Add a dummy background class to the backend when using sigmoid
+        # note that we set FG labels to [0, num_class-1] since mmdet v2.0
+ # BG cat_id: num_class
+ padding = batch_mlvl_scores.new_zeros(batch_size,
+ batch_mlvl_scores.shape[1], 1)
+ batch_mlvl_scores = torch.cat([batch_mlvl_scores, padding], dim=-1)
+ batch_mlvl_iou_preds = torch.cat(mlvl_iou_preds, dim=1)
+ batch_mlvl_nms_scores = (batch_mlvl_scores *
+ batch_mlvl_iou_preds[..., None]).sqrt()
+
+ det_results = []
+ for (mlvl_bboxes, mlvl_scores) in zip(batch_mlvl_bboxes,
+ batch_mlvl_nms_scores):
+ det_bbox, det_label = multiclass_nms(
+ mlvl_bboxes,
+ mlvl_scores,
+ cfg.score_thr,
+ cfg.nms,
+ cfg.max_per_img,
+ score_factors=None)
+ if self.with_score_voting and len(det_bbox) > 0:
+ det_bbox, det_label = self.score_voting(
+ det_bbox, det_label, mlvl_bboxes, mlvl_scores,
+ cfg.score_thr)
+ det_results.append(tuple([det_bbox, det_label]))
+
+ return det_results
+
+ def score_voting(self, det_bboxes, det_labels, mlvl_bboxes,
+ mlvl_nms_scores, score_thr):
+ """Implementation of score voting method works on each remaining boxes
+ after NMS procedure.
+
+ Args:
+ det_bboxes (Tensor): Remaining boxes after NMS procedure,
+ with shape (k, 5), each dimension means
+ (x1, y1, x2, y2, score).
+ det_labels (Tensor): The label of remaining boxes, with shape
+                (k, 1). Labels are 0-based.
+ mlvl_bboxes (Tensor): All boxes before the NMS procedure,
+ with shape (num_anchors,4).
+            mlvl_nms_scores (Tensor): The scores of all boxes used in the
+                NMS procedure, with shape (num_anchors, num_class).
+            score_thr (float): The score threshold of bboxes.
+
+ Returns:
+ tuple: Usually returns a tuple containing voting results.
+
+ - det_bboxes_voted (Tensor): Remaining boxes after
+ score voting procedure, with shape (k, 5), each
+ dimension means (x1, y1, x2, y2, score).
+ - det_labels_voted (Tensor): Label of remaining bboxes
+ after voting, with shape (num_anchors,).
+ """
+ candidate_mask = mlvl_nms_scores > score_thr
+ candidate_mask_nonzeros = candidate_mask.nonzero()
+ candidate_inds = candidate_mask_nonzeros[:, 0]
+ candidate_labels = candidate_mask_nonzeros[:, 1]
+ candidate_bboxes = mlvl_bboxes[candidate_inds]
+ candidate_scores = mlvl_nms_scores[candidate_mask]
+ det_bboxes_voted = []
+ det_labels_voted = []
+ for cls in range(self.cls_out_channels):
+ candidate_cls_mask = candidate_labels == cls
+ if not candidate_cls_mask.any():
+ continue
+ candidate_cls_scores = candidate_scores[candidate_cls_mask]
+ candidate_cls_bboxes = candidate_bboxes[candidate_cls_mask]
+ det_cls_mask = det_labels == cls
+ det_cls_bboxes = det_bboxes[det_cls_mask].view(
+ -1, det_bboxes.size(-1))
+ det_candidate_ious = bbox_overlaps(det_cls_bboxes[:, :4],
+ candidate_cls_bboxes)
+ for det_ind in range(len(det_cls_bboxes)):
+ single_det_ious = det_candidate_ious[det_ind]
+ pos_ious_mask = single_det_ious > 0.01
+ pos_ious = single_det_ious[pos_ious_mask]
+ pos_bboxes = candidate_cls_bboxes[pos_ious_mask]
+ pos_scores = candidate_cls_scores[pos_ious_mask]
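+                # weight each candidate by a Gaussian of its IoU gap to the
+                # detected box (temperature 0.025) times its own score, then
+                # average the candidate boxes with these weights below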
+ pis = (torch.exp(-(1 - pos_ious)**2 / 0.025) *
+ pos_scores)[:, None]
+ voted_box = torch.sum(
+ pis * pos_bboxes, dim=0) / torch.sum(
+ pis, dim=0)
+ voted_score = det_cls_bboxes[det_ind][-1:][None, :]
+ det_bboxes_voted.append(
+ torch.cat((voted_box[None, :], voted_score), dim=1))
+ det_labels_voted.append(cls)
+
+ det_bboxes_voted = torch.cat(det_bboxes_voted, dim=0)
+ det_labels_voted = det_labels.new_tensor(det_labels_voted)
+ return det_bboxes_voted, det_labels_voted
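+
+
+if __name__ == '__main__':
+    # Illustrative sketch of the reassignment idea in paa_reassign /
+    # gmm_separation_scheme above: fit a two-component GMM to 1-D candidate
+    # losses and keep the low-loss component as positives. The synthetic
+    # losses below are assumptions for demonstration only.
+    import numpy as np
+    import sklearn.mixture as skm
+
+    rng = np.random.RandomState(0)
+    losses = np.concatenate([rng.normal(0.2, 0.05, 20),
+                             rng.normal(1.5, 0.20, 20)]).reshape(-1, 1)
+    losses.sort(axis=0)
+    gmm = skm.GaussianMixture(
+        2,
+        weights_init=np.array([0.5, 0.5]),
+        means_init=np.array([[losses.min()], [losses.max()]]),
+        precisions_init=np.ones((2, 1)),  # matches covariance_type='diag'
+        covariance_type='diag')
+    gmm.fit(losses)
+    assignment = gmm.predict(losses)  # component 0 ~ the low-loss (positive) side
+    print(int((assignment == 0).sum()), 'of', len(losses), 'kept as positives')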
diff --git a/annotator/uniformer/mmdet_null/models/dense_heads/pisa_retinanet_head.py b/annotator/uniformer/mmdet_null/models/dense_heads/pisa_retinanet_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..bd87b9aeb07e05ff94b444ac8999eca3f616711a
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/dense_heads/pisa_retinanet_head.py
@@ -0,0 +1,154 @@
+import torch
+from mmcv.runner import force_fp32
+
+from mmdet.core import images_to_levels
+from ..builder import HEADS
+from ..losses import carl_loss, isr_p
+from .retina_head import RetinaHead
+
+
+@HEADS.register_module()
+class PISARetinaHead(RetinaHead):
+ """PISA Retinanet Head.
+
+    The head has the same structure as the RetinaNet head, but differs in two
+    aspects:
+ 1. Importance-based Sample Reweighting Positive (ISR-P) is applied to
+ change the positive loss weights.
+ 2. Classification-aware regression loss is adopted as a third loss.
+ """
+
+ @force_fp32(apply_to=('cls_scores', 'bbox_preds'))
+ def loss(self,
+ cls_scores,
+ bbox_preds,
+ gt_bboxes,
+ gt_labels,
+ img_metas,
+ gt_bboxes_ignore=None):
+ """Compute losses of the head.
+
+ Args:
+ cls_scores (list[Tensor]): Box scores for each scale level
+ Has shape (N, num_anchors * num_classes, H, W)
+ bbox_preds (list[Tensor]): Box energies / deltas for each scale
+ level with shape (N, num_anchors * 4, H, W)
+ gt_bboxes (list[Tensor]): Ground truth bboxes of each image
+ with shape (num_obj, 4).
+            gt_labels (list[Tensor]): Ground truth labels of each image
+                with shape (num_obj,).
+ img_metas (list[dict]): Meta information of each image, e.g.,
+ image size, scaling factor, etc.
+ gt_bboxes_ignore (list[Tensor]): Ignored gt bboxes of each image.
+ Default: None.
+
+ Returns:
+            dict: Loss dict, comprising classification loss, regression loss
+                and CARL loss.
+ """
+ featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
+ assert len(featmap_sizes) == self.anchor_generator.num_levels
+
+ device = cls_scores[0].device
+
+ anchor_list, valid_flag_list = self.get_anchors(
+ featmap_sizes, img_metas, device=device)
+ label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1
+ cls_reg_targets = self.get_targets(
+ anchor_list,
+ valid_flag_list,
+ gt_bboxes,
+ img_metas,
+ gt_bboxes_ignore_list=gt_bboxes_ignore,
+ gt_labels_list=gt_labels,
+ label_channels=label_channels,
+ return_sampling_results=True)
+ if cls_reg_targets is None:
+ return None
+ (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,
+ num_total_pos, num_total_neg, sampling_results_list) = cls_reg_targets
+ num_total_samples = (
+ num_total_pos + num_total_neg if self.sampling else num_total_pos)
+
+ # anchor number of multi levels
+ num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]
+ # concat all level anchors and flags to a single tensor
+ concat_anchor_list = []
+ for i in range(len(anchor_list)):
+ concat_anchor_list.append(torch.cat(anchor_list[i]))
+ all_anchor_list = images_to_levels(concat_anchor_list,
+ num_level_anchors)
+
+ num_imgs = len(img_metas)
+ flatten_cls_scores = [
+ cls_score.permute(0, 2, 3, 1).reshape(num_imgs, -1, label_channels)
+ for cls_score in cls_scores
+ ]
+ flatten_cls_scores = torch.cat(
+ flatten_cls_scores, dim=1).reshape(-1,
+ flatten_cls_scores[0].size(-1))
+ flatten_bbox_preds = [
+ bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, 4)
+ for bbox_pred in bbox_preds
+ ]
+ flatten_bbox_preds = torch.cat(
+ flatten_bbox_preds, dim=1).view(-1, flatten_bbox_preds[0].size(-1))
+ flatten_labels = torch.cat(labels_list, dim=1).reshape(-1)
+ flatten_label_weights = torch.cat(
+ label_weights_list, dim=1).reshape(-1)
+ flatten_anchors = torch.cat(all_anchor_list, dim=1).reshape(-1, 4)
+ flatten_bbox_targets = torch.cat(
+ bbox_targets_list, dim=1).reshape(-1, 4)
+ flatten_bbox_weights = torch.cat(
+ bbox_weights_list, dim=1).reshape(-1, 4)
+
+ # Apply ISR-P
+ isr_cfg = self.train_cfg.get('isr', None)
+ if isr_cfg is not None:
+ all_targets = (flatten_labels, flatten_label_weights,
+ flatten_bbox_targets, flatten_bbox_weights)
+ with torch.no_grad():
+ all_targets = isr_p(
+ flatten_cls_scores,
+ flatten_bbox_preds,
+ all_targets,
+ flatten_anchors,
+ sampling_results_list,
+ bbox_coder=self.bbox_coder,
+ loss_cls=self.loss_cls,
+ num_class=self.num_classes,
+ **self.train_cfg.isr)
+ (flatten_labels, flatten_label_weights, flatten_bbox_targets,
+ flatten_bbox_weights) = all_targets
+
+        # For convenience we compute the loss once instead of separating it by
+        # FPN level, so that we don't need to split the weights by level again.
+        # The result should be the same.
+ losses_cls = self.loss_cls(
+ flatten_cls_scores,
+ flatten_labels,
+ flatten_label_weights,
+ avg_factor=num_total_samples)
+ losses_bbox = self.loss_bbox(
+ flatten_bbox_preds,
+ flatten_bbox_targets,
+ flatten_bbox_weights,
+ avg_factor=num_total_samples)
+ loss_dict = dict(loss_cls=losses_cls, loss_bbox=losses_bbox)
+
+ # CARL Loss
+ carl_cfg = self.train_cfg.get('carl', None)
+ if carl_cfg is not None:
+ loss_carl = carl_loss(
+ flatten_cls_scores,
+ flatten_labels,
+ flatten_bbox_preds,
+ flatten_bbox_targets,
+ self.loss_bbox,
+ **self.train_cfg.carl,
+ avg_factor=num_total_pos,
+ sigmoid=True,
+ num_class=self.num_classes)
+ loss_dict.update(loss_carl)
+
+ return loss_dict
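+
+
+if __name__ == '__main__':
+    # Illustrative sketch of the flattening pattern used in loss() above:
+    # per-level maps of shape (N, A*C, H, W) are permuted and reshaped so the
+    # anchors of every level can be fed to the loss (and to isr_p / carl_loss)
+    # in a single flat call. The sizes below are assumptions.
+    import torch
+
+    N, A, C = 2, 9, 80
+    level_maps = [torch.randn(N, A * C, 32, 32), torch.randn(N, A * C, 16, 16)]
+    flat = [m.permute(0, 2, 3, 1).reshape(N, -1, C) for m in level_maps]
+    flat = torch.cat(flat, dim=1).reshape(-1, C)
+    print(flat.shape)  # torch.Size([23040, 80]) = 2 * (32*32 + 16*16) * 9 rows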
diff --git a/annotator/uniformer/mmdet_null/models/dense_heads/pisa_ssd_head.py b/annotator/uniformer/mmdet_null/models/dense_heads/pisa_ssd_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..90ef3c83ed62d8346c8daef01f18ad7bd236623c
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/dense_heads/pisa_ssd_head.py
@@ -0,0 +1,139 @@
+import torch
+
+from mmdet.core import multi_apply
+from ..builder import HEADS
+from ..losses import CrossEntropyLoss, SmoothL1Loss, carl_loss, isr_p
+from .ssd_head import SSDHead
+
+
+# TODO: add loss evaluator for SSD
+@HEADS.register_module()
+class PISASSDHead(SSDHead):
+
+ def loss(self,
+ cls_scores,
+ bbox_preds,
+ gt_bboxes,
+ gt_labels,
+ img_metas,
+ gt_bboxes_ignore=None):
+ """Compute losses of the head.
+
+ Args:
+ cls_scores (list[Tensor]): Box scores for each scale level
+ Has shape (N, num_anchors * num_classes, H, W)
+ bbox_preds (list[Tensor]): Box energies / deltas for each scale
+ level with shape (N, num_anchors * 4, H, W)
+ gt_bboxes (list[Tensor]): Ground truth bboxes of each image
+ with shape (num_obj, 4).
+            gt_labels (list[Tensor]): Ground truth labels of each image
+                with shape (num_obj,).
+ img_metas (list[dict]): Meta information of each image, e.g.,
+ image size, scaling factor, etc.
+ gt_bboxes_ignore (list[Tensor]): Ignored gt bboxes of each image.
+ Default: None.
+
+ Returns:
+            dict: Loss dict, comprising classification loss, regression loss
+                and CARL loss.
+ """
+ featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
+ assert len(featmap_sizes) == self.anchor_generator.num_levels
+
+ device = cls_scores[0].device
+
+ anchor_list, valid_flag_list = self.get_anchors(
+ featmap_sizes, img_metas, device=device)
+ cls_reg_targets = self.get_targets(
+ anchor_list,
+ valid_flag_list,
+ gt_bboxes,
+ img_metas,
+ gt_bboxes_ignore_list=gt_bboxes_ignore,
+ gt_labels_list=gt_labels,
+ label_channels=1,
+ unmap_outputs=False,
+ return_sampling_results=True)
+ if cls_reg_targets is None:
+ return None
+ (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,
+ num_total_pos, num_total_neg, sampling_results_list) = cls_reg_targets
+
+ num_images = len(img_metas)
+ all_cls_scores = torch.cat([
+ s.permute(0, 2, 3, 1).reshape(
+ num_images, -1, self.cls_out_channels) for s in cls_scores
+ ], 1)
+ all_labels = torch.cat(labels_list, -1).view(num_images, -1)
+ all_label_weights = torch.cat(label_weights_list,
+ -1).view(num_images, -1)
+ all_bbox_preds = torch.cat([
+ b.permute(0, 2, 3, 1).reshape(num_images, -1, 4)
+ for b in bbox_preds
+ ], -2)
+ all_bbox_targets = torch.cat(bbox_targets_list,
+ -2).view(num_images, -1, 4)
+ all_bbox_weights = torch.cat(bbox_weights_list,
+ -2).view(num_images, -1, 4)
+
+ # concat all level anchors to a single tensor
+ all_anchors = []
+ for i in range(num_images):
+ all_anchors.append(torch.cat(anchor_list[i]))
+
+ isr_cfg = self.train_cfg.get('isr', None)
+ all_targets = (all_labels.view(-1), all_label_weights.view(-1),
+ all_bbox_targets.view(-1,
+ 4), all_bbox_weights.view(-1, 4))
+ # apply ISR-P
+ if isr_cfg is not None:
+ all_targets = isr_p(
+ all_cls_scores.view(-1, all_cls_scores.size(-1)),
+ all_bbox_preds.view(-1, 4),
+ all_targets,
+ torch.cat(all_anchors),
+ sampling_results_list,
+ loss_cls=CrossEntropyLoss(),
+ bbox_coder=self.bbox_coder,
+ **self.train_cfg.isr,
+ num_class=self.num_classes)
+ (new_labels, new_label_weights, new_bbox_targets,
+ new_bbox_weights) = all_targets
+ all_labels = new_labels.view(all_labels.shape)
+ all_label_weights = new_label_weights.view(all_label_weights.shape)
+ all_bbox_targets = new_bbox_targets.view(all_bbox_targets.shape)
+ all_bbox_weights = new_bbox_weights.view(all_bbox_weights.shape)
+
+ # add CARL loss
+ carl_loss_cfg = self.train_cfg.get('carl', None)
+ if carl_loss_cfg is not None:
+ loss_carl = carl_loss(
+ all_cls_scores.view(-1, all_cls_scores.size(-1)),
+ all_targets[0],
+ all_bbox_preds.view(-1, 4),
+ all_targets[2],
+ SmoothL1Loss(beta=1.),
+ **self.train_cfg.carl,
+ avg_factor=num_total_pos,
+ num_class=self.num_classes)
+
+ # check NaN and Inf
+ assert torch.isfinite(all_cls_scores).all().item(), \
+ 'classification scores become infinite or NaN!'
+ assert torch.isfinite(all_bbox_preds).all().item(), \
+            'bbox predictions become infinite or NaN!'
+
+ losses_cls, losses_bbox = multi_apply(
+ self.loss_single,
+ all_cls_scores,
+ all_bbox_preds,
+ all_anchors,
+ all_labels,
+ all_label_weights,
+ all_bbox_targets,
+ all_bbox_weights,
+ num_total_samples=num_total_pos)
+ loss_dict = dict(loss_cls=losses_cls, loss_bbox=losses_bbox)
+ if carl_loss_cfg is not None:
+ loss_dict.update(loss_carl)
+ return loss_dict
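+
+
+if __name__ == '__main__':
+    # Conceptual sketch only, not mmdet's carl_loss: classification-aware
+    # regression re-weights each positive sample's regression loss by a
+    # normalised function of its classification score, so well-classified
+    # boxes are also pushed to localise well. The numbers are made up.
+    import torch
+
+    cls_score = torch.tensor([0.9, 0.5, 0.1])    # assumed per-positive scores
+    reg_loss = torch.tensor([0.30, 0.20, 0.40])  # assumed per-positive reg losses
+    weights = cls_score / cls_score.sum()        # one possible normalisation
+    print(float((weights * reg_loss).sum()))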
diff --git a/annotator/uniformer/mmdet_null/models/dense_heads/reppoints_head.py b/annotator/uniformer/mmdet_null/models/dense_heads/reppoints_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..499cc4f71c968704a40ab2bb7a6b22dd079d82de
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/dense_heads/reppoints_head.py
@@ -0,0 +1,763 @@
+import numpy as np
+import torch
+import torch.nn as nn
+from mmcv.cnn import ConvModule, bias_init_with_prob, normal_init
+from mmcv.ops import DeformConv2d
+
+from mmdet.core import (PointGenerator, build_assigner, build_sampler,
+ images_to_levels, multi_apply, multiclass_nms, unmap)
+from ..builder import HEADS, build_loss
+from .anchor_free_head import AnchorFreeHead
+
+
+@HEADS.register_module()
+class RepPointsHead(AnchorFreeHead):
+ """RepPoint head.
+
+ Args:
+ point_feat_channels (int): Number of channels of points features.
+ gradient_mul (float): The multiplier to gradients from
+ points refinement and recognition.
+ point_strides (Iterable): points strides.
+ point_base_scale (int): bbox scale for assigning labels.
+ loss_cls (dict): Config of classification loss.
+ loss_bbox_init (dict): Config of initial points loss.
+ loss_bbox_refine (dict): Config of points loss in refinement.
+        use_grid_points (bool): If True, use the bounding box representation;
+            the RepPoints are then represented as grid points on the bounding
+            box.
+ center_init (bool): Whether to use center point assignment.
+ transform_method (str): The methods to transform RepPoints to bbox.
+ """ # noqa: W605
+
+ def __init__(self,
+ num_classes,
+ in_channels,
+ point_feat_channels=256,
+ num_points=9,
+ gradient_mul=0.1,
+ point_strides=[8, 16, 32, 64, 128],
+ point_base_scale=4,
+ loss_cls=dict(
+ type='FocalLoss',
+ use_sigmoid=True,
+ gamma=2.0,
+ alpha=0.25,
+ loss_weight=1.0),
+ loss_bbox_init=dict(
+ type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=0.5),
+ loss_bbox_refine=dict(
+ type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0),
+ use_grid_points=False,
+ center_init=True,
+ transform_method='moment',
+ moment_mul=0.01,
+ **kwargs):
+ self.num_points = num_points
+ self.point_feat_channels = point_feat_channels
+ self.use_grid_points = use_grid_points
+ self.center_init = center_init
+
+ # we use deform conv to extract points features
+ self.dcn_kernel = int(np.sqrt(num_points))
+ self.dcn_pad = int((self.dcn_kernel - 1) / 2)
+ assert self.dcn_kernel * self.dcn_kernel == num_points, \
+ 'The points number should be a square number.'
+ assert self.dcn_kernel % 2 == 1, \
+ 'The points number should be an odd square number.'
+ dcn_base = np.arange(-self.dcn_pad,
+ self.dcn_pad + 1).astype(np.float64)
+ dcn_base_y = np.repeat(dcn_base, self.dcn_kernel)
+ dcn_base_x = np.tile(dcn_base, self.dcn_kernel)
+ dcn_base_offset = np.stack([dcn_base_y, dcn_base_x], axis=1).reshape(
+ (-1))
+ self.dcn_base_offset = torch.tensor(dcn_base_offset).view(1, -1, 1, 1)
+
+ super().__init__(num_classes, in_channels, loss_cls=loss_cls, **kwargs)
+
+ self.gradient_mul = gradient_mul
+ self.point_base_scale = point_base_scale
+ self.point_strides = point_strides
+ self.point_generators = [PointGenerator() for _ in self.point_strides]
+
+ self.sampling = loss_cls['type'] not in ['FocalLoss']
+ if self.train_cfg:
+ self.init_assigner = build_assigner(self.train_cfg.init.assigner)
+ self.refine_assigner = build_assigner(
+ self.train_cfg.refine.assigner)
+ # use PseudoSampler when sampling is False
+ if self.sampling and hasattr(self.train_cfg, 'sampler'):
+ sampler_cfg = self.train_cfg.sampler
+ else:
+ sampler_cfg = dict(type='PseudoSampler')
+ self.sampler = build_sampler(sampler_cfg, context=self)
+ self.transform_method = transform_method
+ if self.transform_method == 'moment':
+ self.moment_transfer = nn.Parameter(
+ data=torch.zeros(2), requires_grad=True)
+ self.moment_mul = moment_mul
+
+ self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False)
+ if self.use_sigmoid_cls:
+ self.cls_out_channels = self.num_classes
+ else:
+ self.cls_out_channels = self.num_classes + 1
+ self.loss_bbox_init = build_loss(loss_bbox_init)
+ self.loss_bbox_refine = build_loss(loss_bbox_refine)
+
+ def _init_layers(self):
+ """Initialize layers of the head."""
+ self.relu = nn.ReLU(inplace=True)
+ self.cls_convs = nn.ModuleList()
+ self.reg_convs = nn.ModuleList()
+ for i in range(self.stacked_convs):
+ chn = self.in_channels if i == 0 else self.feat_channels
+ self.cls_convs.append(
+ ConvModule(
+ chn,
+ self.feat_channels,
+ 3,
+ stride=1,
+ padding=1,
+ conv_cfg=self.conv_cfg,
+ norm_cfg=self.norm_cfg))
+ self.reg_convs.append(
+ ConvModule(
+ chn,
+ self.feat_channels,
+ 3,
+ stride=1,
+ padding=1,
+ conv_cfg=self.conv_cfg,
+ norm_cfg=self.norm_cfg))
+ pts_out_dim = 4 if self.use_grid_points else 2 * self.num_points
+ self.reppoints_cls_conv = DeformConv2d(self.feat_channels,
+ self.point_feat_channels,
+ self.dcn_kernel, 1,
+ self.dcn_pad)
+ self.reppoints_cls_out = nn.Conv2d(self.point_feat_channels,
+ self.cls_out_channels, 1, 1, 0)
+ self.reppoints_pts_init_conv = nn.Conv2d(self.feat_channels,
+ self.point_feat_channels, 3,
+ 1, 1)
+ self.reppoints_pts_init_out = nn.Conv2d(self.point_feat_channels,
+ pts_out_dim, 1, 1, 0)
+ self.reppoints_pts_refine_conv = DeformConv2d(self.feat_channels,
+ self.point_feat_channels,
+ self.dcn_kernel, 1,
+ self.dcn_pad)
+ self.reppoints_pts_refine_out = nn.Conv2d(self.point_feat_channels,
+ pts_out_dim, 1, 1, 0)
+
+ def init_weights(self):
+ """Initialize weights of the head."""
+ for m in self.cls_convs:
+ normal_init(m.conv, std=0.01)
+ for m in self.reg_convs:
+ normal_init(m.conv, std=0.01)
+ bias_cls = bias_init_with_prob(0.01)
+ normal_init(self.reppoints_cls_conv, std=0.01)
+ normal_init(self.reppoints_cls_out, std=0.01, bias=bias_cls)
+ normal_init(self.reppoints_pts_init_conv, std=0.01)
+ normal_init(self.reppoints_pts_init_out, std=0.01)
+ normal_init(self.reppoints_pts_refine_conv, std=0.01)
+ normal_init(self.reppoints_pts_refine_out, std=0.01)
+
+ def points2bbox(self, pts, y_first=True):
+ """Converting the points set into bounding box.
+
+ :param pts: the input points sets (fields), each points
+ set (fields) is represented as 2n scalar.
+ :param y_first: if y_first=True, the point set is represented as
+ [y1, x1, y2, x2 ... yn, xn], otherwise the point set is
+ represented as [x1, y1, x2, y2 ... xn, yn].
+ :return: each points set is converting to a bbox [x1, y1, x2, y2].
+ """
+ pts_reshape = pts.view(pts.shape[0], -1, 2, *pts.shape[2:])
+ pts_y = pts_reshape[:, :, 0, ...] if y_first else pts_reshape[:, :, 1,
+ ...]
+ pts_x = pts_reshape[:, :, 1, ...] if y_first else pts_reshape[:, :, 0,
+ ...]
+ if self.transform_method == 'minmax':
+ bbox_left = pts_x.min(dim=1, keepdim=True)[0]
+ bbox_right = pts_x.max(dim=1, keepdim=True)[0]
+ bbox_up = pts_y.min(dim=1, keepdim=True)[0]
+ bbox_bottom = pts_y.max(dim=1, keepdim=True)[0]
+ bbox = torch.cat([bbox_left, bbox_up, bbox_right, bbox_bottom],
+ dim=1)
+ elif self.transform_method == 'partial_minmax':
+ pts_y = pts_y[:, :4, ...]
+ pts_x = pts_x[:, :4, ...]
+ bbox_left = pts_x.min(dim=1, keepdim=True)[0]
+ bbox_right = pts_x.max(dim=1, keepdim=True)[0]
+ bbox_up = pts_y.min(dim=1, keepdim=True)[0]
+ bbox_bottom = pts_y.max(dim=1, keepdim=True)[0]
+ bbox = torch.cat([bbox_left, bbox_up, bbox_right, bbox_bottom],
+ dim=1)
+ elif self.transform_method == 'moment':
+ pts_y_mean = pts_y.mean(dim=1, keepdim=True)
+ pts_x_mean = pts_x.mean(dim=1, keepdim=True)
+ pts_y_std = torch.std(pts_y - pts_y_mean, dim=1, keepdim=True)
+ pts_x_std = torch.std(pts_x - pts_x_mean, dim=1, keepdim=True)
+ moment_transfer = (self.moment_transfer * self.moment_mul) + (
+ self.moment_transfer.detach() * (1 - self.moment_mul))
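+            # The blend keeps the forward value equal to moment_transfer but
+            # scales its gradient by moment_mul, so the learnable moments are
+            # updated slowly relative to the rest of the head.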
+ moment_width_transfer = moment_transfer[0]
+ moment_height_transfer = moment_transfer[1]
+ half_width = pts_x_std * torch.exp(moment_width_transfer)
+ half_height = pts_y_std * torch.exp(moment_height_transfer)
+ bbox = torch.cat([
+ pts_x_mean - half_width, pts_y_mean - half_height,
+ pts_x_mean + half_width, pts_y_mean + half_height
+ ],
+ dim=1)
+ else:
+ raise NotImplementedError
+ return bbox
+
+ def gen_grid_from_reg(self, reg, previous_boxes):
+ """Base on the previous bboxes and regression values, we compute the
+ regressed bboxes and generate the grids on the bboxes.
+
+ :param reg: the regression value to previous bboxes.
+ :param previous_boxes: previous bboxes.
+ :return: generate grids on the regressed bboxes.
+ """
+ b, _, h, w = reg.shape
+ bxy = (previous_boxes[:, :2, ...] + previous_boxes[:, 2:, ...]) / 2.
+ bwh = (previous_boxes[:, 2:, ...] -
+ previous_boxes[:, :2, ...]).clamp(min=1e-6)
+ grid_topleft = bxy + bwh * reg[:, :2, ...] - 0.5 * bwh * torch.exp(
+ reg[:, 2:, ...])
+ grid_wh = bwh * torch.exp(reg[:, 2:, ...])
+ grid_left = grid_topleft[:, [0], ...]
+ grid_top = grid_topleft[:, [1], ...]
+ grid_width = grid_wh[:, [0], ...]
+ grid_height = grid_wh[:, [1], ...]
+        interval = torch.linspace(0., 1., self.dcn_kernel).view(
+            1, self.dcn_kernel, 1, 1).type_as(reg)
+        grid_x = grid_left + grid_width * interval
+        grid_x = grid_x.unsqueeze(1).repeat(1, self.dcn_kernel, 1, 1, 1)
+        grid_x = grid_x.view(b, -1, h, w)
+        grid_y = grid_top + grid_height * interval
+ grid_y = grid_y.unsqueeze(2).repeat(1, 1, self.dcn_kernel, 1, 1)
+ grid_y = grid_y.view(b, -1, h, w)
+ grid_yx = torch.stack([grid_y, grid_x], dim=2)
+ grid_yx = grid_yx.view(b, -1, h, w)
+ regressed_bbox = torch.cat([
+ grid_left, grid_top, grid_left + grid_width, grid_top + grid_height
+ ], 1)
+ return grid_yx, regressed_bbox
+
+ def forward(self, feats):
+ return multi_apply(self.forward_single, feats)
+
+ def forward_single(self, x):
+ """Forward feature map of a single FPN level."""
+ dcn_base_offset = self.dcn_base_offset.type_as(x)
+        # If center_init is used, the initial reppoints start from the center
+        # points. If the bounding-box representation is used, the initial
+        # reppoints form a regular grid placed on a pre-defined bbox.
+ if self.use_grid_points or not self.center_init:
+ scale = self.point_base_scale / 2
+ points_init = dcn_base_offset / dcn_base_offset.max() * scale
+ bbox_init = x.new_tensor([-scale, -scale, scale,
+ scale]).view(1, 4, 1, 1)
+ else:
+ points_init = 0
+ cls_feat = x
+ pts_feat = x
+ for cls_conv in self.cls_convs:
+ cls_feat = cls_conv(cls_feat)
+ for reg_conv in self.reg_convs:
+ pts_feat = reg_conv(pts_feat)
+ # initialize reppoints
+ pts_out_init = self.reppoints_pts_init_out(
+ self.relu(self.reppoints_pts_init_conv(pts_feat)))
+ if self.use_grid_points:
+ pts_out_init, bbox_out_init = self.gen_grid_from_reg(
+ pts_out_init, bbox_init.detach())
+ else:
+ pts_out_init = pts_out_init + points_init
+ # refine and classify reppoints
+ pts_out_init_grad_mul = (1 - self.gradient_mul) * pts_out_init.detach(
+ ) + self.gradient_mul * pts_out_init
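+        # DeformConv2d expects offsets relative to its regular kernel grid,
+        # so the base grid is subtracted from the predicted point offsets.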
+ dcn_offset = pts_out_init_grad_mul - dcn_base_offset
+ cls_out = self.reppoints_cls_out(
+ self.relu(self.reppoints_cls_conv(cls_feat, dcn_offset)))
+ pts_out_refine = self.reppoints_pts_refine_out(
+ self.relu(self.reppoints_pts_refine_conv(pts_feat, dcn_offset)))
+ if self.use_grid_points:
+ pts_out_refine, bbox_out_refine = self.gen_grid_from_reg(
+ pts_out_refine, bbox_out_init.detach())
+ else:
+ pts_out_refine = pts_out_refine + pts_out_init.detach()
+ return cls_out, pts_out_init, pts_out_refine
+
+ def get_points(self, featmap_sizes, img_metas, device):
+ """Get points according to feature map sizes.
+
+ Args:
+ featmap_sizes (list[tuple]): Multi-level feature map sizes.
+            img_metas (list[dict]): Image meta info.
+            device (torch.device | str): Device for the returned tensors.
+
+ Returns:
+ tuple: points of each image, valid flags of each image
+ """
+ num_imgs = len(img_metas)
+ num_levels = len(featmap_sizes)
+
+        # since the feature map sizes of all images are the same, we only
+        # compute the point centers once
+ multi_level_points = []
+ for i in range(num_levels):
+ points = self.point_generators[i].grid_points(
+ featmap_sizes[i], self.point_strides[i], device)
+ multi_level_points.append(points)
+ points_list = [[point.clone() for point in multi_level_points]
+ for _ in range(num_imgs)]
+
+ # for each image, we compute valid flags of multi level grids
+ valid_flag_list = []
+ for img_id, img_meta in enumerate(img_metas):
+ multi_level_flags = []
+ for i in range(num_levels):
+ point_stride = self.point_strides[i]
+ feat_h, feat_w = featmap_sizes[i]
+ h, w = img_meta['pad_shape'][:2]
+ valid_feat_h = min(int(np.ceil(h / point_stride)), feat_h)
+ valid_feat_w = min(int(np.ceil(w / point_stride)), feat_w)
+ flags = self.point_generators[i].valid_flags(
+ (feat_h, feat_w), (valid_feat_h, valid_feat_w), device)
+ multi_level_flags.append(flags)
+ valid_flag_list.append(multi_level_flags)
+
+ return points_list, valid_flag_list
+
+ def centers_to_bboxes(self, point_list):
+ """Get bboxes according to center points.
+
+ Only used in :class:`MaxIoUAssigner`.
+ """
+ bbox_list = []
+ for i_img, point in enumerate(point_list):
+ bbox = []
+ for i_lvl in range(len(self.point_strides)):
+ scale = self.point_base_scale * self.point_strides[i_lvl] * 0.5
+ bbox_shift = torch.Tensor([-scale, -scale, scale,
+ scale]).view(1, 4).type_as(point[0])
+ bbox_center = torch.cat(
+ [point[i_lvl][:, :2], point[i_lvl][:, :2]], dim=1)
+ bbox.append(bbox_center + bbox_shift)
+ bbox_list.append(bbox)
+ return bbox_list
+
+ def offset_to_pts(self, center_list, pred_list):
+ """Change from point offset to point coordinate."""
+ pts_list = []
+ for i_lvl in range(len(self.point_strides)):
+ pts_lvl = []
+ for i_img in range(len(center_list)):
+ pts_center = center_list[i_img][i_lvl][:, :2].repeat(
+ 1, self.num_points)
+ pts_shift = pred_list[i_lvl][i_img]
+ yx_pts_shift = pts_shift.permute(1, 2, 0).view(
+ -1, 2 * self.num_points)
+ y_pts_shift = yx_pts_shift[..., 0::2]
+ x_pts_shift = yx_pts_shift[..., 1::2]
+ xy_pts_shift = torch.stack([x_pts_shift, y_pts_shift], -1)
+ xy_pts_shift = xy_pts_shift.view(*yx_pts_shift.shape[:-1], -1)
+ pts = xy_pts_shift * self.point_strides[i_lvl] + pts_center
+ pts_lvl.append(pts)
+ pts_lvl = torch.stack(pts_lvl, 0)
+ pts_list.append(pts_lvl)
+ return pts_list
+
+ def _point_target_single(self,
+ flat_proposals,
+ valid_flags,
+ gt_bboxes,
+ gt_bboxes_ignore,
+ gt_labels,
+ label_channels=1,
+ stage='init',
+ unmap_outputs=True):
+ inside_flags = valid_flags
+ if not inside_flags.any():
+ return (None, ) * 7
+ # assign gt and sample proposals
+ proposals = flat_proposals[inside_flags, :]
+
+ if stage == 'init':
+ assigner = self.init_assigner
+ pos_weight = self.train_cfg.init.pos_weight
+ else:
+ assigner = self.refine_assigner
+ pos_weight = self.train_cfg.refine.pos_weight
+ assign_result = assigner.assign(proposals, gt_bboxes, gt_bboxes_ignore,
+ None if self.sampling else gt_labels)
+ sampling_result = self.sampler.sample(assign_result, proposals,
+ gt_bboxes)
+
+ num_valid_proposals = proposals.shape[0]
+ bbox_gt = proposals.new_zeros([num_valid_proposals, 4])
+ pos_proposals = torch.zeros_like(proposals)
+ proposals_weights = proposals.new_zeros([num_valid_proposals, 4])
+ labels = proposals.new_full((num_valid_proposals, ),
+ self.num_classes,
+ dtype=torch.long)
+ label_weights = proposals.new_zeros(
+ num_valid_proposals, dtype=torch.float)
+
+ pos_inds = sampling_result.pos_inds
+ neg_inds = sampling_result.neg_inds
+ if len(pos_inds) > 0:
+ pos_gt_bboxes = sampling_result.pos_gt_bboxes
+ bbox_gt[pos_inds, :] = pos_gt_bboxes
+ pos_proposals[pos_inds, :] = proposals[pos_inds, :]
+ proposals_weights[pos_inds, :] = 1.0
+ if gt_labels is None:
+ # Only rpn gives gt_labels as None
+ # Foreground is the first class
+ labels[pos_inds] = 0
+ else:
+ labels[pos_inds] = gt_labels[
+ sampling_result.pos_assigned_gt_inds]
+ if pos_weight <= 0:
+ label_weights[pos_inds] = 1.0
+ else:
+ label_weights[pos_inds] = pos_weight
+ if len(neg_inds) > 0:
+ label_weights[neg_inds] = 1.0
+
+ # map up to original set of proposals
+ if unmap_outputs:
+ num_total_proposals = flat_proposals.size(0)
+ labels = unmap(labels, num_total_proposals, inside_flags)
+ label_weights = unmap(label_weights, num_total_proposals,
+ inside_flags)
+ bbox_gt = unmap(bbox_gt, num_total_proposals, inside_flags)
+ pos_proposals = unmap(pos_proposals, num_total_proposals,
+ inside_flags)
+ proposals_weights = unmap(proposals_weights, num_total_proposals,
+ inside_flags)
+
+ return (labels, label_weights, bbox_gt, pos_proposals,
+ proposals_weights, pos_inds, neg_inds)
+
+ def get_targets(self,
+ proposals_list,
+ valid_flag_list,
+ gt_bboxes_list,
+ img_metas,
+ gt_bboxes_ignore_list=None,
+ gt_labels_list=None,
+ stage='init',
+ label_channels=1,
+ unmap_outputs=True):
+ """Compute corresponding GT box and classification targets for
+ proposals.
+
+ Args:
+ proposals_list (list[list]): Multi level points/bboxes of each
+ image.
+ valid_flag_list (list[list]): Multi level valid flags of each
+ image.
+ gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image.
+ img_metas (list[dict]): Meta info of each image.
+ gt_bboxes_ignore_list (list[Tensor]): Ground truth bboxes to be
+ ignored.
+            gt_labels_list (list[Tensor]): Ground truth labels of each box.
+ stage (str): `init` or `refine`. Generate target for init stage or
+ refine stage
+ label_channels (int): Channel of label.
+ unmap_outputs (bool): Whether to map outputs back to the original
+ set of anchors.
+
+ Returns:
+ tuple:
+ - labels_list (list[Tensor]): Labels of each level.
+ - label_weights_list (list[Tensor]): Label weights of each level. # noqa: E501
+ - bbox_gt_list (list[Tensor]): Ground truth bbox of each level.
+ - proposal_list (list[Tensor]): Proposals(points/bboxes) of each level. # noqa: E501
+ - proposal_weights_list (list[Tensor]): Proposal weights of each level. # noqa: E501
+ - num_total_pos (int): Number of positive samples in all images. # noqa: E501
+ - num_total_neg (int): Number of negative samples in all images. # noqa: E501
+ """
+ assert stage in ['init', 'refine']
+ num_imgs = len(img_metas)
+ assert len(proposals_list) == len(valid_flag_list) == num_imgs
+
+ # points number of multi levels
+ num_level_proposals = [points.size(0) for points in proposals_list[0]]
+
+ # concat all level points and flags to a single tensor
+ for i in range(num_imgs):
+ assert len(proposals_list[i]) == len(valid_flag_list[i])
+ proposals_list[i] = torch.cat(proposals_list[i])
+ valid_flag_list[i] = torch.cat(valid_flag_list[i])
+
+ # compute targets for each image
+ if gt_bboxes_ignore_list is None:
+ gt_bboxes_ignore_list = [None for _ in range(num_imgs)]
+ if gt_labels_list is None:
+ gt_labels_list = [None for _ in range(num_imgs)]
+ (all_labels, all_label_weights, all_bbox_gt, all_proposals,
+ all_proposal_weights, pos_inds_list, neg_inds_list) = multi_apply(
+ self._point_target_single,
+ proposals_list,
+ valid_flag_list,
+ gt_bboxes_list,
+ gt_bboxes_ignore_list,
+ gt_labels_list,
+ stage=stage,
+ label_channels=label_channels,
+ unmap_outputs=unmap_outputs)
+ # no valid points
+ if any([labels is None for labels in all_labels]):
+ return None
+ # sampled points of all images
+ num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list])
+ num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list])
+ labels_list = images_to_levels(all_labels, num_level_proposals)
+ label_weights_list = images_to_levels(all_label_weights,
+ num_level_proposals)
+ bbox_gt_list = images_to_levels(all_bbox_gt, num_level_proposals)
+ proposals_list = images_to_levels(all_proposals, num_level_proposals)
+ proposal_weights_list = images_to_levels(all_proposal_weights,
+ num_level_proposals)
+ return (labels_list, label_weights_list, bbox_gt_list, proposals_list,
+ proposal_weights_list, num_total_pos, num_total_neg)
+
+ def loss_single(self, cls_score, pts_pred_init, pts_pred_refine, labels,
+ label_weights, bbox_gt_init, bbox_weights_init,
+ bbox_gt_refine, bbox_weights_refine, stride,
+ num_total_samples_init, num_total_samples_refine):
+ # classification loss
+ labels = labels.reshape(-1)
+ label_weights = label_weights.reshape(-1)
+ cls_score = cls_score.permute(0, 2, 3,
+ 1).reshape(-1, self.cls_out_channels)
+ cls_score = cls_score.contiguous()
+ loss_cls = self.loss_cls(
+ cls_score,
+ labels,
+ label_weights,
+ avg_factor=num_total_samples_refine)
+
+ # points loss
+ bbox_gt_init = bbox_gt_init.reshape(-1, 4)
+ bbox_weights_init = bbox_weights_init.reshape(-1, 4)
+ bbox_pred_init = self.points2bbox(
+ pts_pred_init.reshape(-1, 2 * self.num_points), y_first=False)
+ bbox_gt_refine = bbox_gt_refine.reshape(-1, 4)
+ bbox_weights_refine = bbox_weights_refine.reshape(-1, 4)
+ bbox_pred_refine = self.points2bbox(
+ pts_pred_refine.reshape(-1, 2 * self.num_points), y_first=False)
+ normalize_term = self.point_base_scale * stride
+ loss_pts_init = self.loss_bbox_init(
+ bbox_pred_init / normalize_term,
+ bbox_gt_init / normalize_term,
+ bbox_weights_init,
+ avg_factor=num_total_samples_init)
+ loss_pts_refine = self.loss_bbox_refine(
+ bbox_pred_refine / normalize_term,
+ bbox_gt_refine / normalize_term,
+ bbox_weights_refine,
+ avg_factor=num_total_samples_refine)
+ return loss_cls, loss_pts_init, loss_pts_refine
+
+ def loss(self,
+ cls_scores,
+ pts_preds_init,
+ pts_preds_refine,
+ gt_bboxes,
+ gt_labels,
+ img_metas,
+ gt_bboxes_ignore=None):
+ featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
+ assert len(featmap_sizes) == len(self.point_generators)
+ device = cls_scores[0].device
+ label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1
+
+ # target for initial stage
+ center_list, valid_flag_list = self.get_points(featmap_sizes,
+ img_metas, device)
+ pts_coordinate_preds_init = self.offset_to_pts(center_list,
+ pts_preds_init)
+ if self.train_cfg.init.assigner['type'] == 'PointAssigner':
+ # Assign target for center list
+ candidate_list = center_list
+ else:
+ # transform center list to bbox list and
+ # assign target for bbox list
+ bbox_list = self.centers_to_bboxes(center_list)
+ candidate_list = bbox_list
+ cls_reg_targets_init = self.get_targets(
+ candidate_list,
+ valid_flag_list,
+ gt_bboxes,
+ img_metas,
+ gt_bboxes_ignore_list=gt_bboxes_ignore,
+ gt_labels_list=gt_labels,
+ stage='init',
+ label_channels=label_channels)
+ (*_, bbox_gt_list_init, candidate_list_init, bbox_weights_list_init,
+ num_total_pos_init, num_total_neg_init) = cls_reg_targets_init
+ num_total_samples_init = (
+ num_total_pos_init +
+ num_total_neg_init if self.sampling else num_total_pos_init)
+
+ # target for refinement stage
+ center_list, valid_flag_list = self.get_points(featmap_sizes,
+ img_metas, device)
+ pts_coordinate_preds_refine = self.offset_to_pts(
+ center_list, pts_preds_refine)
+ bbox_list = []
+ for i_img, center in enumerate(center_list):
+ bbox = []
+ for i_lvl in range(len(pts_preds_refine)):
+ bbox_preds_init = self.points2bbox(
+ pts_preds_init[i_lvl].detach())
+ bbox_shift = bbox_preds_init * self.point_strides[i_lvl]
+ bbox_center = torch.cat(
+ [center[i_lvl][:, :2], center[i_lvl][:, :2]], dim=1)
+ bbox.append(bbox_center +
+ bbox_shift[i_img].permute(1, 2, 0).reshape(-1, 4))
+ bbox_list.append(bbox)
+ cls_reg_targets_refine = self.get_targets(
+ bbox_list,
+ valid_flag_list,
+ gt_bboxes,
+ img_metas,
+ gt_bboxes_ignore_list=gt_bboxes_ignore,
+ gt_labels_list=gt_labels,
+ stage='refine',
+ label_channels=label_channels)
+ (labels_list, label_weights_list, bbox_gt_list_refine,
+ candidate_list_refine, bbox_weights_list_refine, num_total_pos_refine,
+ num_total_neg_refine) = cls_reg_targets_refine
+ num_total_samples_refine = (
+ num_total_pos_refine +
+ num_total_neg_refine if self.sampling else num_total_pos_refine)
+
+ # compute loss
+ losses_cls, losses_pts_init, losses_pts_refine = multi_apply(
+ self.loss_single,
+ cls_scores,
+ pts_coordinate_preds_init,
+ pts_coordinate_preds_refine,
+ labels_list,
+ label_weights_list,
+ bbox_gt_list_init,
+ bbox_weights_list_init,
+ bbox_gt_list_refine,
+ bbox_weights_list_refine,
+ self.point_strides,
+ num_total_samples_init=num_total_samples_init,
+ num_total_samples_refine=num_total_samples_refine)
+ loss_dict_all = {
+ 'loss_cls': losses_cls,
+ 'loss_pts_init': losses_pts_init,
+ 'loss_pts_refine': losses_pts_refine
+ }
+ return loss_dict_all
+
+ def get_bboxes(self,
+ cls_scores,
+ pts_preds_init,
+ pts_preds_refine,
+ img_metas,
+ cfg=None,
+ rescale=False,
+ with_nms=True):
+ assert len(cls_scores) == len(pts_preds_refine)
+ device = cls_scores[0].device
+ bbox_preds_refine = [
+ self.points2bbox(pts_pred_refine)
+ for pts_pred_refine in pts_preds_refine
+ ]
+ num_levels = len(cls_scores)
+ mlvl_points = [
+ self.point_generators[i].grid_points(cls_scores[i].size()[-2:],
+ self.point_strides[i], device)
+ for i in range(num_levels)
+ ]
+ result_list = []
+ for img_id in range(len(img_metas)):
+ cls_score_list = [
+ cls_scores[i][img_id].detach() for i in range(num_levels)
+ ]
+ bbox_pred_list = [
+ bbox_preds_refine[i][img_id].detach()
+ for i in range(num_levels)
+ ]
+ img_shape = img_metas[img_id]['img_shape']
+ scale_factor = img_metas[img_id]['scale_factor']
+ proposals = self._get_bboxes_single(cls_score_list, bbox_pred_list,
+ mlvl_points, img_shape,
+ scale_factor, cfg, rescale,
+ with_nms)
+ result_list.append(proposals)
+ return result_list
+
+ def _get_bboxes_single(self,
+ cls_scores,
+ bbox_preds,
+ mlvl_points,
+ img_shape,
+ scale_factor,
+ cfg,
+ rescale=False,
+ with_nms=True):
+ cfg = self.test_cfg if cfg is None else cfg
+ assert len(cls_scores) == len(bbox_preds) == len(mlvl_points)
+ mlvl_bboxes = []
+ mlvl_scores = []
+ for i_lvl, (cls_score, bbox_pred, points) in enumerate(
+ zip(cls_scores, bbox_preds, mlvl_points)):
+ assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
+ cls_score = cls_score.permute(1, 2,
+ 0).reshape(-1, self.cls_out_channels)
+ if self.use_sigmoid_cls:
+ scores = cls_score.sigmoid()
+ else:
+ scores = cls_score.softmax(-1)
+ bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4)
+ nms_pre = cfg.get('nms_pre', -1)
+ if nms_pre > 0 and scores.shape[0] > nms_pre:
+ if self.use_sigmoid_cls:
+ max_scores, _ = scores.max(dim=1)
+ else:
+ # remind that we set FG labels to [0, num_class-1]
+ # since mmdet v2.0
+ # BG cat_id: num_class
+ max_scores, _ = scores[:, :-1].max(dim=1)
+ _, topk_inds = max_scores.topk(nms_pre)
+ points = points[topk_inds, :]
+ bbox_pred = bbox_pred[topk_inds, :]
+ scores = scores[topk_inds, :]
+ bbox_pos_center = torch.cat([points[:, :2], points[:, :2]], dim=1)
+ bboxes = bbox_pred * self.point_strides[i_lvl] + bbox_pos_center
+ x1 = bboxes[:, 0].clamp(min=0, max=img_shape[1])
+ y1 = bboxes[:, 1].clamp(min=0, max=img_shape[0])
+ x2 = bboxes[:, 2].clamp(min=0, max=img_shape[1])
+ y2 = bboxes[:, 3].clamp(min=0, max=img_shape[0])
+ bboxes = torch.stack([x1, y1, x2, y2], dim=-1)
+ mlvl_bboxes.append(bboxes)
+ mlvl_scores.append(scores)
+ mlvl_bboxes = torch.cat(mlvl_bboxes)
+ if rescale:
+ mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor)
+ mlvl_scores = torch.cat(mlvl_scores)
+ if self.use_sigmoid_cls:
+ # Add a dummy background class to the backend when using sigmoid
+ # remind that we set FG labels to [0, num_class-1] since mmdet v2.0
+ # BG cat_id: num_class
+ padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1)
+ mlvl_scores = torch.cat([mlvl_scores, padding], dim=1)
+ if with_nms:
+ det_bboxes, det_labels = multiclass_nms(mlvl_bboxes, mlvl_scores,
+ cfg.score_thr, cfg.nms,
+ cfg.max_per_img)
+ return det_bboxes, det_labels
+ else:
+ return mlvl_bboxes, mlvl_scores
diff --git a/annotator/uniformer/mmdet_null/models/dense_heads/retina_head.py b/annotator/uniformer/mmdet_null/models/dense_heads/retina_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..b12416fa8332f02b9a04bbfc7926f6d13875e61b
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/dense_heads/retina_head.py
@@ -0,0 +1,114 @@
+import torch.nn as nn
+from mmcv.cnn import ConvModule, bias_init_with_prob, normal_init
+
+from ..builder import HEADS
+from .anchor_head import AnchorHead
+
+
+@HEADS.register_module()
+class RetinaHead(AnchorHead):
+ r"""An anchor-based head used in `RetinaNet
+    <https://arxiv.org/abs/1708.02002>`_.
+
+ The head contains two subnetworks. The first classifies anchor boxes and
+ the second regresses deltas for the anchors.
+
+ Example:
+ >>> import torch
+ >>> self = RetinaHead(11, 7)
+ >>> x = torch.rand(1, 7, 32, 32)
+ >>> cls_score, bbox_pred = self.forward_single(x)
+ >>> # Each anchor predicts a score for each class except background
+ >>> cls_per_anchor = cls_score.shape[1] / self.num_anchors
+ >>> box_per_anchor = bbox_pred.shape[1] / self.num_anchors
+ >>> assert cls_per_anchor == (self.num_classes)
+ >>> assert box_per_anchor == 4
+ """
+
+ def __init__(self,
+ num_classes,
+ in_channels,
+ stacked_convs=4,
+ conv_cfg=None,
+ norm_cfg=None,
+ anchor_generator=dict(
+ type='AnchorGenerator',
+ octave_base_scale=4,
+ scales_per_octave=3,
+ ratios=[0.5, 1.0, 2.0],
+ strides=[8, 16, 32, 64, 128]),
+ **kwargs):
+ self.stacked_convs = stacked_convs
+ self.conv_cfg = conv_cfg
+ self.norm_cfg = norm_cfg
+ super(RetinaHead, self).__init__(
+ num_classes,
+ in_channels,
+ anchor_generator=anchor_generator,
+ **kwargs)
+
+ def _init_layers(self):
+ """Initialize layers of the head."""
+ self.relu = nn.ReLU(inplace=True)
+ self.cls_convs = nn.ModuleList()
+ self.reg_convs = nn.ModuleList()
+ for i in range(self.stacked_convs):
+ chn = self.in_channels if i == 0 else self.feat_channels
+ self.cls_convs.append(
+ ConvModule(
+ chn,
+ self.feat_channels,
+ 3,
+ stride=1,
+ padding=1,
+ conv_cfg=self.conv_cfg,
+ norm_cfg=self.norm_cfg))
+ self.reg_convs.append(
+ ConvModule(
+ chn,
+ self.feat_channels,
+ 3,
+ stride=1,
+ padding=1,
+ conv_cfg=self.conv_cfg,
+ norm_cfg=self.norm_cfg))
+ self.retina_cls = nn.Conv2d(
+ self.feat_channels,
+ self.num_anchors * self.cls_out_channels,
+ 3,
+ padding=1)
+ self.retina_reg = nn.Conv2d(
+ self.feat_channels, self.num_anchors * 4, 3, padding=1)
+
+ def init_weights(self):
+ """Initialize weights of the head."""
+ for m in self.cls_convs:
+ normal_init(m.conv, std=0.01)
+ for m in self.reg_convs:
+ normal_init(m.conv, std=0.01)
+ bias_cls = bias_init_with_prob(0.01)
+ normal_init(self.retina_cls, std=0.01, bias=bias_cls)
+ normal_init(self.retina_reg, std=0.01)
+
+ def forward_single(self, x):
+ """Forward feature of a single scale level.
+
+ Args:
+ x (Tensor): Features of a single scale level.
+
+ Returns:
+ tuple:
+ cls_score (Tensor): Cls scores for a single scale level
+ the channels number is num_anchors * num_classes.
+ bbox_pred (Tensor): Box energies / deltas for a single scale
+ level, the channels number is num_anchors * 4.
+ """
+ cls_feat = x
+ reg_feat = x
+ for cls_conv in self.cls_convs:
+ cls_feat = cls_conv(cls_feat)
+ for reg_conv in self.reg_convs:
+ reg_feat = reg_conv(reg_feat)
+ cls_score = self.retina_cls(cls_feat)
+ bbox_pred = self.retina_reg(reg_feat)
+ return cls_score, bbox_pred
diff --git a/annotator/uniformer/mmdet_null/models/dense_heads/retina_sepbn_head.py b/annotator/uniformer/mmdet_null/models/dense_heads/retina_sepbn_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..6b8ce7f0104b90af4b128e0f245473a1c0219fcd
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/dense_heads/retina_sepbn_head.py
@@ -0,0 +1,113 @@
+import torch.nn as nn
+from mmcv.cnn import ConvModule, bias_init_with_prob, normal_init
+
+from ..builder import HEADS
+from .anchor_head import AnchorHead
+
+
+@HEADS.register_module()
+class RetinaSepBNHead(AnchorHead):
+ """"RetinaHead with separate BN.
+
+ In RetinaHead, conv/norm layers are shared across different FPN levels,
+ while in RetinaSepBNHead, conv layers are shared across different FPN
+ levels, but BN layers are separated.
+ """
+
+ def __init__(self,
+ num_classes,
+ num_ins,
+ in_channels,
+ stacked_convs=4,
+ conv_cfg=None,
+ norm_cfg=None,
+ **kwargs):
+ self.stacked_convs = stacked_convs
+ self.conv_cfg = conv_cfg
+ self.norm_cfg = norm_cfg
+ self.num_ins = num_ins
+ super(RetinaSepBNHead, self).__init__(num_classes, in_channels,
+ **kwargs)
+
+ def _init_layers(self):
+ """Initialize layers of the head."""
+ self.relu = nn.ReLU(inplace=True)
+ self.cls_convs = nn.ModuleList()
+ self.reg_convs = nn.ModuleList()
+ for i in range(self.num_ins):
+ cls_convs = nn.ModuleList()
+ reg_convs = nn.ModuleList()
+ for i in range(self.stacked_convs):
+ chn = self.in_channels if i == 0 else self.feat_channels
+ cls_convs.append(
+ ConvModule(
+ chn,
+ self.feat_channels,
+ 3,
+ stride=1,
+ padding=1,
+ conv_cfg=self.conv_cfg,
+ norm_cfg=self.norm_cfg))
+ reg_convs.append(
+ ConvModule(
+ chn,
+ self.feat_channels,
+ 3,
+ stride=1,
+ padding=1,
+ conv_cfg=self.conv_cfg,
+ norm_cfg=self.norm_cfg))
+ self.cls_convs.append(cls_convs)
+ self.reg_convs.append(reg_convs)
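+        # Share the conv weights (but not the norm layers) across FPN levels
+        # by aliasing every level's conv submodules to those built for the
+        # first level.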
+ for i in range(self.stacked_convs):
+ for j in range(1, self.num_ins):
+ self.cls_convs[j][i].conv = self.cls_convs[0][i].conv
+ self.reg_convs[j][i].conv = self.reg_convs[0][i].conv
+ self.retina_cls = nn.Conv2d(
+ self.feat_channels,
+ self.num_anchors * self.cls_out_channels,
+ 3,
+ padding=1)
+ self.retina_reg = nn.Conv2d(
+ self.feat_channels, self.num_anchors * 4, 3, padding=1)
+
+ def init_weights(self):
+ """Initialize weights of the head."""
+ for m in self.cls_convs[0]:
+ normal_init(m.conv, std=0.01)
+ for m in self.reg_convs[0]:
+ normal_init(m.conv, std=0.01)
+ bias_cls = bias_init_with_prob(0.01)
+ normal_init(self.retina_cls, std=0.01, bias=bias_cls)
+ normal_init(self.retina_reg, std=0.01)
+
+ def forward(self, feats):
+ """Forward features from the upstream network.
+
+ Args:
+ feats (tuple[Tensor]): Features from the upstream network, each is
+ a 4D-tensor.
+
+ Returns:
+ tuple: Usually a tuple of classification scores and bbox prediction
+ cls_scores (list[Tensor]): Classification scores for all scale
+ levels, each is a 4D-tensor, the channels number is
+ num_anchors * num_classes.
+ bbox_preds (list[Tensor]): Box energies / deltas for all scale
+ levels, each is a 4D-tensor, the channels number is
+ num_anchors * 4.
+ """
+ cls_scores = []
+ bbox_preds = []
+        for i, x in enumerate(feats):
+            cls_feat = x
+            reg_feat = x
+ for cls_conv in self.cls_convs[i]:
+ cls_feat = cls_conv(cls_feat)
+ for reg_conv in self.reg_convs[i]:
+ reg_feat = reg_conv(reg_feat)
+ cls_score = self.retina_cls(cls_feat)
+ bbox_pred = self.retina_reg(reg_feat)
+ cls_scores.append(cls_score)
+ bbox_preds.append(bbox_pred)
+ return cls_scores, bbox_preds
diff --git a/annotator/uniformer/mmdet_null/models/dense_heads/rpn_head.py b/annotator/uniformer/mmdet_null/models/dense_heads/rpn_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..a888cb8c188ca6fe63045b6230266553fbe8c996
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/dense_heads/rpn_head.py
@@ -0,0 +1,236 @@
+import copy
+import warnings
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from mmcv import ConfigDict
+from mmcv.cnn import normal_init
+from mmcv.ops import batched_nms
+
+from ..builder import HEADS
+from .anchor_head import AnchorHead
+from .rpn_test_mixin import RPNTestMixin
+
+
+@HEADS.register_module()
+class RPNHead(RPNTestMixin, AnchorHead):
+ """RPN head.
+
+ Args:
+ in_channels (int): Number of channels in the input feature map.
+ """ # noqa: W605
+
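+    # The RPN is class-agnostic: ``num_classes`` is fixed to 1 in __init__,
+    # so the head only predicts objectness scores and class-agnostic box
+    # deltas.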
+ def __init__(self, in_channels, **kwargs):
+ super(RPNHead, self).__init__(1, in_channels, **kwargs)
+
+ def _init_layers(self):
+ """Initialize layers of the head."""
+ self.rpn_conv = nn.Conv2d(
+ self.in_channels, self.feat_channels, 3, padding=1)
+ self.rpn_cls = nn.Conv2d(self.feat_channels,
+ self.num_anchors * self.cls_out_channels, 1)
+ self.rpn_reg = nn.Conv2d(self.feat_channels, self.num_anchors * 4, 1)
+
+ def init_weights(self):
+ """Initialize weights of the head."""
+ normal_init(self.rpn_conv, std=0.01)
+ normal_init(self.rpn_cls, std=0.01)
+ normal_init(self.rpn_reg, std=0.01)
+
+ def forward_single(self, x):
+ """Forward feature map of a single scale level."""
+ x = self.rpn_conv(x)
+ x = F.relu(x, inplace=True)
+ rpn_cls_score = self.rpn_cls(x)
+ rpn_bbox_pred = self.rpn_reg(x)
+ return rpn_cls_score, rpn_bbox_pred
+
+ def loss(self,
+ cls_scores,
+ bbox_preds,
+ gt_bboxes,
+ img_metas,
+ gt_bboxes_ignore=None):
+ """Compute losses of the head.
+
+ Args:
+ cls_scores (list[Tensor]): Box scores for each scale level
+ Has shape (N, num_anchors * num_classes, H, W)
+ bbox_preds (list[Tensor]): Box energies / deltas for each scale
+ level with shape (N, num_anchors * 4, H, W)
+ gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
+ shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
+ img_metas (list[dict]): Meta information of each image, e.g.,
+ image size, scaling factor, etc.
+ gt_bboxes_ignore (None | list[Tensor]): specify which bounding
+ boxes can be ignored when computing the loss.
+
+ Returns:
+ dict[str, Tensor]: A dictionary of loss components.
+ """
+ losses = super(RPNHead, self).loss(
+ cls_scores,
+ bbox_preds,
+ gt_bboxes,
+ None,
+ img_metas,
+ gt_bboxes_ignore=gt_bboxes_ignore)
+ return dict(
+ loss_rpn_cls=losses['loss_cls'], loss_rpn_bbox=losses['loss_bbox'])
+
+ def _get_bboxes(self,
+ cls_scores,
+ bbox_preds,
+ mlvl_anchors,
+ img_shapes,
+ scale_factors,
+ cfg,
+ rescale=False):
+ """Transform outputs for a single batch item into bbox predictions.
+
+ Args:
+ cls_scores (list[Tensor]): Box scores for each scale level
+ Has shape (N, num_anchors * num_classes, H, W).
+ bbox_preds (list[Tensor]): Box energies / deltas for each scale
+ level with shape (N, num_anchors * 4, H, W).
+ mlvl_anchors (list[Tensor]): Box reference for each scale level
+ with shape (num_total_anchors, 4).
+ img_shapes (list[tuple[int]]): Shape of the input image,
+ (height, width, 3).
+            scale_factors (list[ndarray]): Scale factor of the image arranged
+                as (w_scale, h_scale, w_scale, h_scale).
+ cfg (mmcv.Config): Test / postprocessing configuration,
+ if None, test_cfg would be used.
+ rescale (bool): If True, return boxes in original image space.
+
+ Returns:
+            list[tuple[Tensor, Tensor]]: Each item in result_list is a
+                2-tuple. The first item is an (n, 5) tensor, where the first
+                4 columns are bounding box positions (tl_x, tl_y, br_x, br_y)
+                and the 5-th column is a score between 0 and 1. The second
+                item is an (n,) tensor where each item is the predicted class
+                label of the corresponding box.
+ """
+ cfg = self.test_cfg if cfg is None else cfg
+ cfg = copy.deepcopy(cfg)
+        # bboxes from different levels should be kept independent during NMS;
+        # level_ids are used as labels for batched NMS to separate them
+ level_ids = []
+ mlvl_scores = []
+ mlvl_bbox_preds = []
+ mlvl_valid_anchors = []
+ batch_size = cls_scores[0].shape[0]
+ nms_pre_tensor = torch.tensor(
+ cfg.nms_pre, device=cls_scores[0].device, dtype=torch.long)
+ for idx in range(len(cls_scores)):
+ rpn_cls_score = cls_scores[idx]
+ rpn_bbox_pred = bbox_preds[idx]
+ assert rpn_cls_score.size()[-2:] == rpn_bbox_pred.size()[-2:]
+ rpn_cls_score = rpn_cls_score.permute(0, 2, 3, 1)
+ if self.use_sigmoid_cls:
+ rpn_cls_score = rpn_cls_score.reshape(batch_size, -1)
+ scores = rpn_cls_score.sigmoid()
+ else:
+ rpn_cls_score = rpn_cls_score.reshape(batch_size, -1, 2)
+ # We set FG labels to [0, num_class-1] and BG label to
+ # num_class in RPN head since mmdet v2.5, which is unified to
+ # be consistent with other head since mmdet v2.0. In mmdet v2.0
+ # to v2.4 we keep BG label as 0 and FG label as 1 in rpn head.
+ scores = rpn_cls_score.softmax(-1)[..., 0]
+ rpn_bbox_pred = rpn_bbox_pred.permute(0, 2, 3, 1).reshape(
+ batch_size, -1, 4)
+ anchors = mlvl_anchors[idx]
+ anchors = anchors.expand_as(rpn_bbox_pred)
+ if nms_pre_tensor > 0:
+ # sort is faster than topk
+ # _, topk_inds = scores.topk(cfg.nms_pre)
+ # keep topk op for dynamic k in onnx model
+ if torch.onnx.is_in_onnx_export():
+ # sort op will be converted to TopK in onnx
+ # and k<=3480 in TensorRT
+ scores_shape = torch._shape_as_tensor(scores)
+ nms_pre = torch.where(scores_shape[1] < nms_pre_tensor,
+ scores_shape[1], nms_pre_tensor)
+ _, topk_inds = scores.topk(nms_pre)
+ batch_inds = torch.arange(batch_size).view(
+ -1, 1).expand_as(topk_inds)
+ scores = scores[batch_inds, topk_inds]
+ rpn_bbox_pred = rpn_bbox_pred[batch_inds, topk_inds, :]
+ anchors = anchors[batch_inds, topk_inds, :]
+
+ elif scores.shape[-1] > cfg.nms_pre:
+ ranked_scores, rank_inds = scores.sort(descending=True)
+ topk_inds = rank_inds[:, :cfg.nms_pre]
+ scores = ranked_scores[:, :cfg.nms_pre]
+ batch_inds = torch.arange(batch_size).view(
+ -1, 1).expand_as(topk_inds)
+ rpn_bbox_pred = rpn_bbox_pred[batch_inds, topk_inds, :]
+ anchors = anchors[batch_inds, topk_inds, :]
+
+ mlvl_scores.append(scores)
+ mlvl_bbox_preds.append(rpn_bbox_pred)
+ mlvl_valid_anchors.append(anchors)
+ level_ids.append(
+ scores.new_full((
+ batch_size,
+ scores.size(1),
+ ),
+ idx,
+ dtype=torch.long))
+
+ batch_mlvl_scores = torch.cat(mlvl_scores, dim=1)
+ batch_mlvl_anchors = torch.cat(mlvl_valid_anchors, dim=1)
+ batch_mlvl_rpn_bbox_pred = torch.cat(mlvl_bbox_preds, dim=1)
+ batch_mlvl_proposals = self.bbox_coder.decode(
+ batch_mlvl_anchors, batch_mlvl_rpn_bbox_pred, max_shape=img_shapes)
+ batch_mlvl_ids = torch.cat(level_ids, dim=1)
+
+ # deprecate arguments warning
+ if 'nms' not in cfg or 'max_num' in cfg or 'nms_thr' in cfg:
+ warnings.warn(
+ 'In rpn_proposal or test_cfg, '
+ 'nms_thr has been moved to a dict named nms as '
+ 'iou_threshold, max_num has been renamed as max_per_img, '
+                'the original argument names and this way of specifying the '
+                'NMS iou_threshold will be deprecated.')
+ if 'nms' not in cfg:
+ cfg.nms = ConfigDict(dict(type='nms', iou_threshold=cfg.nms_thr))
+ if 'max_num' in cfg:
+ if 'max_per_img' in cfg:
+ assert cfg.max_num == cfg.max_per_img, f'You ' \
+ f'set max_num and ' \
+ f'max_per_img at the same time, but get {cfg.max_num} ' \
+                    f'and {cfg.max_per_img} respectively. ' \
+                    'Please delete max_num, which will be deprecated.'
+ else:
+ cfg.max_per_img = cfg.max_num
+ if 'nms_thr' in cfg:
+ assert cfg.nms.iou_threshold == cfg.nms_thr, f'You set' \
+ f' iou_threshold in nms and ' \
+ f'nms_thr at the same time, but get' \
+ f' {cfg.nms.iou_threshold} and {cfg.nms_thr}' \
+ f' respectively. Please delete the nms_thr ' \
+ f'which will be deprecated.'
+
+ result_list = []
+ for (mlvl_proposals, mlvl_scores,
+ mlvl_ids) in zip(batch_mlvl_proposals, batch_mlvl_scores,
+ batch_mlvl_ids):
+ # Skip nonzero op while exporting to ONNX
+ if cfg.min_bbox_size > 0 and (not torch.onnx.is_in_onnx_export()):
+ w = mlvl_proposals[:, 2] - mlvl_proposals[:, 0]
+ h = mlvl_proposals[:, 3] - mlvl_proposals[:, 1]
+ valid_ind = torch.nonzero(
+ (w >= cfg.min_bbox_size)
+ & (h >= cfg.min_bbox_size),
+ as_tuple=False).squeeze()
+ if valid_ind.sum().item() != len(mlvl_proposals):
+ mlvl_proposals = mlvl_proposals[valid_ind, :]
+ mlvl_scores = mlvl_scores[valid_ind]
+ mlvl_ids = mlvl_ids[valid_ind]
+
+ dets, keep = batched_nms(mlvl_proposals, mlvl_scores, mlvl_ids,
+ cfg.nms)
+ result_list.append(dets[:cfg.max_per_img])
+ return result_list
diff --git a/annotator/uniformer/mmdet_null/models/dense_heads/rpn_test_mixin.py b/annotator/uniformer/mmdet_null/models/dense_heads/rpn_test_mixin.py
new file mode 100644
index 0000000000000000000000000000000000000000..4ce5c66f82595f496e6e55719c1caee75150d568
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/dense_heads/rpn_test_mixin.py
@@ -0,0 +1,59 @@
+import sys
+
+from mmdet.core import merge_aug_proposals
+
+if sys.version_info >= (3, 7):
+ from mmdet.utils.contextmanagers import completed
+
+
+class RPNTestMixin(object):
+ """Test methods of RPN."""
+
+ if sys.version_info >= (3, 7):
+
+ async def async_simple_test_rpn(self, x, img_metas):
+ sleep_interval = self.test_cfg.pop('async_sleep_interval', 0.025)
+ async with completed(
+ __name__, 'rpn_head_forward',
+ sleep_interval=sleep_interval):
+ rpn_outs = self(x)
+
+ proposal_list = self.get_bboxes(*rpn_outs, img_metas)
+ return proposal_list
+
+ def simple_test_rpn(self, x, img_metas):
+ """Test without augmentation.
+
+ Args:
+ x (tuple[Tensor]): Features from the upstream network, each is
+ a 4D-tensor.
+ img_metas (list[dict]): Meta info of each image.
+
+ Returns:
+ list[Tensor]: Proposals of each image.
+ """
+ rpn_outs = self(x)
+ proposal_list = self.get_bboxes(*rpn_outs, img_metas)
+ return proposal_list
+
+ def aug_test_rpn(self, feats, img_metas):
+ samples_per_gpu = len(img_metas[0])
+ aug_proposals = [[] for _ in range(samples_per_gpu)]
+ for x, img_meta in zip(feats, img_metas):
+ proposal_list = self.simple_test_rpn(x, img_meta)
+ for i, proposals in enumerate(proposal_list):
+ aug_proposals[i].append(proposals)
+ # reorganize the order of 'img_metas' to match the dimensions
+ # of 'aug_proposals'
+ aug_img_metas = []
+ for i in range(samples_per_gpu):
+ aug_img_meta = []
+ for j in range(len(img_metas)):
+ aug_img_meta.append(img_metas[j][i])
+ aug_img_metas.append(aug_img_meta)
+ # after merging, proposals will be rescaled to the original image size
+ merged_proposals = [
+ merge_aug_proposals(proposals, aug_img_meta, self.test_cfg)
+ for proposals, aug_img_meta in zip(aug_proposals, aug_img_metas)
+ ]
+ return merged_proposals
diff --git a/annotator/uniformer/mmdet_null/models/dense_heads/sabl_retina_head.py b/annotator/uniformer/mmdet_null/models/dense_heads/sabl_retina_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..4211622cb8b4fe807230a89bcaab8f4f1681bfc0
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/dense_heads/sabl_retina_head.py
@@ -0,0 +1,621 @@
+import numpy as np
+import torch
+import torch.nn as nn
+from mmcv.cnn import ConvModule, bias_init_with_prob, normal_init
+from mmcv.runner import force_fp32
+
+from mmdet.core import (build_anchor_generator, build_assigner,
+ build_bbox_coder, build_sampler, images_to_levels,
+ multi_apply, multiclass_nms, unmap)
+from ..builder import HEADS, build_loss
+from .base_dense_head import BaseDenseHead
+from .guided_anchor_head import GuidedAnchorHead
+
+
+@HEADS.register_module()
+class SABLRetinaHead(BaseDenseHead):
+ """Side-Aware Boundary Localization (SABL) for RetinaNet.
+
+ The anchor generation, assigning and sampling in SABLRetinaHead
+ are the same as GuidedAnchorHead for guided anchoring.
+
+ Please refer to https://arxiv.org/abs/1912.04260 for more details.
+
+ Args:
+ num_classes (int): Number of classes.
+ in_channels (int): Number of channels in the input feature map.
+ stacked_convs (int): Number of Convs for classification \
+ and regression branches. Defaults to 4.
+ feat_channels (int): Number of hidden channels. \
+ Defaults to 256.
+ approx_anchor_generator (dict): Config dict for approx generator.
+ square_anchor_generator (dict): Config dict for square generator.
+ conv_cfg (dict): Config dict for ConvModule. Defaults to None.
+ norm_cfg (dict): Config dict for Norm Layer. Defaults to None.
+ bbox_coder (dict): Config dict for bbox coder.
+ reg_decoded_bbox (bool): If true, the regression loss would be
+ applied directly on decoded bounding boxes, converting both
+ the predicted boxes and regression targets to absolute
+ coordinates format. Default False. It should be `True` when
+ using `IoULoss`, `GIoULoss`, or `DIoULoss` in the bbox head.
+ train_cfg (dict): Training config of SABLRetinaHead.
+ test_cfg (dict): Testing config of SABLRetinaHead.
+ loss_cls (dict): Config of classification loss.
+ loss_bbox_cls (dict): Config of classification loss for bbox branch.
+ loss_bbox_reg (dict): Config of regression loss for bbox branch.
+ """
+
+ def __init__(self,
+ num_classes,
+ in_channels,
+ stacked_convs=4,
+ feat_channels=256,
+ approx_anchor_generator=dict(
+ type='AnchorGenerator',
+ octave_base_scale=4,
+ scales_per_octave=3,
+ ratios=[0.5, 1.0, 2.0],
+ strides=[8, 16, 32, 64, 128]),
+ square_anchor_generator=dict(
+ type='AnchorGenerator',
+ ratios=[1.0],
+ scales=[4],
+ strides=[8, 16, 32, 64, 128]),
+ conv_cfg=None,
+ norm_cfg=None,
+ bbox_coder=dict(
+ type='BucketingBBoxCoder',
+ num_buckets=14,
+ scale_factor=3.0),
+ reg_decoded_bbox=False,
+ train_cfg=None,
+ test_cfg=None,
+ loss_cls=dict(
+ type='FocalLoss',
+ use_sigmoid=True,
+ gamma=2.0,
+ alpha=0.25,
+ loss_weight=1.0),
+ loss_bbox_cls=dict(
+ type='CrossEntropyLoss',
+ use_sigmoid=True,
+ loss_weight=1.5),
+ loss_bbox_reg=dict(
+ type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.5)):
+ super(SABLRetinaHead, self).__init__()
+ self.in_channels = in_channels
+ self.num_classes = num_classes
+ self.feat_channels = feat_channels
+ self.num_buckets = bbox_coder['num_buckets']
+ self.side_num = int(np.ceil(self.num_buckets / 2))
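+        # Each box side is described by ceil(num_buckets / 2) buckets, so the
+        # bucket-classification and bucket-offset branches both predict
+        # side_num * 4 channels per location (see ``_init_layers``).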
+
+ assert (approx_anchor_generator['octave_base_scale'] ==
+ square_anchor_generator['scales'][0])
+ assert (approx_anchor_generator['strides'] ==
+ square_anchor_generator['strides'])
+
+ self.approx_anchor_generator = build_anchor_generator(
+ approx_anchor_generator)
+ self.square_anchor_generator = build_anchor_generator(
+ square_anchor_generator)
+ self.approxs_per_octave = (
+ self.approx_anchor_generator.num_base_anchors[0])
+
+ # one anchor per location
+ self.num_anchors = 1
+ self.stacked_convs = stacked_convs
+ self.conv_cfg = conv_cfg
+ self.norm_cfg = norm_cfg
+
+ self.reg_decoded_bbox = reg_decoded_bbox
+
+ self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False)
+ self.sampling = loss_cls['type'] not in [
+ 'FocalLoss', 'GHMC', 'QualityFocalLoss'
+ ]
+ if self.use_sigmoid_cls:
+ self.cls_out_channels = num_classes
+ else:
+ self.cls_out_channels = num_classes + 1
+
+ self.bbox_coder = build_bbox_coder(bbox_coder)
+ self.loss_cls = build_loss(loss_cls)
+ self.loss_bbox_cls = build_loss(loss_bbox_cls)
+ self.loss_bbox_reg = build_loss(loss_bbox_reg)
+
+ self.train_cfg = train_cfg
+ self.test_cfg = test_cfg
+
+ if self.train_cfg:
+ self.assigner = build_assigner(self.train_cfg.assigner)
+ # use PseudoSampler when sampling is False
+ if self.sampling and hasattr(self.train_cfg, 'sampler'):
+ sampler_cfg = self.train_cfg.sampler
+ else:
+ sampler_cfg = dict(type='PseudoSampler')
+ self.sampler = build_sampler(sampler_cfg, context=self)
+
+ self.fp16_enabled = False
+ self._init_layers()
+
+ def _init_layers(self):
+ self.relu = nn.ReLU(inplace=True)
+ self.cls_convs = nn.ModuleList()
+ self.reg_convs = nn.ModuleList()
+ for i in range(self.stacked_convs):
+ chn = self.in_channels if i == 0 else self.feat_channels
+ self.cls_convs.append(
+ ConvModule(
+ chn,
+ self.feat_channels,
+ 3,
+ stride=1,
+ padding=1,
+ conv_cfg=self.conv_cfg,
+ norm_cfg=self.norm_cfg))
+ self.reg_convs.append(
+ ConvModule(
+ chn,
+ self.feat_channels,
+ 3,
+ stride=1,
+ padding=1,
+ conv_cfg=self.conv_cfg,
+ norm_cfg=self.norm_cfg))
+ self.retina_cls = nn.Conv2d(
+ self.feat_channels, self.cls_out_channels, 3, padding=1)
+ self.retina_bbox_reg = nn.Conv2d(
+ self.feat_channels, self.side_num * 4, 3, padding=1)
+ self.retina_bbox_cls = nn.Conv2d(
+ self.feat_channels, self.side_num * 4, 3, padding=1)
+
+ def init_weights(self):
+ for m in self.cls_convs:
+ normal_init(m.conv, std=0.01)
+ for m in self.reg_convs:
+ normal_init(m.conv, std=0.01)
+ bias_cls = bias_init_with_prob(0.01)
+ normal_init(self.retina_cls, std=0.01, bias=bias_cls)
+ normal_init(self.retina_bbox_reg, std=0.01)
+ normal_init(self.retina_bbox_cls, std=0.01)
+
+ def forward_single(self, x):
+ cls_feat = x
+ reg_feat = x
+ for cls_conv in self.cls_convs:
+ cls_feat = cls_conv(cls_feat)
+ for reg_conv in self.reg_convs:
+ reg_feat = reg_conv(reg_feat)
+ cls_score = self.retina_cls(cls_feat)
+ bbox_cls_pred = self.retina_bbox_cls(reg_feat)
+ bbox_reg_pred = self.retina_bbox_reg(reg_feat)
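+        # Pack the bucket-classification and bucket-offset predictions into a
+        # pair; ``loss_single`` unpacks this tuple when computing the SABL
+        # losses.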
+ bbox_pred = (bbox_cls_pred, bbox_reg_pred)
+ return cls_score, bbox_pred
+
+ def forward(self, feats):
+ return multi_apply(self.forward_single, feats)
+
+ def get_anchors(self, featmap_sizes, img_metas, device='cuda'):
+ """Get squares according to feature map sizes and guided anchors.
+
+ Args:
+ featmap_sizes (list[tuple]): Multi-level feature map sizes.
+ img_metas (list[dict]): Image meta info.
+ device (torch.device | str): device for returned tensors
+
+ Returns:
+ tuple: square approxs of each image
+ """
+ num_imgs = len(img_metas)
+
+        # since the feature map sizes of all images are the same, we only
+        # compute the squares once
+ multi_level_squares = self.square_anchor_generator.grid_anchors(
+ featmap_sizes, device=device)
+ squares_list = [multi_level_squares for _ in range(num_imgs)]
+
+ return squares_list
+
+ def get_target(self,
+ approx_list,
+ inside_flag_list,
+ square_list,
+ gt_bboxes_list,
+ img_metas,
+ gt_bboxes_ignore_list=None,
+ gt_labels_list=None,
+ label_channels=None,
+ sampling=True,
+ unmap_outputs=True):
+ """Compute bucketing targets.
+ Args:
+ approx_list (list[list]): Multi level approxs of each image.
+ inside_flag_list (list[list]): Multi level inside flags of each
+ image.
+ square_list (list[list]): Multi level squares of each image.
+ gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image.
+ img_metas (list[dict]): Meta info of each image.
+ gt_bboxes_ignore_list (list[Tensor]): ignore list of gt bboxes.
+            gt_labels_list (list[Tensor]): Gt labels of each image.
+ label_channels (int): Channel of label.
+ sampling (bool): Sample Anchors or not.
+ unmap_outputs (bool): unmap outputs or not.
+
+ Returns:
+ tuple: Returns a tuple containing learning targets.
+
+ - labels_list (list[Tensor]): Labels of each level.
+ - label_weights_list (list[Tensor]): Label weights of each \
+ level.
+ - bbox_cls_targets_list (list[Tensor]): BBox cls targets of \
+ each level.
+ - bbox_cls_weights_list (list[Tensor]): BBox cls weights of \
+ each level.
+ - bbox_reg_targets_list (list[Tensor]): BBox reg targets of \
+ each level.
+ - bbox_reg_weights_list (list[Tensor]): BBox reg weights of \
+ each level.
+ - num_total_pos (int): Number of positive samples in all \
+ images.
+ - num_total_neg (int): Number of negative samples in all \
+ images.
+ """
+ num_imgs = len(img_metas)
+ assert len(approx_list) == len(inside_flag_list) == len(
+ square_list) == num_imgs
+ # anchor number of multi levels
+ num_level_squares = [squares.size(0) for squares in square_list[0]]
+ # concat all level anchors and flags to a single tensor
+ inside_flag_flat_list = []
+ approx_flat_list = []
+ square_flat_list = []
+ for i in range(num_imgs):
+ assert len(square_list[i]) == len(inside_flag_list[i])
+ inside_flag_flat_list.append(torch.cat(inside_flag_list[i]))
+ approx_flat_list.append(torch.cat(approx_list[i]))
+ square_flat_list.append(torch.cat(square_list[i]))
+
+ # compute targets for each image
+ if gt_bboxes_ignore_list is None:
+ gt_bboxes_ignore_list = [None for _ in range(num_imgs)]
+ if gt_labels_list is None:
+ gt_labels_list = [None for _ in range(num_imgs)]
+ (all_labels, all_label_weights, all_bbox_cls_targets,
+ all_bbox_cls_weights, all_bbox_reg_targets, all_bbox_reg_weights,
+ pos_inds_list, neg_inds_list) = multi_apply(
+ self._get_target_single,
+ approx_flat_list,
+ inside_flag_flat_list,
+ square_flat_list,
+ gt_bboxes_list,
+ gt_bboxes_ignore_list,
+ gt_labels_list,
+ img_metas,
+ label_channels=label_channels,
+ sampling=sampling,
+ unmap_outputs=unmap_outputs)
+ # no valid anchors
+ if any([labels is None for labels in all_labels]):
+ return None
+ # sampled anchors of all images
+ num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list])
+ num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list])
+ # split targets to a list w.r.t. multiple levels
+ labels_list = images_to_levels(all_labels, num_level_squares)
+ label_weights_list = images_to_levels(all_label_weights,
+ num_level_squares)
+ bbox_cls_targets_list = images_to_levels(all_bbox_cls_targets,
+ num_level_squares)
+ bbox_cls_weights_list = images_to_levels(all_bbox_cls_weights,
+ num_level_squares)
+ bbox_reg_targets_list = images_to_levels(all_bbox_reg_targets,
+ num_level_squares)
+ bbox_reg_weights_list = images_to_levels(all_bbox_reg_weights,
+ num_level_squares)
+ return (labels_list, label_weights_list, bbox_cls_targets_list,
+ bbox_cls_weights_list, bbox_reg_targets_list,
+ bbox_reg_weights_list, num_total_pos, num_total_neg)
+
+ def _get_target_single(self,
+ flat_approxs,
+ inside_flags,
+ flat_squares,
+ gt_bboxes,
+ gt_bboxes_ignore,
+ gt_labels,
+ img_meta,
+ label_channels=None,
+ sampling=True,
+ unmap_outputs=True):
+ """Compute regression and classification targets for anchors in a
+ single image.
+
+ Args:
+ flat_approxs (Tensor): flat approxs of a single image,
+ shape (n, 4)
+ inside_flags (Tensor): inside flags of a single image,
+ shape (n, ).
+ flat_squares (Tensor): flat squares of a single image,
+ shape (approxs_per_octave * n, 4)
+ gt_bboxes (Tensor): Ground truth bboxes of a single image, \
+ shape (num_gts, 4).
+ gt_bboxes_ignore (Tensor): Ground truth bboxes to be
+ ignored, shape (num_ignored_gts, 4).
+ gt_labels (Tensor): Ground truth labels of each box,
+ shape (num_gts,).
+ img_meta (dict): Meta info of the image.
+ label_channels (int): Channel of label.
+ sampling (bool): Sample Anchors or not.
+ unmap_outputs (bool): unmap outputs or not.
+
+ Returns:
+ tuple:
+
+ - labels_list (Tensor): Labels in a single image
+ - label_weights (Tensor): Label weights in a single image
+ - bbox_cls_targets (Tensor): BBox cls targets in a single image
+ - bbox_cls_weights (Tensor): BBox cls weights in a single image
+ - bbox_reg_targets (Tensor): BBox reg targets in a single image
+ - bbox_reg_weights (Tensor): BBox reg weights in a single image
+ - num_total_pos (int): Number of positive samples \
+ in a single image
+ - num_total_neg (int): Number of negative samples \
+ in a single image
+ """
+ if not inside_flags.any():
+ return (None, ) * 8
+ # assign gt and sample anchors
+ expand_inside_flags = inside_flags[:, None].expand(
+ -1, self.approxs_per_octave).reshape(-1)
+ approxs = flat_approxs[expand_inside_flags, :]
+ squares = flat_squares[inside_flags, :]
+
+ assign_result = self.assigner.assign(approxs, squares,
+ self.approxs_per_octave,
+ gt_bboxes, gt_bboxes_ignore)
+ sampling_result = self.sampler.sample(assign_result, squares,
+ gt_bboxes)
+
+ num_valid_squares = squares.shape[0]
+ bbox_cls_targets = squares.new_zeros(
+ (num_valid_squares, self.side_num * 4))
+ bbox_cls_weights = squares.new_zeros(
+ (num_valid_squares, self.side_num * 4))
+ bbox_reg_targets = squares.new_zeros(
+ (num_valid_squares, self.side_num * 4))
+ bbox_reg_weights = squares.new_zeros(
+ (num_valid_squares, self.side_num * 4))
+ labels = squares.new_full((num_valid_squares, ),
+ self.num_classes,
+ dtype=torch.long)
+ label_weights = squares.new_zeros(num_valid_squares, dtype=torch.float)
+
+ pos_inds = sampling_result.pos_inds
+ neg_inds = sampling_result.neg_inds
+ if len(pos_inds) > 0:
+ (pos_bbox_reg_targets, pos_bbox_reg_weights, pos_bbox_cls_targets,
+ pos_bbox_cls_weights) = self.bbox_coder.encode(
+ sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes)
+
+ bbox_cls_targets[pos_inds, :] = pos_bbox_cls_targets
+ bbox_reg_targets[pos_inds, :] = pos_bbox_reg_targets
+ bbox_cls_weights[pos_inds, :] = pos_bbox_cls_weights
+ bbox_reg_weights[pos_inds, :] = pos_bbox_reg_weights
+ if gt_labels is None:
+ # Only rpn gives gt_labels as None
+ # Foreground is the first class
+ labels[pos_inds] = 0
+ else:
+ labels[pos_inds] = gt_labels[
+ sampling_result.pos_assigned_gt_inds]
+ if self.train_cfg.pos_weight <= 0:
+ label_weights[pos_inds] = 1.0
+ else:
+ label_weights[pos_inds] = self.train_cfg.pos_weight
+ if len(neg_inds) > 0:
+ label_weights[neg_inds] = 1.0
+
+ # map up to original set of anchors
+ if unmap_outputs:
+ num_total_anchors = flat_squares.size(0)
+ labels = unmap(
+ labels, num_total_anchors, inside_flags, fill=self.num_classes)
+ label_weights = unmap(label_weights, num_total_anchors,
+ inside_flags)
+ bbox_cls_targets = unmap(bbox_cls_targets, num_total_anchors,
+ inside_flags)
+ bbox_cls_weights = unmap(bbox_cls_weights, num_total_anchors,
+ inside_flags)
+ bbox_reg_targets = unmap(bbox_reg_targets, num_total_anchors,
+ inside_flags)
+ bbox_reg_weights = unmap(bbox_reg_weights, num_total_anchors,
+ inside_flags)
+ return (labels, label_weights, bbox_cls_targets, bbox_cls_weights,
+ bbox_reg_targets, bbox_reg_weights, pos_inds, neg_inds)
+
+ def loss_single(self, cls_score, bbox_pred, labels, label_weights,
+ bbox_cls_targets, bbox_cls_weights, bbox_reg_targets,
+ bbox_reg_weights, num_total_samples):
+ # classification loss
+ labels = labels.reshape(-1)
+ label_weights = label_weights.reshape(-1)
+ cls_score = cls_score.permute(0, 2, 3,
+ 1).reshape(-1, self.cls_out_channels)
+ loss_cls = self.loss_cls(
+ cls_score, labels, label_weights, avg_factor=num_total_samples)
+ # regression loss
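+        # SABL decomposes localization into bucket classification and
+        # in-bucket offset regression, each predicting side_num buckets
+        # per box side (hence the 4 * side_num channels below)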
+ bbox_cls_targets = bbox_cls_targets.reshape(-1, self.side_num * 4)
+ bbox_cls_weights = bbox_cls_weights.reshape(-1, self.side_num * 4)
+ bbox_reg_targets = bbox_reg_targets.reshape(-1, self.side_num * 4)
+ bbox_reg_weights = bbox_reg_weights.reshape(-1, self.side_num * 4)
+ (bbox_cls_pred, bbox_reg_pred) = bbox_pred
+ bbox_cls_pred = bbox_cls_pred.permute(0, 2, 3, 1).reshape(
+ -1, self.side_num * 4)
+ bbox_reg_pred = bbox_reg_pred.permute(0, 2, 3, 1).reshape(
+ -1, self.side_num * 4)
+ loss_bbox_cls = self.loss_bbox_cls(
+ bbox_cls_pred,
+ bbox_cls_targets.long(),
+ bbox_cls_weights,
+ avg_factor=num_total_samples * 4 * self.side_num)
+ loss_bbox_reg = self.loss_bbox_reg(
+ bbox_reg_pred,
+ bbox_reg_targets,
+ bbox_reg_weights,
+ avg_factor=num_total_samples * 4 * self.bbox_coder.offset_topk)
+ return loss_cls, loss_bbox_cls, loss_bbox_reg
+
+ @force_fp32(apply_to=('cls_scores', 'bbox_preds'))
+ def loss(self,
+ cls_scores,
+ bbox_preds,
+ gt_bboxes,
+ gt_labels,
+ img_metas,
+ gt_bboxes_ignore=None):
+ featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
+ assert len(featmap_sizes) == self.approx_anchor_generator.num_levels
+
+ device = cls_scores[0].device
+
+ # get sampled approxes
+ approxs_list, inside_flag_list = GuidedAnchorHead.get_sampled_approxs(
+ self, featmap_sizes, img_metas, device=device)
+
+ square_list = self.get_anchors(featmap_sizes, img_metas, device=device)
+
+ label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1
+
+ cls_reg_targets = self.get_target(
+ approxs_list,
+ inside_flag_list,
+ square_list,
+ gt_bboxes,
+ img_metas,
+ gt_bboxes_ignore_list=gt_bboxes_ignore,
+ gt_labels_list=gt_labels,
+ label_channels=label_channels,
+ sampling=self.sampling)
+ if cls_reg_targets is None:
+ return None
+ (labels_list, label_weights_list, bbox_cls_targets_list,
+ bbox_cls_weights_list, bbox_reg_targets_list, bbox_reg_weights_list,
+ num_total_pos, num_total_neg) = cls_reg_targets
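+        # when sampling is disabled (e.g. focal-loss style training), only
+        # positive anchors are counted in the loss normalizer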
+ num_total_samples = (
+ num_total_pos + num_total_neg if self.sampling else num_total_pos)
+ losses_cls, losses_bbox_cls, losses_bbox_reg = multi_apply(
+ self.loss_single,
+ cls_scores,
+ bbox_preds,
+ labels_list,
+ label_weights_list,
+ bbox_cls_targets_list,
+ bbox_cls_weights_list,
+ bbox_reg_targets_list,
+ bbox_reg_weights_list,
+ num_total_samples=num_total_samples)
+ return dict(
+ loss_cls=losses_cls,
+ loss_bbox_cls=losses_bbox_cls,
+ loss_bbox_reg=losses_bbox_reg)
+
+ @force_fp32(apply_to=('cls_scores', 'bbox_preds'))
+ def get_bboxes(self,
+ cls_scores,
+ bbox_preds,
+ img_metas,
+ cfg=None,
+ rescale=False):
+ assert len(cls_scores) == len(bbox_preds)
+ num_levels = len(cls_scores)
+ featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
+
+ device = cls_scores[0].device
+ mlvl_anchors = self.get_anchors(
+ featmap_sizes, img_metas, device=device)
+ result_list = []
+ for img_id in range(len(img_metas)):
+ cls_score_list = [
+ cls_scores[i][img_id].detach() for i in range(num_levels)
+ ]
+ bbox_cls_pred_list = [
+ bbox_preds[i][0][img_id].detach() for i in range(num_levels)
+ ]
+ bbox_reg_pred_list = [
+ bbox_preds[i][1][img_id].detach() for i in range(num_levels)
+ ]
+ img_shape = img_metas[img_id]['img_shape']
+ scale_factor = img_metas[img_id]['scale_factor']
+ proposals = self.get_bboxes_single(cls_score_list,
+ bbox_cls_pred_list,
+ bbox_reg_pred_list,
+ mlvl_anchors[img_id], img_shape,
+ scale_factor, cfg, rescale)
+ result_list.append(proposals)
+ return result_list
+
+ def get_bboxes_single(self,
+ cls_scores,
+ bbox_cls_preds,
+ bbox_reg_preds,
+ mlvl_anchors,
+ img_shape,
+ scale_factor,
+ cfg,
+ rescale=False):
+ cfg = self.test_cfg if cfg is None else cfg
+ mlvl_bboxes = []
+ mlvl_scores = []
+ mlvl_confids = []
+ assert len(cls_scores) == len(bbox_cls_preds) == len(
+ bbox_reg_preds) == len(mlvl_anchors)
+ for cls_score, bbox_cls_pred, bbox_reg_pred, anchors in zip(
+ cls_scores, bbox_cls_preds, bbox_reg_preds, mlvl_anchors):
+            assert cls_score.size()[-2:] == bbox_cls_pred.size()[-2:] \
+                == bbox_reg_pred.size()[-2:]
+ cls_score = cls_score.permute(1, 2,
+ 0).reshape(-1, self.cls_out_channels)
+ if self.use_sigmoid_cls:
+ scores = cls_score.sigmoid()
+ else:
+ scores = cls_score.softmax(-1)
+ bbox_cls_pred = bbox_cls_pred.permute(1, 2, 0).reshape(
+ -1, self.side_num * 4)
+ bbox_reg_pred = bbox_reg_pred.permute(1, 2, 0).reshape(
+ -1, self.side_num * 4)
+ nms_pre = cfg.get('nms_pre', -1)
+ if nms_pre > 0 and scores.shape[0] > nms_pre:
+ if self.use_sigmoid_cls:
+ max_scores, _ = scores.max(dim=1)
+ else:
+ max_scores, _ = scores[:, :-1].max(dim=1)
+ _, topk_inds = max_scores.topk(nms_pre)
+ anchors = anchors[topk_inds, :]
+ bbox_cls_pred = bbox_cls_pred[topk_inds, :]
+ bbox_reg_pred = bbox_reg_pred[topk_inds, :]
+ scores = scores[topk_inds, :]
+ bbox_preds = [
+ bbox_cls_pred.contiguous(),
+ bbox_reg_pred.contiguous()
+ ]
+ bboxes, confids = self.bbox_coder.decode(
+ anchors.contiguous(), bbox_preds, max_shape=img_shape)
+ mlvl_bboxes.append(bboxes)
+ mlvl_scores.append(scores)
+ mlvl_confids.append(confids)
+ mlvl_bboxes = torch.cat(mlvl_bboxes)
+ if rescale:
+ mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor)
+ mlvl_scores = torch.cat(mlvl_scores)
+ mlvl_confids = torch.cat(mlvl_confids)
+ if self.use_sigmoid_cls:
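+            # multiclass_nms treats the last column as the background class,
+            # so append a dummy zero-score column for sigmoid classification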
+ padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1)
+ mlvl_scores = torch.cat([mlvl_scores, padding], dim=1)
+ det_bboxes, det_labels = multiclass_nms(
+ mlvl_bboxes,
+ mlvl_scores,
+ cfg.score_thr,
+ cfg.nms,
+ cfg.max_per_img,
+ score_factors=mlvl_confids)
+ return det_bboxes, det_labels
diff --git a/annotator/uniformer/mmdet_null/models/dense_heads/ssd_head.py b/annotator/uniformer/mmdet_null/models/dense_heads/ssd_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..145622b64e3f0b3f7f518fc61a2a01348ebfa4f3
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/dense_heads/ssd_head.py
@@ -0,0 +1,265 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from mmcv.cnn import xavier_init
+from mmcv.runner import force_fp32
+
+from mmdet.core import (build_anchor_generator, build_assigner,
+ build_bbox_coder, build_sampler, multi_apply)
+from ..builder import HEADS
+from ..losses import smooth_l1_loss
+from .anchor_head import AnchorHead
+
+
+# TODO: add loss evaluator for SSD
+@HEADS.register_module()
+class SSDHead(AnchorHead):
+ """SSD head used in https://arxiv.org/abs/1512.02325.
+
+ Args:
+ num_classes (int): Number of categories excluding the background
+ category.
+ in_channels (int): Number of channels in the input feature map.
+ anchor_generator (dict): Config dict for anchor generator
+ bbox_coder (dict): Config of bounding box coder.
+ reg_decoded_bbox (bool): If true, the regression loss would be
+ applied directly on decoded bounding boxes, converting both
+ the predicted boxes and regression targets to absolute
+ coordinates format. Default False. It should be `True` when
+ using `IoULoss`, `GIoULoss`, or `DIoULoss` in the bbox head.
+ train_cfg (dict): Training config of anchor head.
+ test_cfg (dict): Testing config of anchor head.
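+
+    Example:
+        A minimal forward sketch (the default constructor arguments are used
+        and the spatial sizes below are illustrative only, they are not
+        required by the head):
+
+        >>> import torch
+        >>> self = SSDHead()
+        >>> feats = [torch.rand(1, c, s, s) for c, s in
+        ...          zip(self.in_channels, (38, 19, 10, 5, 3, 1))]
+        >>> cls_scores, bbox_preds = self.forward(feats)
+        >>> assert len(cls_scores) == len(self.in_channels)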
+ """ # noqa: W605
+
+ def __init__(self,
+ num_classes=80,
+ in_channels=(512, 1024, 512, 256, 256, 256),
+ anchor_generator=dict(
+ type='SSDAnchorGenerator',
+ scale_major=False,
+ input_size=300,
+ strides=[8, 16, 32, 64, 100, 300],
+ ratios=([2], [2, 3], [2, 3], [2, 3], [2], [2]),
+ basesize_ratio_range=(0.1, 0.9)),
+ bbox_coder=dict(
+ type='DeltaXYWHBBoxCoder',
+ clip_border=True,
+ target_means=[.0, .0, .0, .0],
+ target_stds=[1.0, 1.0, 1.0, 1.0],
+ ),
+ reg_decoded_bbox=False,
+ train_cfg=None,
+ test_cfg=None):
+ super(AnchorHead, self).__init__()
+ self.num_classes = num_classes
+ self.in_channels = in_channels
+ self.cls_out_channels = num_classes + 1 # add background class
+ self.anchor_generator = build_anchor_generator(anchor_generator)
+ num_anchors = self.anchor_generator.num_base_anchors
+
+ reg_convs = []
+ cls_convs = []
+ for i in range(len(in_channels)):
+ reg_convs.append(
+ nn.Conv2d(
+ in_channels[i],
+ num_anchors[i] * 4,
+ kernel_size=3,
+ padding=1))
+ cls_convs.append(
+ nn.Conv2d(
+ in_channels[i],
+ num_anchors[i] * (num_classes + 1),
+ kernel_size=3,
+ padding=1))
+ self.reg_convs = nn.ModuleList(reg_convs)
+ self.cls_convs = nn.ModuleList(cls_convs)
+
+ self.bbox_coder = build_bbox_coder(bbox_coder)
+ self.reg_decoded_bbox = reg_decoded_bbox
+ self.use_sigmoid_cls = False
+ self.cls_focal_loss = False
+ self.train_cfg = train_cfg
+ self.test_cfg = test_cfg
+        # set sampling=False for anchor_target
+ self.sampling = False
+ if self.train_cfg:
+ self.assigner = build_assigner(self.train_cfg.assigner)
+ # SSD sampling=False so use PseudoSampler
+ sampler_cfg = dict(type='PseudoSampler')
+ self.sampler = build_sampler(sampler_cfg, context=self)
+ self.fp16_enabled = False
+
+ def init_weights(self):
+ """Initialize weights of the head."""
+ for m in self.modules():
+ if isinstance(m, nn.Conv2d):
+ xavier_init(m, distribution='uniform', bias=0)
+
+ def forward(self, feats):
+ """Forward features from the upstream network.
+
+ Args:
+ feats (tuple[Tensor]): Features from the upstream network, each is
+ a 4D-tensor.
+
+ Returns:
+ tuple:
+                cls_scores (list[Tensor]): Classification scores for all scale
+                    levels, each is a 4D-tensor, the channel number is
+                    num_anchors * (num_classes + 1), with the background
+                    class included.
+                bbox_preds (list[Tensor]): Box energies / deltas for all scale
+                    levels, each is a 4D-tensor, the channel number is
+                    num_anchors * 4.
+ """
+ cls_scores = []
+ bbox_preds = []
+ for feat, reg_conv, cls_conv in zip(feats, self.reg_convs,
+ self.cls_convs):
+ cls_scores.append(cls_conv(feat))
+ bbox_preds.append(reg_conv(feat))
+ return cls_scores, bbox_preds
+
+ def loss_single(self, cls_score, bbox_pred, anchor, labels, label_weights,
+ bbox_targets, bbox_weights, num_total_samples):
+ """Compute loss of a single image.
+
+ Args:
+            cls_score (Tensor): Box scores for each image with shape
+                (num_total_anchors, num_classes).
+            bbox_pred (Tensor): Box energies / deltas for each image with
+                shape (num_total_anchors, 4).
+            anchor (Tensor): Box reference for each scale level with shape
+                (num_total_anchors, 4).
+ labels (Tensor): Labels of each anchors with shape
+ (num_total_anchors,).
+ label_weights (Tensor): Label weights of each anchor with shape
+ (num_total_anchors,)
+            bbox_targets (Tensor): BBox regression targets of each anchor with
+                shape (num_total_anchors, 4).
+ bbox_weights (Tensor): BBox regression loss weights of each anchor
+ with shape (num_total_anchors, 4).
+            num_total_samples (int): If sampling, the number of total samples
+                is equal to the number of total anchors; otherwise, it is the
+                number of positive anchors.
+
+ Returns:
+            tuple[Tensor, Tensor]: The classification loss and the regression
+                loss of a single image.
+ """
+
+ loss_cls_all = F.cross_entropy(
+ cls_score, labels, reduction='none') * label_weights
+ # FG cat_id: [0, num_classes -1], BG cat_id: num_classes
+ pos_inds = ((labels >= 0) &
+ (labels < self.num_classes)).nonzero().reshape(-1)
+ neg_inds = (labels == self.num_classes).nonzero().view(-1)
+
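+        # online hard negative mining: keep at most neg_pos_ratio negatives
+        # per positive, choosing those with the largest classification loss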
+ num_pos_samples = pos_inds.size(0)
+ num_neg_samples = self.train_cfg.neg_pos_ratio * num_pos_samples
+ if num_neg_samples > neg_inds.size(0):
+ num_neg_samples = neg_inds.size(0)
+ topk_loss_cls_neg, _ = loss_cls_all[neg_inds].topk(num_neg_samples)
+ loss_cls_pos = loss_cls_all[pos_inds].sum()
+ loss_cls_neg = topk_loss_cls_neg.sum()
+ loss_cls = (loss_cls_pos + loss_cls_neg) / num_total_samples
+
+ if self.reg_decoded_bbox:
+ # When the regression loss (e.g. `IouLoss`, `GIouLoss`)
+ # is applied directly on the decoded bounding boxes, it
+ # decodes the already encoded coordinates to absolute format.
+ bbox_pred = self.bbox_coder.decode(anchor, bbox_pred)
+
+ loss_bbox = smooth_l1_loss(
+ bbox_pred,
+ bbox_targets,
+ bbox_weights,
+ beta=self.train_cfg.smoothl1_beta,
+ avg_factor=num_total_samples)
+ return loss_cls[None], loss_bbox
+
+ @force_fp32(apply_to=('cls_scores', 'bbox_preds'))
+ def loss(self,
+ cls_scores,
+ bbox_preds,
+ gt_bboxes,
+ gt_labels,
+ img_metas,
+ gt_bboxes_ignore=None):
+ """Compute losses of the head.
+
+ Args:
+ cls_scores (list[Tensor]): Box scores for each scale level
+ Has shape (N, num_anchors * num_classes, H, W)
+ bbox_preds (list[Tensor]): Box energies / deltas for each scale
+ level with shape (N, num_anchors * 4, H, W)
+            gt_bboxes (list[Tensor]): Each item is the ground truth boxes of
+                one image in [tl_x, tl_y, br_x, br_y] format.
+            gt_labels (list[Tensor]): Class indices corresponding to each box.
+            img_metas (list[dict]): Meta information of each image, e.g.,
+                image size, scaling factor, etc.
+            gt_bboxes_ignore (None | list[Tensor]): Specifies which bounding
+                boxes can be ignored when computing the loss.
+
+ Returns:
+ dict[str, Tensor]: A dictionary of loss components.
+ """
+ featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
+ assert len(featmap_sizes) == self.anchor_generator.num_levels
+
+ device = cls_scores[0].device
+
+ anchor_list, valid_flag_list = self.get_anchors(
+ featmap_sizes, img_metas, device=device)
+ cls_reg_targets = self.get_targets(
+ anchor_list,
+ valid_flag_list,
+ gt_bboxes,
+ img_metas,
+ gt_bboxes_ignore_list=gt_bboxes_ignore,
+ gt_labels_list=gt_labels,
+ label_channels=1,
+ unmap_outputs=False)
+ if cls_reg_targets is None:
+ return None
+ (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,
+ num_total_pos, num_total_neg) = cls_reg_targets
+
+ num_images = len(img_metas)
+ all_cls_scores = torch.cat([
+ s.permute(0, 2, 3, 1).reshape(
+ num_images, -1, self.cls_out_channels) for s in cls_scores
+ ], 1)
+ all_labels = torch.cat(labels_list, -1).view(num_images, -1)
+ all_label_weights = torch.cat(label_weights_list,
+ -1).view(num_images, -1)
+ all_bbox_preds = torch.cat([
+ b.permute(0, 2, 3, 1).reshape(num_images, -1, 4)
+ for b in bbox_preds
+ ], -2)
+ all_bbox_targets = torch.cat(bbox_targets_list,
+ -2).view(num_images, -1, 4)
+ all_bbox_weights = torch.cat(bbox_weights_list,
+ -2).view(num_images, -1, 4)
+
+ # concat all level anchors to a single tensor
+ all_anchors = []
+ for i in range(num_images):
+ all_anchors.append(torch.cat(anchor_list[i]))
+
+ # check NaN and Inf
+ assert torch.isfinite(all_cls_scores).all().item(), \
+ 'classification scores become infinite or NaN!'
+ assert torch.isfinite(all_bbox_preds).all().item(), \
+            'bbox predictions become infinite or NaN!'
+
+ losses_cls, losses_bbox = multi_apply(
+ self.loss_single,
+ all_cls_scores,
+ all_bbox_preds,
+ all_anchors,
+ all_labels,
+ all_label_weights,
+ all_bbox_targets,
+ all_bbox_weights,
+ num_total_samples=num_total_pos)
+ return dict(loss_cls=losses_cls, loss_bbox=losses_bbox)
diff --git a/annotator/uniformer/mmdet_null/models/dense_heads/transformer_head.py b/annotator/uniformer/mmdet_null/models/dense_heads/transformer_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..820fd069fcca295f6102f0d27366158a8c640249
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/dense_heads/transformer_head.py
@@ -0,0 +1,654 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from mmcv.cnn import Conv2d, Linear, build_activation_layer
+from mmcv.runner import force_fp32
+
+from mmdet.core import (bbox_cxcywh_to_xyxy, bbox_xyxy_to_cxcywh,
+ build_assigner, build_sampler, multi_apply,
+ reduce_mean)
+from mmdet.models.utils import (FFN, build_positional_encoding,
+ build_transformer)
+from ..builder import HEADS, build_loss
+from .anchor_free_head import AnchorFreeHead
+
+
+@HEADS.register_module()
+class TransformerHead(AnchorFreeHead):
+ """Implements the DETR transformer head.
+
+    See `End-to-End Object Detection with Transformers
+    <https://arxiv.org/abs/2005.12872>`_ for details.
+
+ Args:
+ num_classes (int): Number of categories excluding the background.
+ in_channels (int): Number of channels in the input feature map.
+ num_fcs (int, optional): Number of fully-connected layers used in
+ `FFN`, which is then used for the regression head. Default 2.
+ transformer (dict, optional): Config for transformer.
+ positional_encoding (dict, optional): Config for position encoding.
+ loss_cls (dict, optional): Config of the classification loss.
+ Default `CrossEntropyLoss`.
+ loss_bbox (dict, optional): Config of the regression loss.
+ Default `L1Loss`.
+ loss_iou (dict, optional): Config of the regression iou loss.
+ Default `GIoULoss`.
+        train_cfg (dict, optional): Training config of transformer head.
+ test_cfg (dict, optional): Testing config of transformer head.
+
+ Example:
+ >>> import torch
+ >>> self = TransformerHead(80, 2048)
+ >>> x = torch.rand(1, 2048, 32, 32)
+ >>> mask = torch.ones(1, 32, 32).to(x.dtype)
+ >>> mask[:, :16, :15] = 0
+ >>> all_cls_scores, all_bbox_preds = self(x, mask)
+ """
+
+ def __init__(self,
+ num_classes,
+ in_channels,
+ num_fcs=2,
+ transformer=dict(
+ type='Transformer',
+ embed_dims=256,
+ num_heads=8,
+ num_encoder_layers=6,
+ num_decoder_layers=6,
+ feedforward_channels=2048,
+ dropout=0.1,
+ act_cfg=dict(type='ReLU', inplace=True),
+ norm_cfg=dict(type='LN'),
+ num_fcs=2,
+ pre_norm=False,
+ return_intermediate_dec=True),
+ positional_encoding=dict(
+ type='SinePositionalEncoding',
+ num_feats=128,
+ normalize=True),
+ loss_cls=dict(
+ type='CrossEntropyLoss',
+ bg_cls_weight=0.1,
+ use_sigmoid=False,
+ loss_weight=1.0,
+ class_weight=1.0),
+ loss_bbox=dict(type='L1Loss', loss_weight=5.0),
+ loss_iou=dict(type='GIoULoss', loss_weight=2.0),
+ train_cfg=dict(
+ assigner=dict(
+ type='HungarianAssigner',
+ cls_cost=dict(type='ClassificationCost', weight=1.),
+ reg_cost=dict(type='BBoxL1Cost', weight=5.0),
+ iou_cost=dict(
+ type='IoUCost', iou_mode='giou', weight=2.0))),
+ test_cfg=dict(max_per_img=100),
+ **kwargs):
+ # NOTE here use `AnchorFreeHead` instead of `TransformerHead`,
+ # since it brings inconvenience when the initialization of
+ # `AnchorFreeHead` is called.
+ super(AnchorFreeHead, self).__init__()
+ use_sigmoid_cls = loss_cls.get('use_sigmoid', False)
+ assert not use_sigmoid_cls, 'setting use_sigmoid_cls as True is ' \
+ 'not supported in DETR, since background is needed for the ' \
+ 'matching process.'
+ assert 'embed_dims' in transformer \
+ and 'num_feats' in positional_encoding
+ num_feats = positional_encoding['num_feats']
+ embed_dims = transformer['embed_dims']
+ assert num_feats * 2 == embed_dims, 'embed_dims should' \
+ f' be exactly 2 times of num_feats. Found {embed_dims}' \
+ f' and {num_feats}.'
+ assert test_cfg is not None and 'max_per_img' in test_cfg
+
+ class_weight = loss_cls.get('class_weight', None)
+ if class_weight is not None:
+ assert isinstance(class_weight, float), 'Expected ' \
+ 'class_weight to have type float. Found ' \
+ f'{type(class_weight)}.'
+            # NOTE following the official DETR repo, bg_cls_weight means
+ # relative classification weight of the no-object class.
+ bg_cls_weight = loss_cls.get('bg_cls_weight', class_weight)
+ assert isinstance(bg_cls_weight, float), 'Expected ' \
+ 'bg_cls_weight to have type float. Found ' \
+ f'{type(bg_cls_weight)}.'
+ class_weight = torch.ones(num_classes + 1) * class_weight
+            # set background class as the last index
+ class_weight[num_classes] = bg_cls_weight
+ loss_cls.update({'class_weight': class_weight})
+ if 'bg_cls_weight' in loss_cls:
+ loss_cls.pop('bg_cls_weight')
+ self.bg_cls_weight = bg_cls_weight
+
+ if train_cfg:
+ assert 'assigner' in train_cfg, 'assigner should be provided '\
+ 'when train_cfg is set.'
+ assigner = train_cfg['assigner']
+ assert loss_cls['loss_weight'] == assigner['cls_cost']['weight'], \
+                'The classification weight for loss and matcher should be ' \
+ 'exactly the same.'
+ assert loss_bbox['loss_weight'] == assigner['reg_cost'][
+ 'weight'], 'The regression L1 weight for loss and matcher ' \
+ 'should be exactly the same.'
+ assert loss_iou['loss_weight'] == assigner['iou_cost']['weight'], \
+                'The regression iou weight for loss and matcher should be ' \
+ 'exactly the same.'
+ self.assigner = build_assigner(assigner)
+ # DETR sampling=False, so use PseudoSampler
+ sampler_cfg = dict(type='PseudoSampler')
+ self.sampler = build_sampler(sampler_cfg, context=self)
+ self.num_classes = num_classes
+ self.cls_out_channels = num_classes + 1
+ self.in_channels = in_channels
+ self.num_fcs = num_fcs
+ self.train_cfg = train_cfg
+ self.test_cfg = test_cfg
+ self.use_sigmoid_cls = use_sigmoid_cls
+ self.embed_dims = embed_dims
+ self.num_query = test_cfg['max_per_img']
+ self.fp16_enabled = False
+ self.loss_cls = build_loss(loss_cls)
+ self.loss_bbox = build_loss(loss_bbox)
+ self.loss_iou = build_loss(loss_iou)
+ self.act_cfg = transformer.get('act_cfg',
+ dict(type='ReLU', inplace=True))
+ self.activate = build_activation_layer(self.act_cfg)
+ self.positional_encoding = build_positional_encoding(
+ positional_encoding)
+ self.transformer = build_transformer(transformer)
+ self._init_layers()
+
+ def _init_layers(self):
+ """Initialize layers of the transformer head."""
+ self.input_proj = Conv2d(
+ self.in_channels, self.embed_dims, kernel_size=1)
+ self.fc_cls = Linear(self.embed_dims, self.cls_out_channels)
+ self.reg_ffn = FFN(
+ self.embed_dims,
+ self.embed_dims,
+ self.num_fcs,
+ self.act_cfg,
+ dropout=0.0,
+ add_residual=False)
+ self.fc_reg = Linear(self.embed_dims, 4)
+ self.query_embedding = nn.Embedding(self.num_query, self.embed_dims)
+
+ def init_weights(self, distribution='uniform'):
+ """Initialize weights of the transformer head."""
+ # The initialization for transformer is important
+ self.transformer.init_weights()
+
+ def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
+ missing_keys, unexpected_keys, error_msgs):
+ """load checkpoints."""
+ # NOTE here use `AnchorFreeHead` instead of `TransformerHead`,
+ # since `AnchorFreeHead._load_from_state_dict` should not be
+ # called here. Invoking the default `Module._load_from_state_dict`
+ # is enough.
+ super(AnchorFreeHead,
+ self)._load_from_state_dict(state_dict, prefix, local_metadata,
+ strict, missing_keys,
+ unexpected_keys, error_msgs)
+
+ def forward(self, feats, img_metas):
+ """Forward function.
+
+ Args:
+ feats (tuple[Tensor]): Features from the upstream network, each is
+ a 4D-tensor.
+ img_metas (list[dict]): List of image information.
+
+ Returns:
+ tuple[list[Tensor], list[Tensor]]: Outputs for all scale levels.
+
+ - all_cls_scores_list (list[Tensor]): Classification scores \
+ for each scale level. Each is a 4D-tensor with shape \
+ [nb_dec, bs, num_query, cls_out_channels]. Note \
+                `cls_out_channels` should include background.
+ - all_bbox_preds_list (list[Tensor]): Sigmoid regression \
+ outputs for each scale level. Each is a 4D-tensor with \
+ normalized coordinate format (cx, cy, w, h) and shape \
+ [nb_dec, bs, num_query, 4].
+ """
+ num_levels = len(feats)
+ img_metas_list = [img_metas for _ in range(num_levels)]
+ return multi_apply(self.forward_single, feats, img_metas_list)
+
+ def forward_single(self, x, img_metas):
+        """Forward function for a single feature level.
+
+ Args:
+ x (Tensor): Input feature from backbone's single stage, shape
+ [bs, c, h, w].
+ img_metas (list[dict]): List of image information.
+
+ Returns:
+ all_cls_scores (Tensor): Outputs from the classification head,
+ shape [nb_dec, bs, num_query, cls_out_channels]. Note
+                cls_out_channels should include background.
+ all_bbox_preds (Tensor): Sigmoid outputs from the regression
+ head with normalized coordinate format (cx, cy, w, h).
+ Shape [nb_dec, bs, num_query, 4].
+ """
+        # construct binary masks which are used for the transformer.
+        # NOTE following the official DETR repo, non-zero values represent
+        # ignored positions, while zero values mean valid positions.
+ batch_size = x.size(0)
+ input_img_h, input_img_w = img_metas[0]['batch_input_shape']
+ masks = x.new_ones((batch_size, input_img_h, input_img_w))
+ for img_id in range(batch_size):
+ img_h, img_w, _ = img_metas[img_id]['img_shape']
+ masks[img_id, :img_h, :img_w] = 0
+
+ x = self.input_proj(x)
+ # interpolate masks to have the same spatial shape with x
+ masks = F.interpolate(
+ masks.unsqueeze(1), size=x.shape[-2:]).to(torch.bool).squeeze(1)
+ # position encoding
+ pos_embed = self.positional_encoding(masks) # [bs, embed_dim, h, w]
+ # outs_dec: [nb_dec, bs, num_query, embed_dim]
+ outs_dec, _ = self.transformer(x, masks, self.query_embedding.weight,
+ pos_embed)
+
+ all_cls_scores = self.fc_cls(outs_dec)
+ all_bbox_preds = self.fc_reg(self.activate(
+ self.reg_ffn(outs_dec))).sigmoid()
+ return all_cls_scores, all_bbox_preds
+
+ @force_fp32(apply_to=('all_cls_scores_list', 'all_bbox_preds_list'))
+ def loss(self,
+ all_cls_scores_list,
+ all_bbox_preds_list,
+ gt_bboxes_list,
+ gt_labels_list,
+ img_metas,
+ gt_bboxes_ignore=None):
+        """Loss function.
+
+ Only outputs from the last feature level are used for computing
+ losses by default.
+
+ Args:
+ all_cls_scores_list (list[Tensor]): Classification outputs
+ for each feature level. Each is a 4D-tensor with shape
+ [nb_dec, bs, num_query, cls_out_channels].
+ all_bbox_preds_list (list[Tensor]): Sigmoid regression
+ outputs for each feature level. Each is a 4D-tensor with
+ normalized coordinate format (cx, cy, w, h) and shape
+ [nb_dec, bs, num_query, 4].
+ gt_bboxes_list (list[Tensor]): Ground truth bboxes for each image
+ with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
+ gt_labels_list (list[Tensor]): Ground truth class indices for each
+ image with shape (num_gts, ).
+ img_metas (list[dict]): List of image meta information.
+ gt_bboxes_ignore (list[Tensor], optional): Bounding boxes
+ which can be ignored for each image. Default None.
+
+ Returns:
+ dict[str, Tensor]: A dictionary of loss components.
+ """
+        # NOTE by default only the outputs from the last feature scale are used.
+ all_cls_scores = all_cls_scores_list[-1]
+ all_bbox_preds = all_bbox_preds_list[-1]
+ assert gt_bboxes_ignore is None, \
+ 'Only supports for gt_bboxes_ignore setting to None.'
+
+ num_dec_layers = len(all_cls_scores)
+ all_gt_bboxes_list = [gt_bboxes_list for _ in range(num_dec_layers)]
+ all_gt_labels_list = [gt_labels_list for _ in range(num_dec_layers)]
+ all_gt_bboxes_ignore_list = [
+ gt_bboxes_ignore for _ in range(num_dec_layers)
+ ]
+ img_metas_list = [img_metas for _ in range(num_dec_layers)]
+
+ losses_cls, losses_bbox, losses_iou = multi_apply(
+ self.loss_single, all_cls_scores, all_bbox_preds,
+ all_gt_bboxes_list, all_gt_labels_list, img_metas_list,
+ all_gt_bboxes_ignore_list)
+
+ loss_dict = dict()
+ # loss from the last decoder layer
+ loss_dict['loss_cls'] = losses_cls[-1]
+ loss_dict['loss_bbox'] = losses_bbox[-1]
+ loss_dict['loss_iou'] = losses_iou[-1]
+ # loss from other decoder layers
+ num_dec_layer = 0
+ for loss_cls_i, loss_bbox_i, loss_iou_i in zip(losses_cls[:-1],
+ losses_bbox[:-1],
+ losses_iou[:-1]):
+ loss_dict[f'd{num_dec_layer}.loss_cls'] = loss_cls_i
+ loss_dict[f'd{num_dec_layer}.loss_bbox'] = loss_bbox_i
+ loss_dict[f'd{num_dec_layer}.loss_iou'] = loss_iou_i
+ num_dec_layer += 1
+ return loss_dict
+
+ def loss_single(self,
+ cls_scores,
+ bbox_preds,
+ gt_bboxes_list,
+ gt_labels_list,
+ img_metas,
+ gt_bboxes_ignore_list=None):
+        """Loss function for outputs from a single decoder layer of a single
+ feature level.
+
+ Args:
+ cls_scores (Tensor): Box score logits from a single decoder layer
+ for all images. Shape [bs, num_query, cls_out_channels].
+ bbox_preds (Tensor): Sigmoid outputs from a single decoder layer
+ for all images, with normalized coordinate (cx, cy, w, h) and
+ shape [bs, num_query, 4].
+ gt_bboxes_list (list[Tensor]): Ground truth bboxes for each image
+ with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
+ gt_labels_list (list[Tensor]): Ground truth class indices for each
+ image with shape (num_gts, ).
+ img_metas (list[dict]): List of image meta information.
+ gt_bboxes_ignore_list (list[Tensor], optional): Bounding
+ boxes which can be ignored for each image. Default None.
+
+ Returns:
+            tuple[Tensor]: Loss components (loss_cls, loss_bbox, loss_iou) for
+                outputs from a single decoder layer.
+ """
+ num_imgs = cls_scores.size(0)
+ cls_scores_list = [cls_scores[i] for i in range(num_imgs)]
+ bbox_preds_list = [bbox_preds[i] for i in range(num_imgs)]
+ cls_reg_targets = self.get_targets(cls_scores_list, bbox_preds_list,
+ gt_bboxes_list, gt_labels_list,
+ img_metas, gt_bboxes_ignore_list)
+ (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,
+ num_total_pos, num_total_neg) = cls_reg_targets
+ labels = torch.cat(labels_list, 0)
+ label_weights = torch.cat(label_weights_list, 0)
+ bbox_targets = torch.cat(bbox_targets_list, 0)
+ bbox_weights = torch.cat(bbox_weights_list, 0)
+
+ # classification loss
+ cls_scores = cls_scores.reshape(-1, self.cls_out_channels)
+ # construct weighted avg_factor to match with the official DETR repo
+ cls_avg_factor = num_total_pos * 1.0 + \
+ num_total_neg * self.bg_cls_weight
+ loss_cls = self.loss_cls(
+ cls_scores, labels, label_weights, avg_factor=cls_avg_factor)
+
+        # Compute the average number of gt boxes across all GPUs for
+        # normalization purposes
+ num_total_pos = loss_cls.new_tensor([num_total_pos])
+ num_total_pos = torch.clamp(reduce_mean(num_total_pos), min=1).item()
+
+        # construct factors used to rescale bboxes
+ factors = []
+ for img_meta, bbox_pred in zip(img_metas, bbox_preds):
+ img_h, img_w, _ = img_meta['img_shape']
+ factor = bbox_pred.new_tensor([img_w, img_h, img_w,
+ img_h]).unsqueeze(0).repeat(
+ bbox_pred.size(0), 1)
+ factors.append(factor)
+ factors = torch.cat(factors, 0)
+
+        # DETR regresses the relative position of boxes (cxcywh) in the image,
+        # thus the learning target is normalized by the image size. So here
+        # we need to re-scale them for calculating the IoU loss.
+ bbox_preds = bbox_preds.reshape(-1, 4)
+ bboxes = bbox_cxcywh_to_xyxy(bbox_preds) * factors
+ bboxes_gt = bbox_cxcywh_to_xyxy(bbox_targets) * factors
+
+        # regression IoU loss, GIoU loss by default
+ loss_iou = self.loss_iou(
+ bboxes, bboxes_gt, bbox_weights, avg_factor=num_total_pos)
+
+ # regression L1 loss
+ loss_bbox = self.loss_bbox(
+ bbox_preds, bbox_targets, bbox_weights, avg_factor=num_total_pos)
+ return loss_cls, loss_bbox, loss_iou
+
+ def get_targets(self,
+ cls_scores_list,
+ bbox_preds_list,
+ gt_bboxes_list,
+ gt_labels_list,
+ img_metas,
+ gt_bboxes_ignore_list=None):
+        """Compute regression and classification targets for a batch of images.
+
+ Outputs from a single decoder layer of a single feature level are used.
+
+ Args:
+ cls_scores_list (list[Tensor]): Box score logits from a single
+ decoder layer for each image with shape [num_query,
+ cls_out_channels].
+ bbox_preds_list (list[Tensor]): Sigmoid outputs from a single
+ decoder layer for each image, with normalized coordinate
+ (cx, cy, w, h) and shape [num_query, 4].
+ gt_bboxes_list (list[Tensor]): Ground truth bboxes for each image
+ with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
+ gt_labels_list (list[Tensor]): Ground truth class indices for each
+ image with shape (num_gts, ).
+ img_metas (list[dict]): List of image meta information.
+ gt_bboxes_ignore_list (list[Tensor], optional): Bounding
+ boxes which can be ignored for each image. Default None.
+
+ Returns:
+ tuple: a tuple containing the following targets.
+
+ - labels_list (list[Tensor]): Labels for all images.
+ - label_weights_list (list[Tensor]): Label weights for all \
+ images.
+ - bbox_targets_list (list[Tensor]): BBox targets for all \
+ images.
+ - bbox_weights_list (list[Tensor]): BBox weights for all \
+ images.
+ - num_total_pos (int): Number of positive samples in all \
+ images.
+ - num_total_neg (int): Number of negative samples in all \
+ images.
+ """
+ assert gt_bboxes_ignore_list is None, \
+ 'Only supports for gt_bboxes_ignore setting to None.'
+ num_imgs = len(cls_scores_list)
+ gt_bboxes_ignore_list = [
+ gt_bboxes_ignore_list for _ in range(num_imgs)
+ ]
+
+ (labels_list, label_weights_list, bbox_targets_list,
+ bbox_weights_list, pos_inds_list, neg_inds_list) = multi_apply(
+ self._get_target_single, cls_scores_list, bbox_preds_list,
+ gt_bboxes_list, gt_labels_list, img_metas, gt_bboxes_ignore_list)
+ num_total_pos = sum((inds.numel() for inds in pos_inds_list))
+ num_total_neg = sum((inds.numel() for inds in neg_inds_list))
+ return (labels_list, label_weights_list, bbox_targets_list,
+ bbox_weights_list, num_total_pos, num_total_neg)
+
+ def _get_target_single(self,
+ cls_score,
+ bbox_pred,
+ gt_bboxes,
+ gt_labels,
+ img_meta,
+ gt_bboxes_ignore=None):
+        """Compute regression and classification targets for one image.
+
+ Outputs from a single decoder layer of a single feature level are used.
+
+ Args:
+ cls_score (Tensor): Box score logits from a single decoder layer
+ for one image. Shape [num_query, cls_out_channels].
+ bbox_pred (Tensor): Sigmoid outputs from a single decoder layer
+ for one image, with normalized coordinate (cx, cy, w, h) and
+ shape [num_query, 4].
+ gt_bboxes (Tensor): Ground truth bboxes for one image with
+ shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
+ gt_labels (Tensor): Ground truth class indices for one image
+ with shape (num_gts, ).
+ img_meta (dict): Meta information for one image.
+ gt_bboxes_ignore (Tensor, optional): Bounding boxes
+ which can be ignored. Default None.
+
+ Returns:
+ tuple[Tensor]: a tuple containing the following for one image.
+
+ - labels (Tensor): Labels of each image.
+                - label_weights (Tensor): Label weights of each image.
+ - bbox_targets (Tensor): BBox targets of each image.
+ - bbox_weights (Tensor): BBox weights of each image.
+ - pos_inds (Tensor): Sampled positive indices for each image.
+ - neg_inds (Tensor): Sampled negative indices for each image.
+ """
+
+ num_bboxes = bbox_pred.size(0)
+ # assigner and sampler
+ assign_result = self.assigner.assign(bbox_pred, cls_score, gt_bboxes,
+ gt_labels, img_meta,
+ gt_bboxes_ignore)
+ sampling_result = self.sampler.sample(assign_result, bbox_pred,
+ gt_bboxes)
+ pos_inds = sampling_result.pos_inds
+ neg_inds = sampling_result.neg_inds
+
+ # label targets
+ labels = gt_bboxes.new_full((num_bboxes, ),
+ self.num_classes,
+ dtype=torch.long)
+ labels[pos_inds] = gt_labels[sampling_result.pos_assigned_gt_inds]
+ label_weights = gt_bboxes.new_ones(num_bboxes)
+
+ # bbox targets
+ bbox_targets = torch.zeros_like(bbox_pred)
+ bbox_weights = torch.zeros_like(bbox_pred)
+ bbox_weights[pos_inds] = 1.0
+ img_h, img_w, _ = img_meta['img_shape']
+
+        # DETR regresses the relative position of boxes (cxcywh) in the image.
+        # Thus the learning target should be normalized by the image size, and
+        # the box format should be converted from the default x1y1x2y2 to
+        # cxcywh.
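+        # e.g. a gt box (0, 0, 400, 300) in x1y1x2y2 on an 800x600 (w x h)
+        # image becomes (0.25, 0.25, 0.5, 0.5) in normalized (cx, cy, w, h)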
+ factor = bbox_pred.new_tensor([img_w, img_h, img_w,
+ img_h]).unsqueeze(0)
+ pos_gt_bboxes_normalized = sampling_result.pos_gt_bboxes / factor
+ pos_gt_bboxes_targets = bbox_xyxy_to_cxcywh(pos_gt_bboxes_normalized)
+ bbox_targets[pos_inds] = pos_gt_bboxes_targets
+ return (labels, label_weights, bbox_targets, bbox_weights, pos_inds,
+ neg_inds)
+
+ # over-write because img_metas are needed as inputs for bbox_head.
+ def forward_train(self,
+ x,
+ img_metas,
+ gt_bboxes,
+ gt_labels=None,
+ gt_bboxes_ignore=None,
+ proposal_cfg=None,
+ **kwargs):
+ """Forward function for training mode.
+
+ Args:
+ x (list[Tensor]): Features from backbone.
+ img_metas (list[dict]): Meta information of each image, e.g.,
+ image size, scaling factor, etc.
+ gt_bboxes (Tensor): Ground truth bboxes of the image,
+ shape (num_gts, 4).
+ gt_labels (Tensor): Ground truth labels of each box,
+ shape (num_gts,).
+ gt_bboxes_ignore (Tensor): Ground truth bboxes to be
+ ignored, shape (num_ignored_gts, 4).
+ proposal_cfg (mmcv.Config): Test / postprocessing configuration,
+ if None, test_cfg would be used.
+
+ Returns:
+ dict[str, Tensor]: A dictionary of loss components.
+ """
+ assert proposal_cfg is None, '"proposal_cfg" must be None'
+ outs = self(x, img_metas)
+ if gt_labels is None:
+ loss_inputs = outs + (gt_bboxes, img_metas)
+ else:
+ loss_inputs = outs + (gt_bboxes, gt_labels, img_metas)
+ losses = self.loss(*loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore)
+ return losses
+
+ @force_fp32(apply_to=('all_cls_scores_list', 'all_bbox_preds_list'))
+ def get_bboxes(self,
+ all_cls_scores_list,
+ all_bbox_preds_list,
+ img_metas,
+ rescale=False):
+ """Transform network outputs for a batch into bbox predictions.
+
+ Args:
+ all_cls_scores_list (list[Tensor]): Classification outputs
+ for each feature level. Each is a 4D-tensor with shape
+ [nb_dec, bs, num_query, cls_out_channels].
+ all_bbox_preds_list (list[Tensor]): Sigmoid regression
+ outputs for each feature level. Each is a 4D-tensor with
+ normalized coordinate format (cx, cy, w, h) and shape
+ [nb_dec, bs, num_query, 4].
+ img_metas (list[dict]): Meta information of each image.
+ rescale (bool, optional): If True, return boxes in original
+ image space. Default False.
+
+ Returns:
+ list[list[Tensor, Tensor]]: Each item in result_list is 2-tuple. \
+ The first item is an (n, 5) tensor, where the first 4 columns \
+ are bounding box positions (tl_x, tl_y, br_x, br_y) and the \
+ 5-th column is a score between 0 and 1. The second item is a \
+ (n,) tensor where each item is the predicted class label of \
+ the corresponding box.
+ """
+        # NOTE by default only outputs from the last feature level are used,
+        # and only the outputs from the last decoder layer are used.
+ cls_scores = all_cls_scores_list[-1][-1]
+ bbox_preds = all_bbox_preds_list[-1][-1]
+
+ result_list = []
+ for img_id in range(len(img_metas)):
+ cls_score = cls_scores[img_id]
+ bbox_pred = bbox_preds[img_id]
+ img_shape = img_metas[img_id]['img_shape']
+ scale_factor = img_metas[img_id]['scale_factor']
+ proposals = self._get_bboxes_single(cls_score, bbox_pred,
+ img_shape, scale_factor,
+ rescale)
+ result_list.append(proposals)
+ return result_list
+
+ def _get_bboxes_single(self,
+ cls_score,
+ bbox_pred,
+ img_shape,
+ scale_factor,
+ rescale=False):
+ """Transform outputs from the last decoder layer into bbox predictions
+ for each image.
+
+ Args:
+ cls_score (Tensor): Box score logits from the last decoder layer
+ for each image. Shape [num_query, cls_out_channels].
+ bbox_pred (Tensor): Sigmoid outputs from the last decoder layer
+ for each image, with coordinate format (cx, cy, w, h) and
+ shape [num_query, 4].
+ img_shape (tuple[int]): Shape of input image, (height, width, 3).
+            scale_factor (ndarray, optional): Scale factor of the image
+                arranged as (w_scale, h_scale, w_scale, h_scale).
+ rescale (bool, optional): If True, return boxes in original image
+ space. Default False.
+
+ Returns:
+ tuple[Tensor]: Results of detected bboxes and labels.
+
+ - det_bboxes: Predicted bboxes with shape [num_query, 5], \
+ where the first 4 columns are bounding box positions \
+ (tl_x, tl_y, br_x, br_y) and the 5-th column are scores \
+ between 0 and 1.
+ - det_labels: Predicted labels of the corresponding box with \
+ shape [num_query].
+ """
+ assert len(cls_score) == len(bbox_pred)
+ # exclude background
+ scores, det_labels = F.softmax(cls_score, dim=-1)[..., :-1].max(-1)
+ det_bboxes = bbox_cxcywh_to_xyxy(bbox_pred)
+ det_bboxes[:, 0::2] = det_bboxes[:, 0::2] * img_shape[1]
+ det_bboxes[:, 1::2] = det_bboxes[:, 1::2] * img_shape[0]
+ det_bboxes[:, 0::2].clamp_(min=0, max=img_shape[1])
+ det_bboxes[:, 1::2].clamp_(min=0, max=img_shape[0])
+ if rescale:
+ det_bboxes /= det_bboxes.new_tensor(scale_factor)
+ det_bboxes = torch.cat((det_bboxes, scores.unsqueeze(1)), -1)
+ return det_bboxes, det_labels
diff --git a/annotator/uniformer/mmdet_null/models/dense_heads/vfnet_head.py b/annotator/uniformer/mmdet_null/models/dense_heads/vfnet_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..7243bb62893839568ec51928d88a5ad40b02a66c
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/dense_heads/vfnet_head.py
@@ -0,0 +1,794 @@
+import numpy as np
+import torch
+import torch.nn as nn
+from mmcv.cnn import ConvModule, Scale, bias_init_with_prob, normal_init
+from mmcv.ops import DeformConv2d
+from mmcv.runner import force_fp32
+
+from mmdet.core import (bbox2distance, bbox_overlaps, build_anchor_generator,
+ build_assigner, build_sampler, distance2bbox,
+ multi_apply, multiclass_nms, reduce_mean)
+from ..builder import HEADS, build_loss
+from .atss_head import ATSSHead
+from .fcos_head import FCOSHead
+
+INF = 1e8
+
+
+@HEADS.register_module()
+class VFNetHead(ATSSHead, FCOSHead):
+    """Head of `VarifocalNet (VFNet): An IoU-aware Dense Object
+    Detector <https://arxiv.org/abs/2008.13367>`_.
+
+ The VFNet predicts IoU-aware classification scores which mix the
+ object presence confidence and object localization accuracy as the
+ detection score. It is built on the FCOS architecture and uses ATSS
+ for defining positive/negative training examples. The VFNet is trained
+    with Varifocal Loss and employs star-shaped deformable convolution to
+ extract features for a bbox.
+
+ Args:
+ num_classes (int): Number of categories excluding the background
+ category.
+ in_channels (int): Number of channels in the input feature map.
+ regress_ranges (tuple[tuple[int, int]]): Regress range of multiple
+ level points.
+ center_sampling (bool): If true, use center sampling. Default: False.
+ center_sample_radius (float): Radius of center sampling. Default: 1.5.
+ sync_num_pos (bool): If true, synchronize the number of positive
+ examples across GPUs. Default: True
+ gradient_mul (float): The multiplier to gradients from bbox refinement
+ and recognition. Default: 0.1.
+ bbox_norm_type (str): The bbox normalization type, 'reg_denom' or
+ 'stride'. Default: reg_denom
+ loss_cls_fl (dict): Config of focal loss.
+ use_vfl (bool): If true, use varifocal loss for training.
+ Default: True.
+ loss_cls (dict): Config of varifocal loss.
+ loss_bbox (dict): Config of localization loss, GIoU Loss.
+        loss_bbox_refine (dict): Config of localization refinement loss,
+            GIoU Loss.
+ norm_cfg (dict): dictionary to construct and config norm layer.
+ Default: norm_cfg=dict(type='GN', num_groups=32,
+ requires_grad=True).
+ use_atss (bool): If true, use ATSS to define positive/negative
+ examples. Default: True.
+ anchor_generator (dict): Config of anchor generator for ATSS.
+
+ Example:
+ >>> self = VFNetHead(11, 7)
+ >>> feats = [torch.rand(1, 7, s, s) for s in [4, 8, 16, 32, 64]]
+        >>> cls_score, bbox_pred, bbox_pred_refine = self.forward(feats)
+ >>> assert len(cls_score) == len(self.scales)
+ """ # noqa: E501
+
+ def __init__(self,
+ num_classes,
+ in_channels,
+ regress_ranges=((-1, 64), (64, 128), (128, 256), (256, 512),
+ (512, INF)),
+ center_sampling=False,
+ center_sample_radius=1.5,
+ sync_num_pos=True,
+ gradient_mul=0.1,
+ bbox_norm_type='reg_denom',
+ loss_cls_fl=dict(
+ type='FocalLoss',
+ use_sigmoid=True,
+ gamma=2.0,
+ alpha=0.25,
+ loss_weight=1.0),
+ use_vfl=True,
+ loss_cls=dict(
+ type='VarifocalLoss',
+ use_sigmoid=True,
+ alpha=0.75,
+ gamma=2.0,
+ iou_weighted=True,
+ loss_weight=1.0),
+ loss_bbox=dict(type='GIoULoss', loss_weight=1.5),
+ loss_bbox_refine=dict(type='GIoULoss', loss_weight=2.0),
+ norm_cfg=dict(type='GN', num_groups=32, requires_grad=True),
+ use_atss=True,
+ anchor_generator=dict(
+ type='AnchorGenerator',
+ ratios=[1.0],
+ octave_base_scale=8,
+ scales_per_octave=1,
+ center_offset=0.0,
+ strides=[8, 16, 32, 64, 128]),
+ **kwargs):
+ # dcn base offsets, adapted from reppoints_head.py
+ self.num_dconv_points = 9
+ self.dcn_kernel = int(np.sqrt(self.num_dconv_points))
+ self.dcn_pad = int((self.dcn_kernel - 1) / 2)
+ dcn_base = np.arange(-self.dcn_pad,
+ self.dcn_pad + 1).astype(np.float64)
+ dcn_base_y = np.repeat(dcn_base, self.dcn_kernel)
+ dcn_base_x = np.tile(dcn_base, self.dcn_kernel)
+ dcn_base_offset = np.stack([dcn_base_y, dcn_base_x], axis=1).reshape(
+ (-1))
+ self.dcn_base_offset = torch.tensor(dcn_base_offset).view(1, -1, 1, 1)
+
+ super(FCOSHead, self).__init__(
+ num_classes, in_channels, norm_cfg=norm_cfg, **kwargs)
+ self.regress_ranges = regress_ranges
+ self.reg_denoms = [
+ regress_range[-1] for regress_range in regress_ranges
+ ]
+ self.reg_denoms[-1] = self.reg_denoms[-2] * 2
+ self.center_sampling = center_sampling
+ self.center_sample_radius = center_sample_radius
+ self.sync_num_pos = sync_num_pos
+ self.bbox_norm_type = bbox_norm_type
+ self.gradient_mul = gradient_mul
+ self.use_vfl = use_vfl
+ if self.use_vfl:
+ self.loss_cls = build_loss(loss_cls)
+ else:
+ self.loss_cls = build_loss(loss_cls_fl)
+ self.loss_bbox = build_loss(loss_bbox)
+ self.loss_bbox_refine = build_loss(loss_bbox_refine)
+
+ # for getting ATSS targets
+ self.use_atss = use_atss
+ self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False)
+ self.anchor_generator = build_anchor_generator(anchor_generator)
+ self.anchor_center_offset = anchor_generator['center_offset']
+ self.num_anchors = self.anchor_generator.num_base_anchors[0]
+ self.sampling = False
+ if self.train_cfg:
+ self.assigner = build_assigner(self.train_cfg.assigner)
+ sampler_cfg = dict(type='PseudoSampler')
+ self.sampler = build_sampler(sampler_cfg, context=self)
+
+ def _init_layers(self):
+ """Initialize layers of the head."""
+ super(FCOSHead, self)._init_cls_convs()
+ super(FCOSHead, self)._init_reg_convs()
+ self.relu = nn.ReLU(inplace=True)
+ self.vfnet_reg_conv = ConvModule(
+ self.feat_channels,
+ self.feat_channels,
+ 3,
+ stride=1,
+ padding=1,
+ conv_cfg=self.conv_cfg,
+ norm_cfg=self.norm_cfg,
+ bias=self.conv_bias)
+ self.vfnet_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1)
+ self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides])
+
+ self.vfnet_reg_refine_dconv = DeformConv2d(
+ self.feat_channels,
+ self.feat_channels,
+ self.dcn_kernel,
+ 1,
+ padding=self.dcn_pad)
+ self.vfnet_reg_refine = nn.Conv2d(self.feat_channels, 4, 3, padding=1)
+ self.scales_refine = nn.ModuleList([Scale(1.0) for _ in self.strides])
+
+ self.vfnet_cls_dconv = DeformConv2d(
+ self.feat_channels,
+ self.feat_channels,
+ self.dcn_kernel,
+ 1,
+ padding=self.dcn_pad)
+ self.vfnet_cls = nn.Conv2d(
+ self.feat_channels, self.cls_out_channels, 3, padding=1)
+
+ def init_weights(self):
+ """Initialize weights of the head."""
+ for m in self.cls_convs:
+ if isinstance(m.conv, nn.Conv2d):
+ normal_init(m.conv, std=0.01)
+ for m in self.reg_convs:
+ if isinstance(m.conv, nn.Conv2d):
+ normal_init(m.conv, std=0.01)
+ normal_init(self.vfnet_reg_conv.conv, std=0.01)
+ normal_init(self.vfnet_reg, std=0.01)
+ normal_init(self.vfnet_reg_refine_dconv, std=0.01)
+ normal_init(self.vfnet_reg_refine, std=0.01)
+ normal_init(self.vfnet_cls_dconv, std=0.01)
+ bias_cls = bias_init_with_prob(0.01)
+ normal_init(self.vfnet_cls, std=0.01, bias=bias_cls)
+
+ def forward(self, feats):
+ """Forward features from the upstream network.
+
+ Args:
+ feats (tuple[Tensor]): Features from the upstream network, each is
+ a 4D-tensor.
+
+ Returns:
+ tuple:
+ cls_scores (list[Tensor]): Box iou-aware scores for each scale
+ level, each is a 4D-tensor, the channel number is
+ num_points * num_classes.
+ bbox_preds (list[Tensor]): Box offsets for each
+ scale level, each is a 4D-tensor, the channel number is
+ num_points * 4.
+ bbox_preds_refine (list[Tensor]): Refined Box offsets for
+ each scale level, each is a 4D-tensor, the channel
+ number is num_points * 4.
+ """
+ return multi_apply(self.forward_single, feats, self.scales,
+ self.scales_refine, self.strides, self.reg_denoms)
+
+ def forward_single(self, x, scale, scale_refine, stride, reg_denom):
+ """Forward features of a single scale level.
+
+ Args:
+ x (Tensor): FPN feature maps of the specified stride.
+ scale (:obj: `mmcv.cnn.Scale`): Learnable scale module to resize
+ the bbox prediction.
+ scale_refine (:obj: `mmcv.cnn.Scale`): Learnable scale module to
+ resize the refined bbox prediction.
+ stride (int): The corresponding stride for feature maps,
+ used to normalize the bbox prediction when
+ bbox_norm_type = 'stride'.
+ reg_denom (int): The corresponding regression range for feature
+ maps, only used to normalize the bbox prediction when
+ bbox_norm_type = 'reg_denom'.
+
+ Returns:
+ tuple: iou-aware cls scores for each box, bbox predictions and
+ refined bbox predictions of input feature maps.
+ """
+ cls_feat = x
+ reg_feat = x
+
+ for cls_layer in self.cls_convs:
+ cls_feat = cls_layer(cls_feat)
+
+ for reg_layer in self.reg_convs:
+ reg_feat = reg_layer(reg_feat)
+
+ # predict the bbox_pred of different level
+ reg_feat_init = self.vfnet_reg_conv(reg_feat)
+ if self.bbox_norm_type == 'reg_denom':
+ bbox_pred = scale(
+ self.vfnet_reg(reg_feat_init)).float().exp() * reg_denom
+ elif self.bbox_norm_type == 'stride':
+ bbox_pred = scale(
+ self.vfnet_reg(reg_feat_init)).float().exp() * stride
+ else:
+ raise NotImplementedError
+
+ # compute star deformable convolution offsets
+ # converting dcn_offset to reg_feat.dtype thus VFNet can be
+ # trained with FP16
+ dcn_offset = self.star_dcn_offset(bbox_pred, self.gradient_mul,
+ stride).to(reg_feat.dtype)
+
+ # refine the bbox_pred
+ reg_feat = self.relu(self.vfnet_reg_refine_dconv(reg_feat, dcn_offset))
+ bbox_pred_refine = scale_refine(
+ self.vfnet_reg_refine(reg_feat)).float().exp()
+ bbox_pred_refine = bbox_pred_refine * bbox_pred.detach()
+
+ # predict the iou-aware cls score
+ cls_feat = self.relu(self.vfnet_cls_dconv(cls_feat, dcn_offset))
+ cls_score = self.vfnet_cls(cls_feat)
+
+ return cls_score, bbox_pred, bbox_pred_refine
+
+ def star_dcn_offset(self, bbox_pred, gradient_mul, stride):
+ """Compute the star deformable conv offsets.
+
+ Args:
+ bbox_pred (Tensor): Predicted bbox distance offsets (l, r, t, b).
+ gradient_mul (float): Gradient multiplier.
+ stride (int): The corresponding stride for feature maps,
+ used to project the bbox onto the feature map.
+
+ Returns:
+ dcn_offsets (Tensor): The offsets for deformable convolution.
+ """
+ dcn_base_offset = self.dcn_base_offset.type_as(bbox_pred)
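+        # blend detached and live predictions so that only a gradient_mul
+        # fraction of the gradient flows back through bbox_pred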
+ bbox_pred_grad_mul = (1 - gradient_mul) * bbox_pred.detach() + \
+ gradient_mul * bbox_pred
+ # map to the feature map scale
+ bbox_pred_grad_mul = bbox_pred_grad_mul / stride
+ N, C, H, W = bbox_pred.size()
+
+ x1 = bbox_pred_grad_mul[:, 0, :, :]
+ y1 = bbox_pred_grad_mul[:, 1, :, :]
+ x2 = bbox_pred_grad_mul[:, 2, :, :]
+ y2 = bbox_pred_grad_mul[:, 3, :, :]
+ bbox_pred_grad_mul_offset = bbox_pred.new_zeros(
+ N, 2 * self.num_dconv_points, H, W)
+ bbox_pred_grad_mul_offset[:, 0, :, :] = -1.0 * y1 # -y1
+ bbox_pred_grad_mul_offset[:, 1, :, :] = -1.0 * x1 # -x1
+ bbox_pred_grad_mul_offset[:, 2, :, :] = -1.0 * y1 # -y1
+ bbox_pred_grad_mul_offset[:, 4, :, :] = -1.0 * y1 # -y1
+ bbox_pred_grad_mul_offset[:, 5, :, :] = x2 # x2
+ bbox_pred_grad_mul_offset[:, 7, :, :] = -1.0 * x1 # -x1
+ bbox_pred_grad_mul_offset[:, 11, :, :] = x2 # x2
+ bbox_pred_grad_mul_offset[:, 12, :, :] = y2 # y2
+ bbox_pred_grad_mul_offset[:, 13, :, :] = -1.0 * x1 # -x1
+ bbox_pred_grad_mul_offset[:, 14, :, :] = y2 # y2
+ bbox_pred_grad_mul_offset[:, 16, :, :] = y2 # y2
+ bbox_pred_grad_mul_offset[:, 17, :, :] = x2 # x2
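+        # the channels left at zero (3, 6, 8, 9, 10, 15) keep that coordinate
+        # at the point itself, giving the 9-point star: four corners, four
+        # side midpoints and the center of the predicted box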
+ dcn_offset = bbox_pred_grad_mul_offset - dcn_base_offset
+
+ return dcn_offset
+
+ @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'bbox_preds_refine'))
+ def loss(self,
+ cls_scores,
+ bbox_preds,
+ bbox_preds_refine,
+ gt_bboxes,
+ gt_labels,
+ img_metas,
+ gt_bboxes_ignore=None):
+ """Compute loss of the head.
+
+ Args:
+ cls_scores (list[Tensor]): Box iou-aware scores for each scale
+ level, each is a 4D-tensor, the channel number is
+ num_points * num_classes.
+ bbox_preds (list[Tensor]): Box offsets for each
+ scale level, each is a 4D-tensor, the channel number is
+ num_points * 4.
+ bbox_preds_refine (list[Tensor]): Refined Box offsets for
+ each scale level, each is a 4D-tensor, the channel
+ number is num_points * 4.
+ gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
+ shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
+            gt_labels (list[Tensor]): Class indices corresponding to each box.
+            img_metas (list[dict]): Meta information of each image, e.g.,
+                image size, scaling factor, etc.
+            gt_bboxes_ignore (None | list[Tensor]): Specifies which bounding
+                boxes can be ignored when computing the loss.
+ Default: None.
+
+ Returns:
+ dict[str, Tensor]: A dictionary of loss components.
+ """
+ assert len(cls_scores) == len(bbox_preds) == len(bbox_preds_refine)
+ featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
+ all_level_points = self.get_points(featmap_sizes, bbox_preds[0].dtype,
+ bbox_preds[0].device)
+ labels, label_weights, bbox_targets, bbox_weights = self.get_targets(
+ cls_scores, all_level_points, gt_bboxes, gt_labels, img_metas,
+ gt_bboxes_ignore)
+
+ num_imgs = cls_scores[0].size(0)
+ # flatten cls_scores, bbox_preds and bbox_preds_refine
+ flatten_cls_scores = [
+ cls_score.permute(0, 2, 3,
+ 1).reshape(-1,
+ self.cls_out_channels).contiguous()
+ for cls_score in cls_scores
+ ]
+ flatten_bbox_preds = [
+ bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4).contiguous()
+ for bbox_pred in bbox_preds
+ ]
+ flatten_bbox_preds_refine = [
+ bbox_pred_refine.permute(0, 2, 3, 1).reshape(-1, 4).contiguous()
+ for bbox_pred_refine in bbox_preds_refine
+ ]
+ flatten_cls_scores = torch.cat(flatten_cls_scores)
+ flatten_bbox_preds = torch.cat(flatten_bbox_preds)
+ flatten_bbox_preds_refine = torch.cat(flatten_bbox_preds_refine)
+ flatten_labels = torch.cat(labels)
+ flatten_bbox_targets = torch.cat(bbox_targets)
+ # repeat points to align with bbox_preds
+ flatten_points = torch.cat(
+ [points.repeat(num_imgs, 1) for points in all_level_points])
+
+ # FG cat_id: [0, num_classes - 1], BG cat_id: num_classes
+ bg_class_ind = self.num_classes
+ pos_inds = torch.where(
+ ((flatten_labels >= 0) & (flatten_labels < bg_class_ind)) > 0)[0]
+ num_pos = len(pos_inds)
+
+ pos_bbox_preds = flatten_bbox_preds[pos_inds]
+ pos_bbox_preds_refine = flatten_bbox_preds_refine[pos_inds]
+ pos_labels = flatten_labels[pos_inds]
+
+ # sync num_pos across all gpus
+ if self.sync_num_pos:
+ num_pos_avg_per_gpu = reduce_mean(
+ pos_inds.new_tensor(num_pos).float()).item()
+ num_pos_avg_per_gpu = max(num_pos_avg_per_gpu, 1.0)
+ else:
+ num_pos_avg_per_gpu = num_pos
+
+ if num_pos > 0:
+ pos_bbox_targets = flatten_bbox_targets[pos_inds]
+ pos_points = flatten_points[pos_inds]
+
+ pos_decoded_bbox_preds = distance2bbox(pos_points, pos_bbox_preds)
+ pos_decoded_target_preds = distance2bbox(pos_points,
+ pos_bbox_targets)
+ iou_targets_ini = bbox_overlaps(
+ pos_decoded_bbox_preds,
+ pos_decoded_target_preds.detach(),
+ is_aligned=True).clamp(min=1e-6)
+ bbox_weights_ini = iou_targets_ini.clone().detach()
+ iou_targets_ini_avg_per_gpu = reduce_mean(
+ bbox_weights_ini.sum()).item()
+ bbox_avg_factor_ini = max(iou_targets_ini_avg_per_gpu, 1.0)
+ loss_bbox = self.loss_bbox(
+ pos_decoded_bbox_preds,
+ pos_decoded_target_preds.detach(),
+ weight=bbox_weights_ini,
+ avg_factor=bbox_avg_factor_ini)
+
+ pos_decoded_bbox_preds_refine = \
+ distance2bbox(pos_points, pos_bbox_preds_refine)
+ iou_targets_rf = bbox_overlaps(
+ pos_decoded_bbox_preds_refine,
+ pos_decoded_target_preds.detach(),
+ is_aligned=True).clamp(min=1e-6)
+ bbox_weights_rf = iou_targets_rf.clone().detach()
+ iou_targets_rf_avg_per_gpu = reduce_mean(
+ bbox_weights_rf.sum()).item()
+ bbox_avg_factor_rf = max(iou_targets_rf_avg_per_gpu, 1.0)
+ loss_bbox_refine = self.loss_bbox_refine(
+ pos_decoded_bbox_preds_refine,
+ pos_decoded_target_preds.detach(),
+ weight=bbox_weights_rf,
+ avg_factor=bbox_avg_factor_rf)
+
+ # build IoU-aware cls_score targets
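+            # for varifocal loss, each positive is supervised with its IoU to
+            # the matched gt box while all other targets stay zero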
+ if self.use_vfl:
+ pos_ious = iou_targets_rf.clone().detach()
+ cls_iou_targets = torch.zeros_like(flatten_cls_scores)
+ cls_iou_targets[pos_inds, pos_labels] = pos_ious
+ else:
+ loss_bbox = pos_bbox_preds.sum() * 0
+ loss_bbox_refine = pos_bbox_preds_refine.sum() * 0
+ if self.use_vfl:
+ cls_iou_targets = torch.zeros_like(flatten_cls_scores)
+
+ if self.use_vfl:
+ loss_cls = self.loss_cls(
+ flatten_cls_scores,
+ cls_iou_targets,
+ avg_factor=num_pos_avg_per_gpu)
+ else:
+ loss_cls = self.loss_cls(
+ flatten_cls_scores,
+ flatten_labels,
+ weight=label_weights,
+ avg_factor=num_pos_avg_per_gpu)
+
+ return dict(
+ loss_cls=loss_cls,
+ loss_bbox=loss_bbox,
+ loss_bbox_rf=loss_bbox_refine)
+
+ @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'bbox_preds_refine'))
+ def get_bboxes(self,
+ cls_scores,
+ bbox_preds,
+ bbox_preds_refine,
+ img_metas,
+ cfg=None,
+                   rescale=False,
+ with_nms=True):
+ """Transform network outputs for a batch into bbox predictions.
+
+ Args:
+ cls_scores (list[Tensor]): Box iou-aware scores for each scale
+ level with shape (N, num_points * num_classes, H, W).
+ bbox_preds (list[Tensor]): Box offsets for each scale
+ level with shape (N, num_points * 4, H, W).
+ bbox_preds_refine (list[Tensor]): Refined Box offsets for
+ each scale level with shape (N, num_points * 4, H, W).
+ img_metas (list[dict]): Meta information of each image, e.g.,
+ image size, scaling factor, etc.
+ cfg (mmcv.Config): Test / postprocessing configuration,
+ if None, test_cfg would be used. Default: None.
+ rescale (bool): If True, return boxes in original image space.
+ Default: False.
+ with_nms (bool): If True, do nms before returning boxes.
+ Default: True.
+
+ Returns:
+            list[tuple[Tensor, Tensor]]: Each item in result_list is a 2-tuple.
+ The first item is an (n, 5) tensor, where the first 4 columns
+ are bounding box positions (tl_x, tl_y, br_x, br_y) and the
+ 5-th column is a score between 0 and 1. The second item is a
+ (n,) tensor where each item is the predicted class label of
+ the corresponding box.
+ """
+ assert len(cls_scores) == len(bbox_preds) == len(bbox_preds_refine)
+ num_levels = len(cls_scores)
+
+ featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
+ mlvl_points = self.get_points(featmap_sizes, bbox_preds[0].dtype,
+ bbox_preds[0].device)
+ result_list = []
+ for img_id in range(len(img_metas)):
+ cls_score_list = [
+ cls_scores[i][img_id].detach() for i in range(num_levels)
+ ]
+ bbox_pred_list = [
+ bbox_preds_refine[i][img_id].detach()
+ for i in range(num_levels)
+ ]
+ img_shape = img_metas[img_id]['img_shape']
+ scale_factor = img_metas[img_id]['scale_factor']
+ det_bboxes = self._get_bboxes_single(cls_score_list,
+ bbox_pred_list, mlvl_points,
+ img_shape, scale_factor, cfg,
+ rescale, with_nms)
+ result_list.append(det_bboxes)
+ return result_list
+
+ def _get_bboxes_single(self,
+ cls_scores,
+ bbox_preds,
+ mlvl_points,
+ img_shape,
+ scale_factor,
+ cfg,
+ rescale=False,
+ with_nms=True):
+ """Transform outputs for a single batch item into bbox predictions.
+
+ Args:
+ cls_scores (list[Tensor]): Box iou-aware scores for a single scale
+ level with shape (num_points * num_classes, H, W).
+ bbox_preds (list[Tensor]): Box offsets for a single scale
+ level with shape (num_points * 4, H, W).
+            mlvl_points (list[Tensor]): Points for each scale level, each
+                with shape (num_points, 2).
+ img_shape (tuple[int]): Shape of the input image,
+ (height, width, 3).
+            scale_factor (ndarray): Scale factor of the image arranged as
+ (w_scale, h_scale, w_scale, h_scale).
+ cfg (mmcv.Config | None): Test / postprocessing configuration,
+ if None, test_cfg would be used.
+ rescale (bool): If True, return boxes in original image space.
+ Default: False.
+ with_nms (bool): If True, do nms before returning boxes.
+ Default: True.
+
+ Returns:
+ tuple(Tensor):
+ det_bboxes (Tensor): BBox predictions in shape (n, 5), where
+ the first 4 columns are bounding box positions
+ (tl_x, tl_y, br_x, br_y) and the 5-th column is a score
+ between 0 and 1.
+ det_labels (Tensor): A (n,) tensor where each item is the
+ predicted class label of the corresponding box.
+ """
+ cfg = self.test_cfg if cfg is None else cfg
+ assert len(cls_scores) == len(bbox_preds) == len(mlvl_points)
+ mlvl_bboxes = []
+ mlvl_scores = []
+ for cls_score, bbox_pred, points in zip(cls_scores, bbox_preds,
+ mlvl_points):
+ assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
+ scores = cls_score.permute(1, 2, 0).reshape(
+ -1, self.cls_out_channels).contiguous().sigmoid()
+ bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4).contiguous()
+
+ nms_pre = cfg.get('nms_pre', -1)
+ if 0 < nms_pre < scores.shape[0]:
+ max_scores, _ = scores.max(dim=1)
+ _, topk_inds = max_scores.topk(nms_pre)
+ points = points[topk_inds, :]
+ bbox_pred = bbox_pred[topk_inds, :]
+ scores = scores[topk_inds, :]
+ bboxes = distance2bbox(points, bbox_pred, max_shape=img_shape)
+ mlvl_bboxes.append(bboxes)
+ mlvl_scores.append(scores)
+ mlvl_bboxes = torch.cat(mlvl_bboxes)
+ if rescale:
+ mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor)
+ mlvl_scores = torch.cat(mlvl_scores)
+ padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1)
+ # remind that we set FG labels to [0, num_class-1] since mmdet v2.0
+ # BG cat_id: num_class
+ mlvl_scores = torch.cat([mlvl_scores, padding], dim=1)
+ if with_nms:
+ det_bboxes, det_labels = multiclass_nms(mlvl_bboxes, mlvl_scores,
+ cfg.score_thr, cfg.nms,
+ cfg.max_per_img)
+ return det_bboxes, det_labels
+ else:
+ return mlvl_bboxes, mlvl_scores
+
+ def _get_points_single(self,
+ featmap_size,
+ stride,
+ dtype,
+ device,
+ flatten=False):
+ """Get points according to feature map sizes."""
+ h, w = featmap_size
+ x_range = torch.arange(
+ 0, w * stride, stride, dtype=dtype, device=device)
+ y_range = torch.arange(
+ 0, h * stride, stride, dtype=dtype, device=device)
+ y, x = torch.meshgrid(y_range, x_range)
+ # to be compatible with anchor points in ATSS
+ if self.use_atss:
+ points = torch.stack(
+ (x.reshape(-1), y.reshape(-1)), dim=-1) + \
+ stride * self.anchor_center_offset
+ else:
+ points = torch.stack(
+ (x.reshape(-1), y.reshape(-1)), dim=-1) + stride // 2
+ return points
+
+ def get_targets(self, cls_scores, mlvl_points, gt_bboxes, gt_labels,
+ img_metas, gt_bboxes_ignore):
+ """A wrapper for computing ATSS and FCOS targets for points in multiple
+ images.
+
+ Args:
+ cls_scores (list[Tensor]): Box iou-aware scores for each scale
+ level with shape (N, num_points * num_classes, H, W).
+ mlvl_points (list[Tensor]): Points of each fpn level, each has
+ shape (num_points, 2).
+ gt_bboxes (list[Tensor]): Ground truth bboxes of each image,
+ each has shape (num_gt, 4).
+ gt_labels (list[Tensor]): Ground truth labels of each box,
+ each has shape (num_gt,).
+ img_metas (list[dict]): Meta information of each image, e.g.,
+ image size, scaling factor, etc.
+ gt_bboxes_ignore (None | Tensor): Ground truth bboxes to be
+ ignored, shape (num_ignored_gts, 4).
+
+ Returns:
+ tuple:
+ labels_list (list[Tensor]): Labels of each level.
+ label_weights (Tensor/None): Label weights of all levels.
+ bbox_targets_list (list[Tensor]): Regression targets of each
+ level, (l, t, r, b).
+ bbox_weights (Tensor/None): Bbox weights of all levels.
+ """
+ if self.use_atss:
+ return self.get_atss_targets(cls_scores, mlvl_points, gt_bboxes,
+ gt_labels, img_metas,
+ gt_bboxes_ignore)
+ else:
+ self.norm_on_bbox = False
+ return self.get_fcos_targets(mlvl_points, gt_bboxes, gt_labels)
+
+ def _get_target_single(self, *args, **kwargs):
+ """Avoid ambiguity in multiple inheritance."""
+ if self.use_atss:
+ return ATSSHead._get_target_single(self, *args, **kwargs)
+ else:
+ return FCOSHead._get_target_single(self, *args, **kwargs)
+
+ def get_fcos_targets(self, points, gt_bboxes_list, gt_labels_list):
+ """Compute FCOS regression and classification targets for points in
+ multiple images.
+
+ Args:
+ points (list[Tensor]): Points of each fpn level, each has shape
+ (num_points, 2).
+ gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image,
+ each has shape (num_gt, 4).
+ gt_labels_list (list[Tensor]): Ground truth labels of each box,
+ each has shape (num_gt,).
+
+ Returns:
+ tuple:
+ labels (list[Tensor]): Labels of each level.
+ label_weights: None, to be compatible with ATSS targets.
+ bbox_targets (list[Tensor]): BBox targets of each level.
+ bbox_weights: None, to be compatible with ATSS targets.
+ """
+ labels, bbox_targets = FCOSHead.get_targets(self, points,
+ gt_bboxes_list,
+ gt_labels_list)
+ label_weights = None
+ bbox_weights = None
+ return labels, label_weights, bbox_targets, bbox_weights
+
+ def get_atss_targets(self,
+ cls_scores,
+ mlvl_points,
+ gt_bboxes,
+ gt_labels,
+ img_metas,
+ gt_bboxes_ignore=None):
+ """A wrapper for computing ATSS targets for points in multiple images.
+
+ Args:
+ cls_scores (list[Tensor]): Box iou-aware scores for each scale
+ level with shape (N, num_points * num_classes, H, W).
+ mlvl_points (list[Tensor]): Points of each fpn level, each has
+ shape (num_points, 2).
+ gt_bboxes (list[Tensor]): Ground truth bboxes of each image,
+ each has shape (num_gt, 4).
+ gt_labels (list[Tensor]): Ground truth labels of each box,
+ each has shape (num_gt,).
+ img_metas (list[dict]): Meta information of each image, e.g.,
+ image size, scaling factor, etc.
+ gt_bboxes_ignore (None | Tensor): Ground truth bboxes to be
+ ignored, shape (num_ignored_gts, 4). Default: None.
+
+ Returns:
+ tuple:
+ labels_list (list[Tensor]): Labels of each level.
+ label_weights (Tensor): Label weights of all levels.
+ bbox_targets_list (list[Tensor]): Regression targets of each
+ level, (l, t, r, b).
+ bbox_weights (Tensor): Bbox weights of all levels.
+ """
+ featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
+ assert len(featmap_sizes) == self.anchor_generator.num_levels
+
+ device = cls_scores[0].device
+ anchor_list, valid_flag_list = self.get_anchors(
+ featmap_sizes, img_metas, device=device)
+ label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1
+
+ cls_reg_targets = ATSSHead.get_targets(
+ self,
+ anchor_list,
+ valid_flag_list,
+ gt_bboxes,
+ img_metas,
+ gt_bboxes_ignore_list=gt_bboxes_ignore,
+ gt_labels_list=gt_labels,
+ label_channels=label_channels,
+ unmap_outputs=True)
+ if cls_reg_targets is None:
+ return None
+
+ (anchor_list, labels_list, label_weights_list, bbox_targets_list,
+ bbox_weights_list, num_total_pos, num_total_neg) = cls_reg_targets
+
+ bbox_targets_list = [
+ bbox_targets.reshape(-1, 4) for bbox_targets in bbox_targets_list
+ ]
+
+ num_imgs = len(img_metas)
+ # transform bbox_targets (x1, y1, x2, y2) into (l, t, r, b) format
+ bbox_targets_list = self.transform_bbox_targets(
+ bbox_targets_list, mlvl_points, num_imgs)
+
+ labels_list = [labels.reshape(-1) for labels in labels_list]
+ label_weights_list = [
+ label_weights.reshape(-1) for label_weights in label_weights_list
+ ]
+ bbox_weights_list = [
+ bbox_weights.reshape(-1) for bbox_weights in bbox_weights_list
+ ]
+ label_weights = torch.cat(label_weights_list)
+ bbox_weights = torch.cat(bbox_weights_list)
+ return labels_list, label_weights, bbox_targets_list, bbox_weights
+
+ def transform_bbox_targets(self, decoded_bboxes, mlvl_points, num_imgs):
+ """Transform bbox_targets (x1, y1, x2, y2) into (l, t, r, b) format.
+
+ Args:
+ decoded_bboxes (list[Tensor]): Regression targets of each level,
+ in the form of (x1, y1, x2, y2).
+ mlvl_points (list[Tensor]): Points of each fpn level, each has
+ shape (num_points, 2).
+ num_imgs (int): the number of images in a batch.
+
+ Returns:
+ bbox_targets (list[Tensor]): Regression targets of each level in
+ the form of (l, t, r, b).
+ """
+ # TODO: Re-implemented in Class PointCoder
+ assert len(decoded_bboxes) == len(mlvl_points)
+ num_levels = len(decoded_bboxes)
+ mlvl_points = [points.repeat(num_imgs, 1) for points in mlvl_points]
+ bbox_targets = []
+ for i in range(num_levels):
+ bbox_target = bbox2distance(mlvl_points[i], decoded_bboxes[i])
+ bbox_targets.append(bbox_target)
+
+ return bbox_targets
+
+ def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
+ missing_keys, unexpected_keys, error_msgs):
+ """Override the method in the parent class to avoid changing para's
+ name."""
+ pass
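A minimal, self-contained sketch of the corner-box <-> (l, t, r, b) conversion that `transform_bbox_targets` performs through `bbox2distance`; the helpers below are illustrative stand-ins written in plain PyTorch, not the mmdet `bbox2distance`/`distance2bbox` APIs.

    import torch

    def corners_to_distances(points, bboxes):
        # points: (N, 2) center points; bboxes: (N, 4) as (x1, y1, x2, y2)
        left = points[:, 0] - bboxes[:, 0]
        top = points[:, 1] - bboxes[:, 1]
        right = bboxes[:, 2] - points[:, 0]
        bottom = bboxes[:, 3] - points[:, 1]
        return torch.stack([left, top, right, bottom], dim=-1)

    def distances_to_corners(points, distances):
        # inverse transform: recover (x1, y1, x2, y2) from (l, t, r, b)
        x1y1 = points - distances[:, :2]
        x2y2 = points + distances[:, 2:]
        return torch.cat([x1y1, x2y2], dim=-1)

    points = torch.tensor([[32.0, 32.0]])
    boxes = torch.tensor([[10.0, 20.0, 50.0, 60.0]])
    ltrb = corners_to_distances(points, boxes)  # tensor([[22., 12., 18., 28.]])
    assert torch.allclose(distances_to_corners(points, ltrb), boxes)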
diff --git a/annotator/uniformer/mmdet_null/models/dense_heads/yolact_head.py b/annotator/uniformer/mmdet_null/models/dense_heads/yolact_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..10d311f94ee99e1bf65ee3e5827f1699c28a23e3
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/dense_heads/yolact_head.py
@@ -0,0 +1,943 @@
+import numpy as np
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from mmcv.cnn import ConvModule, xavier_init
+from mmcv.runner import force_fp32
+
+from mmdet.core import build_sampler, fast_nms, images_to_levels, multi_apply
+from ..builder import HEADS, build_loss
+from .anchor_head import AnchorHead
+
+
+@HEADS.register_module()
+class YOLACTHead(AnchorHead):
+ """YOLACT box head used in https://arxiv.org/abs/1904.02689.
+
+ Note that YOLACT head is a light version of RetinaNet head.
+ Four differences are described as follows:
+
+ 1. YOLACT box head has three-times fewer anchors.
+ 2. YOLACT box head shares the convs for box and cls branches.
+ 3. YOLACT box head uses OHEM instead of Focal loss.
+ 4. YOLACT box head predicts a set of mask coefficients for each box.
+
+ Args:
+ num_classes (int): Number of categories excluding the background
+ category.
+ in_channels (int): Number of channels in the input feature map.
+ anchor_generator (dict): Config dict for anchor generator
+ loss_cls (dict): Config of classification loss.
+ loss_bbox (dict): Config of localization loss.
+ num_head_convs (int): Number of the conv layers shared by
+ box and cls branches.
+ num_protos (int): Number of the mask coefficients.
+ use_ohem (bool): If true, ``loss_single_OHEM`` will be used for
+ cls loss calculation. If false, ``loss_single`` will be used.
+ conv_cfg (dict): Dictionary to construct and config conv layer.
+ norm_cfg (dict): Dictionary to construct and config norm layer.
+ """
+
+ def __init__(self,
+ num_classes,
+ in_channels,
+ anchor_generator=dict(
+ type='AnchorGenerator',
+ octave_base_scale=3,
+ scales_per_octave=1,
+ ratios=[0.5, 1.0, 2.0],
+ strides=[8, 16, 32, 64, 128]),
+ loss_cls=dict(
+ type='CrossEntropyLoss',
+ use_sigmoid=False,
+ reduction='none',
+ loss_weight=1.0),
+ loss_bbox=dict(
+ type='SmoothL1Loss', beta=1.0, loss_weight=1.5),
+ num_head_convs=1,
+ num_protos=32,
+ use_ohem=True,
+ conv_cfg=None,
+ norm_cfg=None,
+ **kwargs):
+ self.num_head_convs = num_head_convs
+ self.num_protos = num_protos
+ self.use_ohem = use_ohem
+ self.conv_cfg = conv_cfg
+ self.norm_cfg = norm_cfg
+ super(YOLACTHead, self).__init__(
+ num_classes,
+ in_channels,
+ loss_cls=loss_cls,
+ loss_bbox=loss_bbox,
+ anchor_generator=anchor_generator,
+ **kwargs)
+ if self.use_ohem:
+ sampler_cfg = dict(type='PseudoSampler')
+ self.sampler = build_sampler(sampler_cfg, context=self)
+ self.sampling = False
+
+ def _init_layers(self):
+ """Initialize layers of the head."""
+ self.relu = nn.ReLU(inplace=True)
+ self.head_convs = nn.ModuleList()
+ for i in range(self.num_head_convs):
+ chn = self.in_channels if i == 0 else self.feat_channels
+ self.head_convs.append(
+ ConvModule(
+ chn,
+ self.feat_channels,
+ 3,
+ stride=1,
+ padding=1,
+ conv_cfg=self.conv_cfg,
+ norm_cfg=self.norm_cfg))
+ self.conv_cls = nn.Conv2d(
+ self.feat_channels,
+ self.num_anchors * self.cls_out_channels,
+ 3,
+ padding=1)
+ self.conv_reg = nn.Conv2d(
+ self.feat_channels, self.num_anchors * 4, 3, padding=1)
+ self.conv_coeff = nn.Conv2d(
+ self.feat_channels,
+ self.num_anchors * self.num_protos,
+ 3,
+ padding=1)
+
+ def init_weights(self):
+ """Initialize weights of the head."""
+ for m in self.head_convs:
+ xavier_init(m.conv, distribution='uniform', bias=0)
+ xavier_init(self.conv_cls, distribution='uniform', bias=0)
+ xavier_init(self.conv_reg, distribution='uniform', bias=0)
+ xavier_init(self.conv_coeff, distribution='uniform', bias=0)
+
+ def forward_single(self, x):
+ """Forward feature of a single scale level.
+
+ Args:
+ x (Tensor): Features of a single scale level.
+
+ Returns:
+ tuple:
+                cls_score (Tensor): Cls scores for a single scale level, \
+                    the number of channels is num_anchors * num_classes.
+                bbox_pred (Tensor): Box energies / deltas for a single scale \
+                    level, the number of channels is num_anchors * 4.
+                coeff_pred (Tensor): Mask coefficients for a single scale \
+                    level, the number of channels is num_anchors * num_protos.
+ """
+ for head_conv in self.head_convs:
+ x = head_conv(x)
+ cls_score = self.conv_cls(x)
+ bbox_pred = self.conv_reg(x)
+ coeff_pred = self.conv_coeff(x).tanh()
+ return cls_score, bbox_pred, coeff_pred
+
+ @force_fp32(apply_to=('cls_scores', 'bbox_preds'))
+ def loss(self,
+ cls_scores,
+ bbox_preds,
+ gt_bboxes,
+ gt_labels,
+ img_metas,
+ gt_bboxes_ignore=None):
+ """A combination of the func:``AnchorHead.loss`` and
+ func:``SSDHead.loss``.
+
+ When ``self.use_ohem == True``, it functions like ``SSDHead.loss``,
+ otherwise, it follows ``AnchorHead.loss``. Besides, it additionally
+ returns ``sampling_results``.
+
+ Args:
+ cls_scores (list[Tensor]): Box scores for each scale level
+ Has shape (N, num_anchors * num_classes, H, W)
+ bbox_preds (list[Tensor]): Box energies / deltas for each scale
+ level with shape (N, num_anchors * 4, H, W)
+ gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
+ shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
+ gt_labels (list[Tensor]): Class indices corresponding to each box
+ img_metas (list[dict]): Meta information of each image, e.g.,
+ image size, scaling factor, etc.
+ gt_bboxes_ignore (None | list[Tensor]): Specify which bounding
+ boxes can be ignored when computing the loss. Default: None
+
+ Returns:
+ tuple:
+ dict[str, Tensor]: A dictionary of loss components.
+ List[:obj:``SamplingResult``]: Sampler results for each image.
+ """
+ featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
+ assert len(featmap_sizes) == self.anchor_generator.num_levels
+
+ device = cls_scores[0].device
+
+ anchor_list, valid_flag_list = self.get_anchors(
+ featmap_sizes, img_metas, device=device)
+ label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1
+ cls_reg_targets = self.get_targets(
+ anchor_list,
+ valid_flag_list,
+ gt_bboxes,
+ img_metas,
+ gt_bboxes_ignore_list=gt_bboxes_ignore,
+ gt_labels_list=gt_labels,
+ label_channels=label_channels,
+ unmap_outputs=not self.use_ohem,
+ return_sampling_results=True)
+ if cls_reg_targets is None:
+ return None
+ (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,
+ num_total_pos, num_total_neg, sampling_results) = cls_reg_targets
+
+ if self.use_ohem:
+ num_images = len(img_metas)
+ all_cls_scores = torch.cat([
+ s.permute(0, 2, 3, 1).reshape(
+ num_images, -1, self.cls_out_channels) for s in cls_scores
+ ], 1)
+ all_labels = torch.cat(labels_list, -1).view(num_images, -1)
+ all_label_weights = torch.cat(label_weights_list,
+ -1).view(num_images, -1)
+ all_bbox_preds = torch.cat([
+ b.permute(0, 2, 3, 1).reshape(num_images, -1, 4)
+ for b in bbox_preds
+ ], -2)
+ all_bbox_targets = torch.cat(bbox_targets_list,
+ -2).view(num_images, -1, 4)
+ all_bbox_weights = torch.cat(bbox_weights_list,
+ -2).view(num_images, -1, 4)
+
+ # concat all level anchors to a single tensor
+ all_anchors = []
+ for i in range(num_images):
+ all_anchors.append(torch.cat(anchor_list[i]))
+
+ # check NaN and Inf
+ assert torch.isfinite(all_cls_scores).all().item(), \
+ 'classification scores become infinite or NaN!'
+ assert torch.isfinite(all_bbox_preds).all().item(), \
+            'bbox predictions become infinite or NaN!'
+
+ losses_cls, losses_bbox = multi_apply(
+ self.loss_single_OHEM,
+ all_cls_scores,
+ all_bbox_preds,
+ all_anchors,
+ all_labels,
+ all_label_weights,
+ all_bbox_targets,
+ all_bbox_weights,
+ num_total_samples=num_total_pos)
+ else:
+ num_total_samples = (
+ num_total_pos +
+ num_total_neg if self.sampling else num_total_pos)
+
+ # anchor number of multi levels
+ num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]
+ # concat all level anchors and flags to a single tensor
+ concat_anchor_list = []
+ for i in range(len(anchor_list)):
+ concat_anchor_list.append(torch.cat(anchor_list[i]))
+ all_anchor_list = images_to_levels(concat_anchor_list,
+ num_level_anchors)
+ losses_cls, losses_bbox = multi_apply(
+ self.loss_single,
+ cls_scores,
+ bbox_preds,
+ all_anchor_list,
+ labels_list,
+ label_weights_list,
+ bbox_targets_list,
+ bbox_weights_list,
+ num_total_samples=num_total_samples)
+
+ return dict(
+ loss_cls=losses_cls, loss_bbox=losses_bbox), sampling_results
+
+ def loss_single_OHEM(self, cls_score, bbox_pred, anchors, labels,
+ label_weights, bbox_targets, bbox_weights,
+ num_total_samples):
+ """"See func:``SSDHead.loss``."""
+ loss_cls_all = self.loss_cls(cls_score, labels, label_weights)
+
+ # FG cat_id: [0, num_classes -1], BG cat_id: num_classes
+ pos_inds = ((labels >= 0) & (labels < self.num_classes)).nonzero(
+ as_tuple=False).reshape(-1)
+ neg_inds = (labels == self.num_classes).nonzero(
+ as_tuple=False).view(-1)
+
+ num_pos_samples = pos_inds.size(0)
+ if num_pos_samples == 0:
+ num_neg_samples = neg_inds.size(0)
+ else:
+ num_neg_samples = self.train_cfg.neg_pos_ratio * num_pos_samples
+ if num_neg_samples > neg_inds.size(0):
+ num_neg_samples = neg_inds.size(0)
+ topk_loss_cls_neg, _ = loss_cls_all[neg_inds].topk(num_neg_samples)
+ loss_cls_pos = loss_cls_all[pos_inds].sum()
+ loss_cls_neg = topk_loss_cls_neg.sum()
+ loss_cls = (loss_cls_pos + loss_cls_neg) / num_total_samples
+ if self.reg_decoded_bbox:
+ # When the regression loss (e.g. `IouLoss`, `GIouLoss`)
+ # is applied directly on the decoded bounding boxes, it
+ # decodes the already encoded coordinates to absolute format.
+ bbox_pred = self.bbox_coder.decode(anchors, bbox_pred)
+ loss_bbox = self.loss_bbox(
+ bbox_pred,
+ bbox_targets,
+ bbox_weights,
+ avg_factor=num_total_samples)
+ return loss_cls[None], loss_bbox
+
+ @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'coeff_preds'))
+ def get_bboxes(self,
+ cls_scores,
+ bbox_preds,
+ coeff_preds,
+ img_metas,
+ cfg=None,
+ rescale=False):
+ """"Similiar to func:``AnchorHead.get_bboxes``, but additionally
+ processes coeff_preds.
+
+ Args:
+ cls_scores (list[Tensor]): Box scores for each scale level
+ with shape (N, num_anchors * num_classes, H, W)
+ bbox_preds (list[Tensor]): Box energies / deltas for each scale
+ level with shape (N, num_anchors * 4, H, W)
+ coeff_preds (list[Tensor]): Mask coefficients for each scale
+ level with shape (N, num_anchors * num_protos, H, W)
+ img_metas (list[dict]): Meta information of each image, e.g.,
+ image size, scaling factor, etc.
+ cfg (mmcv.Config | None): Test / postprocessing configuration,
+ if None, test_cfg would be used
+ rescale (bool): If True, return boxes in original image space.
+ Default: False.
+
+ Returns:
+ list[tuple[Tensor, Tensor, Tensor]]: Each item in result_list is
+ a 3-tuple. The first item is an (n, 5) tensor, where the
+ first 4 columns are bounding box positions
+ (tl_x, tl_y, br_x, br_y) and the 5-th column is a score
+ between 0 and 1. The second item is an (n,) tensor where each
+ item is the predicted class label of the corresponding box.
+ The third item is an (n, num_protos) tensor where each item
+ is the predicted mask coefficients of instance inside the
+ corresponding box.
+ """
+ assert len(cls_scores) == len(bbox_preds)
+ num_levels = len(cls_scores)
+
+ device = cls_scores[0].device
+ featmap_sizes = [cls_scores[i].shape[-2:] for i in range(num_levels)]
+ mlvl_anchors = self.anchor_generator.grid_anchors(
+ featmap_sizes, device=device)
+
+ det_bboxes = []
+ det_labels = []
+ det_coeffs = []
+ for img_id in range(len(img_metas)):
+ cls_score_list = [
+ cls_scores[i][img_id].detach() for i in range(num_levels)
+ ]
+ bbox_pred_list = [
+ bbox_preds[i][img_id].detach() for i in range(num_levels)
+ ]
+ coeff_pred_list = [
+ coeff_preds[i][img_id].detach() for i in range(num_levels)
+ ]
+ img_shape = img_metas[img_id]['img_shape']
+ scale_factor = img_metas[img_id]['scale_factor']
+ bbox_res = self._get_bboxes_single(cls_score_list, bbox_pred_list,
+ coeff_pred_list, mlvl_anchors,
+ img_shape, scale_factor, cfg,
+ rescale)
+ det_bboxes.append(bbox_res[0])
+ det_labels.append(bbox_res[1])
+ det_coeffs.append(bbox_res[2])
+ return det_bboxes, det_labels, det_coeffs
+
+ def _get_bboxes_single(self,
+ cls_score_list,
+ bbox_pred_list,
+ coeff_preds_list,
+ mlvl_anchors,
+ img_shape,
+ scale_factor,
+ cfg,
+ rescale=False):
+ """"Similiar to func:``AnchorHead._get_bboxes_single``, but
+ additionally processes coeff_preds_list and uses fast NMS instead of
+ traditional NMS.
+
+ Args:
+ cls_score_list (list[Tensor]): Box scores for a single scale level
+ Has shape (num_anchors * num_classes, H, W).
+ bbox_pred_list (list[Tensor]): Box energies / deltas for a single
+ scale level with shape (num_anchors * 4, H, W).
+ coeff_preds_list (list[Tensor]): Mask coefficients for a single
+ scale level with shape (num_anchors * num_protos, H, W).
+ mlvl_anchors (list[Tensor]): Box reference for a single scale level
+ with shape (num_total_anchors, 4).
+ img_shape (tuple[int]): Shape of the input image,
+ (height, width, 3).
+            scale_factor (ndarray): Scale factor of the image arranged as
+ (w_scale, h_scale, w_scale, h_scale).
+ cfg (mmcv.Config): Test / postprocessing configuration,
+ if None, test_cfg would be used.
+ rescale (bool): If True, return boxes in original image space.
+
+ Returns:
+ tuple[Tensor, Tensor, Tensor]: The first item is an (n, 5) tensor,
+ where the first 4 columns are bounding box positions
+ (tl_x, tl_y, br_x, br_y) and the 5-th column is a score between
+ 0 and 1. The second item is an (n,) tensor where each item is
+ the predicted class label of the corresponding box. The third
+ item is an (n, num_protos) tensor where each item is the
+ predicted mask coefficients of instance inside the
+ corresponding box.
+ """
+ cfg = self.test_cfg if cfg is None else cfg
+ assert len(cls_score_list) == len(bbox_pred_list) == len(mlvl_anchors)
+ mlvl_bboxes = []
+ mlvl_scores = []
+ mlvl_coeffs = []
+ for cls_score, bbox_pred, coeff_pred, anchors in \
+ zip(cls_score_list, bbox_pred_list,
+ coeff_preds_list, mlvl_anchors):
+ assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
+ cls_score = cls_score.permute(1, 2,
+ 0).reshape(-1, self.cls_out_channels)
+ if self.use_sigmoid_cls:
+ scores = cls_score.sigmoid()
+ else:
+ scores = cls_score.softmax(-1)
+ bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4)
+ coeff_pred = coeff_pred.permute(1, 2,
+ 0).reshape(-1, self.num_protos)
+ nms_pre = cfg.get('nms_pre', -1)
+ if nms_pre > 0 and scores.shape[0] > nms_pre:
+ # Get maximum scores for foreground classes.
+ if self.use_sigmoid_cls:
+ max_scores, _ = scores.max(dim=1)
+ else:
+ # remind that we set FG labels to [0, num_class-1]
+ # since mmdet v2.0
+ # BG cat_id: num_class
+ max_scores, _ = scores[:, :-1].max(dim=1)
+ _, topk_inds = max_scores.topk(nms_pre)
+ anchors = anchors[topk_inds, :]
+ bbox_pred = bbox_pred[topk_inds, :]
+ scores = scores[topk_inds, :]
+ coeff_pred = coeff_pred[topk_inds, :]
+ bboxes = self.bbox_coder.decode(
+ anchors, bbox_pred, max_shape=img_shape)
+ mlvl_bboxes.append(bboxes)
+ mlvl_scores.append(scores)
+ mlvl_coeffs.append(coeff_pred)
+ mlvl_bboxes = torch.cat(mlvl_bboxes)
+ if rescale:
+ mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor)
+ mlvl_scores = torch.cat(mlvl_scores)
+ mlvl_coeffs = torch.cat(mlvl_coeffs)
+ if self.use_sigmoid_cls:
+ # Add a dummy background class to the backend when using sigmoid
+ # remind that we set FG labels to [0, num_class-1] since mmdet v2.0
+ # BG cat_id: num_class
+ padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1)
+ mlvl_scores = torch.cat([mlvl_scores, padding], dim=1)
+ det_bboxes, det_labels, det_coeffs = fast_nms(mlvl_bboxes, mlvl_scores,
+ mlvl_coeffs,
+ cfg.score_thr,
+ cfg.iou_thr, cfg.top_k,
+ cfg.max_per_img)
+ return det_bboxes, det_labels, det_coeffs
+
+
+@HEADS.register_module()
+class YOLACTSegmHead(nn.Module):
+ """YOLACT segmentation head used in https://arxiv.org/abs/1904.02689.
+
+ Apply a semantic segmentation loss on feature space using layers that are
+ only evaluated during training to increase performance with no speed
+ penalty.
+
+ Args:
+ in_channels (int): Number of channels in the input feature map.
+ num_classes (int): Number of categories excluding the background
+ category.
+ loss_segm (dict): Config of semantic segmentation loss.
+ """
+
+ def __init__(self,
+ num_classes,
+ in_channels=256,
+ loss_segm=dict(
+ type='CrossEntropyLoss',
+ use_sigmoid=True,
+ loss_weight=1.0)):
+ super(YOLACTSegmHead, self).__init__()
+ self.in_channels = in_channels
+ self.num_classes = num_classes
+ self.loss_segm = build_loss(loss_segm)
+ self._init_layers()
+ self.fp16_enabled = False
+
+ def _init_layers(self):
+ """Initialize layers of the head."""
+ self.segm_conv = nn.Conv2d(
+ self.in_channels, self.num_classes, kernel_size=1)
+
+ def init_weights(self):
+ """Initialize weights of the head."""
+ xavier_init(self.segm_conv, distribution='uniform')
+
+ def forward(self, x):
+ """Forward feature from the upstream network.
+
+ Args:
+ x (Tensor): Feature from the upstream network, which is
+ a 4D-tensor.
+
+ Returns:
+ Tensor: Predicted semantic segmentation map with shape
+ (N, num_classes, H, W).
+ """
+ return self.segm_conv(x)
+
+ @force_fp32(apply_to=('segm_pred', ))
+ def loss(self, segm_pred, gt_masks, gt_labels):
+ """Compute loss of the head.
+
+ Args:
+            segm_pred (Tensor): Predicted semantic segmentation map
+                with shape (N, num_classes, H, W).
+ gt_masks (list[Tensor]): Ground truth masks for each image with
+ the same shape of the input image.
+ gt_labels (list[Tensor]): Class indices corresponding to each box.
+
+ Returns:
+ dict[str, Tensor]: A dictionary of loss components.
+ """
+ loss_segm = []
+ num_imgs, num_classes, mask_h, mask_w = segm_pred.size()
+ for idx in range(num_imgs):
+ cur_segm_pred = segm_pred[idx]
+ cur_gt_masks = gt_masks[idx].float()
+ cur_gt_labels = gt_labels[idx]
+ segm_targets = self.get_targets(cur_segm_pred, cur_gt_masks,
+ cur_gt_labels)
+ if segm_targets is None:
+ loss = self.loss_segm(cur_segm_pred,
+ torch.zeros_like(cur_segm_pred),
+ torch.zeros_like(cur_segm_pred))
+ else:
+ loss = self.loss_segm(
+ cur_segm_pred,
+ segm_targets,
+ avg_factor=num_imgs * mask_h * mask_w)
+ loss_segm.append(loss)
+ return dict(loss_segm=loss_segm)
+
+ def get_targets(self, segm_pred, gt_masks, gt_labels):
+ """Compute semantic segmentation targets for each image.
+
+ Args:
+ segm_pred (Tensor): Predicted semantic segmentation map
+ with shape (num_classes, H, W).
+ gt_masks (Tensor): Ground truth masks for each image with
+ the same shape of the input image.
+ gt_labels (Tensor): Class indices corresponding to each box.
+
+ Returns:
+ Tensor: Semantic segmentation targets with shape
+ (num_classes, H, W).
+ """
+ if gt_masks.size(0) == 0:
+ return None
+ num_classes, mask_h, mask_w = segm_pred.size()
+ with torch.no_grad():
+ downsampled_masks = F.interpolate(
+ gt_masks.unsqueeze(0), (mask_h, mask_w),
+ mode='bilinear',
+ align_corners=False).squeeze(0)
+ downsampled_masks = downsampled_masks.gt(0.5).float()
+ segm_targets = torch.zeros_like(segm_pred, requires_grad=False)
+ for obj_idx in range(downsampled_masks.size(0)):
+ segm_targets[gt_labels[obj_idx] - 1] = torch.max(
+ segm_targets[gt_labels[obj_idx] - 1],
+ downsampled_masks[obj_idx])
+ return segm_targets
+
+
+@HEADS.register_module()
+class YOLACTProtonet(nn.Module):
+ """YOLACT mask head used in https://arxiv.org/abs/1904.02689.
+
+ This head outputs the mask prototypes for YOLACT.
+
+ Args:
+ in_channels (int): Number of channels in the input feature map.
+ proto_channels (tuple[int]): Output channels of protonet convs.
+ proto_kernel_sizes (tuple[int]): Kernel sizes of protonet convs.
+        include_last_relu (bool): Whether to keep the last ReLU of the
+            protonet.
+ num_protos (int): Number of prototypes.
+ num_classes (int): Number of categories excluding the background
+ category.
+ loss_mask_weight (float): Reweight the mask loss by this factor.
+ max_masks_to_train (int): Maximum number of masks to train for
+ each image.
+ """
+
+ def __init__(self,
+ num_classes,
+ in_channels=256,
+ proto_channels=(256, 256, 256, None, 256, 32),
+ proto_kernel_sizes=(3, 3, 3, -2, 3, 1),
+ include_last_relu=True,
+ num_protos=32,
+ loss_mask_weight=1.0,
+ max_masks_to_train=100):
+ super(YOLACTProtonet, self).__init__()
+ self.in_channels = in_channels
+ self.proto_channels = proto_channels
+ self.proto_kernel_sizes = proto_kernel_sizes
+ self.include_last_relu = include_last_relu
+ self.protonet = self._init_layers()
+
+ self.loss_mask_weight = loss_mask_weight
+ self.num_protos = num_protos
+ self.num_classes = num_classes
+ self.max_masks_to_train = max_masks_to_train
+ self.fp16_enabled = False
+
+ def _init_layers(self):
+ """A helper function to take a config setting and turn it into a
+ network."""
+ # Possible patterns:
+ # ( 256, 3) -> conv
+ # ( 256,-2) -> deconv
+ # (None,-2) -> bilinear interpolate
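+        # With the default proto_channels=(256, 256, 256, None, 256, 32) and
+        # proto_kernel_sizes=(3, 3, 3, -2, 3, 1), this expands to three 3x3
+        # convs, a 2x bilinear upsample, another 3x3 conv and a final 1x1
+        # conv that outputs the 32 prototype channels, each followed by ReLU.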
+ in_channels = self.in_channels
+ protonets = nn.ModuleList()
+ for num_channels, kernel_size in zip(self.proto_channels,
+ self.proto_kernel_sizes):
+ if kernel_size > 0:
+ layer = nn.Conv2d(
+ in_channels,
+ num_channels,
+ kernel_size,
+ padding=kernel_size // 2)
+ else:
+ if num_channels is None:
+ layer = InterpolateModule(
+ scale_factor=-kernel_size,
+ mode='bilinear',
+ align_corners=False)
+ else:
+ layer = nn.ConvTranspose2d(
+ in_channels,
+ num_channels,
+ -kernel_size,
+ padding=kernel_size // 2)
+ protonets.append(layer)
+ protonets.append(nn.ReLU(inplace=True))
+ in_channels = num_channels if num_channels is not None \
+ else in_channels
+ if not self.include_last_relu:
+ protonets = protonets[:-1]
+ return nn.Sequential(*protonets)
+
+ def init_weights(self):
+ """Initialize weights of the head."""
+ for m in self.protonet:
+ if isinstance(m, nn.Conv2d):
+ xavier_init(m, distribution='uniform')
+
+ def forward(self, x, coeff_pred, bboxes, img_meta, sampling_results=None):
+ """Forward feature from the upstream network to get prototypes and
+        linearly combine the prototypes, using mask coefficients, into
+ instance masks. Finally, crop the instance masks with given bboxes.
+
+ Args:
+ x (Tensor): Feature from the upstream network, which is
+ a 4D-tensor.
+ coeff_pred (list[Tensor]): Mask coefficients for each scale
+ level with shape (N, num_anchors * num_protos, H, W).
+ bboxes (list[Tensor]): Box used for cropping with shape
+ (N, num_anchors * 4, H, W). During training, they are
+ ground truth boxes. During testing, they are predicted
+ boxes.
+ img_meta (list[dict]): Meta information of each image, e.g.,
+ image size, scaling factor, etc.
+ sampling_results (List[:obj:``SamplingResult``]): Sampler results
+ for each image.
+
+ Returns:
+ list[Tensor]: Predicted instance segmentation masks.
+ """
+ prototypes = self.protonet(x)
+ prototypes = prototypes.permute(0, 2, 3, 1).contiguous()
+
+ num_imgs = x.size(0)
+ # Training state
+ if self.training:
+ coeff_pred_list = []
+ for coeff_pred_per_level in coeff_pred:
+ coeff_pred_per_level = \
+ coeff_pred_per_level.permute(0, 2, 3, 1)\
+ .reshape(num_imgs, -1, self.num_protos)
+ coeff_pred_list.append(coeff_pred_per_level)
+ coeff_pred = torch.cat(coeff_pred_list, dim=1)
+
+ mask_pred_list = []
+ for idx in range(num_imgs):
+ cur_prototypes = prototypes[idx]
+ cur_coeff_pred = coeff_pred[idx]
+ cur_bboxes = bboxes[idx]
+ cur_img_meta = img_meta[idx]
+
+ # Testing state
+ if not self.training:
+ bboxes_for_cropping = cur_bboxes
+ else:
+ cur_sampling_results = sampling_results[idx]
+ pos_assigned_gt_inds = \
+ cur_sampling_results.pos_assigned_gt_inds
+ bboxes_for_cropping = cur_bboxes[pos_assigned_gt_inds].clone()
+ pos_inds = cur_sampling_results.pos_inds
+ cur_coeff_pred = cur_coeff_pred[pos_inds]
+
+ # Linearly combine the prototypes with the mask coefficients
+ mask_pred = cur_prototypes @ cur_coeff_pred.t()
+ mask_pred = torch.sigmoid(mask_pred)
+
+ h, w = cur_img_meta['img_shape'][:2]
+ bboxes_for_cropping[:, 0] /= w
+ bboxes_for_cropping[:, 1] /= h
+ bboxes_for_cropping[:, 2] /= w
+ bboxes_for_cropping[:, 3] /= h
+
+ mask_pred = self.crop(mask_pred, bboxes_for_cropping)
+ mask_pred = mask_pred.permute(2, 0, 1).contiguous()
+ mask_pred_list.append(mask_pred)
+ return mask_pred_list
+
+ @force_fp32(apply_to=('mask_pred', ))
+ def loss(self, mask_pred, gt_masks, gt_bboxes, img_meta, sampling_results):
+ """Compute loss of the head.
+
+ Args:
+            mask_pred (list[Tensor]): Predicted instance masks of each image,
+                each with shape (num_pos, H, W).
+ gt_masks (list[Tensor]): Ground truth masks for each image with
+ the same shape of the input image.
+ gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
+ shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
+ img_meta (list[dict]): Meta information of each image, e.g.,
+ image size, scaling factor, etc.
+ sampling_results (List[:obj:``SamplingResult``]): Sampler results
+ for each image.
+
+ Returns:
+ dict[str, Tensor]: A dictionary of loss components.
+ """
+ loss_mask = []
+ num_imgs = len(mask_pred)
+ total_pos = 0
+ for idx in range(num_imgs):
+ cur_mask_pred = mask_pred[idx]
+ cur_gt_masks = gt_masks[idx].float()
+ cur_gt_bboxes = gt_bboxes[idx]
+ cur_img_meta = img_meta[idx]
+ cur_sampling_results = sampling_results[idx]
+
+ pos_assigned_gt_inds = cur_sampling_results.pos_assigned_gt_inds
+ num_pos = pos_assigned_gt_inds.size(0)
+ # Since we're producing (near) full image masks,
+ # it'd take too much vram to backprop on every single mask.
+ # Thus we select only a subset.
+ if num_pos > self.max_masks_to_train:
+ perm = torch.randperm(num_pos)
+ select = perm[:self.max_masks_to_train]
+ cur_mask_pred = cur_mask_pred[select]
+ pos_assigned_gt_inds = pos_assigned_gt_inds[select]
+ num_pos = self.max_masks_to_train
+ total_pos += num_pos
+
+ gt_bboxes_for_reweight = cur_gt_bboxes[pos_assigned_gt_inds]
+
+ mask_targets = self.get_targets(cur_mask_pred, cur_gt_masks,
+ pos_assigned_gt_inds)
+ if num_pos == 0:
+ loss = cur_mask_pred.sum() * 0.
+ elif mask_targets is None:
+ loss = F.binary_cross_entropy(cur_mask_pred,
+ torch.zeros_like(cur_mask_pred),
+ torch.zeros_like(cur_mask_pred))
+ else:
+ cur_mask_pred = torch.clamp(cur_mask_pred, 0, 1)
+ loss = F.binary_cross_entropy(
+ cur_mask_pred, mask_targets,
+ reduction='none') * self.loss_mask_weight
+
+ h, w = cur_img_meta['img_shape'][:2]
+ gt_bboxes_width = (gt_bboxes_for_reweight[:, 2] -
+ gt_bboxes_for_reweight[:, 0]) / w
+ gt_bboxes_height = (gt_bboxes_for_reweight[:, 3] -
+ gt_bboxes_for_reweight[:, 1]) / h
+ loss = loss.mean(dim=(1,
+ 2)) / gt_bboxes_width / gt_bboxes_height
+ loss = torch.sum(loss)
+ loss_mask.append(loss)
+
+ if total_pos == 0:
+ total_pos += 1 # avoid nan
+ loss_mask = [x / total_pos for x in loss_mask]
+
+ return dict(loss_mask=loss_mask)
+
+ def get_targets(self, mask_pred, gt_masks, pos_assigned_gt_inds):
+ """Compute instance segmentation targets for each image.
+
+ Args:
+            mask_pred (Tensor): Predicted instance masks with shape
+                (num_pos, H, W).
+ gt_masks (Tensor): Ground truth masks for each image with
+ the same shape of the input image.
+ pos_assigned_gt_inds (Tensor): GT indices of the corresponding
+ positive samples.
+ Returns:
+ Tensor: Instance segmentation targets with shape
+ (num_instances, H, W).
+ """
+ if gt_masks.size(0) == 0:
+ return None
+ mask_h, mask_w = mask_pred.shape[-2:]
+ gt_masks = F.interpolate(
+ gt_masks.unsqueeze(0), (mask_h, mask_w),
+ mode='bilinear',
+ align_corners=False).squeeze(0)
+ gt_masks = gt_masks.gt(0.5).float()
+ mask_targets = gt_masks[pos_assigned_gt_inds]
+ return mask_targets
+
+ def get_seg_masks(self, mask_pred, label_pred, img_meta, rescale):
+ """Resize, binarize, and format the instance mask predictions.
+
+ Args:
+ mask_pred (Tensor): shape (N, H, W).
+ label_pred (Tensor): shape (N, ).
+ img_meta (dict): Meta information of each image, e.g.,
+ image size, scaling factor, etc.
+ rescale (bool): If rescale is False, then returned masks will
+ fit the scale of imgs[0].
+ Returns:
+ list[ndarray]: Mask predictions grouped by their predicted classes.
+ """
+ ori_shape = img_meta['ori_shape']
+ scale_factor = img_meta['scale_factor']
+ if rescale:
+ img_h, img_w = ori_shape[:2]
+ else:
+ img_h = np.round(ori_shape[0] * scale_factor[1]).astype(np.int32)
+ img_w = np.round(ori_shape[1] * scale_factor[0]).astype(np.int32)
+
+ cls_segms = [[] for _ in range(self.num_classes)]
+ if mask_pred.size(0) == 0:
+ return cls_segms
+
+ mask_pred = F.interpolate(
+ mask_pred.unsqueeze(0), (img_h, img_w),
+ mode='bilinear',
+ align_corners=False).squeeze(0) > 0.5
+ mask_pred = mask_pred.cpu().numpy().astype(np.uint8)
+
+ for m, l in zip(mask_pred, label_pred):
+ cls_segms[l].append(m)
+ return cls_segms
+
+ def crop(self, masks, boxes, padding=1):
+ """Crop predicted masks by zeroing out everything not in the predicted
+ bbox.
+
+ Args:
+ masks (Tensor): shape [H, W, N].
+ boxes (Tensor): bbox coords in relative point form with
+ shape [N, 4].
+
+ Return:
+ Tensor: The cropped masks.
+ """
+ h, w, n = masks.size()
+ x1, x2 = self.sanitize_coordinates(
+ boxes[:, 0], boxes[:, 2], w, padding, cast=False)
+ y1, y2 = self.sanitize_coordinates(
+ boxes[:, 1], boxes[:, 3], h, padding, cast=False)
+
+ rows = torch.arange(
+ w, device=masks.device, dtype=x1.dtype).view(1, -1,
+ 1).expand(h, w, n)
+ cols = torch.arange(
+ h, device=masks.device, dtype=x1.dtype).view(-1, 1,
+ 1).expand(h, w, n)
+
+ masks_left = rows >= x1.view(1, 1, -1)
+ masks_right = rows < x2.view(1, 1, -1)
+ masks_up = cols >= y1.view(1, 1, -1)
+ masks_down = cols < y2.view(1, 1, -1)
+
+ crop_mask = masks_left * masks_right * masks_up * masks_down
+
+ return masks * crop_mask.float()
+
+ def sanitize_coordinates(self, x1, x2, img_size, padding=0, cast=True):
+ """Sanitizes the input coordinates so that x1 < x2, x1 != x2, x1 >= 0,
+ and x2 <= image_size. Also converts from relative to absolute
+ coordinates and casts the results to long tensors.
+
+ Warning: this does things in-place behind the scenes so
+ copy if necessary.
+
+ Args:
+            x1 (Tensor): shape (N, ).
+            x2 (Tensor): shape (N, ).
+ img_size (int): Size of the input image.
+ padding (int): x1 >= padding, x2 <= image_size-padding.
+ cast (bool): If cast is false, the result won't be cast to longs.
+
+ Returns:
+ tuple:
+                x1 (Tensor): Sanitized x1.
+                x2 (Tensor): Sanitized x2.
+ """
+ x1 = x1 * img_size
+ x2 = x2 * img_size
+ if cast:
+ x1 = x1.long()
+ x2 = x2.long()
+        # take the min and max from the original pair so that x1 <= x2,
+        # as the docstring promises
+        x1, x2 = torch.min(x1, x2), torch.max(x1, x2)
+ x1 = torch.clamp(x1 - padding, min=0)
+ x2 = torch.clamp(x2 + padding, max=img_size)
+ return x1, x2
+
+
+class InterpolateModule(nn.Module):
+ """This is a module version of F.interpolate.
+
+ Any arguments you give it just get passed along for the ride.
+ """
+
+ def __init__(self, *args, **kwargs):
+ super().__init__()
+
+ self.args = args
+ self.kwargs = kwargs
+
+ def forward(self, x):
+ """Forward features from the upstream network."""
+ return F.interpolate(x, *self.args, **self.kwargs)
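A minimal sketch of the prototype/coefficient combination that `YOLACTProtonet.forward` applies per image; the shapes below are assumed toy values and the code is plain PyTorch, not part of the head itself.

    import torch

    H, W, num_protos, num_boxes = 8, 8, 32, 3
    prototypes = torch.randn(H, W, num_protos)      # protonet output for one image
    coeffs = torch.randn(num_boxes, num_protos)     # per-box mask coefficients

    masks = torch.sigmoid(prototypes @ coeffs.t())  # (H, W, num_boxes)
    masks = masks.permute(2, 0, 1).contiguous()     # (num_boxes, H, W)
    print(masks.shape)                              # torch.Size([3, 8, 8])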
diff --git a/annotator/uniformer/mmdet_null/models/dense_heads/yolo_head.py b/annotator/uniformer/mmdet_null/models/dense_heads/yolo_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..25a005d36903333f37a6c6d31b4d613c071f4a07
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/dense_heads/yolo_head.py
@@ -0,0 +1,577 @@
+# Copyright (c) 2019 Western Digital Corporation or its affiliates.
+
+import warnings
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from mmcv.cnn import ConvModule, normal_init
+from mmcv.runner import force_fp32
+
+from mmdet.core import (build_anchor_generator, build_assigner,
+ build_bbox_coder, build_sampler, images_to_levels,
+ multi_apply, multiclass_nms)
+from ..builder import HEADS, build_loss
+from .base_dense_head import BaseDenseHead
+from .dense_test_mixins import BBoxTestMixin
+
+
+@HEADS.register_module()
+class YOLOV3Head(BaseDenseHead, BBoxTestMixin):
+ """YOLOV3Head Paper link: https://arxiv.org/abs/1804.02767.
+
+ Args:
+ num_classes (int): The number of object classes (w/o background)
+ in_channels (List[int]): Number of input channels per scale.
+ out_channels (List[int]): The number of output channels per scale
+ before the final 1x1 layer. Default: (1024, 512, 256).
+ anchor_generator (dict): Config dict for anchor generator
+ bbox_coder (dict): Config of bounding box coder.
+ featmap_strides (List[int]): The stride of each scale.
+ Should be in descending order. Default: (32, 16, 8).
+        one_hot_smoother (float): Set a non-zero value to enable label
+            smoothing. Default: 0.
+ conv_cfg (dict): Config dict for convolution layer. Default: None.
+ norm_cfg (dict): Dictionary to construct and config norm layer.
+ Default: dict(type='BN', requires_grad=True)
+ act_cfg (dict): Config dict for activation layer.
+ Default: dict(type='LeakyReLU', negative_slope=0.1).
+ loss_cls (dict): Config of classification loss.
+ loss_conf (dict): Config of confidence loss.
+ loss_xy (dict): Config of xy coordinate loss.
+ loss_wh (dict): Config of wh coordinate loss.
+ train_cfg (dict): Training config of YOLOV3 head. Default: None.
+ test_cfg (dict): Testing config of YOLOV3 head. Default: None.
+ """
+
+ def __init__(self,
+ num_classes,
+ in_channels,
+ out_channels=(1024, 512, 256),
+ anchor_generator=dict(
+ type='YOLOAnchorGenerator',
+ base_sizes=[[(116, 90), (156, 198), (373, 326)],
+ [(30, 61), (62, 45), (59, 119)],
+ [(10, 13), (16, 30), (33, 23)]],
+ strides=[32, 16, 8]),
+ bbox_coder=dict(type='YOLOBBoxCoder'),
+ featmap_strides=[32, 16, 8],
+ one_hot_smoother=0.,
+ conv_cfg=None,
+ norm_cfg=dict(type='BN', requires_grad=True),
+ act_cfg=dict(type='LeakyReLU', negative_slope=0.1),
+ loss_cls=dict(
+ type='CrossEntropyLoss',
+ use_sigmoid=True,
+ loss_weight=1.0),
+ loss_conf=dict(
+ type='CrossEntropyLoss',
+ use_sigmoid=True,
+ loss_weight=1.0),
+ loss_xy=dict(
+ type='CrossEntropyLoss',
+ use_sigmoid=True,
+ loss_weight=1.0),
+ loss_wh=dict(type='MSELoss', loss_weight=1.0),
+ train_cfg=None,
+ test_cfg=None):
+ super(YOLOV3Head, self).__init__()
+ # Check params
+ assert (len(in_channels) == len(out_channels) == len(featmap_strides))
+
+ self.num_classes = num_classes
+ self.in_channels = in_channels
+ self.out_channels = out_channels
+ self.featmap_strides = featmap_strides
+ self.train_cfg = train_cfg
+ self.test_cfg = test_cfg
+ if self.train_cfg:
+ self.assigner = build_assigner(self.train_cfg.assigner)
+ if hasattr(self.train_cfg, 'sampler'):
+ sampler_cfg = self.train_cfg.sampler
+ else:
+ sampler_cfg = dict(type='PseudoSampler')
+ self.sampler = build_sampler(sampler_cfg, context=self)
+
+ self.one_hot_smoother = one_hot_smoother
+
+ self.conv_cfg = conv_cfg
+ self.norm_cfg = norm_cfg
+ self.act_cfg = act_cfg
+
+ self.bbox_coder = build_bbox_coder(bbox_coder)
+ self.anchor_generator = build_anchor_generator(anchor_generator)
+
+ self.loss_cls = build_loss(loss_cls)
+ self.loss_conf = build_loss(loss_conf)
+ self.loss_xy = build_loss(loss_xy)
+ self.loss_wh = build_loss(loss_wh)
+ # usually the numbers of anchors for each level are the same
+ # except SSD detectors
+ self.num_anchors = self.anchor_generator.num_base_anchors[0]
+ assert len(
+ self.anchor_generator.num_base_anchors) == len(featmap_strides)
+ self._init_layers()
+
+ @property
+ def num_levels(self):
+ return len(self.featmap_strides)
+
+ @property
+ def num_attrib(self):
+ """int: number of attributes in pred_map, bboxes (4) +
+ objectness (1) + num_classes"""
+
+ return 5 + self.num_classes
+
+ def _init_layers(self):
+ self.convs_bridge = nn.ModuleList()
+ self.convs_pred = nn.ModuleList()
+ for i in range(self.num_levels):
+ conv_bridge = ConvModule(
+ self.in_channels[i],
+ self.out_channels[i],
+ 3,
+ padding=1,
+ conv_cfg=self.conv_cfg,
+ norm_cfg=self.norm_cfg,
+ act_cfg=self.act_cfg)
+ conv_pred = nn.Conv2d(self.out_channels[i],
+ self.num_anchors * self.num_attrib, 1)
+
+ self.convs_bridge.append(conv_bridge)
+ self.convs_pred.append(conv_pred)
+
+ def init_weights(self):
+ """Initialize weights of the head."""
+ for m in self.convs_pred:
+ normal_init(m, std=0.01)
+
+ def forward(self, feats):
+ """Forward features from the upstream network.
+
+ Args:
+ feats (tuple[Tensor]): Features from the upstream network, each is
+ a 4D-tensor.
+
+ Returns:
+            tuple[Tensor]: A tuple of multi-level prediction maps, each a
+                4D-tensor of shape
+                (batch_size, num_anchors * (5 + num_classes), height, width).
+ """
+
+ assert len(feats) == self.num_levels
+ pred_maps = []
+ for i in range(self.num_levels):
+ x = feats[i]
+ x = self.convs_bridge[i](x)
+ pred_map = self.convs_pred[i](x)
+ pred_maps.append(pred_map)
+
+ return tuple(pred_maps),
+
+ @force_fp32(apply_to=('pred_maps', ))
+ def get_bboxes(self,
+ pred_maps,
+ img_metas,
+ cfg=None,
+ rescale=False,
+ with_nms=True):
+ """Transform network output for a batch into bbox predictions.
+
+ Args:
+ pred_maps (list[Tensor]): Raw predictions for a batch of images.
+ img_metas (list[dict]): Meta information of each image, e.g.,
+ image size, scaling factor, etc.
+ cfg (mmcv.Config | None): Test / postprocessing configuration,
+ if None, test_cfg would be used. Default: None.
+ rescale (bool): If True, return boxes in original image space.
+ Default: False.
+            with_nms (bool): If True, do nms before returning boxes.
+ Default: True.
+
+ Returns:
+            list[tuple[Tensor, Tensor]]: Each item in result_list is a
+                2-tuple. The first item is an (n, 5) tensor whose columns are
+                (tl_x, tl_y, br_x, br_y, score), with the score between 0
+                and 1. The second tensor has shape (n,), and each element is
+                the class label of the corresponding box.
+ """
+ num_levels = len(pred_maps)
+ pred_maps_list = [pred_maps[i].detach() for i in range(num_levels)]
+ scale_factors = [
+ img_metas[i]['scale_factor']
+ for i in range(pred_maps_list[0].shape[0])
+ ]
+ result_list = self._get_bboxes(pred_maps_list, scale_factors, cfg,
+ rescale, with_nms)
+ return result_list
+
+ def _get_bboxes(self,
+ pred_maps_list,
+ scale_factors,
+ cfg,
+ rescale=False,
+ with_nms=True):
+ """Transform outputs for a single batch item into bbox predictions.
+
+ Args:
+            pred_maps_list (list[Tensor]): Prediction maps for each scale
+                level, each with shape (N, num_anchors * num_attrib, H, W).
+            scale_factors (list(ndarray)): Scale factor of the image arranged
+                as (w_scale, h_scale, w_scale, h_scale).
+ cfg (mmcv.Config | None): Test / postprocessing configuration,
+ if None, test_cfg would be used.
+ rescale (bool): If True, return boxes in original image space.
+ Default: False.
+            with_nms (bool): If True, do nms before returning boxes.
+ Default: True.
+
+ Returns:
+            list[tuple[Tensor, Tensor]]: Each item in result_list is a
+                2-tuple. The first item is an (n, 5) tensor whose columns are
+                (tl_x, tl_y, br_x, br_y, score), with the score between 0
+                and 1. The second tensor has shape (n,), and each element is
+                the class label of the corresponding box.
+ """
+ cfg = self.test_cfg if cfg is None else cfg
+ assert len(pred_maps_list) == self.num_levels
+
+ device = pred_maps_list[0].device
+ batch_size = pred_maps_list[0].shape[0]
+
+ featmap_sizes = [
+ pred_maps_list[i].shape[-2:] for i in range(self.num_levels)
+ ]
+ multi_lvl_anchors = self.anchor_generator.grid_anchors(
+ featmap_sizes, device)
+ # convert to tensor to keep tracing
+ nms_pre_tensor = torch.tensor(
+ cfg.get('nms_pre', -1), device=device, dtype=torch.long)
+
+ multi_lvl_bboxes = []
+ multi_lvl_cls_scores = []
+ multi_lvl_conf_scores = []
+ for i in range(self.num_levels):
+ # get some key info for current scale
+ pred_map = pred_maps_list[i]
+ stride = self.featmap_strides[i]
+ # (b,h, w, num_anchors*num_attrib) ->
+ # (b,h*w*num_anchors, num_attrib)
+ pred_map = pred_map.permute(0, 2, 3,
+ 1).reshape(batch_size, -1,
+ self.num_attrib)
+            # An in-place operation like
+            # `pred_map[..., :2] = torch.sigmoid(pred_map[..., :2])`
+            # would create a constant tensor when exporting to ONNX.
+ pred_map_conf = torch.sigmoid(pred_map[..., :2])
+ pred_map_rest = pred_map[..., 2:]
+ pred_map = torch.cat([pred_map_conf, pred_map_rest], dim=-1)
+ pred_map_boxes = pred_map[..., :4]
+ multi_lvl_anchor = multi_lvl_anchors[i]
+ multi_lvl_anchor = multi_lvl_anchor.expand_as(pred_map_boxes)
+ bbox_pred = self.bbox_coder.decode(multi_lvl_anchor,
+ pred_map_boxes, stride)
+ # conf and cls
+ conf_pred = torch.sigmoid(pred_map[..., 4])
+ cls_pred = torch.sigmoid(pred_map[..., 5:]).view(
+ batch_size, -1, self.num_classes) # Cls pred one-hot.
+
+ # Get top-k prediction
+ # Always keep topk op for dynamic input in onnx
+ if nms_pre_tensor > 0 and (torch.onnx.is_in_onnx_export()
+ or conf_pred.shape[1] > nms_pre_tensor):
+ from torch import _shape_as_tensor
+ # keep shape as tensor and get k
+ num_anchor = _shape_as_tensor(conf_pred)[1].to(device)
+ nms_pre = torch.where(nms_pre_tensor < num_anchor,
+ nms_pre_tensor, num_anchor)
+ _, topk_inds = conf_pred.topk(nms_pre)
+ batch_inds = torch.arange(batch_size).view(
+ -1, 1).expand_as(topk_inds).long()
+ bbox_pred = bbox_pred[batch_inds, topk_inds, :]
+ cls_pred = cls_pred[batch_inds, topk_inds, :]
+ conf_pred = conf_pred[batch_inds, topk_inds]
+
+ # Save the result of current scale
+ multi_lvl_bboxes.append(bbox_pred)
+ multi_lvl_cls_scores.append(cls_pred)
+ multi_lvl_conf_scores.append(conf_pred)
+
+ # Merge the results of different scales together
+ batch_mlvl_bboxes = torch.cat(multi_lvl_bboxes, dim=1)
+ batch_mlvl_scores = torch.cat(multi_lvl_cls_scores, dim=1)
+ batch_mlvl_conf_scores = torch.cat(multi_lvl_conf_scores, dim=1)
+
+        # Set the max number of boxes to be fed into nms in deployment
+ deploy_nms_pre = cfg.get('deploy_nms_pre', -1)
+ if deploy_nms_pre > 0 and torch.onnx.is_in_onnx_export():
+ _, topk_inds = batch_mlvl_conf_scores.topk(deploy_nms_pre)
+ batch_inds = torch.arange(batch_size).view(
+ -1, 1).expand_as(topk_inds).long()
+ batch_mlvl_bboxes = batch_mlvl_bboxes[batch_inds, topk_inds, :]
+ batch_mlvl_scores = batch_mlvl_scores[batch_inds, topk_inds, :]
+ batch_mlvl_conf_scores = batch_mlvl_conf_scores[batch_inds,
+ topk_inds]
+
+ if with_nms and (batch_mlvl_conf_scores.size(0) == 0):
+ return torch.zeros((0, 5)), torch.zeros((0, ))
+
+ if rescale:
+ batch_mlvl_bboxes /= batch_mlvl_bboxes.new_tensor(
+ scale_factors).unsqueeze(1)
+
+ # In mmdet 2.x, the class_id for background is num_classes.
+ # i.e., the last column.
+ padding = batch_mlvl_scores.new_zeros(batch_size,
+ batch_mlvl_scores.shape[1], 1)
+ batch_mlvl_scores = torch.cat([batch_mlvl_scores, padding], dim=-1)
+
+ # Support exporting to onnx without nms
+ if with_nms and cfg.get('nms', None) is not None:
+ det_results = []
+ for (mlvl_bboxes, mlvl_scores,
+ mlvl_conf_scores) in zip(batch_mlvl_bboxes, batch_mlvl_scores,
+ batch_mlvl_conf_scores):
+ # Filtering out all predictions with conf < conf_thr
+ conf_thr = cfg.get('conf_thr', -1)
+ if conf_thr > 0 and (not torch.onnx.is_in_onnx_export()):
+ # TensorRT not support NonZero
+ # add as_tuple=False for compatibility in Pytorch 1.6
+ # flatten would create a Reshape op with constant values,
+ # and raise RuntimeError when doing inference in ONNX
+ # Runtime with a different input image (#4221).
+ conf_inds = mlvl_conf_scores.ge(conf_thr).nonzero(
+ as_tuple=False).squeeze(1)
+ mlvl_bboxes = mlvl_bboxes[conf_inds, :]
+ mlvl_scores = mlvl_scores[conf_inds, :]
+ mlvl_conf_scores = mlvl_conf_scores[conf_inds]
+
+ det_bboxes, det_labels = multiclass_nms(
+ mlvl_bboxes,
+ mlvl_scores,
+ cfg.score_thr,
+ cfg.nms,
+ cfg.max_per_img,
+ score_factors=mlvl_conf_scores)
+ det_results.append(tuple([det_bboxes, det_labels]))
+
+ else:
+ det_results = [
+ tuple(mlvl_bs)
+ for mlvl_bs in zip(batch_mlvl_bboxes, batch_mlvl_scores,
+ batch_mlvl_conf_scores)
+ ]
+ return det_results
+
+ @force_fp32(apply_to=('pred_maps', ))
+ def loss(self,
+ pred_maps,
+ gt_bboxes,
+ gt_labels,
+ img_metas,
+ gt_bboxes_ignore=None):
+ """Compute loss of the head.
+
+ Args:
+ pred_maps (list[Tensor]): Prediction map for each scale level,
+ shape (N, num_anchors * num_attrib, H, W)
+ gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
+ shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
+ gt_labels (list[Tensor]): class indices corresponding to each box
+ img_metas (list[dict]): Meta information of each image, e.g.,
+ image size, scaling factor, etc.
+ gt_bboxes_ignore (None | list[Tensor]): specify which bounding
+ boxes can be ignored when computing the loss.
+
+ Returns:
+ dict[str, Tensor]: A dictionary of loss components.
+ """
+ num_imgs = len(img_metas)
+ device = pred_maps[0][0].device
+
+ featmap_sizes = [
+ pred_maps[i].shape[-2:] for i in range(self.num_levels)
+ ]
+ multi_level_anchors = self.anchor_generator.grid_anchors(
+ featmap_sizes, device)
+ anchor_list = [multi_level_anchors for _ in range(num_imgs)]
+
+ responsible_flag_list = []
+ for img_id in range(len(img_metas)):
+ responsible_flag_list.append(
+ self.anchor_generator.responsible_flags(
+ featmap_sizes, gt_bboxes[img_id], device))
+
+ target_maps_list, neg_maps_list = self.get_targets(
+ anchor_list, responsible_flag_list, gt_bboxes, gt_labels)
+
+ losses_cls, losses_conf, losses_xy, losses_wh = multi_apply(
+ self.loss_single, pred_maps, target_maps_list, neg_maps_list)
+
+ return dict(
+ loss_cls=losses_cls,
+ loss_conf=losses_conf,
+ loss_xy=losses_xy,
+ loss_wh=losses_wh)
+
+ def loss_single(self, pred_map, target_map, neg_map):
+ """Compute loss of a single image from a batch.
+
+ Args:
+ pred_map (Tensor): Raw predictions for a single level.
+ target_map (Tensor): The Ground-Truth target for a single level.
+ neg_map (Tensor): The negative masks for a single level.
+
+ Returns:
+ tuple:
+ loss_cls (Tensor): Classification loss.
+ loss_conf (Tensor): Confidence loss.
+ loss_xy (Tensor): Regression loss of x, y coordinate.
+ loss_wh (Tensor): Regression loss of w, h coordinate.
+ """
+
+ num_imgs = len(pred_map)
+ pred_map = pred_map.permute(0, 2, 3,
+ 1).reshape(num_imgs, -1, self.num_attrib)
+ neg_mask = neg_map.float()
+ pos_mask = target_map[..., 4]
+ pos_and_neg_mask = neg_mask + pos_mask
+ pos_mask = pos_mask.unsqueeze(dim=-1)
+ if torch.max(pos_and_neg_mask) > 1.:
+            warnings.warn('There is overlap between pos and neg samples.')
+ pos_and_neg_mask = pos_and_neg_mask.clamp(min=0., max=1.)
+
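+        # Per-anchor attribute layout (num_attrib = 5 + num_classes):
+        # [:2] xy offsets, [2:4] wh, [4] objectness, [5:] class scores.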
+ pred_xy = pred_map[..., :2]
+ pred_wh = pred_map[..., 2:4]
+ pred_conf = pred_map[..., 4]
+ pred_label = pred_map[..., 5:]
+
+ target_xy = target_map[..., :2]
+ target_wh = target_map[..., 2:4]
+ target_conf = target_map[..., 4]
+ target_label = target_map[..., 5:]
+
+ loss_cls = self.loss_cls(pred_label, target_label, weight=pos_mask)
+ loss_conf = self.loss_conf(
+ pred_conf, target_conf, weight=pos_and_neg_mask)
+ loss_xy = self.loss_xy(pred_xy, target_xy, weight=pos_mask)
+ loss_wh = self.loss_wh(pred_wh, target_wh, weight=pos_mask)
+
+ return loss_cls, loss_conf, loss_xy, loss_wh
+
+ def get_targets(self, anchor_list, responsible_flag_list, gt_bboxes_list,
+ gt_labels_list):
+ """Compute target maps for anchors in multiple images.
+
+ Args:
+ anchor_list (list[list[Tensor]]): Multi level anchors of each
+ image. The outer list indicates images, and the inner list
+ corresponds to feature levels of the image. Each element of
+ the inner list is a tensor of shape (num_total_anchors, 4).
+ responsible_flag_list (list[list[Tensor]]): Multi level responsible
+ flags of each image. Each element is a tensor of shape
+ (num_total_anchors, )
+ gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image.
+ gt_labels_list (list[Tensor]): Ground truth labels of each box.
+
+ Returns:
+ tuple: Usually returns a tuple containing learning targets.
+ - target_map_list (list[Tensor]): Target map of each level.
+ - neg_map_list (list[Tensor]): Negative map of each level.
+ """
+ num_imgs = len(anchor_list)
+
+ # anchor number of multi levels
+ num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]
+
+ results = multi_apply(self._get_targets_single, anchor_list,
+ responsible_flag_list, gt_bboxes_list,
+ gt_labels_list)
+
+ all_target_maps, all_neg_maps = results
+ assert num_imgs == len(all_target_maps) == len(all_neg_maps)
+ target_maps_list = images_to_levels(all_target_maps, num_level_anchors)
+ neg_maps_list = images_to_levels(all_neg_maps, num_level_anchors)
+
+ return target_maps_list, neg_maps_list
+
+ def _get_targets_single(self, anchors, responsible_flags, gt_bboxes,
+ gt_labels):
+ """Generate matching bounding box prior and converted GT.
+
+ Args:
+ anchors (list[Tensor]): Multi-level anchors of the image.
+ responsible_flags (list[Tensor]): Multi-level responsible flags of
+ anchors
+ gt_bboxes (Tensor): Ground truth bboxes of single image.
+ gt_labels (Tensor): Ground truth labels of single image.
+
+ Returns:
+ tuple:
+                target_map (Tensor): Prediction target map of each
+ scale level, shape (num_total_anchors,
+ 5+num_classes)
+ neg_map (Tensor): Negative map of each scale level,
+ shape (num_total_anchors,)
+ """
+
+ anchor_strides = []
+ for i in range(len(anchors)):
+ anchor_strides.append(
+ torch.tensor(self.featmap_strides[i],
+ device=gt_bboxes.device).repeat(len(anchors[i])))
+ concat_anchors = torch.cat(anchors)
+ concat_responsible_flags = torch.cat(responsible_flags)
+
+ anchor_strides = torch.cat(anchor_strides)
+ assert len(anchor_strides) == len(concat_anchors) == \
+ len(concat_responsible_flags)
+ assign_result = self.assigner.assign(concat_anchors,
+ concat_responsible_flags,
+ gt_bboxes)
+ sampling_result = self.sampler.sample(assign_result, concat_anchors,
+ gt_bboxes)
+
+ target_map = concat_anchors.new_zeros(
+ concat_anchors.size(0), self.num_attrib)
+
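+        # target_map rows follow the same layout as the predictions:
+        # [:4] encoded box targets, [4] objectness (set to 1 for positives),
+        # [5:] (optionally label-smoothed) one-hot class targets.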
+ target_map[sampling_result.pos_inds, :4] = self.bbox_coder.encode(
+ sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes,
+ anchor_strides[sampling_result.pos_inds])
+
+ target_map[sampling_result.pos_inds, 4] = 1
+
+ gt_labels_one_hot = F.one_hot(
+ gt_labels, num_classes=self.num_classes).float()
+ if self.one_hot_smoother != 0: # label smooth
+ gt_labels_one_hot = gt_labels_one_hot * (
+ 1 - self.one_hot_smoother
+ ) + self.one_hot_smoother / self.num_classes
+ target_map[sampling_result.pos_inds, 5:] = gt_labels_one_hot[
+ sampling_result.pos_assigned_gt_inds]
+
+ neg_map = concat_anchors.new_zeros(
+ concat_anchors.size(0), dtype=torch.uint8)
+ neg_map[sampling_result.neg_inds] = 1
+
+ return target_map, neg_map
+
+ def aug_test(self, feats, img_metas, rescale=False):
+ """Test function with test time augmentation.
+
+ Args:
+ feats (list[Tensor]): the outer list indicates test-time
+ augmentations and inner Tensor should have a shape NxCxHxW,
+ which contains features for all images in the batch.
+ img_metas (list[list[dict]]): the outer list indicates test-time
+ augs (multiscale, flip, etc.) and the inner list indicates
+ images in a batch. each dict has image information.
+ rescale (bool, optional): Whether to rescale the results.
+ Defaults to False.
+
+ Returns:
+ list[ndarray]: bbox results of each class
+ """
+ return self.aug_test_bboxes(feats, img_metas, rescale=rescale)
diff --git a/annotator/uniformer/mmdet_null/models/detectors/__init__.py b/annotator/uniformer/mmdet_null/models/detectors/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..04011130435cf9fdfadeb821919046b1bddab7d4
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/detectors/__init__.py
@@ -0,0 +1,40 @@
+from .atss import ATSS
+from .base import BaseDetector
+from .cascade_rcnn import CascadeRCNN
+from .cornernet import CornerNet
+from .detr import DETR
+from .fast_rcnn import FastRCNN
+from .faster_rcnn import FasterRCNN
+from .fcos import FCOS
+from .fovea import FOVEA
+from .fsaf import FSAF
+from .gfl import GFL
+from .grid_rcnn import GridRCNN
+from .htc import HybridTaskCascade
+from .kd_one_stage import KnowledgeDistillationSingleStageDetector
+from .mask_rcnn import MaskRCNN
+from .mask_scoring_rcnn import MaskScoringRCNN
+from .nasfcos import NASFCOS
+from .paa import PAA
+from .point_rend import PointRend
+from .reppoints_detector import RepPointsDetector
+from .retinanet import RetinaNet
+from .rpn import RPN
+from .scnet import SCNet
+from .single_stage import SingleStageDetector
+from .sparse_rcnn import SparseRCNN
+from .trident_faster_rcnn import TridentFasterRCNN
+from .two_stage import TwoStageDetector
+from .vfnet import VFNet
+from .yolact import YOLACT
+from .yolo import YOLOV3
+
+__all__ = [
+ 'ATSS', 'BaseDetector', 'SingleStageDetector',
+ 'KnowledgeDistillationSingleStageDetector', 'TwoStageDetector', 'RPN',
+ 'FastRCNN', 'FasterRCNN', 'MaskRCNN', 'CascadeRCNN', 'HybridTaskCascade',
+ 'RetinaNet', 'FCOS', 'GridRCNN', 'MaskScoringRCNN', 'RepPointsDetector',
+ 'FOVEA', 'FSAF', 'NASFCOS', 'PointRend', 'GFL', 'CornerNet', 'PAA',
+ 'YOLOV3', 'YOLACT', 'VFNet', 'DETR', 'TridentFasterRCNN', 'SparseRCNN',
+ 'SCNet'
+]
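+
+# Usage sketch (illustrative only, not executed; assumes the standard mmdet
+# builder API is available in this vendored copy):
+#
+#   from mmdet.models import build_detector
+#   cfg = dict(type='RetinaNet', backbone=..., neck=..., bbox_head=...)
+#   model = build_detector(cfg, train_cfg=None, test_cfg=None)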
diff --git a/annotator/uniformer/mmdet_null/models/detectors/atss.py b/annotator/uniformer/mmdet_null/models/detectors/atss.py
new file mode 100644
index 0000000000000000000000000000000000000000..db7139c6b4fcd7e83007cdb785520743ddae7066
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/detectors/atss.py
@@ -0,0 +1,17 @@
+from ..builder import DETECTORS
+from .single_stage import SingleStageDetector
+
+
+@DETECTORS.register_module()
+class ATSS(SingleStageDetector):
+ """Implementation of `ATSS `_."""
+
+ def __init__(self,
+ backbone,
+ neck,
+ bbox_head,
+ train_cfg=None,
+ test_cfg=None,
+ pretrained=None):
+ super(ATSS, self).__init__(backbone, neck, bbox_head, train_cfg,
+ test_cfg, pretrained)
diff --git a/annotator/uniformer/mmdet_null/models/detectors/base.py b/annotator/uniformer/mmdet_null/models/detectors/base.py
new file mode 100644
index 0000000000000000000000000000000000000000..89134f3696ead442a5ff57184e9d256fdf7d0ba4
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/detectors/base.py
@@ -0,0 +1,355 @@
+from abc import ABCMeta, abstractmethod
+from collections import OrderedDict
+
+import mmcv
+import numpy as np
+import torch
+import torch.distributed as dist
+import torch.nn as nn
+from mmcv.runner import auto_fp16
+from mmcv.utils import print_log
+
+from mmdet.core.visualization import imshow_det_bboxes
+from mmdet.utils import get_root_logger
+
+
+class BaseDetector(nn.Module, metaclass=ABCMeta):
+ """Base class for detectors."""
+
+ def __init__(self):
+ super(BaseDetector, self).__init__()
+ self.fp16_enabled = False
+
+ @property
+ def with_neck(self):
+ """bool: whether the detector has a neck"""
+ return hasattr(self, 'neck') and self.neck is not None
+
+ # TODO: these properties need to be carefully handled
+ # for both single stage & two stage detectors
+ @property
+ def with_shared_head(self):
+ """bool: whether the detector has a shared head in the RoI Head"""
+ return hasattr(self, 'roi_head') and self.roi_head.with_shared_head
+
+ @property
+ def with_bbox(self):
+ """bool: whether the detector has a bbox head"""
+ return ((hasattr(self, 'roi_head') and self.roi_head.with_bbox)
+ or (hasattr(self, 'bbox_head') and self.bbox_head is not None))
+
+ @property
+ def with_mask(self):
+ """bool: whether the detector has a mask head"""
+ return ((hasattr(self, 'roi_head') and self.roi_head.with_mask)
+ or (hasattr(self, 'mask_head') and self.mask_head is not None))
+
+ @abstractmethod
+ def extract_feat(self, imgs):
+ """Extract features from images."""
+ pass
+
+ def extract_feats(self, imgs):
+ """Extract features from multiple images.
+
+ Args:
+ imgs (list[torch.Tensor]): A list of images. The images are
+ augmented from the same image but in different ways.
+
+ Returns:
+ list[torch.Tensor]: Features of different images
+ """
+ assert isinstance(imgs, list)
+ return [self.extract_feat(img) for img in imgs]
+
+ def forward_train(self, imgs, img_metas, **kwargs):
+ """
+ Args:
+            imgs (list[Tensor]): List of tensors of shape (1, C, H, W).
+ Typically these should be mean centered and std scaled.
+ img_metas (list[dict]): List of image info dict where each dict
+ has: 'img_shape', 'scale_factor', 'flip', and may also contain
+ 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
+ For details on the values of these keys, see
+ :class:`mmdet.datasets.pipelines.Collect`.
+ kwargs (keyword arguments): Specific to concrete implementation.
+ """
+ # NOTE the batched image size information may be useful, e.g.
+ # in DETR, this is needed for the construction of masks, which is
+ # then used for the transformer_head.
+ batch_input_shape = tuple(imgs[0].size()[-2:])
+ for img_meta in img_metas:
+ img_meta['batch_input_shape'] = batch_input_shape
+
+ async def async_simple_test(self, img, img_metas, **kwargs):
+ raise NotImplementedError
+
+ @abstractmethod
+ def simple_test(self, img, img_metas, **kwargs):
+ pass
+
+ @abstractmethod
+ def aug_test(self, imgs, img_metas, **kwargs):
+ """Test function with test time augmentation."""
+ pass
+
+ def init_weights(self, pretrained=None):
+ """Initialize the weights in detector.
+
+ Args:
+ pretrained (str, optional): Path to pre-trained weights.
+ Defaults to None.
+ """
+ if pretrained is not None:
+ logger = get_root_logger()
+ print_log(f'load model from: {pretrained}', logger=logger)
+
+ async def aforward_test(self, *, img, img_metas, **kwargs):
+ for var, name in [(img, 'img'), (img_metas, 'img_metas')]:
+ if not isinstance(var, list):
+ raise TypeError(f'{name} must be a list, but got {type(var)}')
+
+ num_augs = len(img)
+ if num_augs != len(img_metas):
+ raise ValueError(f'num of augmentations ({len(img)}) '
+ f'!= num of image metas ({len(img_metas)})')
+ # TODO: remove the restriction of samples_per_gpu == 1 when prepared
+ samples_per_gpu = img[0].size(0)
+ assert samples_per_gpu == 1
+
+ if num_augs == 1:
+ return await self.async_simple_test(img[0], img_metas[0], **kwargs)
+ else:
+ raise NotImplementedError
+
+ def forward_test(self, imgs, img_metas, **kwargs):
+ """
+ Args:
+ imgs (List[Tensor]): the outer list indicates test-time
+ augmentations and inner Tensor should have a shape NxCxHxW,
+ which contains all images in the batch.
+ img_metas (List[List[dict]]): the outer list indicates test-time
+ augs (multiscale, flip, etc.) and the inner list indicates
+ images in a batch.
+ """
+ for var, name in [(imgs, 'imgs'), (img_metas, 'img_metas')]:
+ if not isinstance(var, list):
+ raise TypeError(f'{name} must be a list, but got {type(var)}')
+
+ num_augs = len(imgs)
+ if num_augs != len(img_metas):
+ raise ValueError(f'num of augmentations ({len(imgs)}) '
+ f'!= num of image meta ({len(img_metas)})')
+
+ # NOTE the batched image size information may be useful, e.g.
+ # in DETR, this is needed for the construction of masks, which is
+ # then used for the transformer_head.
+ for img, img_meta in zip(imgs, img_metas):
+ batch_size = len(img_meta)
+ for img_id in range(batch_size):
+ img_meta[img_id]['batch_input_shape'] = tuple(img.size()[-2:])
+
+ if num_augs == 1:
+ # proposals (List[List[Tensor]]): the outer list indicates
+ # test-time augs (multiscale, flip, etc.) and the inner list
+ # indicates images in a batch.
+ # The Tensor should have a shape Px4, where P is the number of
+ # proposals.
+ if 'proposals' in kwargs:
+ kwargs['proposals'] = kwargs['proposals'][0]
+ return self.simple_test(imgs[0], img_metas[0], **kwargs)
+ else:
+ assert imgs[0].size(0) == 1, 'aug test does not support ' \
+ 'inference with batch size ' \
+ f'{imgs[0].size(0)}'
+ # TODO: support test augmentation for predefined proposals
+ assert 'proposals' not in kwargs
+ return self.aug_test(imgs, img_metas, **kwargs)
+
+ @auto_fp16(apply_to=('img', ))
+ def forward(self, img, img_metas, return_loss=True, **kwargs):
+ """Calls either :func:`forward_train` or :func:`forward_test` depending
+ on whether ``return_loss`` is ``True``.
+
+ Note this setting will change the expected inputs. When
+ ``return_loss=True``, img and img_meta are single-nested (i.e. Tensor
+        and List[dict]), and when ``return_loss=False``, img and img_meta
+ should be double nested (i.e. List[Tensor], List[List[dict]]), with
+ the outer list indicating test time augmentations.
+ """
+ if return_loss:
+ return self.forward_train(img, img_metas, **kwargs)
+ else:
+ return self.forward_test(img, img_metas, **kwargs)
+
+ def _parse_losses(self, losses):
+ """Parse the raw outputs (losses) of the network.
+
+ Args:
+ losses (dict): Raw output of the network, which usually contain
+                losses and other necessary information.
+
+ Returns:
+ tuple[Tensor, dict]: (loss, log_vars), loss is the loss tensor \
+ which may be a weighted sum of all losses, log_vars contains \
+ all the variables to be sent to the logger.
+ """
+ log_vars = OrderedDict()
+ for loss_name, loss_value in losses.items():
+ if isinstance(loss_value, torch.Tensor):
+ log_vars[loss_name] = loss_value.mean()
+ elif isinstance(loss_value, list):
+ log_vars[loss_name] = sum(_loss.mean() for _loss in loss_value)
+ else:
+ raise TypeError(
+ f'{loss_name} is not a tensor or list of tensors')
+
+ loss = sum(_value for _key, _value in log_vars.items()
+ if 'loss' in _key)
+
+ log_vars['loss'] = loss
+ for loss_name, loss_value in log_vars.items():
+ # reduce loss when distributed training
+ if dist.is_available() and dist.is_initialized():
+ loss_value = loss_value.data.clone()
+ dist.all_reduce(loss_value.div_(dist.get_world_size()))
+ log_vars[loss_name] = loss_value.item()
+
+ return loss, log_vars
+
+ def train_step(self, data, optimizer):
+ """The iteration step during training.
+
+ This method defines an iteration step during training, except for the
+ back propagation and optimizer updating, which are done in an optimizer
+ hook. Note that in some complicated cases or models, the whole process
+ including back propagation and optimizer updating is also defined in
+ this method, such as GAN.
+
+ Args:
+ data (dict): The output of dataloader.
+ optimizer (:obj:`torch.optim.Optimizer` | dict): The optimizer of
+ runner is passed to ``train_step()``. This argument is unused
+ and reserved.
+
+ Returns:
+ dict: It should contain at least 3 keys: ``loss``, ``log_vars``, \
+ ``num_samples``.
+
+ - ``loss`` is a tensor for back propagation, which can be a \
+ weighted sum of multiple losses.
+ - ``log_vars`` contains all the variables to be sent to the
+ logger.
+ - ``num_samples`` indicates the batch size (when the model is \
+ DDP, it means the batch size on each GPU), which is used for \
+ averaging the logs.
+ """
+ losses = self(**data)
+ loss, log_vars = self._parse_losses(losses)
+
+ outputs = dict(
+ loss=loss, log_vars=log_vars, num_samples=len(data['img_metas']))
+
+ return outputs
+
+ def val_step(self, data, optimizer):
+ """The iteration step during validation.
+
+ This method shares the same signature as :func:`train_step`, but used
+ during val epochs. Note that the evaluation after training epochs is
+ not implemented with this method, but an evaluation hook.
+ """
+ losses = self(**data)
+ loss, log_vars = self._parse_losses(losses)
+
+ outputs = dict(
+ loss=loss, log_vars=log_vars, num_samples=len(data['img_metas']))
+
+ return outputs
+
+ def show_result(self,
+ img,
+ result,
+ score_thr=0.3,
+ bbox_color=(72, 101, 241),
+ text_color=(72, 101, 241),
+ mask_color=None,
+ thickness=2,
+ font_size=13,
+ win_name='',
+ show=False,
+ wait_time=0,
+ out_file=None):
+ """Draw `result` over `img`.
+
+ Args:
+ img (str or Tensor): The image to be displayed.
+ result (Tensor or tuple): The results to draw over `img`
+ bbox_result or (bbox_result, segm_result).
+ score_thr (float, optional): Minimum score of bboxes to be shown.
+ Default: 0.3.
+            bbox_color (str or tuple(int) or :obj:`Color`): Color of bbox
+                lines. The tuple should be in BGR order.
+                Default: (72, 101, 241)
+            text_color (str or tuple(int) or :obj:`Color`): Color of texts.
+                The tuple should be in BGR order.
+                Default: (72, 101, 241)
+ mask_color (None or str or tuple(int) or :obj:`Color`):
+ Color of masks. The tuple of color should be in BGR order.
+ Default: None
+ thickness (int): Thickness of lines. Default: 2
+ font_size (int): Font size of texts. Default: 13
+ win_name (str): The window name. Default: ''
+ wait_time (float): Value of waitKey param.
+ Default: 0.
+ show (bool): Whether to show the image.
+ Default: False.
+ out_file (str or None): The filename to write the image.
+ Default: None.
+
+ Returns:
+            img (ndarray): The image with results drawn on it, returned only
+                when neither `show` nor `out_file` is set.
+ """
+ img = mmcv.imread(img)
+ img = img.copy()
+ if isinstance(result, tuple):
+ bbox_result, segm_result = result
+ if isinstance(segm_result, tuple):
+ segm_result = segm_result[0] # ms rcnn
+ else:
+ bbox_result, segm_result = result, None
+ bboxes = np.vstack(bbox_result)
+ labels = [
+ np.full(bbox.shape[0], i, dtype=np.int32)
+ for i, bbox in enumerate(bbox_result)
+ ]
+ labels = np.concatenate(labels)
+ # draw segmentation masks
+ segms = None
+ if segm_result is not None and len(labels) > 0: # non empty
+ segms = mmcv.concat_list(segm_result)
+ if isinstance(segms[0], torch.Tensor):
+ segms = torch.stack(segms, dim=0).detach().cpu().numpy()
+ else:
+ segms = np.stack(segms, axis=0)
+ # if out_file specified, do not show image in window
+ if out_file is not None:
+ show = False
+ # draw bounding boxes
+ img = imshow_det_bboxes(
+ img,
+ bboxes,
+ labels,
+ segms,
+ class_names=self.CLASSES,
+ score_thr=score_thr,
+ bbox_color=bbox_color,
+ text_color=text_color,
+ mask_color=mask_color,
+ thickness=thickness,
+ font_size=font_size,
+ win_name=win_name,
+ show=show,
+ wait_time=wait_time,
+ out_file=out_file)
+
+ if not (show or out_file):
+ return img
diff --git a/annotator/uniformer/mmdet_null/models/detectors/cascade_rcnn.py b/annotator/uniformer/mmdet_null/models/detectors/cascade_rcnn.py
new file mode 100644
index 0000000000000000000000000000000000000000..d873dceb7e4efdf8d1e7d282badfe9b7118426b9
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/detectors/cascade_rcnn.py
@@ -0,0 +1,46 @@
+from ..builder import DETECTORS
+from .two_stage import TwoStageDetector
+
+
+@DETECTORS.register_module()
+class CascadeRCNN(TwoStageDetector):
+ r"""Implementation of `Cascade R-CNN: Delving into High Quality Object
+    Detection <https://arxiv.org/abs/1906.09756>`_"""
+
+ def __init__(self,
+ backbone,
+ neck=None,
+ rpn_head=None,
+ roi_head=None,
+ train_cfg=None,
+ test_cfg=None,
+ pretrained=None):
+ super(CascadeRCNN, self).__init__(
+ backbone=backbone,
+ neck=neck,
+ rpn_head=rpn_head,
+ roi_head=roi_head,
+ train_cfg=train_cfg,
+ test_cfg=test_cfg,
+ pretrained=pretrained)
+
+ def show_result(self, data, result, **kwargs):
+ """Show prediction results of the detector.
+
+ Args:
+ data (str or np.ndarray): Image filename or loaded image.
+ result (Tensor or tuple): The results to draw over `img`
+ bbox_result or (bbox_result, segm_result).
+
+ Returns:
+ np.ndarray: The image with bboxes drawn on it.
+ """
+ if self.with_mask:
+ ms_bbox_result, ms_segm_result = result
+ if isinstance(ms_bbox_result, dict):
+ result = (ms_bbox_result['ensemble'],
+ ms_segm_result['ensemble'])
+ else:
+ if isinstance(result, dict):
+ result = result['ensemble']
+ return super(CascadeRCNN, self).show_result(data, result, **kwargs)
diff --git a/annotator/uniformer/mmdet_null/models/detectors/cornernet.py b/annotator/uniformer/mmdet_null/models/detectors/cornernet.py
new file mode 100644
index 0000000000000000000000000000000000000000..bb8ccc1465ab66d1615ca16701a533a22b156295
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/detectors/cornernet.py
@@ -0,0 +1,95 @@
+import torch
+
+from mmdet.core import bbox2result, bbox_mapping_back
+from ..builder import DETECTORS
+from .single_stage import SingleStageDetector
+
+
+@DETECTORS.register_module()
+class CornerNet(SingleStageDetector):
+ """CornerNet.
+
+ This detector is the implementation of the paper `CornerNet: Detecting
+    Objects as Paired Keypoints <https://arxiv.org/abs/1808.01244>`_.
+ """
+
+ def __init__(self,
+ backbone,
+ neck,
+ bbox_head,
+ train_cfg=None,
+ test_cfg=None,
+ pretrained=None):
+ super(CornerNet, self).__init__(backbone, neck, bbox_head, train_cfg,
+ test_cfg, pretrained)
+
+ def merge_aug_results(self, aug_results, img_metas):
+ """Merge augmented detection bboxes and score.
+
+ Args:
+ aug_results (list[list[Tensor]]): Det_bboxes and det_labels of each
+ image.
+ img_metas (list[list[dict]]): Meta information of each image, e.g.,
+ image size, scaling factor, etc.
+
+ Returns:
+ tuple: (bboxes, labels)
+ """
+ recovered_bboxes, aug_labels = [], []
+ for bboxes_labels, img_info in zip(aug_results, img_metas):
+ img_shape = img_info[0]['img_shape'] # using shape before padding
+ scale_factor = img_info[0]['scale_factor']
+ flip = img_info[0]['flip']
+ bboxes, labels = bboxes_labels
+ bboxes, scores = bboxes[:, :4], bboxes[:, -1:]
+ bboxes = bbox_mapping_back(bboxes, img_shape, scale_factor, flip)
+ recovered_bboxes.append(torch.cat([bboxes, scores], dim=-1))
+ aug_labels.append(labels)
+
+ bboxes = torch.cat(recovered_bboxes, dim=0)
+ labels = torch.cat(aug_labels)
+
+ if bboxes.shape[0] > 0:
+ out_bboxes, out_labels = self.bbox_head._bboxes_nms(
+ bboxes, labels, self.bbox_head.test_cfg)
+ else:
+ out_bboxes, out_labels = bboxes, labels
+
+ return out_bboxes, out_labels
+
+ def aug_test(self, imgs, img_metas, rescale=False):
+ """Augment testing of CornerNet.
+
+ Args:
+ imgs (list[Tensor]): Augmented images.
+ img_metas (list[list[dict]]): Meta information of each image, e.g.,
+ image size, scaling factor, etc.
+ rescale (bool): If True, return boxes in original image space.
+ Default: False.
+
+ Note:
+            ``imgs`` must include flipped image pairs.
+
+ Returns:
+ list[list[np.ndarray]]: BBox results of each image and classes.
+ The outer list corresponds to each image. The inner list
+ corresponds to each class.
+ """
+ img_inds = list(range(len(imgs)))
+
+ assert img_metas[0][0]['flip'] + img_metas[1][0]['flip'], (
+ 'aug test must have flipped image pair')
+ aug_results = []
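+        # Images are assumed to arrive as (original, flipped) pairs, so the
+        # even/odd indices below are processed and merged together.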
+ for ind, flip_ind in zip(img_inds[0::2], img_inds[1::2]):
+ img_pair = torch.cat([imgs[ind], imgs[flip_ind]])
+ x = self.extract_feat(img_pair)
+ outs = self.bbox_head(x)
+ bbox_list = self.bbox_head.get_bboxes(
+ *outs, [img_metas[ind], img_metas[flip_ind]], False, False)
+ aug_results.append(bbox_list[0])
+ aug_results.append(bbox_list[1])
+
+ bboxes, labels = self.merge_aug_results(aug_results, img_metas)
+ bbox_results = bbox2result(bboxes, labels, self.bbox_head.num_classes)
+
+ return [bbox_results]
diff --git a/annotator/uniformer/mmdet_null/models/detectors/detr.py b/annotator/uniformer/mmdet_null/models/detectors/detr.py
new file mode 100644
index 0000000000000000000000000000000000000000..5ff82a280daa0a015f662bdf2509fa11542d46d4
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/detectors/detr.py
@@ -0,0 +1,46 @@
+from mmdet.core import bbox2result
+from ..builder import DETECTORS
+from .single_stage import SingleStageDetector
+
+
+@DETECTORS.register_module()
+class DETR(SingleStageDetector):
+ r"""Implementation of `DETR: End-to-End Object Detection with
+    Transformers <https://arxiv.org/abs/2005.12872>`_"""
+
+ def __init__(self,
+ backbone,
+ bbox_head,
+ train_cfg=None,
+ test_cfg=None,
+ pretrained=None):
+ super(DETR, self).__init__(backbone, None, bbox_head, train_cfg,
+ test_cfg, pretrained)
+
+ def simple_test(self, img, img_metas, rescale=False):
+ """Test function without test time augmentation.
+
+ Args:
+ imgs (list[torch.Tensor]): List of multiple images
+ img_metas (list[dict]): List of image information.
+ rescale (bool, optional): Whether to rescale the results.
+ Defaults to False.
+
+ Returns:
+ list[list[np.ndarray]]: BBox results of each image and classes.
+ The outer list corresponds to each image. The inner list
+ corresponds to each class.
+ """
+ batch_size = len(img_metas)
+ assert batch_size == 1, 'Currently only batch_size 1 for inference ' \
+ f'mode is supported. Found batch_size {batch_size}.'
+ x = self.extract_feat(img)
+ outs = self.bbox_head(x, img_metas)
+ bbox_list = self.bbox_head.get_bboxes(
+ *outs, img_metas, rescale=rescale)
+
+ bbox_results = [
+ bbox2result(det_bboxes, det_labels, self.bbox_head.num_classes)
+ for det_bboxes, det_labels in bbox_list
+ ]
+ return bbox_results
diff --git a/annotator/uniformer/mmdet_null/models/detectors/fast_rcnn.py b/annotator/uniformer/mmdet_null/models/detectors/fast_rcnn.py
new file mode 100644
index 0000000000000000000000000000000000000000..3d6e242767b927ed37198b6bc7862abecef99a33
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/detectors/fast_rcnn.py
@@ -0,0 +1,52 @@
+from ..builder import DETECTORS
+from .two_stage import TwoStageDetector
+
+
+@DETECTORS.register_module()
+class FastRCNN(TwoStageDetector):
+ """Implementation of `Fast R-CNN `_"""
+
+ def __init__(self,
+ backbone,
+ roi_head,
+ train_cfg,
+ test_cfg,
+ neck=None,
+ pretrained=None):
+ super(FastRCNN, self).__init__(
+ backbone=backbone,
+ neck=neck,
+ roi_head=roi_head,
+ train_cfg=train_cfg,
+ test_cfg=test_cfg,
+ pretrained=pretrained)
+
+ def forward_test(self, imgs, img_metas, proposals, **kwargs):
+ """
+ Args:
+ imgs (List[Tensor]): the outer list indicates test-time
+ augmentations and inner Tensor should have a shape NxCxHxW,
+ which contains all images in the batch.
+ img_metas (List[List[dict]]): the outer list indicates test-time
+ augs (multiscale, flip, etc.) and the inner list indicates
+ images in a batch.
+ proposals (List[List[Tensor]]): the outer list indicates test-time
+ augs (multiscale, flip, etc.) and the inner list indicates
+ images in a batch. The Tensor should have a shape Px4, where
+ P is the number of proposals.
+ """
+ for var, name in [(imgs, 'imgs'), (img_metas, 'img_metas')]:
+ if not isinstance(var, list):
+ raise TypeError(f'{name} must be a list, but got {type(var)}')
+
+ num_augs = len(imgs)
+ if num_augs != len(img_metas):
+ raise ValueError(f'num of augmentations ({len(imgs)}) '
+ f'!= num of image meta ({len(img_metas)})')
+
+ if num_augs == 1:
+ return self.simple_test(imgs[0], img_metas[0], proposals[0],
+ **kwargs)
+ else:
+ # TODO: support test-time augmentation
+            raise NotImplementedError
diff --git a/annotator/uniformer/mmdet_null/models/detectors/faster_rcnn.py b/annotator/uniformer/mmdet_null/models/detectors/faster_rcnn.py
new file mode 100644
index 0000000000000000000000000000000000000000..81bad0f43a48b1022c4cd996e26d6c90be93d4d0
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/detectors/faster_rcnn.py
@@ -0,0 +1,24 @@
+from ..builder import DETECTORS
+from .two_stage import TwoStageDetector
+
+
+@DETECTORS.register_module()
+class FasterRCNN(TwoStageDetector):
+ """Implementation of `Faster R-CNN `_"""
+
+ def __init__(self,
+ backbone,
+ rpn_head,
+ roi_head,
+ train_cfg,
+ test_cfg,
+ neck=None,
+ pretrained=None):
+ super(FasterRCNN, self).__init__(
+ backbone=backbone,
+ neck=neck,
+ rpn_head=rpn_head,
+ roi_head=roi_head,
+ train_cfg=train_cfg,
+ test_cfg=test_cfg,
+ pretrained=pretrained)
diff --git a/annotator/uniformer/mmdet_null/models/detectors/fcos.py b/annotator/uniformer/mmdet_null/models/detectors/fcos.py
new file mode 100644
index 0000000000000000000000000000000000000000..58485c1864a11a66168b7597f345ea759ce20551
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/detectors/fcos.py
@@ -0,0 +1,17 @@
+from ..builder import DETECTORS
+from .single_stage import SingleStageDetector
+
+
+@DETECTORS.register_module()
+class FCOS(SingleStageDetector):
+ """Implementation of `FCOS `_"""
+
+ def __init__(self,
+ backbone,
+ neck,
+ bbox_head,
+ train_cfg=None,
+ test_cfg=None,
+ pretrained=None):
+ super(FCOS, self).__init__(backbone, neck, bbox_head, train_cfg,
+ test_cfg, pretrained)
diff --git a/annotator/uniformer/mmdet_null/models/detectors/fovea.py b/annotator/uniformer/mmdet_null/models/detectors/fovea.py
new file mode 100644
index 0000000000000000000000000000000000000000..22a578efffbd108db644d907bae95c7c8df31f2e
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/detectors/fovea.py
@@ -0,0 +1,17 @@
+from ..builder import DETECTORS
+from .single_stage import SingleStageDetector
+
+
+@DETECTORS.register_module()
+class FOVEA(SingleStageDetector):
+ """Implementation of `FoveaBox `_"""
+
+ def __init__(self,
+ backbone,
+ neck,
+ bbox_head,
+ train_cfg=None,
+ test_cfg=None,
+ pretrained=None):
+ super(FOVEA, self).__init__(backbone, neck, bbox_head, train_cfg,
+ test_cfg, pretrained)
diff --git a/annotator/uniformer/mmdet_null/models/detectors/fsaf.py b/annotator/uniformer/mmdet_null/models/detectors/fsaf.py
new file mode 100644
index 0000000000000000000000000000000000000000..9f10fa1ae10f31e6cb5de65505b14a4fc97dd022
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/detectors/fsaf.py
@@ -0,0 +1,17 @@
+from ..builder import DETECTORS
+from .single_stage import SingleStageDetector
+
+
+@DETECTORS.register_module()
+class FSAF(SingleStageDetector):
+ """Implementation of `FSAF `_"""
+
+ def __init__(self,
+ backbone,
+ neck,
+ bbox_head,
+ train_cfg=None,
+ test_cfg=None,
+ pretrained=None):
+ super(FSAF, self).__init__(backbone, neck, bbox_head, train_cfg,
+ test_cfg, pretrained)
diff --git a/annotator/uniformer/mmdet_null/models/detectors/gfl.py b/annotator/uniformer/mmdet_null/models/detectors/gfl.py
new file mode 100644
index 0000000000000000000000000000000000000000..64d65cb2dfb7a56f57e08c3fcad67e1539e1e841
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/detectors/gfl.py
@@ -0,0 +1,16 @@
+from ..builder import DETECTORS
+from .single_stage import SingleStageDetector
+
+
+@DETECTORS.register_module()
+class GFL(SingleStageDetector):
+
+ def __init__(self,
+ backbone,
+ neck,
+ bbox_head,
+ train_cfg=None,
+ test_cfg=None,
+ pretrained=None):
+ super(GFL, self).__init__(backbone, neck, bbox_head, train_cfg,
+ test_cfg, pretrained)
diff --git a/annotator/uniformer/mmdet_null/models/detectors/grid_rcnn.py b/annotator/uniformer/mmdet_null/models/detectors/grid_rcnn.py
new file mode 100644
index 0000000000000000000000000000000000000000..b6145a1464cd940bd4f98eaa15f6f9ecf6a10a20
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/detectors/grid_rcnn.py
@@ -0,0 +1,29 @@
+from ..builder import DETECTORS
+from .two_stage import TwoStageDetector
+
+
+@DETECTORS.register_module()
+class GridRCNN(TwoStageDetector):
+ """Grid R-CNN.
+
+ This detector is the implementation of:
+ - Grid R-CNN (https://arxiv.org/abs/1811.12030)
+ - Grid R-CNN Plus: Faster and Better (https://arxiv.org/abs/1906.05688)
+ """
+
+ def __init__(self,
+ backbone,
+ rpn_head,
+ roi_head,
+ train_cfg,
+ test_cfg,
+ neck=None,
+ pretrained=None):
+ super(GridRCNN, self).__init__(
+ backbone=backbone,
+ neck=neck,
+ rpn_head=rpn_head,
+ roi_head=roi_head,
+ train_cfg=train_cfg,
+ test_cfg=test_cfg,
+ pretrained=pretrained)
diff --git a/annotator/uniformer/mmdet_null/models/detectors/htc.py b/annotator/uniformer/mmdet_null/models/detectors/htc.py
new file mode 100644
index 0000000000000000000000000000000000000000..d9efdf420fa7373f7f1d116f8d97836d73b457bf
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/detectors/htc.py
@@ -0,0 +1,15 @@
+from ..builder import DETECTORS
+from .cascade_rcnn import CascadeRCNN
+
+
+@DETECTORS.register_module()
+class HybridTaskCascade(CascadeRCNN):
+ """Implementation of `HTC `_"""
+
+ def __init__(self, **kwargs):
+ super(HybridTaskCascade, self).__init__(**kwargs)
+
+ @property
+ def with_semantic(self):
+ """bool: whether the detector has a semantic head"""
+ return self.roi_head.with_semantic
diff --git a/annotator/uniformer/mmdet_null/models/detectors/kd_one_stage.py b/annotator/uniformer/mmdet_null/models/detectors/kd_one_stage.py
new file mode 100644
index 0000000000000000000000000000000000000000..671ec19015c87fefd065b84ae887147f90cc892b
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/detectors/kd_one_stage.py
@@ -0,0 +1,100 @@
+import mmcv
+import torch
+from mmcv.runner import load_checkpoint
+
+from .. import build_detector
+from ..builder import DETECTORS
+from .single_stage import SingleStageDetector
+
+
+@DETECTORS.register_module()
+class KnowledgeDistillationSingleStageDetector(SingleStageDetector):
+ r"""Implementation of `Distilling the Knowledge in a Neural Network.
+    <https://arxiv.org/abs/1503.02531>`_.
+
+ Args:
+ teacher_config (str | dict): Config file path
+ or the config object of teacher model.
+ teacher_ckpt (str, optional): Checkpoint path of teacher model.
+ If left as None, the model will not load any weights.
+ """
+
+ def __init__(self,
+ backbone,
+ neck,
+ bbox_head,
+ teacher_config,
+ teacher_ckpt=None,
+ eval_teacher=True,
+ train_cfg=None,
+ test_cfg=None,
+ pretrained=None):
+ super().__init__(backbone, neck, bbox_head, train_cfg, test_cfg,
+ pretrained)
+ self.eval_teacher = eval_teacher
+ # Build teacher model
+ if isinstance(teacher_config, str):
+ teacher_config = mmcv.Config.fromfile(teacher_config)
+ self.teacher_model = build_detector(teacher_config['model'])
+ if teacher_ckpt is not None:
+ load_checkpoint(
+ self.teacher_model, teacher_ckpt, map_location='cpu')
+
+ def forward_train(self,
+ img,
+ img_metas,
+ gt_bboxes,
+ gt_labels,
+ gt_bboxes_ignore=None):
+ """
+ Args:
+ img (Tensor): Input images of shape (N, C, H, W).
+ Typically these should be mean centered and std scaled.
+ img_metas (list[dict]): A List of image info dict where each dict
+ has: 'img_shape', 'scale_factor', 'flip', and may also contain
+ 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
+ For details on the values of these keys see
+ :class:`mmdet.datasets.pipelines.Collect`.
+ gt_bboxes (list[Tensor]): Each item are the truth boxes for each
+ image in [tl_x, tl_y, br_x, br_y] format.
+ gt_labels (list[Tensor]): Class indices corresponding to each box
+ gt_bboxes_ignore (None | list[Tensor]): Specify which bounding
+ boxes can be ignored when computing the loss.
+ Returns:
+ dict[str, Tensor]: A dictionary of loss components.
+ """
+ x = self.extract_feat(img)
+ with torch.no_grad():
+ teacher_x = self.teacher_model.extract_feat(img)
+ out_teacher = self.teacher_model.bbox_head(teacher_x)
+ losses = self.bbox_head.forward_train(x, out_teacher, img_metas,
+ gt_bboxes, gt_labels,
+ gt_bboxes_ignore)
+ return losses
+
+ def cuda(self, device=None):
+ """Since teacher_model is registered as a plain object, it is necessary
+        to move the teacher model to CUDA when ``cuda`` is called."""
+ self.teacher_model.cuda(device=device)
+ return super().cuda(device=device)
+
+ def train(self, mode=True):
+ """Set the same train mode for teacher and student model."""
+ if self.eval_teacher:
+ self.teacher_model.train(False)
+ else:
+ self.teacher_model.train(mode)
+ super().train(mode)
+
+ def __setattr__(self, name, value):
+ """Set attribute, i.e. self.name = value
+
+        This override prevents the teacher model from being registered as an
+        ``nn.Module``. The teacher model is stored as a plain object, so its
+        parameters do not show up when calling ``self.parameters``,
+        ``self.modules`` or ``self.children``.
+ """
+ if name == 'teacher_model':
+ object.__setattr__(self, name, value)
+ else:
+ super().__setattr__(name, value)
diff --git a/annotator/uniformer/mmdet_null/models/detectors/mask_rcnn.py b/annotator/uniformer/mmdet_null/models/detectors/mask_rcnn.py
new file mode 100644
index 0000000000000000000000000000000000000000..c15a7733170e059d2825138b3812319915b7cad6
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/detectors/mask_rcnn.py
@@ -0,0 +1,24 @@
+from ..builder import DETECTORS
+from .two_stage import TwoStageDetector
+
+
+@DETECTORS.register_module()
+class MaskRCNN(TwoStageDetector):
+ """Implementation of `Mask R-CNN `_"""
+
+ def __init__(self,
+ backbone,
+ rpn_head,
+ roi_head,
+ train_cfg,
+ test_cfg,
+ neck=None,
+ pretrained=None):
+ super(MaskRCNN, self).__init__(
+ backbone=backbone,
+ neck=neck,
+ rpn_head=rpn_head,
+ roi_head=roi_head,
+ train_cfg=train_cfg,
+ test_cfg=test_cfg,
+ pretrained=pretrained)
diff --git a/annotator/uniformer/mmdet_null/models/detectors/mask_scoring_rcnn.py b/annotator/uniformer/mmdet_null/models/detectors/mask_scoring_rcnn.py
new file mode 100644
index 0000000000000000000000000000000000000000..b6252b6e1d234a201725342a5780fade7e21957c
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/detectors/mask_scoring_rcnn.py
@@ -0,0 +1,27 @@
+from ..builder import DETECTORS
+from .two_stage import TwoStageDetector
+
+
+@DETECTORS.register_module()
+class MaskScoringRCNN(TwoStageDetector):
+ """Mask Scoring RCNN.
+
+ https://arxiv.org/abs/1903.00241
+ """
+
+ def __init__(self,
+ backbone,
+ rpn_head,
+ roi_head,
+ train_cfg,
+ test_cfg,
+ neck=None,
+ pretrained=None):
+ super(MaskScoringRCNN, self).__init__(
+ backbone=backbone,
+ neck=neck,
+ rpn_head=rpn_head,
+ roi_head=roi_head,
+ train_cfg=train_cfg,
+ test_cfg=test_cfg,
+ pretrained=pretrained)
diff --git a/annotator/uniformer/mmdet_null/models/detectors/nasfcos.py b/annotator/uniformer/mmdet_null/models/detectors/nasfcos.py
new file mode 100644
index 0000000000000000000000000000000000000000..fb0148351546f45a451ef5f7a2a9ef4024e85b7c
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/detectors/nasfcos.py
@@ -0,0 +1,20 @@
+from ..builder import DETECTORS
+from .single_stage import SingleStageDetector
+
+
+@DETECTORS.register_module()
+class NASFCOS(SingleStageDetector):
+ """NAS-FCOS: Fast Neural Architecture Search for Object Detection.
+
+    https://arxiv.org/abs/1906.04423
+ """
+
+ def __init__(self,
+ backbone,
+ neck,
+ bbox_head,
+ train_cfg=None,
+ test_cfg=None,
+ pretrained=None):
+ super(NASFCOS, self).__init__(backbone, neck, bbox_head, train_cfg,
+ test_cfg, pretrained)
diff --git a/annotator/uniformer/mmdet_null/models/detectors/paa.py b/annotator/uniformer/mmdet_null/models/detectors/paa.py
new file mode 100644
index 0000000000000000000000000000000000000000..9b4bb5e0939b824d9fef7fc3bd49a0164c29613a
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/detectors/paa.py
@@ -0,0 +1,17 @@
+from ..builder import DETECTORS
+from .single_stage import SingleStageDetector
+
+
+@DETECTORS.register_module()
+class PAA(SingleStageDetector):
+ """Implementation of `PAA `_."""
+
+ def __init__(self,
+ backbone,
+ neck,
+ bbox_head,
+ train_cfg=None,
+ test_cfg=None,
+ pretrained=None):
+ super(PAA, self).__init__(backbone, neck, bbox_head, train_cfg,
+ test_cfg, pretrained)
diff --git a/annotator/uniformer/mmdet_null/models/detectors/point_rend.py b/annotator/uniformer/mmdet_null/models/detectors/point_rend.py
new file mode 100644
index 0000000000000000000000000000000000000000..808ef2258ae88301d349db3aaa2711f223e5c971
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/detectors/point_rend.py
@@ -0,0 +1,29 @@
+from ..builder import DETECTORS
+from .two_stage import TwoStageDetector
+
+
+@DETECTORS.register_module()
+class PointRend(TwoStageDetector):
+ """PointRend: Image Segmentation as Rendering
+
+ This detector is the implementation of
+    `PointRend <https://arxiv.org/abs/1912.08193>`_.
+
+ """
+
+ def __init__(self,
+ backbone,
+ rpn_head,
+ roi_head,
+ train_cfg,
+ test_cfg,
+ neck=None,
+ pretrained=None):
+ super(PointRend, self).__init__(
+ backbone=backbone,
+ neck=neck,
+ rpn_head=rpn_head,
+ roi_head=roi_head,
+ train_cfg=train_cfg,
+ test_cfg=test_cfg,
+ pretrained=pretrained)
diff --git a/annotator/uniformer/mmdet_null/models/detectors/reppoints_detector.py b/annotator/uniformer/mmdet_null/models/detectors/reppoints_detector.py
new file mode 100644
index 0000000000000000000000000000000000000000..a5f6be31e14488e4b8a006b7142a82c872388d82
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/detectors/reppoints_detector.py
@@ -0,0 +1,22 @@
+from ..builder import DETECTORS
+from .single_stage import SingleStageDetector
+
+
+@DETECTORS.register_module()
+class RepPointsDetector(SingleStageDetector):
+ """RepPoints: Point Set Representation for Object Detection.
+
+ This detector is the implementation of:
+ - RepPoints detector (https://arxiv.org/pdf/1904.11490)
+ """
+
+ def __init__(self,
+ backbone,
+ neck,
+ bbox_head,
+ train_cfg=None,
+ test_cfg=None,
+ pretrained=None):
+ super(RepPointsDetector,
+ self).__init__(backbone, neck, bbox_head, train_cfg, test_cfg,
+ pretrained)
diff --git a/annotator/uniformer/mmdet_null/models/detectors/retinanet.py b/annotator/uniformer/mmdet_null/models/detectors/retinanet.py
new file mode 100644
index 0000000000000000000000000000000000000000..41378e8bc74bf9d5cbc7e3e6630bb1e6657049f9
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/detectors/retinanet.py
@@ -0,0 +1,17 @@
+from ..builder import DETECTORS
+from .single_stage import SingleStageDetector
+
+
+@DETECTORS.register_module()
+class RetinaNet(SingleStageDetector):
+ """Implementation of `RetinaNet `_"""
+
+ def __init__(self,
+ backbone,
+ neck,
+ bbox_head,
+ train_cfg=None,
+ test_cfg=None,
+ pretrained=None):
+ super(RetinaNet, self).__init__(backbone, neck, bbox_head, train_cfg,
+ test_cfg, pretrained)
diff --git a/annotator/uniformer/mmdet_null/models/detectors/rpn.py b/annotator/uniformer/mmdet_null/models/detectors/rpn.py
new file mode 100644
index 0000000000000000000000000000000000000000..1a77294549d1c3dc7821063c3f3d08bb331fbe59
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/detectors/rpn.py
@@ -0,0 +1,154 @@
+import mmcv
+from mmcv.image import tensor2imgs
+
+from mmdet.core import bbox_mapping
+from ..builder import DETECTORS, build_backbone, build_head, build_neck
+from .base import BaseDetector
+
+
+@DETECTORS.register_module()
+class RPN(BaseDetector):
+ """Implementation of Region Proposal Network."""
+
+ def __init__(self,
+ backbone,
+ neck,
+ rpn_head,
+ train_cfg,
+ test_cfg,
+ pretrained=None):
+ super(RPN, self).__init__()
+ self.backbone = build_backbone(backbone)
+ self.neck = build_neck(neck) if neck is not None else None
+ rpn_train_cfg = train_cfg.rpn if train_cfg is not None else None
+ rpn_head.update(train_cfg=rpn_train_cfg)
+ rpn_head.update(test_cfg=test_cfg.rpn)
+ self.rpn_head = build_head(rpn_head)
+ self.train_cfg = train_cfg
+ self.test_cfg = test_cfg
+ self.init_weights(pretrained=pretrained)
+
+ def init_weights(self, pretrained=None):
+ """Initialize the weights in detector.
+
+ Args:
+ pretrained (str, optional): Path to pre-trained weights.
+ Defaults to None.
+ """
+ super(RPN, self).init_weights(pretrained)
+ self.backbone.init_weights(pretrained=pretrained)
+ if self.with_neck:
+ self.neck.init_weights()
+ self.rpn_head.init_weights()
+
+ def extract_feat(self, img):
+ """Extract features.
+
+ Args:
+ img (torch.Tensor): Image tensor with shape (n, c, h ,w).
+
+ Returns:
+ list[torch.Tensor]: Multi-level features that may have
+ different resolutions.
+ """
+ x = self.backbone(img)
+ if self.with_neck:
+ x = self.neck(x)
+ return x
+
+ def forward_dummy(self, img):
+ """Dummy forward function."""
+ x = self.extract_feat(img)
+ rpn_outs = self.rpn_head(x)
+ return rpn_outs
+
+ def forward_train(self,
+ img,
+ img_metas,
+ gt_bboxes=None,
+ gt_bboxes_ignore=None):
+ """
+ Args:
+ img (Tensor): Input images of shape (N, C, H, W).
+ Typically these should be mean centered and std scaled.
+ img_metas (list[dict]): A List of image info dict where each dict
+ has: 'img_shape', 'scale_factor', 'flip', and may also contain
+ 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
+ For details on the values of these keys see
+ :class:`mmdet.datasets.pipelines.Collect`.
+ gt_bboxes (list[Tensor]): Each item are the truth boxes for each
+ image in [tl_x, tl_y, br_x, br_y] format.
+ gt_bboxes_ignore (None | list[Tensor]): Specify which bounding
+ boxes can be ignored when computing the loss.
+
+ Returns:
+ dict[str, Tensor]: A dictionary of loss components.
+ """
+ if (isinstance(self.train_cfg.rpn, dict)
+ and self.train_cfg.rpn.get('debug', False)):
+ self.rpn_head.debug_imgs = tensor2imgs(img)
+
+ x = self.extract_feat(img)
+ losses = self.rpn_head.forward_train(x, img_metas, gt_bboxes, None,
+ gt_bboxes_ignore)
+ return losses
+
+ def simple_test(self, img, img_metas, rescale=False):
+ """Test function without test time augmentation.
+
+ Args:
+ imgs (list[torch.Tensor]): List of multiple images
+ img_metas (list[dict]): List of image information.
+ rescale (bool, optional): Whether to rescale the results.
+ Defaults to False.
+
+ Returns:
+ list[np.ndarray]: proposals
+ """
+ x = self.extract_feat(img)
+ proposal_list = self.rpn_head.simple_test_rpn(x, img_metas)
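+        # Each proposal tensor is (n, 5): [x1, y1, x2, y2, score]; only the
+        # box coordinates are rescaled below.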
+ if rescale:
+ for proposals, meta in zip(proposal_list, img_metas):
+ proposals[:, :4] /= proposals.new_tensor(meta['scale_factor'])
+
+ return [proposal.cpu().numpy() for proposal in proposal_list]
+
+ def aug_test(self, imgs, img_metas, rescale=False):
+ """Test function with test time augmentation.
+
+ Args:
+ imgs (list[torch.Tensor]): List of multiple images
+ img_metas (list[dict]): List of image information.
+ rescale (bool, optional): Whether to rescale the results.
+ Defaults to False.
+
+ Returns:
+ list[np.ndarray]: proposals
+ """
+ proposal_list = self.rpn_head.aug_test_rpn(
+ self.extract_feats(imgs), img_metas)
+ if not rescale:
+ for proposals, img_meta in zip(proposal_list, img_metas[0]):
+ img_shape = img_meta['img_shape']
+ scale_factor = img_meta['scale_factor']
+ flip = img_meta['flip']
+ flip_direction = img_meta['flip_direction']
+ proposals[:, :4] = bbox_mapping(proposals[:, :4], img_shape,
+ scale_factor, flip,
+ flip_direction)
+ return [proposal.cpu().numpy() for proposal in proposal_list]
+
+ def show_result(self, data, result, top_k=20, **kwargs):
+ """Show RPN proposals on the image.
+
+ Args:
+ data (str or np.ndarray): Image filename or loaded image.
+ result (Tensor or tuple): The results to draw over `img`
+ bbox_result or (bbox_result, segm_result).
+ top_k (int): Plot the first k bboxes only
+ if set positive. Default: 20
+
+ Returns:
+ np.ndarray: The image with bboxes drawn on it.
+ """
+ mmcv.imshow_bboxes(data, result, top_k=top_k)
diff --git a/annotator/uniformer/mmdet_null/models/detectors/scnet.py b/annotator/uniformer/mmdet_null/models/detectors/scnet.py
new file mode 100644
index 0000000000000000000000000000000000000000..04a2347c4ec1efcbfda59a134cddd8bde620d983
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/detectors/scnet.py
@@ -0,0 +1,10 @@
+from ..builder import DETECTORS
+from .cascade_rcnn import CascadeRCNN
+
+
+@DETECTORS.register_module()
+class SCNet(CascadeRCNN):
+ """Implementation of `SCNet `_"""
+
+ def __init__(self, **kwargs):
+ super(SCNet, self).__init__(**kwargs)
diff --git a/annotator/uniformer/mmdet_null/models/detectors/single_stage.py b/annotator/uniformer/mmdet_null/models/detectors/single_stage.py
new file mode 100644
index 0000000000000000000000000000000000000000..5172bdbd945889445eeaa18398c9f0118bb845ad
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/detectors/single_stage.py
@@ -0,0 +1,154 @@
+import torch
+import torch.nn as nn
+
+from mmdet.core import bbox2result
+from ..builder import DETECTORS, build_backbone, build_head, build_neck
+from .base import BaseDetector
+
+
+@DETECTORS.register_module()
+class SingleStageDetector(BaseDetector):
+ """Base class for single-stage detectors.
+
+ Single-stage detectors directly and densely predict bounding boxes on the
+ output features of the backbone+neck.
+ """
+
+ def __init__(self,
+ backbone,
+ neck=None,
+ bbox_head=None,
+ train_cfg=None,
+ test_cfg=None,
+ pretrained=None):
+ super(SingleStageDetector, self).__init__()
+ self.backbone = build_backbone(backbone)
+ if neck is not None:
+ self.neck = build_neck(neck)
+ bbox_head.update(train_cfg=train_cfg)
+ bbox_head.update(test_cfg=test_cfg)
+ self.bbox_head = build_head(bbox_head)
+ self.train_cfg = train_cfg
+ self.test_cfg = test_cfg
+ self.init_weights(pretrained=pretrained)
+
+ def init_weights(self, pretrained=None):
+ """Initialize the weights in detector.
+
+ Args:
+ pretrained (str, optional): Path to pre-trained weights.
+ Defaults to None.
+ """
+ super(SingleStageDetector, self).init_weights(pretrained)
+ self.backbone.init_weights(pretrained=pretrained)
+ if self.with_neck:
+ if isinstance(self.neck, nn.Sequential):
+ for m in self.neck:
+ m.init_weights()
+ else:
+ self.neck.init_weights()
+ self.bbox_head.init_weights()
+
+ def extract_feat(self, img):
+ """Directly extract features from the backbone+neck."""
+ x = self.backbone(img)
+ if self.with_neck:
+ x = self.neck(x)
+ return x
+
+ def forward_dummy(self, img):
+ """Used for computing network flops.
+
+ See `mmdetection/tools/analysis_tools/get_flops.py`
+ """
+ x = self.extract_feat(img)
+ outs = self.bbox_head(x)
+ return outs
+
+ def forward_train(self,
+ img,
+ img_metas,
+ gt_bboxes,
+ gt_labels,
+ gt_bboxes_ignore=None):
+ """
+ Args:
+ img (Tensor): Input images of shape (N, C, H, W).
+ Typically these should be mean centered and std scaled.
+ img_metas (list[dict]): A List of image info dict where each dict
+ has: 'img_shape', 'scale_factor', 'flip', and may also contain
+ 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
+ For details on the values of these keys see
+ :class:`mmdet.datasets.pipelines.Collect`.
+ gt_bboxes (list[Tensor]): Each item are the truth boxes for each
+ image in [tl_x, tl_y, br_x, br_y] format.
+ gt_labels (list[Tensor]): Class indices corresponding to each box
+ gt_bboxes_ignore (None | list[Tensor]): Specify which bounding
+ boxes can be ignored when computing the loss.
+
+ Returns:
+ dict[str, Tensor]: A dictionary of loss components.
+ """
+ super(SingleStageDetector, self).forward_train(img, img_metas)
+ x = self.extract_feat(img)
+ losses = self.bbox_head.forward_train(x, img_metas, gt_bboxes,
+ gt_labels, gt_bboxes_ignore)
+ return losses
+
+ def simple_test(self, img, img_metas, rescale=False):
+ """Test function without test time augmentation.
+
+ Args:
+ imgs (list[torch.Tensor]): List of multiple images
+ img_metas (list[dict]): List of image information.
+ rescale (bool, optional): Whether to rescale the results.
+ Defaults to False.
+
+ Returns:
+ list[list[np.ndarray]]: BBox results of each image and classes.
+ The outer list corresponds to each image. The inner list
+ corresponds to each class.
+ """
+ x = self.extract_feat(img)
+ outs = self.bbox_head(x)
+ # get origin input shape to support onnx dynamic shape
+ if torch.onnx.is_in_onnx_export():
+ # get shape as tensor
+ img_shape = torch._shape_as_tensor(img)[2:]
+ img_metas[0]['img_shape_for_onnx'] = img_shape
+ bbox_list = self.bbox_head.get_bboxes(
+ *outs, img_metas, rescale=rescale)
+ # skip post-processing when exporting to ONNX
+ if torch.onnx.is_in_onnx_export():
+ return bbox_list
+
+ bbox_results = [
+ bbox2result(det_bboxes, det_labels, self.bbox_head.num_classes)
+ for det_bboxes, det_labels in bbox_list
+ ]
+ return bbox_results
+
+ def aug_test(self, imgs, img_metas, rescale=False):
+ """Test function with test time augmentation.
+
+ Args:
+ imgs (list[Tensor]): the outer list indicates test-time
+ augmentations and inner Tensor should have a shape NxCxHxW,
+ which contains all images in the batch.
+ img_metas (list[list[dict]]): the outer list indicates test-time
+ augs (multiscale, flip, etc.) and the inner list indicates
+ images in a batch. each dict has image information.
+ rescale (bool, optional): Whether to rescale the results.
+ Defaults to False.
+
+ Returns:
+ list[list[np.ndarray]]: BBox results of each image and classes.
+ The outer list corresponds to each image. The inner list
+ corresponds to each class.
+ """
+ assert hasattr(self.bbox_head, 'aug_test'), \
+ f'{self.bbox_head.__class__.__name__}' \
+ ' does not support test-time augmentation'
+
+ feats = self.extract_feats(imgs)
+ return [self.bbox_head.aug_test(feats, img_metas, rescale=rescale)]
diff --git a/annotator/uniformer/mmdet_null/models/detectors/sparse_rcnn.py b/annotator/uniformer/mmdet_null/models/detectors/sparse_rcnn.py
new file mode 100644
index 0000000000000000000000000000000000000000..0dbd0250f189e610a0bbc72b0dab2559e26857ae
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/detectors/sparse_rcnn.py
@@ -0,0 +1,110 @@
+from ..builder import DETECTORS
+from .two_stage import TwoStageDetector
+
+
+@DETECTORS.register_module()
+class SparseRCNN(TwoStageDetector):
+ r"""Implementation of `Sparse R-CNN: End-to-End Object Detection with
+    Learnable Proposals <https://arxiv.org/abs/2011.12450>`_"""
+
+ def __init__(self, *args, **kwargs):
+ super(SparseRCNN, self).__init__(*args, **kwargs)
+        assert self.with_rpn, \
+            'Sparse R-CNN does not support external proposals'
+
+ def forward_train(self,
+ img,
+ img_metas,
+ gt_bboxes,
+ gt_labels,
+ gt_bboxes_ignore=None,
+ gt_masks=None,
+ proposals=None,
+ **kwargs):
+ """Forward function of SparseR-CNN in train stage.
+
+ Args:
+ img (Tensor): of shape (N, C, H, W) encoding input images.
+ Typically these should be mean centered and std scaled.
+ img_metas (list[dict]): list of image info dict where each dict
+ has: 'img_shape', 'scale_factor', 'flip', and may also contain
+ 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
+ For details on the values of these keys see
+ :class:`mmdet.datasets.pipelines.Collect`.
+ gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
+ shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
+ gt_labels (list[Tensor]): class indices corresponding to each box
+            gt_bboxes_ignore (None | list[Tensor]): specify which bounding
+ boxes can be ignored when computing the loss.
+            gt_masks (List[Tensor], optional): Segmentation masks for
+                each box. This architecture does not support masks.
+ proposals (List[Tensor], optional): override rpn proposals with
+ custom proposals. Use when `with_rpn` is False.
+
+ Returns:
+ dict[str, Tensor]: a dictionary of loss components
+ """
+
+ assert proposals is None, 'Sparse R-CNN does not support' \
+ ' external proposals'
+        assert gt_masks is None, \
+            'Sparse R-CNN does not support instance segmentation'
+
+ x = self.extract_feat(img)
+ proposal_boxes, proposal_features, imgs_whwh = \
+ self.rpn_head.forward_train(x, img_metas)
+ roi_losses = self.roi_head.forward_train(
+ x,
+ proposal_boxes,
+ proposal_features,
+ img_metas,
+ gt_bboxes,
+ gt_labels,
+ gt_bboxes_ignore=gt_bboxes_ignore,
+ gt_masks=gt_masks,
+ imgs_whwh=imgs_whwh)
+ return roi_losses
+
+ def simple_test(self, img, img_metas, rescale=False):
+ """Test function without test time augmentation.
+
+ Args:
+            img (Tensor): Input images of shape (N, C, H, W).
+ img_metas (list[dict]): List of image information.
+ rescale (bool): Whether to rescale the results.
+ Defaults to False.
+
+ Returns:
+ list[list[np.ndarray]]: BBox results of each image and classes.
+ The outer list corresponds to each image. The inner list
+ corresponds to each class.
+ """
+ x = self.extract_feat(img)
+ proposal_boxes, proposal_features, imgs_whwh = \
+ self.rpn_head.simple_test_rpn(x, img_metas)
+ bbox_results = self.roi_head.simple_test(
+ x,
+ proposal_boxes,
+ proposal_features,
+ img_metas,
+ imgs_whwh=imgs_whwh,
+ rescale=rescale)
+ return bbox_results
+
+ def forward_dummy(self, img):
+ """Used for computing network flops.
+
+ See `mmdetection/tools/analysis_tools/get_flops.py`
+ """
+ # backbone
+ x = self.extract_feat(img)
+ # rpn
+ num_imgs = len(img)
+ dummy_img_metas = [
+ dict(img_shape=(800, 1333, 3)) for _ in range(num_imgs)
+ ]
+ proposal_boxes, proposal_features, imgs_whwh = \
+ self.rpn_head.simple_test_rpn(x, dummy_img_metas)
+ # roi_head
+ roi_outs = self.roi_head.forward_dummy(x, proposal_boxes,
+ proposal_features,
+ dummy_img_metas)
+ return roi_outs
diff --git a/annotator/uniformer/mmdet_null/models/detectors/trident_faster_rcnn.py b/annotator/uniformer/mmdet_null/models/detectors/trident_faster_rcnn.py
new file mode 100644
index 0000000000000000000000000000000000000000..f0fd80d41407162df71ba5349fc659d4713cdb6e
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/detectors/trident_faster_rcnn.py
@@ -0,0 +1,66 @@
+from ..builder import DETECTORS
+from .faster_rcnn import FasterRCNN
+
+
+@DETECTORS.register_module()
+class TridentFasterRCNN(FasterRCNN):
+    """Implementation of `TridentNet <https://arxiv.org/abs/1901.01892>`_"""
+
+ def __init__(self,
+ backbone,
+ rpn_head,
+ roi_head,
+ train_cfg,
+ test_cfg,
+ neck=None,
+ pretrained=None):
+
+ super(TridentFasterRCNN, self).__init__(
+ backbone=backbone,
+ neck=neck,
+ rpn_head=rpn_head,
+ roi_head=roi_head,
+ train_cfg=train_cfg,
+ test_cfg=test_cfg,
+ pretrained=pretrained)
+ assert self.backbone.num_branch == self.roi_head.num_branch
+ assert self.backbone.test_branch_idx == self.roi_head.test_branch_idx
+ self.num_branch = self.backbone.num_branch
+ self.test_branch_idx = self.backbone.test_branch_idx
+
+ def simple_test(self, img, img_metas, proposals=None, rescale=False):
+ """Test without augmentation."""
+ assert self.with_bbox, 'Bbox head must be implemented.'
+ x = self.extract_feat(img)
+ if proposals is None:
+ num_branch = (self.num_branch if self.test_branch_idx == -1 else 1)
+ trident_img_metas = img_metas * num_branch
+ proposal_list = self.rpn_head.simple_test_rpn(x, trident_img_metas)
+        else:
+            proposal_list = proposals
+            trident_img_metas = img_metas
+
+ return self.roi_head.simple_test(
+ x, proposal_list, trident_img_metas, rescale=rescale)
+
+ def aug_test(self, imgs, img_metas, rescale=False):
+ """Test with augmentations.
+
+ If rescale is False, then returned bboxes and masks will fit the scale
+ of imgs[0].
+ """
+ x = self.extract_feats(imgs)
+ num_branch = (self.num_branch if self.test_branch_idx == -1 else 1)
+        trident_img_metas = [img_meta * num_branch for img_meta in img_metas]
+ proposal_list = self.rpn_head.aug_test_rpn(x, trident_img_metas)
+ return self.roi_head.aug_test(
+ x, proposal_list, img_metas, rescale=rescale)
+
+ def forward_train(self, img, img_metas, gt_bboxes, gt_labels, **kwargs):
+ """make copies of img and gts to fit multi-branch."""
+ trident_gt_bboxes = tuple(gt_bboxes * self.num_branch)
+ trident_gt_labels = tuple(gt_labels * self.num_branch)
+ trident_img_metas = tuple(img_metas * self.num_branch)
+
+ return super(TridentFasterRCNN,
+ self).forward_train(img, trident_img_metas,
+ trident_gt_bboxes, trident_gt_labels)
diff --git a/annotator/uniformer/mmdet_null/models/detectors/two_stage.py b/annotator/uniformer/mmdet_null/models/detectors/two_stage.py
new file mode 100644
index 0000000000000000000000000000000000000000..ba5bdde980dc0cd76375455c9c7ffaae4b25531e
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/detectors/two_stage.py
@@ -0,0 +1,215 @@
+import torch
+import torch.nn as nn
+
+# from mmdet.core import bbox2result, bbox2roi, build_assigner, build_sampler
+from ..builder import DETECTORS, build_backbone, build_head, build_neck
+from .base import BaseDetector
+
+
+@DETECTORS.register_module()
+class TwoStageDetector(BaseDetector):
+ """Base class for two-stage detectors.
+
+    Two-stage detectors typically consist of a region proposal network and a
+    task-specific regression head.
+ """
+
+ def __init__(self,
+ backbone,
+ neck=None,
+ rpn_head=None,
+ roi_head=None,
+ train_cfg=None,
+ test_cfg=None,
+ pretrained=None):
+ super(TwoStageDetector, self).__init__()
+ self.backbone = build_backbone(backbone)
+
+ if neck is not None:
+ self.neck = build_neck(neck)
+
+ if rpn_head is not None:
+ rpn_train_cfg = train_cfg.rpn if train_cfg is not None else None
+ rpn_head_ = rpn_head.copy()
+ rpn_head_.update(train_cfg=rpn_train_cfg, test_cfg=test_cfg.rpn)
+ self.rpn_head = build_head(rpn_head_)
+
+ if roi_head is not None:
+ # update train and test cfg here for now
+ # TODO: refactor assigner & sampler
+ rcnn_train_cfg = train_cfg.rcnn if train_cfg is not None else None
+ roi_head.update(train_cfg=rcnn_train_cfg)
+ roi_head.update(test_cfg=test_cfg.rcnn)
+ self.roi_head = build_head(roi_head)
+
+ self.train_cfg = train_cfg
+ self.test_cfg = test_cfg
+
+ self.init_weights(pretrained=pretrained)
+
+ @property
+ def with_rpn(self):
+ """bool: whether the detector has RPN"""
+ return hasattr(self, 'rpn_head') and self.rpn_head is not None
+
+ @property
+ def with_roi_head(self):
+ """bool: whether the detector has a RoI head"""
+ return hasattr(self, 'roi_head') and self.roi_head is not None
+
+ def init_weights(self, pretrained=None):
+ """Initialize the weights in detector.
+
+ Args:
+ pretrained (str, optional): Path to pre-trained weights.
+ Defaults to None.
+ """
+ super(TwoStageDetector, self).init_weights(pretrained)
+ self.backbone.init_weights(pretrained=pretrained)
+ if self.with_neck:
+ if isinstance(self.neck, nn.Sequential):
+ for m in self.neck:
+ m.init_weights()
+ else:
+ self.neck.init_weights()
+ if self.with_rpn:
+ self.rpn_head.init_weights()
+ if self.with_roi_head:
+ self.roi_head.init_weights(pretrained)
+
+ def extract_feat(self, img):
+ """Directly extract features from the backbone+neck."""
+ x = self.backbone(img)
+ if self.with_neck:
+ x = self.neck(x)
+ return x
+
+ def forward_dummy(self, img):
+ """Used for computing network flops.
+
+ See `mmdetection/tools/analysis_tools/get_flops.py`
+ """
+ outs = ()
+ # backbone
+ x = self.extract_feat(img)
+ # rpn
+ if self.with_rpn:
+ rpn_outs = self.rpn_head(x)
+ outs = outs + (rpn_outs, )
+ proposals = torch.randn(1000, 4).to(img.device)
+ # roi_head
+ roi_outs = self.roi_head.forward_dummy(x, proposals)
+ outs = outs + (roi_outs, )
+ return outs
+
+ def forward_train(self,
+ img,
+ img_metas,
+ gt_bboxes,
+ gt_labels,
+ gt_bboxes_ignore=None,
+ gt_masks=None,
+ proposals=None,
+ **kwargs):
+ """
+ Args:
+ img (Tensor): of shape (N, C, H, W) encoding input images.
+ Typically these should be mean centered and std scaled.
+
+ img_metas (list[dict]): list of image info dict where each dict
+ has: 'img_shape', 'scale_factor', 'flip', and may also contain
+ 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
+ For details on the values of these keys see
+ `mmdet/datasets/pipelines/formatting.py:Collect`.
+
+ gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
+ shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
+
+ gt_labels (list[Tensor]): class indices corresponding to each box
+
+ gt_bboxes_ignore (None | list[Tensor]): specify which bounding
+ boxes can be ignored when computing the loss.
+
+ gt_masks (None | Tensor) : true segmentation masks for each box
+ used if the architecture supports a segmentation task.
+
+ proposals : override rpn proposals with custom proposals. Use when
+ `with_rpn` is False.
+
+ Returns:
+ dict[str, Tensor]: a dictionary of loss components
+ """
+ x = self.extract_feat(img)
+
+ losses = dict()
+
+ # RPN forward and loss
+ if self.with_rpn:
+ proposal_cfg = self.train_cfg.get('rpn_proposal',
+ self.test_cfg.rpn)
+ rpn_losses, proposal_list = self.rpn_head.forward_train(
+ x,
+ img_metas,
+ gt_bboxes,
+ gt_labels=None,
+ gt_bboxes_ignore=gt_bboxes_ignore,
+ proposal_cfg=proposal_cfg)
+ losses.update(rpn_losses)
+ else:
+ proposal_list = proposals
+
+ roi_losses = self.roi_head.forward_train(x, img_metas, proposal_list,
+ gt_bboxes, gt_labels,
+ gt_bboxes_ignore, gt_masks,
+ **kwargs)
+ losses.update(roi_losses)
+
+ return losses
+
+ async def async_simple_test(self,
+ img,
+ img_meta,
+ proposals=None,
+ rescale=False):
+ """Async test without augmentation."""
+ assert self.with_bbox, 'Bbox head must be implemented.'
+ x = self.extract_feat(img)
+
+ if proposals is None:
+ proposal_list = await self.rpn_head.async_simple_test_rpn(
+ x, img_meta)
+ else:
+ proposal_list = proposals
+
+ return await self.roi_head.async_simple_test(
+ x, proposal_list, img_meta, rescale=rescale)
+
+ def simple_test(self, img, img_metas, proposals=None, rescale=False):
+ """Test without augmentation."""
+ assert self.with_bbox, 'Bbox head must be implemented.'
+
+ x = self.extract_feat(img)
+
+        # get the original input shape to support onnx dynamic input shape
+ if torch.onnx.is_in_onnx_export():
+ img_shape = torch._shape_as_tensor(img)[2:]
+ img_metas[0]['img_shape_for_onnx'] = img_shape
+
+ if proposals is None:
+ proposal_list = self.rpn_head.simple_test_rpn(x, img_metas)
+ else:
+ proposal_list = proposals
+
+ return self.roi_head.simple_test(
+ x, proposal_list, img_metas, rescale=rescale)
+
+ def aug_test(self, imgs, img_metas, rescale=False):
+ """Test with augmentations.
+
+ If rescale is False, then returned bboxes and masks will fit the scale
+ of imgs[0].
+ """
+ x = self.extract_feats(imgs)
+ proposal_list = self.rpn_head.aug_test_rpn(x, img_metas)
+ return self.roi_head.aug_test(
+ x, proposal_list, img_metas, rescale=rescale)
diff --git a/annotator/uniformer/mmdet_null/models/detectors/vfnet.py b/annotator/uniformer/mmdet_null/models/detectors/vfnet.py
new file mode 100644
index 0000000000000000000000000000000000000000..e23f89674c919921219ffd3486587a2d3c318fbd
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/detectors/vfnet.py
@@ -0,0 +1,18 @@
+from ..builder import DETECTORS
+from .single_stage import SingleStageDetector
+
+
+@DETECTORS.register_module()
+class VFNet(SingleStageDetector):
+    """Implementation of `VarifocalNet
+    (VFNet) <https://arxiv.org/abs/2008.13367>`_"""
+
+ def __init__(self,
+ backbone,
+ neck,
+ bbox_head,
+ train_cfg=None,
+ test_cfg=None,
+ pretrained=None):
+ super(VFNet, self).__init__(backbone, neck, bbox_head, train_cfg,
+ test_cfg, pretrained)
diff --git a/annotator/uniformer/mmdet_null/models/detectors/yolact.py b/annotator/uniformer/mmdet_null/models/detectors/yolact.py
new file mode 100644
index 0000000000000000000000000000000000000000..f32fde0d3dcbb55a405e05df433c4353938a148b
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/detectors/yolact.py
@@ -0,0 +1,146 @@
+import torch
+
+from mmdet.core import bbox2result
+from ..builder import DETECTORS, build_head
+from .single_stage import SingleStageDetector
+
+
+@DETECTORS.register_module()
+class YOLACT(SingleStageDetector):
+    """Implementation of `YOLACT <https://arxiv.org/abs/1904.02689>`_"""
+
+ def __init__(self,
+ backbone,
+ neck,
+ bbox_head,
+ segm_head,
+ mask_head,
+ train_cfg=None,
+ test_cfg=None,
+ pretrained=None):
+ super(YOLACT, self).__init__(backbone, neck, bbox_head, train_cfg,
+ test_cfg, pretrained)
+ self.segm_head = build_head(segm_head)
+ self.mask_head = build_head(mask_head)
+ self.init_segm_mask_weights()
+
+ def init_segm_mask_weights(self):
+ """Initialize weights of the YOLACT segm head and YOLACT mask head."""
+ self.segm_head.init_weights()
+ self.mask_head.init_weights()
+
+ def forward_dummy(self, img):
+ """Used for computing network flops.
+
+ See `mmdetection/tools/analysis_tools/get_flops.py`
+ """
+ raise NotImplementedError
+
+ def forward_train(self,
+ img,
+ img_metas,
+ gt_bboxes,
+ gt_labels,
+ gt_bboxes_ignore=None,
+ gt_masks=None):
+ """
+ Args:
+ img (Tensor): of shape (N, C, H, W) encoding input images.
+ Typically these should be mean centered and std scaled.
+ img_metas (list[dict]): list of image info dict where each dict
+ has: 'img_shape', 'scale_factor', 'flip', and may also contain
+ 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
+ For details on the values of these keys see
+ `mmdet/datasets/pipelines/formatting.py:Collect`.
+ gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
+ shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
+ gt_labels (list[Tensor]): class indices corresponding to each box
+ gt_bboxes_ignore (None | list[Tensor]): specify which bounding
+ boxes can be ignored when computing the loss.
+ gt_masks (None | Tensor) : true segmentation masks for each box
+ used if the architecture supports a segmentation task.
+
+ Returns:
+ dict[str, Tensor]: a dictionary of loss components
+ """
+ # convert Bitmap mask or Polygon Mask to Tensor here
+ gt_masks = [
+ gt_mask.to_tensor(dtype=torch.uint8, device=img.device)
+ for gt_mask in gt_masks
+ ]
+
+ x = self.extract_feat(img)
+
+ cls_score, bbox_pred, coeff_pred = self.bbox_head(x)
+ bbox_head_loss_inputs = (cls_score, bbox_pred) + (gt_bboxes, gt_labels,
+ img_metas)
+ losses, sampling_results = self.bbox_head.loss(
+ *bbox_head_loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore)
+
+ segm_head_outs = self.segm_head(x[0])
+ loss_segm = self.segm_head.loss(segm_head_outs, gt_masks, gt_labels)
+ losses.update(loss_segm)
+
+ mask_pred = self.mask_head(x[0], coeff_pred, gt_bboxes, img_metas,
+ sampling_results)
+ loss_mask = self.mask_head.loss(mask_pred, gt_masks, gt_bboxes,
+ img_metas, sampling_results)
+ losses.update(loss_mask)
+
+ # check NaN and Inf
+ for loss_name in losses.keys():
+ assert torch.isfinite(torch.stack(losses[loss_name]))\
+ .all().item(), '{} becomes infinite or NaN!'\
+ .format(loss_name)
+
+ return losses
+
+ def simple_test(self, img, img_metas, rescale=False):
+ """Test function without test time augmentation."""
+ x = self.extract_feat(img)
+
+ cls_score, bbox_pred, coeff_pred = self.bbox_head(x)
+
+ bbox_inputs = (cls_score, bbox_pred,
+ coeff_pred) + (img_metas, self.test_cfg, rescale)
+ det_bboxes, det_labels, det_coeffs = self.bbox_head.get_bboxes(
+ *bbox_inputs)
+ bbox_results = [
+ bbox2result(det_bbox, det_label, self.bbox_head.num_classes)
+ for det_bbox, det_label in zip(det_bboxes, det_labels)
+ ]
+
+ num_imgs = len(img_metas)
+ scale_factors = tuple(meta['scale_factor'] for meta in img_metas)
+ if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes):
+ segm_results = [[[] for _ in range(self.mask_head.num_classes)]
+ for _ in range(num_imgs)]
+ else:
+ # if det_bboxes is rescaled to the original image size, we need to
+ # rescale it back to the testing scale to obtain RoIs.
+ if rescale and not isinstance(scale_factors[0], float):
+ scale_factors = [
+ torch.from_numpy(scale_factor).to(det_bboxes[0].device)
+ for scale_factor in scale_factors
+ ]
+ _bboxes = [
+ det_bboxes[i][:, :4] *
+ scale_factors[i] if rescale else det_bboxes[i][:, :4]
+ for i in range(len(det_bboxes))
+ ]
+ mask_preds = self.mask_head(x[0], det_coeffs, _bboxes, img_metas)
+ # apply mask post-processing to each image individually
+ segm_results = []
+ for i in range(num_imgs):
+ if det_bboxes[i].shape[0] == 0:
+ segm_results.append(
+ [[] for _ in range(self.mask_head.num_classes)])
+ else:
+ segm_result = self.mask_head.get_seg_masks(
+ mask_preds[i], det_labels[i], img_metas[i], rescale)
+ segm_results.append(segm_result)
+ return list(zip(bbox_results, segm_results))
+
+ def aug_test(self, imgs, img_metas, rescale=False):
+ """Test with augmentations."""
+ raise NotImplementedError
diff --git a/annotator/uniformer/mmdet_null/models/detectors/yolo.py b/annotator/uniformer/mmdet_null/models/detectors/yolo.py
new file mode 100644
index 0000000000000000000000000000000000000000..240aab20f857befe25e64114300ebb15a66c6a70
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/detectors/yolo.py
@@ -0,0 +1,18 @@
+# Copyright (c) 2019 Western Digital Corporation or its affiliates.
+
+from ..builder import DETECTORS
+from .single_stage import SingleStageDetector
+
+
+@DETECTORS.register_module()
+class YOLOV3(SingleStageDetector):
+
+ def __init__(self,
+ backbone,
+ neck,
+ bbox_head,
+ train_cfg=None,
+ test_cfg=None,
+ pretrained=None):
+ super(YOLOV3, self).__init__(backbone, neck, bbox_head, train_cfg,
+ test_cfg, pretrained)
diff --git a/annotator/uniformer/mmdet_null/models/losses/__init__.py b/annotator/uniformer/mmdet_null/models/losses/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..297aa228277768eb0ba0e8a377f19704d1feeca8
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/losses/__init__.py
@@ -0,0 +1,29 @@
+from .accuracy import Accuracy, accuracy
+from .ae_loss import AssociativeEmbeddingLoss
+from .balanced_l1_loss import BalancedL1Loss, balanced_l1_loss
+from .cross_entropy_loss import (CrossEntropyLoss, binary_cross_entropy,
+ cross_entropy, mask_cross_entropy)
+from .focal_loss import FocalLoss, sigmoid_focal_loss
+from .gaussian_focal_loss import GaussianFocalLoss
+from .gfocal_loss import DistributionFocalLoss, QualityFocalLoss
+from .ghm_loss import GHMC, GHMR
+from .iou_loss import (BoundedIoULoss, CIoULoss, DIoULoss, GIoULoss, IoULoss,
+ bounded_iou_loss, iou_loss)
+from .kd_loss import KnowledgeDistillationKLDivLoss
+from .mse_loss import MSELoss, mse_loss
+from .pisa_loss import carl_loss, isr_p
+from .smooth_l1_loss import L1Loss, SmoothL1Loss, l1_loss, smooth_l1_loss
+from .utils import reduce_loss, weight_reduce_loss, weighted_loss
+from .varifocal_loss import VarifocalLoss
+
+__all__ = [
+ 'accuracy', 'Accuracy', 'cross_entropy', 'binary_cross_entropy',
+ 'mask_cross_entropy', 'CrossEntropyLoss', 'sigmoid_focal_loss',
+ 'FocalLoss', 'smooth_l1_loss', 'SmoothL1Loss', 'balanced_l1_loss',
+ 'BalancedL1Loss', 'mse_loss', 'MSELoss', 'iou_loss', 'bounded_iou_loss',
+ 'IoULoss', 'BoundedIoULoss', 'GIoULoss', 'DIoULoss', 'CIoULoss', 'GHMC',
+ 'GHMR', 'reduce_loss', 'weight_reduce_loss', 'weighted_loss', 'L1Loss',
+ 'l1_loss', 'isr_p', 'carl_loss', 'AssociativeEmbeddingLoss',
+ 'GaussianFocalLoss', 'QualityFocalLoss', 'DistributionFocalLoss',
+ 'VarifocalLoss', 'KnowledgeDistillationKLDivLoss'
+]
diff --git a/annotator/uniformer/mmdet_null/models/losses/accuracy.py b/annotator/uniformer/mmdet_null/models/losses/accuracy.py
new file mode 100644
index 0000000000000000000000000000000000000000..789a2240a491289c5801b6690116e8ca657d004f
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/losses/accuracy.py
@@ -0,0 +1,78 @@
+import mmcv
+import torch.nn as nn
+
+
+@mmcv.jit(coderize=True)
+def accuracy(pred, target, topk=1, thresh=None):
+ """Calculate accuracy according to the prediction and target.
+
+ Args:
+ pred (torch.Tensor): The model prediction, shape (N, num_class)
+ target (torch.Tensor): The target of each prediction, shape (N, )
+        topk (int | tuple[int], optional): If the predictions in ``topk``
+            match the target, the predictions will be regarded as
+            correct ones. Defaults to 1.
+        thresh (float, optional): If not None, predictions with scores under
+            this threshold are considered incorrect. Defaults to None.
+
+ Returns:
+ float | tuple[float]: If the input ``topk`` is a single integer,
+ the function will return a single float as accuracy. If
+ ``topk`` is a tuple containing multiple integers, the
+ function will return a tuple containing accuracies of
+ each ``topk`` number.
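+
+    Example:
+        A small illustrative sketch with made-up scores; two of the three
+        top-1 predictions match the target, so top-1 accuracy is ~66.7%.
+
+        >>> import torch
+        >>> pred = torch.tensor([[0.1, 0.2, 0.6, 0.1],
+        ...                      [0.3, 0.4, 0.2, 0.1],
+        ...                      [0.1, 0.1, 0.1, 0.7]])
+        >>> target = torch.tensor([2, 0, 3])
+        >>> acc = accuracy(pred, target, topk=1)
+        >>> assert abs(float(acc) - 100 * 2 / 3) < 1e-4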
+ """
+ assert isinstance(topk, (int, tuple))
+ if isinstance(topk, int):
+ topk = (topk, )
+ return_single = True
+ else:
+ return_single = False
+
+ maxk = max(topk)
+ if pred.size(0) == 0:
+ accu = [pred.new_tensor(0.) for i in range(len(topk))]
+ return accu[0] if return_single else accu
+ assert pred.ndim == 2 and target.ndim == 1
+ assert pred.size(0) == target.size(0)
+ assert maxk <= pred.size(1), \
+ f'maxk {maxk} exceeds pred dimension {pred.size(1)}'
+ pred_value, pred_label = pred.topk(maxk, dim=1)
+ pred_label = pred_label.t() # transpose to shape (maxk, N)
+ correct = pred_label.eq(target.view(1, -1).expand_as(pred_label))
+ if thresh is not None:
+ # Only prediction values larger than thresh are counted as correct
+ correct = correct & (pred_value > thresh).t()
+ res = []
+ for k in topk:
+ correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
+ res.append(correct_k.mul_(100.0 / pred.size(0)))
+ return res[0] if return_single else res
+
+
+class Accuracy(nn.Module):
+
+ def __init__(self, topk=(1, ), thresh=None):
+ """Module to calculate the accuracy.
+
+ Args:
+ topk (tuple, optional): The criterion used to calculate the
+ accuracy. Defaults to (1,).
+ thresh (float, optional): If not None, predictions with scores
+                under this threshold are considered incorrect. Defaults to None.
+ """
+ super().__init__()
+ self.topk = topk
+ self.thresh = thresh
+
+ def forward(self, pred, target):
+ """Forward function to calculate accuracy.
+
+ Args:
+ pred (torch.Tensor): Prediction of models.
+ target (torch.Tensor): Target for each prediction.
+
+ Returns:
+ tuple[float]: The accuracies under different topk criterions.
+ """
+ return accuracy(pred, target, self.topk, self.thresh)
diff --git a/annotator/uniformer/mmdet_null/models/losses/ae_loss.py b/annotator/uniformer/mmdet_null/models/losses/ae_loss.py
new file mode 100644
index 0000000000000000000000000000000000000000..cff472aa03080fb49dbb3adba6fec68647a575e6
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/losses/ae_loss.py
@@ -0,0 +1,102 @@
+import mmcv
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+from ..builder import LOSSES
+
+
+@mmcv.jit(derivate=True, coderize=True)
+def ae_loss_per_image(tl_preds, br_preds, match):
+ """Associative Embedding Loss in one image.
+
+    Associative Embedding Loss consists of two parts: a pull loss and a push
+    loss. The pull loss pulls embedding vectors from the same object closer to
+    each other. The push loss separates embedding vectors from different
+    objects, keeping the gap between them large enough.
+
+    During computation there are usually 3 cases:
+        - no object in image: both pull loss and push loss will be 0.
+        - one object in image: push loss will be 0 and pull loss is computed
+            from the two corners of the only object.
+        - more than one object in image: pull loss is computed from the corner
+            pairs of each object, push loss is computed between each object
+            and all other objects. We use a confusion matrix with 0 on the
+            diagonal to compute the push loss.
+
+    Args:
+        tl_preds (tensor): Embedding feature map of the top-left corner.
+        br_preds (tensor): Embedding feature map of the bottom-right corner.
+        match (list): Downsampled coordinate pairs of each ground truth box.
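+
+    Example:
+        A minimal illustrative sketch of the no-object case, where both loss
+        terms are defined to be zero.
+
+        >>> import torch
+        >>> tl = torch.zeros(4, 8, 8)
+        >>> br = torch.zeros(4, 8, 8)
+        >>> pull, push = ae_loss_per_image(tl, br, match=[])
+        >>> assert float(pull) == 0.0 and float(push) == 0.0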
+ """
+
+ tl_list, br_list, me_list = [], [], []
+ if len(match) == 0: # no object in image
+ pull_loss = tl_preds.sum() * 0.
+ push_loss = tl_preds.sum() * 0.
+ else:
+ for m in match:
+ [tl_y, tl_x], [br_y, br_x] = m
+ tl_e = tl_preds[:, tl_y, tl_x].view(-1, 1)
+ br_e = br_preds[:, br_y, br_x].view(-1, 1)
+ tl_list.append(tl_e)
+ br_list.append(br_e)
+ me_list.append((tl_e + br_e) / 2.0)
+
+ tl_list = torch.cat(tl_list)
+ br_list = torch.cat(br_list)
+ me_list = torch.cat(me_list)
+
+ assert tl_list.size() == br_list.size()
+
+ # N is object number in image, M is dimension of embedding vector
+ N, M = tl_list.size()
+
+ pull_loss = (tl_list - me_list).pow(2) + (br_list - me_list).pow(2)
+ pull_loss = pull_loss.sum() / N
+
+ margin = 1 # exp setting of CornerNet, details in section 3.3 of paper
+
+ # confusion matrix of push loss
+ conf_mat = me_list.expand((N, N, M)).permute(1, 0, 2) - me_list
+ conf_weight = 1 - torch.eye(N).type_as(me_list)
+ conf_mat = conf_weight * (margin - conf_mat.sum(-1).abs())
+
+ if N > 1: # more than one object in current image
+ push_loss = F.relu(conf_mat).sum() / (N * (N - 1))
+ else:
+ push_loss = tl_preds.sum() * 0.
+
+ return pull_loss, push_loss
+
+
+@LOSSES.register_module()
+class AssociativeEmbeddingLoss(nn.Module):
+ """Associative Embedding Loss.
+
+    More details can be found in
+    `Associative Embedding <https://arxiv.org/abs/1611.05424>`_ and
+    `CornerNet <https://arxiv.org/abs/1808.01244>`_.
+    Code is modified from `kp_utils.py <https://github.com/princeton-vl/CornerNet/blob/master/models/py_utils/kp_utils.py>`_  # noqa: E501
+
+ Args:
+ pull_weight (float): Loss weight for corners from same object.
+ push_weight (float): Loss weight for corners from different object.
+ """
+
+ def __init__(self, pull_weight=0.25, push_weight=0.25):
+ super(AssociativeEmbeddingLoss, self).__init__()
+ self.pull_weight = pull_weight
+ self.push_weight = push_weight
+
+ def forward(self, pred, target, match):
+ """Forward function."""
+ batch = pred.size(0)
+ pull_all, push_all = 0.0, 0.0
+ for i in range(batch):
+ pull, push = ae_loss_per_image(pred[i], target[i], match[i])
+
+ pull_all += self.pull_weight * pull
+ push_all += self.push_weight * push
+
+ return pull_all, push_all
diff --git a/annotator/uniformer/mmdet_null/models/losses/balanced_l1_loss.py b/annotator/uniformer/mmdet_null/models/losses/balanced_l1_loss.py
new file mode 100644
index 0000000000000000000000000000000000000000..7bcd13ff26dbdc9f6eff8d7c7b5bde742a8d7d1d
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/losses/balanced_l1_loss.py
@@ -0,0 +1,120 @@
+import mmcv
+import numpy as np
+import torch
+import torch.nn as nn
+
+from ..builder import LOSSES
+from .utils import weighted_loss
+
+
+@mmcv.jit(derivate=True, coderize=True)
+@weighted_loss
+def balanced_l1_loss(pred,
+ target,
+ beta=1.0,
+ alpha=0.5,
+ gamma=1.5,
+ reduction='mean'):
+ """Calculate balanced L1 loss.
+
+    Please see the `Libra R-CNN <https://arxiv.org/abs/1904.02701>`_ paper.
+
+ Args:
+ pred (torch.Tensor): The prediction with shape (N, 4).
+ target (torch.Tensor): The learning target of the prediction with
+ shape (N, 4).
+ beta (float): The loss is a piecewise function of prediction and target
+ and ``beta`` serves as a threshold for the difference between the
+ prediction and target. Defaults to 1.0.
+ alpha (float): The denominator ``alpha`` in the balanced L1 loss.
+ Defaults to 0.5.
+ gamma (float): The ``gamma`` in the balanced L1 loss.
+ Defaults to 1.5.
+ reduction (str, optional): The method that reduces the loss to a
+ scalar. Options are "none", "mean" and "sum".
+
+ Returns:
+ torch.Tensor: The calculated loss
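+
+    Example:
+        A rough sketch with arbitrary values, only checking that the
+        unreduced loss keeps the (N, 4) shape.
+
+        >>> import torch
+        >>> pred = torch.zeros(2, 4)
+        >>> target = torch.full((2, 4), 0.5)
+        >>> loss = balanced_l1_loss(pred, target, reduction='none')
+        >>> assert loss.shape == (2, 4)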
+ """
+ assert beta > 0
+ assert pred.size() == target.size() and target.numel() > 0
+
+ diff = torch.abs(pred - target)
+ b = np.e**(gamma / alpha) - 1
+ loss = torch.where(
+ diff < beta, alpha / b *
+ (b * diff + 1) * torch.log(b * diff / beta + 1) - alpha * diff,
+ gamma * diff + gamma / b - alpha * beta)
+
+ return loss
+
+
+@LOSSES.register_module()
+class BalancedL1Loss(nn.Module):
+ """Balanced L1 Loss.
+
+ arXiv: https://arxiv.org/pdf/1904.02701.pdf (CVPR 2019)
+
+ Args:
+ alpha (float): The denominator ``alpha`` in the balanced L1 loss.
+ Defaults to 0.5.
+ gamma (float): The ``gamma`` in the balanced L1 loss. Defaults to 1.5.
+ beta (float, optional): The loss is a piecewise function of prediction
+ and target. ``beta`` serves as a threshold for the difference
+ between the prediction and target. Defaults to 1.0.
+ reduction (str, optional): The method that reduces the loss to a
+ scalar. Options are "none", "mean" and "sum".
+ loss_weight (float, optional): The weight of the loss. Defaults to 1.0
+ """
+
+ def __init__(self,
+ alpha=0.5,
+ gamma=1.5,
+ beta=1.0,
+ reduction='mean',
+ loss_weight=1.0):
+ super(BalancedL1Loss, self).__init__()
+ self.alpha = alpha
+ self.gamma = gamma
+ self.beta = beta
+ self.reduction = reduction
+ self.loss_weight = loss_weight
+
+ def forward(self,
+ pred,
+ target,
+ weight=None,
+ avg_factor=None,
+ reduction_override=None,
+ **kwargs):
+ """Forward function of loss.
+
+ Args:
+ pred (torch.Tensor): The prediction with shape (N, 4).
+ target (torch.Tensor): The learning target of the prediction with
+ shape (N, 4).
+ weight (torch.Tensor, optional): Sample-wise loss weight with
+ shape (N, ).
+ avg_factor (int, optional): Average factor that is used to average
+ the loss. Defaults to None.
+ reduction_override (str, optional): The reduction method used to
+ override the original reduction method of the loss.
+ Options are "none", "mean" and "sum".
+
+ Returns:
+ torch.Tensor: The calculated loss
+ """
+ assert reduction_override in (None, 'none', 'mean', 'sum')
+ reduction = (
+ reduction_override if reduction_override else self.reduction)
+ loss_bbox = self.loss_weight * balanced_l1_loss(
+ pred,
+ target,
+ weight,
+ alpha=self.alpha,
+ gamma=self.gamma,
+ beta=self.beta,
+ reduction=reduction,
+ avg_factor=avg_factor,
+ **kwargs)
+ return loss_bbox
diff --git a/annotator/uniformer/mmdet_null/models/losses/cross_entropy_loss.py b/annotator/uniformer/mmdet_null/models/losses/cross_entropy_loss.py
new file mode 100644
index 0000000000000000000000000000000000000000..57994157960eeae5530bd983b8b86263de31d0ff
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/losses/cross_entropy_loss.py
@@ -0,0 +1,214 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+from ..builder import LOSSES
+from .utils import weight_reduce_loss
+
+
+def cross_entropy(pred,
+ label,
+ weight=None,
+ reduction='mean',
+ avg_factor=None,
+ class_weight=None):
+ """Calculate the CrossEntropy loss.
+
+ Args:
+ pred (torch.Tensor): The prediction with shape (N, C), C is the number
+ of classes.
+ label (torch.Tensor): The learning label of the prediction.
+ weight (torch.Tensor, optional): Sample-wise loss weight.
+ reduction (str, optional): The method used to reduce the loss.
+ avg_factor (int, optional): Average factor that is used to average
+ the loss. Defaults to None.
+ class_weight (list[float], optional): The weight for each class.
+
+ Returns:
+ torch.Tensor: The calculated loss
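+
+    Example:
+        An illustrative sketch with random logits; the default 'mean'
+        reduction yields a scalar loss.
+
+        >>> import torch
+        >>> pred = torch.randn(4, 10)
+        >>> label = torch.randint(0, 10, (4,))
+        >>> loss = cross_entropy(pred, label)
+        >>> assert loss.dim() == 0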
+ """
+ # element-wise losses
+ loss = F.cross_entropy(pred, label, weight=class_weight, reduction='none')
+
+ # apply weights and do the reduction
+ if weight is not None:
+ weight = weight.float()
+ loss = weight_reduce_loss(
+ loss, weight=weight, reduction=reduction, avg_factor=avg_factor)
+
+ return loss
+
+
+def _expand_onehot_labels(labels, label_weights, label_channels):
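+    # Expand hard integer labels of shape (N,) into one-hot targets of shape
+    # (N, label_channels); labels outside [0, label_channels) (e.g. the
+    # ignore/background index) are left as all-zero rows.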
+ bin_labels = labels.new_full((labels.size(0), label_channels), 0)
+ inds = torch.nonzero(
+ (labels >= 0) & (labels < label_channels), as_tuple=False).squeeze()
+ if inds.numel() > 0:
+ bin_labels[inds, labels[inds]] = 1
+
+ if label_weights is None:
+ bin_label_weights = None
+ else:
+ bin_label_weights = label_weights.view(-1, 1).expand(
+ label_weights.size(0), label_channels)
+
+ return bin_labels, bin_label_weights
+
+
+def binary_cross_entropy(pred,
+ label,
+ weight=None,
+ reduction='mean',
+ avg_factor=None,
+ class_weight=None):
+ """Calculate the binary CrossEntropy loss.
+
+ Args:
+ pred (torch.Tensor): The prediction with shape (N, 1).
+ label (torch.Tensor): The learning label of the prediction.
+ weight (torch.Tensor, optional): Sample-wise loss weight.
+ reduction (str, optional): The method used to reduce the loss.
+ Options are "none", "mean" and "sum".
+ avg_factor (int, optional): Average factor that is used to average
+ the loss. Defaults to None.
+ class_weight (list[float], optional): The weight for each class.
+
+ Returns:
+ torch.Tensor: The calculated loss
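+
+    Example:
+        A minimal sketch, assuming integer class labels that get expanded to
+        one-hot targets internally; the default reduction returns a scalar.
+
+        >>> import torch
+        >>> pred = torch.randn(3, 5)
+        >>> label = torch.randint(0, 5, (3,))
+        >>> loss = binary_cross_entropy(pred, label)
+        >>> assert loss.dim() == 0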
+ """
+ if pred.dim() != label.dim():
+ label, weight = _expand_onehot_labels(label, weight, pred.size(-1))
+
+ # weighted element-wise losses
+ if weight is not None:
+ weight = weight.float()
+ loss = F.binary_cross_entropy_with_logits(
+ pred, label.float(), pos_weight=class_weight, reduction='none')
+ # do the reduction for the weighted loss
+ loss = weight_reduce_loss(
+ loss, weight, reduction=reduction, avg_factor=avg_factor)
+
+ return loss
+
+
+def mask_cross_entropy(pred,
+ target,
+ label,
+ reduction='mean',
+ avg_factor=None,
+ class_weight=None):
+ """Calculate the CrossEntropy loss for masks.
+
+ Args:
+ pred (torch.Tensor): The prediction with shape (N, C, *), C is the
+ number of classes. The trailing * indicates arbitrary shape.
+ target (torch.Tensor): The learning label of the prediction.
+        label (torch.Tensor): ``label`` indicates the class label of the mask's
+            corresponding object. This will be used to select the mask of the
+            class which the object belongs to when the mask prediction is not
+            class-agnostic.
+ reduction (str, optional): The method used to reduce the loss.
+ Options are "none", "mean" and "sum".
+ avg_factor (int, optional): Average factor that is used to average
+ the loss. Defaults to None.
+ class_weight (list[float], optional): The weight for each class.
+
+ Returns:
+ torch.Tensor: The calculated loss
+
+ Example:
+ >>> N, C = 3, 11
+ >>> H, W = 2, 2
+ >>> pred = torch.randn(N, C, H, W) * 1000
+ >>> target = torch.rand(N, H, W)
+ >>> label = torch.randint(0, C, size=(N,))
+ >>> reduction = 'mean'
+ >>> avg_factor = None
+ >>> class_weights = None
+ >>> loss = mask_cross_entropy(pred, target, label, reduction,
+ >>> avg_factor, class_weights)
+ >>> assert loss.shape == (1,)
+ """
+ # TODO: handle these two reserved arguments
+ assert reduction == 'mean' and avg_factor is None
+ num_rois = pred.size()[0]
+ inds = torch.arange(0, num_rois, dtype=torch.long, device=pred.device)
+ pred_slice = pred[inds, label].squeeze(1)
+ return F.binary_cross_entropy_with_logits(
+ pred_slice, target, weight=class_weight, reduction='mean')[None]
+
+
+@LOSSES.register_module()
+class CrossEntropyLoss(nn.Module):
+
+ def __init__(self,
+ use_sigmoid=False,
+ use_mask=False,
+ reduction='mean',
+ class_weight=None,
+ loss_weight=1.0):
+ """CrossEntropyLoss.
+
+ Args:
+            use_sigmoid (bool, optional): Whether the prediction uses sigmoid
+                instead of softmax. Defaults to False.
+ use_mask (bool, optional): Whether to use mask cross entropy loss.
+ Defaults to False.
+            reduction (str, optional): The method used to reduce the loss.
+                Options are "none", "mean" and "sum". Defaults to 'mean'.
+ class_weight (list[float], optional): Weight of each class.
+ Defaults to None.
+ loss_weight (float, optional): Weight of the loss. Defaults to 1.0.
+ """
+ super(CrossEntropyLoss, self).__init__()
+ assert (use_sigmoid is False) or (use_mask is False)
+ self.use_sigmoid = use_sigmoid
+ self.use_mask = use_mask
+ self.reduction = reduction
+ self.loss_weight = loss_weight
+ self.class_weight = class_weight
+
+ if self.use_sigmoid:
+ self.cls_criterion = binary_cross_entropy
+ elif self.use_mask:
+ self.cls_criterion = mask_cross_entropy
+ else:
+ self.cls_criterion = cross_entropy
+
+ def forward(self,
+ cls_score,
+ label,
+ weight=None,
+ avg_factor=None,
+ reduction_override=None,
+ **kwargs):
+ """Forward function.
+
+ Args:
+ cls_score (torch.Tensor): The prediction.
+ label (torch.Tensor): The learning label of the prediction.
+ weight (torch.Tensor, optional): Sample-wise loss weight.
+ avg_factor (int, optional): Average factor that is used to average
+ the loss. Defaults to None.
+            reduction_override (str, optional): The reduction method used to
+                override the original reduction method of the loss.
+                Options are "none", "mean" and "sum".
+ Returns:
+ torch.Tensor: The calculated loss
+ """
+ assert reduction_override in (None, 'none', 'mean', 'sum')
+ reduction = (
+ reduction_override if reduction_override else self.reduction)
+ if self.class_weight is not None:
+ class_weight = cls_score.new_tensor(
+ self.class_weight, device=cls_score.device)
+ else:
+ class_weight = None
+ loss_cls = self.loss_weight * self.cls_criterion(
+ cls_score,
+ label,
+ weight,
+ class_weight=class_weight,
+ reduction=reduction,
+ avg_factor=avg_factor,
+ **kwargs)
+ return loss_cls
diff --git a/annotator/uniformer/mmdet_null/models/losses/focal_loss.py b/annotator/uniformer/mmdet_null/models/losses/focal_loss.py
new file mode 100644
index 0000000000000000000000000000000000000000..493907c6984d532175e0351daf2eafe4b9ff0256
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/losses/focal_loss.py
@@ -0,0 +1,181 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from mmcv.ops import sigmoid_focal_loss as _sigmoid_focal_loss
+
+from ..builder import LOSSES
+from .utils import weight_reduce_loss
+
+
+# This method is only for debugging
+def py_sigmoid_focal_loss(pred,
+ target,
+ weight=None,
+ gamma=2.0,
+ alpha=0.25,
+ reduction='mean',
+ avg_factor=None):
+    """PyTorch version of `Focal Loss <https://arxiv.org/abs/1708.02002>`_.
+
+ Args:
+ pred (torch.Tensor): The prediction with shape (N, C), C is the
+ number of classes
+ target (torch.Tensor): The learning label of the prediction.
+ weight (torch.Tensor, optional): Sample-wise loss weight.
+ gamma (float, optional): The gamma for calculating the modulating
+ factor. Defaults to 2.0.
+ alpha (float, optional): A balanced form for Focal Loss.
+ Defaults to 0.25.
+ reduction (str, optional): The method used to reduce the loss into
+ a scalar. Defaults to 'mean'.
+ avg_factor (int, optional): Average factor that is used to average
+ the loss. Defaults to None.
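+
+    Example:
+        A minimal sketch with random logits and binary one-hot style targets;
+        the default 'mean' reduction gives a scalar.
+
+        >>> import torch
+        >>> pred = torch.randn(8, 4)
+        >>> target = torch.randint(0, 2, (8, 4)).float()
+        >>> loss = py_sigmoid_focal_loss(pred, target)
+        >>> assert loss.dim() == 0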
+ """
+ pred_sigmoid = pred.sigmoid()
+ target = target.type_as(pred)
+ pt = (1 - pred_sigmoid) * target + pred_sigmoid * (1 - target)
+ focal_weight = (alpha * target + (1 - alpha) *
+ (1 - target)) * pt.pow(gamma)
+ loss = F.binary_cross_entropy_with_logits(
+ pred, target, reduction='none') * focal_weight
+ if weight is not None:
+ if weight.shape != loss.shape:
+ if weight.size(0) == loss.size(0):
+ # For most cases, weight is of shape (num_priors, ),
+ # which means it does not have the second axis num_class
+ weight = weight.view(-1, 1)
+ else:
+ # Sometimes, weight per anchor per class is also needed. e.g.
+ # in FSAF. But it may be flattened of shape
+ # (num_priors x num_class, ), while loss is still of shape
+ # (num_priors, num_class).
+ assert weight.numel() == loss.numel()
+ weight = weight.view(loss.size(0), -1)
+ assert weight.ndim == loss.ndim
+ loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
+ return loss
+
+
+def sigmoid_focal_loss(pred,
+ target,
+ weight=None,
+ gamma=2.0,
+ alpha=0.25,
+ reduction='mean',
+ avg_factor=None):
+    r"""A wrapper of the CUDA version of `Focal Loss
+    <https://arxiv.org/abs/1708.02002>`_.
+
+ Args:
+ pred (torch.Tensor): The prediction with shape (N, C), C is the number
+ of classes.
+ target (torch.Tensor): The learning label of the prediction.
+ weight (torch.Tensor, optional): Sample-wise loss weight.
+ gamma (float, optional): The gamma for calculating the modulating
+ factor. Defaults to 2.0.
+ alpha (float, optional): A balanced form for Focal Loss.
+ Defaults to 0.25.
+ reduction (str, optional): The method used to reduce the loss into
+ a scalar. Defaults to 'mean'. Options are "none", "mean" and "sum".
+ avg_factor (int, optional): Average factor that is used to average
+ the loss. Defaults to None.
+ """
+ # Function.apply does not accept keyword arguments, so the decorator
+ # "weighted_loss" is not applicable
+ loss = _sigmoid_focal_loss(pred.contiguous(), target, gamma, alpha, None,
+ 'none')
+ if weight is not None:
+ if weight.shape != loss.shape:
+ if weight.size(0) == loss.size(0):
+ # For most cases, weight is of shape (num_priors, ),
+ # which means it does not have the second axis num_class
+ weight = weight.view(-1, 1)
+ else:
+ # Sometimes, weight per anchor per class is also needed. e.g.
+ # in FSAF. But it may be flattened of shape
+ # (num_priors x num_class, ), while loss is still of shape
+ # (num_priors, num_class).
+ assert weight.numel() == loss.numel()
+ weight = weight.view(loss.size(0), -1)
+ assert weight.ndim == loss.ndim
+ loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
+ return loss
+
+
+@LOSSES.register_module()
+class FocalLoss(nn.Module):
+
+ def __init__(self,
+ use_sigmoid=True,
+ gamma=2.0,
+ alpha=0.25,
+ reduction='mean',
+ loss_weight=1.0):
+        """`Focal Loss <https://arxiv.org/abs/1708.02002>`_
+
+ Args:
+            use_sigmoid (bool, optional): Whether the prediction uses sigmoid
+                instead of softmax. Defaults to True.
+ gamma (float, optional): The gamma for calculating the modulating
+ factor. Defaults to 2.0.
+ alpha (float, optional): A balanced form for Focal Loss.
+ Defaults to 0.25.
+ reduction (str, optional): The method used to reduce the loss into
+ a scalar. Defaults to 'mean'. Options are "none", "mean" and
+ "sum".
+ loss_weight (float, optional): Weight of loss. Defaults to 1.0.
+ """
+ super(FocalLoss, self).__init__()
+ assert use_sigmoid is True, 'Only sigmoid focal loss supported now.'
+ self.use_sigmoid = use_sigmoid
+ self.gamma = gamma
+ self.alpha = alpha
+ self.reduction = reduction
+ self.loss_weight = loss_weight
+
+ def forward(self,
+ pred,
+ target,
+ weight=None,
+ avg_factor=None,
+ reduction_override=None):
+ """Forward function.
+
+ Args:
+ pred (torch.Tensor): The prediction.
+ target (torch.Tensor): The learning label of the prediction.
+ weight (torch.Tensor, optional): The weight of loss for each
+ prediction. Defaults to None.
+ avg_factor (int, optional): Average factor that is used to average
+ the loss. Defaults to None.
+ reduction_override (str, optional): The reduction method used to
+ override the original reduction method of the loss.
+ Options are "none", "mean" and "sum".
+
+ Returns:
+ torch.Tensor: The calculated loss
+ """
+ assert reduction_override in (None, 'none', 'mean', 'sum')
+ reduction = (
+ reduction_override if reduction_override else self.reduction)
+ if self.use_sigmoid:
+ if torch.cuda.is_available() and pred.is_cuda:
+ calculate_loss_func = sigmoid_focal_loss
+ else:
+ num_classes = pred.size(1)
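+                # The CPU fallback expects one-hot targets: encode with an
+                # extra column for the background index, then drop it so the
+                # shape matches pred (N, num_classes).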
+ target = F.one_hot(target, num_classes=num_classes + 1)
+ target = target[:, :num_classes]
+ calculate_loss_func = py_sigmoid_focal_loss
+
+ loss_cls = self.loss_weight * calculate_loss_func(
+ pred,
+ target,
+ weight,
+ gamma=self.gamma,
+ alpha=self.alpha,
+ reduction=reduction,
+ avg_factor=avg_factor)
+
+ else:
+ raise NotImplementedError
+ return loss_cls
diff --git a/annotator/uniformer/mmdet_null/models/losses/gaussian_focal_loss.py b/annotator/uniformer/mmdet_null/models/losses/gaussian_focal_loss.py
new file mode 100644
index 0000000000000000000000000000000000000000..e45506a38e8e3c187be8288d0b714cc1ee29cf27
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/losses/gaussian_focal_loss.py
@@ -0,0 +1,91 @@
+import mmcv
+import torch.nn as nn
+
+from ..builder import LOSSES
+from .utils import weighted_loss
+
+
+@mmcv.jit(derivate=True, coderize=True)
+@weighted_loss
+def gaussian_focal_loss(pred, gaussian_target, alpha=2.0, gamma=4.0):
+    """`Focal Loss <https://arxiv.org/abs/1708.02002>`_ for targets in a
+    gaussian distribution.
+
+ Args:
+ pred (torch.Tensor): The prediction.
+ gaussian_target (torch.Tensor): The learning target of the prediction
+ in gaussian distribution.
+ alpha (float, optional): A balanced form for Focal Loss.
+ Defaults to 2.0.
+ gamma (float, optional): The gamma for calculating the modulating
+ factor. Defaults to 4.0.
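+
+    Example:
+        An illustrative sketch: ``pred`` is a heatmap in (0, 1) and the
+        target is a gaussian heatmap with a single peak.
+
+        >>> import torch
+        >>> pred = torch.rand(2, 4, 4).clamp(1e-4, 1 - 1e-4)
+        >>> heatmap = torch.zeros(2, 4, 4)
+        >>> heatmap[:, 2, 2] = 1.0
+        >>> loss = gaussian_focal_loss(pred, heatmap)
+        >>> assert loss.dim() == 0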
+ """
+ eps = 1e-12
+ pos_weights = gaussian_target.eq(1)
+ neg_weights = (1 - gaussian_target).pow(gamma)
+ pos_loss = -(pred + eps).log() * (1 - pred).pow(alpha) * pos_weights
+ neg_loss = -(1 - pred + eps).log() * pred.pow(alpha) * neg_weights
+ return pos_loss + neg_loss
+
+
+@LOSSES.register_module()
+class GaussianFocalLoss(nn.Module):
+ """GaussianFocalLoss is a variant of focal loss.
+
+    More details can be found in the `paper
+    <https://arxiv.org/abs/1808.01244>`_.
+    Code is modified from `kp_utils.py
+    <https://github.com/princeton-vl/CornerNet/blob/master/models/py_utils/kp_utils.py>`_  # noqa: E501
+    Note that the target in GaussianFocalLoss is a gaussian heatmap,
+    not a 0/1 binary target.
+
+ Args:
+ alpha (float): Power of prediction.
+ gamma (float): Power of target for negative samples.
+ reduction (str): Options are "none", "mean" and "sum".
+ loss_weight (float): Loss weight of current loss.
+ """
+
+ def __init__(self,
+ alpha=2.0,
+ gamma=4.0,
+ reduction='mean',
+ loss_weight=1.0):
+ super(GaussianFocalLoss, self).__init__()
+ self.alpha = alpha
+ self.gamma = gamma
+ self.reduction = reduction
+ self.loss_weight = loss_weight
+
+ def forward(self,
+ pred,
+ target,
+ weight=None,
+ avg_factor=None,
+ reduction_override=None):
+ """Forward function.
+
+ Args:
+ pred (torch.Tensor): The prediction.
+ target (torch.Tensor): The learning target of the prediction
+ in gaussian distribution.
+ weight (torch.Tensor, optional): The weight of loss for each
+ prediction. Defaults to None.
+ avg_factor (int, optional): Average factor that is used to average
+ the loss. Defaults to None.
+ reduction_override (str, optional): The reduction method used to
+ override the original reduction method of the loss.
+ Defaults to None.
+ """
+ assert reduction_override in (None, 'none', 'mean', 'sum')
+ reduction = (
+ reduction_override if reduction_override else self.reduction)
+ loss_reg = self.loss_weight * gaussian_focal_loss(
+ pred,
+ target,
+ weight,
+ alpha=self.alpha,
+ gamma=self.gamma,
+ reduction=reduction,
+ avg_factor=avg_factor)
+ return loss_reg
diff --git a/annotator/uniformer/mmdet_null/models/losses/gfocal_loss.py b/annotator/uniformer/mmdet_null/models/losses/gfocal_loss.py
new file mode 100644
index 0000000000000000000000000000000000000000..9d3b8833dc50c76f6741db5341dbf8da3402d07b
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/losses/gfocal_loss.py
@@ -0,0 +1,188 @@
+import mmcv
+import torch.nn as nn
+import torch.nn.functional as F
+
+from ..builder import LOSSES
+from .utils import weighted_loss
+
+
+@mmcv.jit(derivate=True, coderize=True)
+@weighted_loss
+def quality_focal_loss(pred, target, beta=2.0):
+ r"""Quality Focal Loss (QFL) is from `Generalized Focal Loss: Learning
+ Qualified and Distributed Bounding Boxes for Dense Object Detection
+    <https://arxiv.org/abs/2006.04388>`_.
+
+ Args:
+ pred (torch.Tensor): Predicted joint representation of classification
+ and quality (IoU) estimation with shape (N, C), C is the number of
+ classes.
+ target (tuple([torch.Tensor])): Target category label with shape (N,)
+ and target quality label with shape (N,).
+ beta (float): The beta parameter for calculating the modulating factor.
+ Defaults to 2.0.
+
+ Returns:
+ torch.Tensor: Loss tensor with shape (N,).
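+
+    Example:
+        A minimal sketch, assuming 10 foreground classes so that category
+        id 10 denotes background.
+
+        >>> import torch
+        >>> pred = torch.randn(6, 10)
+        >>> label = torch.randint(0, 11, (6,))
+        >>> score = torch.rand(6)
+        >>> loss = quality_focal_loss(pred, (label, score), reduction='none')
+        >>> assert loss.shape == (6,)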
+ """
+ assert len(target) == 2, """target for QFL must be a tuple of two elements,
+ including category label and quality label, respectively"""
+ # label denotes the category id, score denotes the quality score
+ label, score = target
+
+ # negatives are supervised by 0 quality score
+ pred_sigmoid = pred.sigmoid()
+ scale_factor = pred_sigmoid
+ zerolabel = scale_factor.new_zeros(pred.shape)
+ loss = F.binary_cross_entropy_with_logits(
+ pred, zerolabel, reduction='none') * scale_factor.pow(beta)
+
+ # FG cat_id: [0, num_classes -1], BG cat_id: num_classes
+ bg_class_ind = pred.size(1)
+ pos = ((label >= 0) & (label < bg_class_ind)).nonzero().squeeze(1)
+ pos_label = label[pos].long()
+ # positives are supervised by bbox quality (IoU) score
+ scale_factor = score[pos] - pred_sigmoid[pos, pos_label]
+ loss[pos, pos_label] = F.binary_cross_entropy_with_logits(
+ pred[pos, pos_label], score[pos],
+ reduction='none') * scale_factor.abs().pow(beta)
+
+ loss = loss.sum(dim=1, keepdim=False)
+ return loss
+
+
+@mmcv.jit(derivate=True, coderize=True)
+@weighted_loss
+def distribution_focal_loss(pred, label):
+ r"""Distribution Focal Loss (DFL) is from `Generalized Focal Loss: Learning
+ Qualified and Distributed Bounding Boxes for Dense Object Detection
+    <https://arxiv.org/abs/2006.04388>`_.
+
+ Args:
+ pred (torch.Tensor): Predicted general distribution of bounding boxes
+ (before softmax) with shape (N, n+1), n is the max value of the
+ integral set `{0, ..., n}` in paper.
+ label (torch.Tensor): Target distance label for bounding boxes with
+ shape (N,).
+
+ Returns:
+ torch.Tensor: Loss tensor with shape (N,).
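+
+    Example:
+        A minimal sketch with the integral set {0, ..., 7}: continuous
+        distance labels are split between their two neighbouring bins.
+
+        >>> import torch
+        >>> pred = torch.randn(5, 8)
+        >>> label = torch.rand(5) * 6.5
+        >>> loss = distribution_focal_loss(pred, label, reduction='none')
+        >>> assert loss.shape == (5,)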
+ """
+ dis_left = label.long()
+ dis_right = dis_left + 1
+ weight_left = dis_right.float() - label
+ weight_right = label - dis_left.float()
+ loss = F.cross_entropy(pred, dis_left, reduction='none') * weight_left \
+ + F.cross_entropy(pred, dis_right, reduction='none') * weight_right
+ return loss
+
+
+@LOSSES.register_module()
+class QualityFocalLoss(nn.Module):
+ r"""Quality Focal Loss (QFL) is a variant of `Generalized Focal Loss:
+ Learning Qualified and Distributed Bounding Boxes for Dense Object
+    Detection <https://arxiv.org/abs/2006.04388>`_.
+
+ Args:
+ use_sigmoid (bool): Whether sigmoid operation is conducted in QFL.
+ Defaults to True.
+ beta (float): The beta parameter for calculating the modulating factor.
+ Defaults to 2.0.
+ reduction (str): Options are "none", "mean" and "sum".
+ loss_weight (float): Loss weight of current loss.
+ """
+
+ def __init__(self,
+ use_sigmoid=True,
+ beta=2.0,
+ reduction='mean',
+ loss_weight=1.0):
+ super(QualityFocalLoss, self).__init__()
+ assert use_sigmoid is True, 'Only sigmoid in QFL supported now.'
+ self.use_sigmoid = use_sigmoid
+ self.beta = beta
+ self.reduction = reduction
+ self.loss_weight = loss_weight
+
+ def forward(self,
+ pred,
+ target,
+ weight=None,
+ avg_factor=None,
+ reduction_override=None):
+ """Forward function.
+
+ Args:
+ pred (torch.Tensor): Predicted joint representation of
+ classification and quality (IoU) estimation with shape (N, C),
+ C is the number of classes.
+ target (tuple([torch.Tensor])): Target category label with shape
+ (N,) and target quality label with shape (N,).
+ weight (torch.Tensor, optional): The weight of loss for each
+ prediction. Defaults to None.
+ avg_factor (int, optional): Average factor that is used to average
+ the loss. Defaults to None.
+ reduction_override (str, optional): The reduction method used to
+ override the original reduction method of the loss.
+ Defaults to None.
+ """
+ assert reduction_override in (None, 'none', 'mean', 'sum')
+ reduction = (
+ reduction_override if reduction_override else self.reduction)
+ if self.use_sigmoid:
+ loss_cls = self.loss_weight * quality_focal_loss(
+ pred,
+ target,
+ weight,
+ beta=self.beta,
+ reduction=reduction,
+ avg_factor=avg_factor)
+ else:
+ raise NotImplementedError
+ return loss_cls
+
+
+@LOSSES.register_module()
+class DistributionFocalLoss(nn.Module):
+ r"""Distribution Focal Loss (DFL) is a variant of `Generalized Focal Loss:
+ Learning Qualified and Distributed Bounding Boxes for Dense Object
+    Detection <https://arxiv.org/abs/2006.04388>`_.
+
+ Args:
+ reduction (str): Options are `'none'`, `'mean'` and `'sum'`.
+ loss_weight (float): Loss weight of current loss.
+ """
+
+ def __init__(self, reduction='mean', loss_weight=1.0):
+ super(DistributionFocalLoss, self).__init__()
+ self.reduction = reduction
+ self.loss_weight = loss_weight
+
+ def forward(self,
+ pred,
+ target,
+ weight=None,
+ avg_factor=None,
+ reduction_override=None):
+ """Forward function.
+
+ Args:
+ pred (torch.Tensor): Predicted general distribution of bounding
+ boxes (before softmax) with shape (N, n+1), n is the max value
+ of the integral set `{0, ..., n}` in paper.
+ target (torch.Tensor): Target distance label for bounding boxes
+ with shape (N,).
+ weight (torch.Tensor, optional): The weight of loss for each
+ prediction. Defaults to None.
+ avg_factor (int, optional): Average factor that is used to average
+ the loss. Defaults to None.
+ reduction_override (str, optional): The reduction method used to
+ override the original reduction method of the loss.
+ Defaults to None.
+ """
+ assert reduction_override in (None, 'none', 'mean', 'sum')
+ reduction = (
+ reduction_override if reduction_override else self.reduction)
+ loss_cls = self.loss_weight * distribution_focal_loss(
+ pred, target, weight, reduction=reduction, avg_factor=avg_factor)
+ return loss_cls
diff --git a/annotator/uniformer/mmdet_null/models/losses/ghm_loss.py b/annotator/uniformer/mmdet_null/models/losses/ghm_loss.py
new file mode 100644
index 0000000000000000000000000000000000000000..8969a23fd98bb746415f96ac5e4ad9e37ba3af52
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/losses/ghm_loss.py
@@ -0,0 +1,172 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+from ..builder import LOSSES
+
+
+def _expand_onehot_labels(labels, label_weights, label_channels):
+ bin_labels = labels.new_full((labels.size(0), label_channels), 0)
+ inds = torch.nonzero(
+ (labels >= 0) & (labels < label_channels), as_tuple=False).squeeze()
+ if inds.numel() > 0:
+ bin_labels[inds, labels[inds]] = 1
+ bin_label_weights = label_weights.view(-1, 1).expand(
+ label_weights.size(0), label_channels)
+ return bin_labels, bin_label_weights
+
+
+# TODO: code refactoring to make it consistent with other losses
+@LOSSES.register_module()
+class GHMC(nn.Module):
+ """GHM Classification Loss.
+
+ Details of the theorem can be viewed in the paper
+ `Gradient Harmonized Single-stage Detector
+    <https://arxiv.org/abs/1811.05181>`_.
+
+ Args:
+ bins (int): Number of the unit regions for distribution calculation.
+ momentum (float): The parameter for moving average.
+ use_sigmoid (bool): Can only be true for BCE based loss now.
+ loss_weight (float): The weight of the total GHM-C loss.
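+
+    Example:
+        A rough sketch with binary targets and all samples marked valid.
+
+        >>> import torch
+        >>> criterion = GHMC(bins=10)
+        >>> pred = torch.randn(4, 3)
+        >>> target = torch.randint(0, 2, (4, 3)).float()
+        >>> label_weight = torch.ones(4, 3)
+        >>> loss = criterion(pred, target, label_weight)
+        >>> assert loss.dim() == 0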
+ """
+
+ def __init__(self, bins=10, momentum=0, use_sigmoid=True, loss_weight=1.0):
+ super(GHMC, self).__init__()
+ self.bins = bins
+ self.momentum = momentum
+ edges = torch.arange(bins + 1).float() / bins
+ self.register_buffer('edges', edges)
+ self.edges[-1] += 1e-6
+ if momentum > 0:
+ acc_sum = torch.zeros(bins)
+ self.register_buffer('acc_sum', acc_sum)
+ self.use_sigmoid = use_sigmoid
+ if not self.use_sigmoid:
+ raise NotImplementedError
+ self.loss_weight = loss_weight
+
+ def forward(self, pred, target, label_weight, *args, **kwargs):
+ """Calculate the GHM-C loss.
+
+ Args:
+ pred (float tensor of size [batch_num, class_num]):
+ The direct prediction of classification fc layer.
+ target (float tensor of size [batch_num, class_num]):
+ Binary class target for each sample.
+ label_weight (float tensor of size [batch_num, class_num]):
+ the value is 1 if the sample is valid and 0 if ignored.
+ Returns:
+ The gradient harmonized loss.
+ """
+ # the target should be binary class label
+ if pred.dim() != target.dim():
+ target, label_weight = _expand_onehot_labels(
+ target, label_weight, pred.size(-1))
+ target, label_weight = target.float(), label_weight.float()
+ edges = self.edges
+ mmt = self.momentum
+ weights = torch.zeros_like(pred)
+
+ # gradient length
+ g = torch.abs(pred.sigmoid().detach() - target)
+
+ valid = label_weight > 0
+ tot = max(valid.float().sum().item(), 1.0)
+ n = 0 # n valid bins
+ for i in range(self.bins):
+ inds = (g >= edges[i]) & (g < edges[i + 1]) & valid
+ num_in_bin = inds.sum().item()
+ if num_in_bin > 0:
+ if mmt > 0:
+ self.acc_sum[i] = mmt * self.acc_sum[i] \
+ + (1 - mmt) * num_in_bin
+ weights[inds] = tot / self.acc_sum[i]
+ else:
+ weights[inds] = tot / num_in_bin
+ n += 1
+ if n > 0:
+ weights = weights / n
+
+ loss = F.binary_cross_entropy_with_logits(
+ pred, target, weights, reduction='sum') / tot
+ return loss * self.loss_weight
+
+
+# TODO: code refactoring to make it consistent with other losses
+@LOSSES.register_module()
+class GHMR(nn.Module):
+ """GHM Regression Loss.
+
+ Details of the theorem can be viewed in the paper
+ `Gradient Harmonized Single-stage Detector
+    <https://arxiv.org/abs/1811.05181>`_.
+
+ Args:
+ mu (float): The parameter for the Authentic Smooth L1 loss.
+ bins (int): Number of the unit regions for distribution calculation.
+ momentum (float): The parameter for moving average.
+ loss_weight (float): The weight of the total GHM-R loss.
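+
+    Example:
+        A rough sketch for box regression targets; every entry is treated
+        as valid.
+
+        >>> import torch
+        >>> criterion = GHMR()
+        >>> pred = torch.randn(6, 4)
+        >>> target = torch.randn(6, 4)
+        >>> label_weight = torch.ones(6, 4)
+        >>> loss = criterion(pred, target, label_weight)
+        >>> assert loss.dim() == 0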
+ """
+
+ def __init__(self, mu=0.02, bins=10, momentum=0, loss_weight=1.0):
+ super(GHMR, self).__init__()
+ self.mu = mu
+ self.bins = bins
+ edges = torch.arange(bins + 1).float() / bins
+ self.register_buffer('edges', edges)
+ self.edges[-1] = 1e3
+ self.momentum = momentum
+ if momentum > 0:
+ acc_sum = torch.zeros(bins)
+ self.register_buffer('acc_sum', acc_sum)
+ self.loss_weight = loss_weight
+
+ # TODO: support reduction parameter
+ def forward(self, pred, target, label_weight, avg_factor=None):
+ """Calculate the GHM-R loss.
+
+ Args:
+ pred (float tensor of size [batch_num, 4 (* class_num)]):
+ The prediction of box regression layer. Channel number can be 4
+ or 4 * class_num depending on whether it is class-agnostic.
+ target (float tensor of size [batch_num, 4 (* class_num)]):
+ The target regression values with the same size of pred.
+ label_weight (float tensor of size [batch_num, 4 (* class_num)]):
+ The weight of each sample, 0 if ignored.
+ Returns:
+ The gradient harmonized loss.
+ """
+ mu = self.mu
+ edges = self.edges
+ mmt = self.momentum
+
+ # ASL1 loss
+ diff = pred - target
+ loss = torch.sqrt(diff * diff + mu * mu) - mu
+
+ # gradient length
+ g = torch.abs(diff / torch.sqrt(mu * mu + diff * diff)).detach()
+ weights = torch.zeros_like(g)
+
+ valid = label_weight > 0
+ tot = max(label_weight.float().sum().item(), 1.0)
+ n = 0 # n: valid bins
+ for i in range(self.bins):
+ inds = (g >= edges[i]) & (g < edges[i + 1]) & valid
+ num_in_bin = inds.sum().item()
+ if num_in_bin > 0:
+ n += 1
+ if mmt > 0:
+ self.acc_sum[i] = mmt * self.acc_sum[i] \
+ + (1 - mmt) * num_in_bin
+ weights[inds] = tot / self.acc_sum[i]
+ else:
+ weights[inds] = tot / num_in_bin
+ if n > 0:
+ weights /= n
+
+ loss = loss * weights
+ loss = loss.sum() / tot
+ return loss * self.loss_weight
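+
+# Note (illustrative): the derivative of the ASL1 loss w.r.t. diff is
+# diff / sqrt(diff**2 + mu**2), which is exactly the gradient length g binned
+# above, so easy and hard regression samples are re-weighted by how densely
+# their gradient magnitudes are populated.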
diff --git a/annotator/uniformer/mmdet_null/models/losses/iou_loss.py b/annotator/uniformer/mmdet_null/models/losses/iou_loss.py
new file mode 100644
index 0000000000000000000000000000000000000000..eba6f18b80981ca891c1add37007e6bf478c651f
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/losses/iou_loss.py
@@ -0,0 +1,436 @@
+import math
+
+import mmcv
+import torch
+import torch.nn as nn
+
+from mmdet.core import bbox_overlaps
+from ..builder import LOSSES
+from .utils import weighted_loss
+
+
+@mmcv.jit(derivate=True, coderize=True)
+@weighted_loss
+def iou_loss(pred, target, linear=False, eps=1e-6):
+ """IoU loss.
+
+ Computing the IoU loss between a set of predicted bboxes and target bboxes.
+ The loss is calculated as negative log of IoU.
+
+ Args:
+ pred (torch.Tensor): Predicted bboxes of format (x1, y1, x2, y2),
+ shape (n, 4).
+ target (torch.Tensor): Corresponding gt bboxes, shape (n, 4).
+ linear (bool, optional): If True, use linear scale of loss instead of
+ log scale. Default: False.
+ eps (float): Eps to avoid log(0).
+
+ Return:
+ torch.Tensor: Loss tensor.
+ """
+ ious = bbox_overlaps(pred, target, is_aligned=True).clamp(min=eps)
+ if linear:
+ loss = 1 - ious
+ else:
+ loss = -ious.log()
+ return loss
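+
+# Example (illustrative): for an aligned pair with IoU = 0.5 the default log
+# form gives -log(0.5) ~= 0.693, while linear=True gives 1 - 0.5 = 0.5.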
+
+
+@mmcv.jit(derivate=True, coderize=True)
+@weighted_loss
+def bounded_iou_loss(pred, target, beta=0.2, eps=1e-3):
+ """BIoULoss.
+
+ This is an implementation of paper
+    `Improving Object Localization with Fitness NMS and Bounded IoU Loss.
+    <https://arxiv.org/abs/1711.00164>`_.
+
+ Args:
+ pred (torch.Tensor): Predicted bboxes.
+ target (torch.Tensor): Target bboxes.
+ beta (float): beta parameter in smoothl1.
+ eps (float): eps to avoid NaN.
+ """
+ pred_ctrx = (pred[:, 0] + pred[:, 2]) * 0.5
+ pred_ctry = (pred[:, 1] + pred[:, 3]) * 0.5
+ pred_w = pred[:, 2] - pred[:, 0]
+ pred_h = pred[:, 3] - pred[:, 1]
+ with torch.no_grad():
+ target_ctrx = (target[:, 0] + target[:, 2]) * 0.5
+ target_ctry = (target[:, 1] + target[:, 3]) * 0.5
+ target_w = target[:, 2] - target[:, 0]
+ target_h = target[:, 3] - target[:, 1]
+
+ dx = target_ctrx - pred_ctrx
+ dy = target_ctry - pred_ctry
+
+ loss_dx = 1 - torch.max(
+ (target_w - 2 * dx.abs()) /
+ (target_w + 2 * dx.abs() + eps), torch.zeros_like(dx))
+ loss_dy = 1 - torch.max(
+ (target_h - 2 * dy.abs()) /
+ (target_h + 2 * dy.abs() + eps), torch.zeros_like(dy))
+ loss_dw = 1 - torch.min(target_w / (pred_w + eps), pred_w /
+ (target_w + eps))
+ loss_dh = 1 - torch.min(target_h / (pred_h + eps), pred_h /
+ (target_h + eps))
+ loss_comb = torch.stack([loss_dx, loss_dy, loss_dw, loss_dh],
+ dim=-1).view(loss_dx.size(0), -1)
+
+ loss = torch.where(loss_comb < beta, 0.5 * loss_comb * loss_comb / beta,
+ loss_comb - 0.5 * beta)
+ return loss
+
+
+@mmcv.jit(derivate=True, coderize=True)
+@weighted_loss
+def giou_loss(pred, target, eps=1e-7):
+    r"""`Generalized Intersection over Union: A Metric and A Loss for Bounding
+    Box Regression <https://arxiv.org/abs/1902.09630>`_.
+
+ Args:
+ pred (torch.Tensor): Predicted bboxes of format (x1, y1, x2, y2),
+ shape (n, 4).
+ target (torch.Tensor): Corresponding gt bboxes, shape (n, 4).
+ eps (float): Eps to avoid log(0).
+
+ Return:
+ Tensor: Loss tensor.
+ """
+ gious = bbox_overlaps(pred, target, mode='giou', is_aligned=True, eps=eps)
+ loss = 1 - gious
+ return loss
+
+
+@mmcv.jit(derivate=True, coderize=True)
+@weighted_loss
+def diou_loss(pred, target, eps=1e-7):
+ r"""`Implementation of Distance-IoU Loss: Faster and Better
+ Learning for Bounding Box Regression, https://arxiv.org/abs/1911.08287`_.
+
+ Code is modified from https://github.com/Zzh-tju/DIoU.
+
+ Args:
+ pred (Tensor): Predicted bboxes of format (x1, y1, x2, y2),
+ shape (n, 4).
+ target (Tensor): Corresponding gt bboxes, shape (n, 4).
+ eps (float): Eps to avoid log(0).
+ Return:
+ Tensor: Loss tensor.
+ """
+ # overlap
+ lt = torch.max(pred[:, :2], target[:, :2])
+ rb = torch.min(pred[:, 2:], target[:, 2:])
+ wh = (rb - lt).clamp(min=0)
+ overlap = wh[:, 0] * wh[:, 1]
+
+ # union
+ ap = (pred[:, 2] - pred[:, 0]) * (pred[:, 3] - pred[:, 1])
+ ag = (target[:, 2] - target[:, 0]) * (target[:, 3] - target[:, 1])
+ union = ap + ag - overlap + eps
+
+ # IoU
+ ious = overlap / union
+
+ # enclose area
+ enclose_x1y1 = torch.min(pred[:, :2], target[:, :2])
+ enclose_x2y2 = torch.max(pred[:, 2:], target[:, 2:])
+ enclose_wh = (enclose_x2y2 - enclose_x1y1).clamp(min=0)
+
+ cw = enclose_wh[:, 0]
+ ch = enclose_wh[:, 1]
+
+ c2 = cw**2 + ch**2 + eps
+
+ b1_x1, b1_y1 = pred[:, 0], pred[:, 1]
+ b1_x2, b1_y2 = pred[:, 2], pred[:, 3]
+ b2_x1, b2_y1 = target[:, 0], target[:, 1]
+ b2_x2, b2_y2 = target[:, 2], target[:, 3]
+
+ left = ((b2_x1 + b2_x2) - (b1_x1 + b1_x2))**2 / 4
+ right = ((b2_y1 + b2_y2) - (b1_y1 + b1_y2))**2 / 4
+ rho2 = left + right
+
+ # DIoU
+ dious = ious - rho2 / c2
+ loss = 1 - dious
+ return loss
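+
+# Note (illustrative): DIoU adds the penalty rho2 / c2, the squared distance
+# between box centers normalized by the squared diagonal of the smallest
+# enclosing box, so even non-overlapping boxes get a gradient that pulls the
+# predicted center towards the target center.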
+
+
+@mmcv.jit(derivate=True, coderize=True)
+@weighted_loss
+def ciou_loss(pred, target, eps=1e-7):
+    r"""Implementation of paper `Enhancing Geometric Factors into
+    Model Learning and Inference for Object Detection and Instance
+    Segmentation <https://arxiv.org/abs/2005.03572>`_.
+
+ Code is modified from https://github.com/Zzh-tju/CIoU.
+
+ Args:
+ pred (Tensor): Predicted bboxes of format (x1, y1, x2, y2),
+ shape (n, 4).
+ target (Tensor): Corresponding gt bboxes, shape (n, 4).
+ eps (float): Eps to avoid log(0).
+ Return:
+ Tensor: Loss tensor.
+ """
+ # overlap
+ lt = torch.max(pred[:, :2], target[:, :2])
+ rb = torch.min(pred[:, 2:], target[:, 2:])
+ wh = (rb - lt).clamp(min=0)
+ overlap = wh[:, 0] * wh[:, 1]
+
+ # union
+ ap = (pred[:, 2] - pred[:, 0]) * (pred[:, 3] - pred[:, 1])
+ ag = (target[:, 2] - target[:, 0]) * (target[:, 3] - target[:, 1])
+ union = ap + ag - overlap + eps
+
+ # IoU
+ ious = overlap / union
+
+ # enclose area
+ enclose_x1y1 = torch.min(pred[:, :2], target[:, :2])
+ enclose_x2y2 = torch.max(pred[:, 2:], target[:, 2:])
+ enclose_wh = (enclose_x2y2 - enclose_x1y1).clamp(min=0)
+
+ cw = enclose_wh[:, 0]
+ ch = enclose_wh[:, 1]
+
+ c2 = cw**2 + ch**2 + eps
+
+ b1_x1, b1_y1 = pred[:, 0], pred[:, 1]
+ b1_x2, b1_y2 = pred[:, 2], pred[:, 3]
+ b2_x1, b2_y1 = target[:, 0], target[:, 1]
+ b2_x2, b2_y2 = target[:, 2], target[:, 3]
+
+ w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps
+ w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps
+
+ left = ((b2_x1 + b2_x2) - (b1_x1 + b1_x2))**2 / 4
+ right = ((b2_y1 + b2_y2) - (b1_y1 + b1_y2))**2 / 4
+ rho2 = left + right
+
+ factor = 4 / math.pi**2
+ v = factor * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2)
+
+ # CIoU
+ cious = ious - (rho2 / c2 + v**2 / (1 - ious + v))
+ loss = 1 - cious
+ return loss
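+
+# Worked example (illustrative): v penalizes aspect-ratio mismatch; a square
+# prediction (w1/h1 = 1) against a 2:1 target gives
+# v = 4 / pi**2 * (atan(2) - atan(1))**2 ~= 0.042.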
+
+
+@LOSSES.register_module()
+class IoULoss(nn.Module):
+ """IoULoss.
+
+ Computing the IoU loss between a set of predicted bboxes and target bboxes.
+
+ Args:
+ linear (bool): If True, use linear scale of loss instead of log scale.
+ Default: False.
+ eps (float): Eps to avoid log(0).
+ reduction (str): Options are "none", "mean" and "sum".
+ loss_weight (float): Weight of loss.
+ """
+
+ def __init__(self,
+ linear=False,
+ eps=1e-6,
+ reduction='mean',
+ loss_weight=1.0):
+ super(IoULoss, self).__init__()
+ self.linear = linear
+ self.eps = eps
+ self.reduction = reduction
+ self.loss_weight = loss_weight
+
+ def forward(self,
+ pred,
+ target,
+ weight=None,
+ avg_factor=None,
+ reduction_override=None,
+ **kwargs):
+ """Forward function.
+
+ Args:
+ pred (torch.Tensor): The prediction.
+ target (torch.Tensor): The learning target of the prediction.
+ weight (torch.Tensor, optional): The weight of loss for each
+ prediction. Defaults to None.
+ avg_factor (int, optional): Average factor that is used to average
+ the loss. Defaults to None.
+ reduction_override (str, optional): The reduction method used to
+ override the original reduction method of the loss.
+ Defaults to None. Options are "none", "mean" and "sum".
+ """
+ assert reduction_override in (None, 'none', 'mean', 'sum')
+ reduction = (
+ reduction_override if reduction_override else self.reduction)
+ if (weight is not None) and (not torch.any(weight > 0)) and (
+ reduction != 'none'):
+ return (pred * weight).sum() # 0
+ if weight is not None and weight.dim() > 1:
+ # TODO: remove this in the future
+ # reduce the weight of shape (n, 4) to (n,) to match the
+ # iou_loss of shape (n,)
+ assert weight.shape == pred.shape
+ weight = weight.mean(-1)
+ loss = self.loss_weight * iou_loss(
+ pred,
+ target,
+ weight,
+ linear=self.linear,
+ eps=self.eps,
+ reduction=reduction,
+ avg_factor=avg_factor,
+ **kwargs)
+ return loss
+
+
+@LOSSES.register_module()
+class BoundedIoULoss(nn.Module):
+
+ def __init__(self, beta=0.2, eps=1e-3, reduction='mean', loss_weight=1.0):
+ super(BoundedIoULoss, self).__init__()
+ self.beta = beta
+ self.eps = eps
+ self.reduction = reduction
+ self.loss_weight = loss_weight
+
+ def forward(self,
+ pred,
+ target,
+ weight=None,
+ avg_factor=None,
+ reduction_override=None,
+ **kwargs):
+ if weight is not None and not torch.any(weight > 0):
+ return (pred * weight).sum() # 0
+ assert reduction_override in (None, 'none', 'mean', 'sum')
+ reduction = (
+ reduction_override if reduction_override else self.reduction)
+ loss = self.loss_weight * bounded_iou_loss(
+ pred,
+ target,
+ weight,
+ beta=self.beta,
+ eps=self.eps,
+ reduction=reduction,
+ avg_factor=avg_factor,
+ **kwargs)
+ return loss
+
+
+@LOSSES.register_module()
+class GIoULoss(nn.Module):
+
+ def __init__(self, eps=1e-6, reduction='mean', loss_weight=1.0):
+ super(GIoULoss, self).__init__()
+ self.eps = eps
+ self.reduction = reduction
+ self.loss_weight = loss_weight
+
+ def forward(self,
+ pred,
+ target,
+ weight=None,
+ avg_factor=None,
+ reduction_override=None,
+ **kwargs):
+ if weight is not None and not torch.any(weight > 0):
+ return (pred * weight).sum() # 0
+ assert reduction_override in (None, 'none', 'mean', 'sum')
+ reduction = (
+ reduction_override if reduction_override else self.reduction)
+ if weight is not None and weight.dim() > 1:
+ # TODO: remove this in the future
+ # reduce the weight of shape (n, 4) to (n,) to match the
+ # giou_loss of shape (n,)
+ assert weight.shape == pred.shape
+ weight = weight.mean(-1)
+ loss = self.loss_weight * giou_loss(
+ pred,
+ target,
+ weight,
+ eps=self.eps,
+ reduction=reduction,
+ avg_factor=avg_factor,
+ **kwargs)
+ return loss
+
+
+@LOSSES.register_module()
+class DIoULoss(nn.Module):
+
+ def __init__(self, eps=1e-6, reduction='mean', loss_weight=1.0):
+ super(DIoULoss, self).__init__()
+ self.eps = eps
+ self.reduction = reduction
+ self.loss_weight = loss_weight
+
+ def forward(self,
+ pred,
+ target,
+ weight=None,
+ avg_factor=None,
+ reduction_override=None,
+ **kwargs):
+ if weight is not None and not torch.any(weight > 0):
+ return (pred * weight).sum() # 0
+ assert reduction_override in (None, 'none', 'mean', 'sum')
+ reduction = (
+ reduction_override if reduction_override else self.reduction)
+ if weight is not None and weight.dim() > 1:
+ # TODO: remove this in the future
+ # reduce the weight of shape (n, 4) to (n,) to match the
+ # giou_loss of shape (n,)
+ assert weight.shape == pred.shape
+ weight = weight.mean(-1)
+ loss = self.loss_weight * diou_loss(
+ pred,
+ target,
+ weight,
+ eps=self.eps,
+ reduction=reduction,
+ avg_factor=avg_factor,
+ **kwargs)
+ return loss
+
+
+@LOSSES.register_module()
+class CIoULoss(nn.Module):
+
+ def __init__(self, eps=1e-6, reduction='mean', loss_weight=1.0):
+ super(CIoULoss, self).__init__()
+ self.eps = eps
+ self.reduction = reduction
+ self.loss_weight = loss_weight
+
+ def forward(self,
+ pred,
+ target,
+ weight=None,
+ avg_factor=None,
+ reduction_override=None,
+ **kwargs):
+ if weight is not None and not torch.any(weight > 0):
+ return (pred * weight).sum() # 0
+ assert reduction_override in (None, 'none', 'mean', 'sum')
+ reduction = (
+ reduction_override if reduction_override else self.reduction)
+ if weight is not None and weight.dim() > 1:
+ # TODO: remove this in the future
+ # reduce the weight of shape (n, 4) to (n,) to match the
+ # giou_loss of shape (n,)
+ assert weight.shape == pred.shape
+ weight = weight.mean(-1)
+ loss = self.loss_weight * ciou_loss(
+ pred,
+ target,
+ weight,
+ eps=self.eps,
+ reduction=reduction,
+ avg_factor=avg_factor,
+ **kwargs)
+ return loss
diff --git a/annotator/uniformer/mmdet_null/models/losses/kd_loss.py b/annotator/uniformer/mmdet_null/models/losses/kd_loss.py
new file mode 100644
index 0000000000000000000000000000000000000000..f3abb68d4f7b3eec98b873f69c1105a22eb33913
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/losses/kd_loss.py
@@ -0,0 +1,87 @@
+import mmcv
+import torch.nn as nn
+import torch.nn.functional as F
+
+from ..builder import LOSSES
+from .utils import weighted_loss
+
+
+@mmcv.jit(derivate=True, coderize=True)
+@weighted_loss
+def knowledge_distillation_kl_div_loss(pred,
+ soft_label,
+ T,
+ detach_target=True):
+ r"""Loss function for knowledge distilling using KL divergence.
+
+ Args:
+ pred (Tensor): Predicted logits with shape (N, n + 1).
+        soft_label (Tensor): Target logits with shape (N, n + 1).
+ T (int): Temperature for distillation.
+ detach_target (bool): Remove soft_label from automatic differentiation
+
+ Returns:
+ torch.Tensor: Loss tensor with shape (N,).
+ """
+ assert pred.size() == soft_label.size()
+ target = F.softmax(soft_label / T, dim=1)
+ if detach_target:
+ target = target.detach()
+
+ kd_loss = F.kl_div(
+ F.log_softmax(pred / T, dim=1), target, reduction='none').mean(1) * (
+ T * T)
+
+ return kd_loss
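+
+# Note (illustrative): dividing the logits by T softens both distributions,
+# which scales the KL gradient by roughly 1 / T**2; the trailing T * T factor
+# keeps the distillation term at a comparable magnitude across temperatures.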
+
+
+@LOSSES.register_module()
+class KnowledgeDistillationKLDivLoss(nn.Module):
+ """Loss function for knowledge distilling using KL divergence.
+
+ Args:
+ reduction (str): Options are `'none'`, `'mean'` and `'sum'`.
+ loss_weight (float): Loss weight of current loss.
+ T (int): Temperature for distillation.
+ """
+
+ def __init__(self, reduction='mean', loss_weight=1.0, T=10):
+ super(KnowledgeDistillationKLDivLoss, self).__init__()
+ assert T >= 1
+ self.reduction = reduction
+ self.loss_weight = loss_weight
+ self.T = T
+
+ def forward(self,
+ pred,
+ soft_label,
+ weight=None,
+ avg_factor=None,
+ reduction_override=None):
+ """Forward function.
+
+ Args:
+ pred (Tensor): Predicted logits with shape (N, n + 1).
+            soft_label (Tensor): Target logits with shape (N, n + 1).
+ weight (torch.Tensor, optional): The weight of loss for each
+ prediction. Defaults to None.
+ avg_factor (int, optional): Average factor that is used to average
+ the loss. Defaults to None.
+ reduction_override (str, optional): The reduction method used to
+ override the original reduction method of the loss.
+ Defaults to None.
+ """
+ assert reduction_override in (None, 'none', 'mean', 'sum')
+
+ reduction = (
+ reduction_override if reduction_override else self.reduction)
+
+ loss_kd = self.loss_weight * knowledge_distillation_kl_div_loss(
+ pred,
+ soft_label,
+ weight,
+ reduction=reduction,
+ avg_factor=avg_factor,
+ T=self.T)
+
+ return loss_kd
diff --git a/annotator/uniformer/mmdet_null/models/losses/mse_loss.py b/annotator/uniformer/mmdet_null/models/losses/mse_loss.py
new file mode 100644
index 0000000000000000000000000000000000000000..68d05752a245548862f4c9919448d4fb8dc1b8ca
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/losses/mse_loss.py
@@ -0,0 +1,49 @@
+import torch.nn as nn
+import torch.nn.functional as F
+
+from ..builder import LOSSES
+from .utils import weighted_loss
+
+
+@weighted_loss
+def mse_loss(pred, target):
+    """Wrapper of mse loss."""
+ return F.mse_loss(pred, target, reduction='none')
+
+
+@LOSSES.register_module()
+class MSELoss(nn.Module):
+ """MSELoss.
+
+ Args:
+ reduction (str, optional): The method that reduces the loss to a
+ scalar. Options are "none", "mean" and "sum".
+ loss_weight (float, optional): The weight of the loss. Defaults to 1.0
+ """
+
+ def __init__(self, reduction='mean', loss_weight=1.0):
+ super().__init__()
+ self.reduction = reduction
+ self.loss_weight = loss_weight
+
+ def forward(self, pred, target, weight=None, avg_factor=None):
+ """Forward function of loss.
+
+ Args:
+ pred (torch.Tensor): The prediction.
+ target (torch.Tensor): The learning target of the prediction.
+ weight (torch.Tensor, optional): Weight of the loss for each
+ prediction. Defaults to None.
+ avg_factor (int, optional): Average factor that is used to average
+ the loss. Defaults to None.
+
+ Returns:
+ torch.Tensor: The calculated loss
+ """
+ loss = self.loss_weight * mse_loss(
+ pred,
+ target,
+ weight,
+ reduction=self.reduction,
+ avg_factor=avg_factor)
+ return loss
diff --git a/annotator/uniformer/mmdet_null/models/losses/pisa_loss.py b/annotator/uniformer/mmdet_null/models/losses/pisa_loss.py
new file mode 100644
index 0000000000000000000000000000000000000000..4a48adfcd400bb07b719a6fbd5a8af0508820629
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/losses/pisa_loss.py
@@ -0,0 +1,183 @@
+import mmcv
+import torch
+
+from mmdet.core import bbox_overlaps
+
+
+@mmcv.jit(derivate=True, coderize=True)
+def isr_p(cls_score,
+ bbox_pred,
+ bbox_targets,
+ rois,
+ sampling_results,
+ loss_cls,
+ bbox_coder,
+ k=2,
+ bias=0,
+ num_class=80):
+ """Importance-based Sample Reweighting (ISR_P), positive part.
+
+ Args:
+ cls_score (Tensor): Predicted classification scores.
+ bbox_pred (Tensor): Predicted bbox deltas.
+        bbox_targets (tuple[Tensor]): A tuple of bbox targets, which are
+            labels, label_weights, bbox_targets and bbox_weights, respectively.
+ rois (Tensor): Anchors (single_stage) in shape (n, 4) or RoIs
+ (two_stage) in shape (n, 5).
+ sampling_results (obj): Sampling results.
+ loss_cls (func): Classification loss func of the head.
+ bbox_coder (obj): BBox coder of the head.
+ k (float): Power of the non-linear mapping.
+ bias (float): Shift of the non-linear mapping.
+ num_class (int): Number of classes, default: 80.
+
+ Return:
+ tuple([Tensor]): labels, imp_based_label_weights, bbox_targets,
+ bbox_target_weights
+ """
+
+ labels, label_weights, bbox_targets, bbox_weights = bbox_targets
+ pos_label_inds = ((labels >= 0) &
+ (labels < num_class)).nonzero().reshape(-1)
+ pos_labels = labels[pos_label_inds]
+
+ # if no positive samples, return the original targets
+ num_pos = float(pos_label_inds.size(0))
+ if num_pos == 0:
+ return labels, label_weights, bbox_targets, bbox_weights
+
+ # merge pos_assigned_gt_inds of per image to a single tensor
+ gts = list()
+ last_max_gt = 0
+ for i in range(len(sampling_results)):
+ gt_i = sampling_results[i].pos_assigned_gt_inds
+ gts.append(gt_i + last_max_gt)
+ if len(gt_i) != 0:
+ last_max_gt = gt_i.max() + 1
+ gts = torch.cat(gts)
+ assert len(gts) == num_pos
+
+ cls_score = cls_score.detach()
+ bbox_pred = bbox_pred.detach()
+
+ # For single stage detectors, rois here indicate anchors, in shape (N, 4)
+ # For two stage detectors, rois are in shape (N, 5)
+ if rois.size(-1) == 5:
+ pos_rois = rois[pos_label_inds][:, 1:]
+ else:
+ pos_rois = rois[pos_label_inds]
+
+ if bbox_pred.size(-1) > 4:
+ bbox_pred = bbox_pred.view(bbox_pred.size(0), -1, 4)
+ pos_delta_pred = bbox_pred[pos_label_inds, pos_labels].view(-1, 4)
+ else:
+ pos_delta_pred = bbox_pred[pos_label_inds].view(-1, 4)
+
+ # compute iou of the predicted bbox and the corresponding GT
+ pos_delta_target = bbox_targets[pos_label_inds].view(-1, 4)
+ pos_bbox_pred = bbox_coder.decode(pos_rois, pos_delta_pred)
+ target_bbox_pred = bbox_coder.decode(pos_rois, pos_delta_target)
+ ious = bbox_overlaps(pos_bbox_pred, target_bbox_pred, is_aligned=True)
+
+ pos_imp_weights = label_weights[pos_label_inds]
+ # Two steps to compute IoU-HLR. Samples are first sorted by IoU locally,
+ # then sorted again within the same-rank group
+ max_l_num = pos_labels.bincount().max()
+ for label in pos_labels.unique():
+ l_inds = (pos_labels == label).nonzero().view(-1)
+ l_gts = gts[l_inds]
+ for t in l_gts.unique():
+ t_inds = l_inds[l_gts == t]
+ t_ious = ious[t_inds]
+ _, t_iou_rank_idx = t_ious.sort(descending=True)
+ _, t_iou_rank = t_iou_rank_idx.sort()
+ ious[t_inds] += max_l_num - t_iou_rank.float()
+ l_ious = ious[l_inds]
+ _, l_iou_rank_idx = l_ious.sort(descending=True)
+ _, l_iou_rank = l_iou_rank_idx.sort() # IoU-HLR
+ # linearly map HLR to label weights
+ pos_imp_weights[l_inds] *= (max_l_num - l_iou_rank.float()) / max_l_num
+
+ pos_imp_weights = (bias + pos_imp_weights * (1 - bias)).pow(k)
+
+ # normalize to make the new weighted loss value equal to the original loss
+ pos_loss_cls = loss_cls(
+ cls_score[pos_label_inds], pos_labels, reduction_override='none')
+ if pos_loss_cls.dim() > 1:
+ ori_pos_loss_cls = pos_loss_cls * label_weights[pos_label_inds][:,
+ None]
+ new_pos_loss_cls = pos_loss_cls * pos_imp_weights[:, None]
+ else:
+ ori_pos_loss_cls = pos_loss_cls * label_weights[pos_label_inds]
+ new_pos_loss_cls = pos_loss_cls * pos_imp_weights
+ pos_loss_cls_ratio = ori_pos_loss_cls.sum() / new_pos_loss_cls.sum()
+ pos_imp_weights = pos_imp_weights * pos_loss_cls_ratio
+ label_weights[pos_label_inds] = pos_imp_weights
+
+ bbox_targets = labels, label_weights, bbox_targets, bbox_weights
+ return bbox_targets
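+
+# Note (illustrative): IoU-HLR ranks positives by IoU within each (class, GT)
+# group and then re-ranks them within the class; rank 0 keeps a weight factor
+# of 1 and lower ranks decay linearly before the (bias + (1 - bias) * w)**k
+# mapping and the final loss renormalization.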
+
+
+@mmcv.jit(derivate=True, coderize=True)
+def carl_loss(cls_score,
+ labels,
+ bbox_pred,
+ bbox_targets,
+ loss_bbox,
+ k=1,
+ bias=0.2,
+ avg_factor=None,
+ sigmoid=False,
+ num_class=80):
+ """Classification-Aware Regression Loss (CARL).
+
+ Args:
+ cls_score (Tensor): Predicted classification scores.
+ labels (Tensor): Targets of classification.
+ bbox_pred (Tensor): Predicted bbox deltas.
+ bbox_targets (Tensor): Target of bbox regression.
+ loss_bbox (func): Regression loss func of the head.
+ k (float): Power of the non-linear mapping.
+ bias (float): Shift of the non-linear mapping.
+ avg_factor (int): Average factor used in regression loss.
+ sigmoid (bool): Activation of the classification score.
+ num_class (int): Number of classes, default: 80.
+
+ Return:
+ dict: CARL loss dict.
+ """
+ pos_label_inds = ((labels >= 0) &
+ (labels < num_class)).nonzero().reshape(-1)
+ if pos_label_inds.numel() == 0:
+ return dict(loss_carl=cls_score.sum()[None] * 0.)
+ pos_labels = labels[pos_label_inds]
+
+ # multiply pos_cls_score with the corresponding bbox weight
+ # and remain gradient
+ if sigmoid:
+ pos_cls_score = cls_score.sigmoid()[pos_label_inds, pos_labels]
+ else:
+ pos_cls_score = cls_score.softmax(-1)[pos_label_inds, pos_labels]
+ carl_loss_weights = (bias + (1 - bias) * pos_cls_score).pow(k)
+
+ # normalize carl_loss_weight to make its sum equal to num positive
+ num_pos = float(pos_cls_score.size(0))
+ weight_ratio = num_pos / carl_loss_weights.sum()
+ carl_loss_weights *= weight_ratio
+
+ if avg_factor is None:
+ avg_factor = bbox_targets.size(0)
+ # if is class agnostic, bbox pred is in shape (N, 4)
+ # otherwise, bbox pred is in shape (N, #classes, 4)
+ if bbox_pred.size(-1) > 4:
+ bbox_pred = bbox_pred.view(bbox_pred.size(0), -1, 4)
+ pos_bbox_preds = bbox_pred[pos_label_inds, pos_labels]
+ else:
+ pos_bbox_preds = bbox_pred[pos_label_inds]
+ ori_loss_reg = loss_bbox(
+ pos_bbox_preds,
+ bbox_targets[pos_label_inds],
+ reduction_override='none') / avg_factor
+ loss_carl = (ori_loss_reg * carl_loss_weights[:, None]).sum()
+ return dict(loss_carl=loss_carl[None])
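+
+# Note (illustrative): CARL scales each positive's regression loss by a
+# function of its classification score, so boxes the classifier is confident
+# about are regressed more precisely; the weights are rescaled so that they
+# sum to the number of positive samples.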
diff --git a/annotator/uniformer/mmdet_null/models/losses/smooth_l1_loss.py b/annotator/uniformer/mmdet_null/models/losses/smooth_l1_loss.py
new file mode 100644
index 0000000000000000000000000000000000000000..ec9c98a52d1932d6ccff18938c17c36755bf1baf
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/losses/smooth_l1_loss.py
@@ -0,0 +1,139 @@
+import mmcv
+import torch
+import torch.nn as nn
+
+from ..builder import LOSSES
+from .utils import weighted_loss
+
+
+@mmcv.jit(derivate=True, coderize=True)
+@weighted_loss
+def smooth_l1_loss(pred, target, beta=1.0):
+ """Smooth L1 loss.
+
+ Args:
+ pred (torch.Tensor): The prediction.
+ target (torch.Tensor): The learning target of the prediction.
+ beta (float, optional): The threshold in the piecewise function.
+ Defaults to 1.0.
+
+ Returns:
+ torch.Tensor: Calculated loss
+ """
+ assert beta > 0
+ assert pred.size() == target.size() and target.numel() > 0
+ diff = torch.abs(pred - target)
+ loss = torch.where(diff < beta, 0.5 * diff * diff / beta,
+ diff - 0.5 * beta)
+ return loss
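+
+# Example (illustrative): with beta=1.0 the loss is 0.5 * d**2 for |d| < 1
+# (quadratic near zero) and |d| - 0.5 beyond, so outliers contribute a
+# gradient of bounded magnitude 1.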
+
+
+@mmcv.jit(derivate=True, coderize=True)
+@weighted_loss
+def l1_loss(pred, target):
+ """L1 loss.
+
+ Args:
+ pred (torch.Tensor): The prediction.
+ target (torch.Tensor): The learning target of the prediction.
+
+ Returns:
+ torch.Tensor: Calculated loss
+ """
+ assert pred.size() == target.size() and target.numel() > 0
+ loss = torch.abs(pred - target)
+ return loss
+
+
+@LOSSES.register_module()
+class SmoothL1Loss(nn.Module):
+ """Smooth L1 loss.
+
+ Args:
+ beta (float, optional): The threshold in the piecewise function.
+ Defaults to 1.0.
+ reduction (str, optional): The method to reduce the loss.
+ Options are "none", "mean" and "sum". Defaults to "mean".
+ loss_weight (float, optional): The weight of loss.
+ """
+
+ def __init__(self, beta=1.0, reduction='mean', loss_weight=1.0):
+ super(SmoothL1Loss, self).__init__()
+ self.beta = beta
+ self.reduction = reduction
+ self.loss_weight = loss_weight
+
+ def forward(self,
+ pred,
+ target,
+ weight=None,
+ avg_factor=None,
+ reduction_override=None,
+ **kwargs):
+ """Forward function.
+
+ Args:
+ pred (torch.Tensor): The prediction.
+ target (torch.Tensor): The learning target of the prediction.
+ weight (torch.Tensor, optional): The weight of loss for each
+ prediction. Defaults to None.
+ avg_factor (int, optional): Average factor that is used to average
+ the loss. Defaults to None.
+ reduction_override (str, optional): The reduction method used to
+ override the original reduction method of the loss.
+ Defaults to None.
+ """
+ assert reduction_override in (None, 'none', 'mean', 'sum')
+ reduction = (
+ reduction_override if reduction_override else self.reduction)
+ loss_bbox = self.loss_weight * smooth_l1_loss(
+ pred,
+ target,
+ weight,
+ beta=self.beta,
+ reduction=reduction,
+ avg_factor=avg_factor,
+ **kwargs)
+ return loss_bbox
+
+
+@LOSSES.register_module()
+class L1Loss(nn.Module):
+ """L1 loss.
+
+ Args:
+ reduction (str, optional): The method to reduce the loss.
+ Options are "none", "mean" and "sum".
+ loss_weight (float, optional): The weight of loss.
+ """
+
+ def __init__(self, reduction='mean', loss_weight=1.0):
+ super(L1Loss, self).__init__()
+ self.reduction = reduction
+ self.loss_weight = loss_weight
+
+ def forward(self,
+ pred,
+ target,
+ weight=None,
+ avg_factor=None,
+ reduction_override=None):
+ """Forward function.
+
+ Args:
+ pred (torch.Tensor): The prediction.
+ target (torch.Tensor): The learning target of the prediction.
+ weight (torch.Tensor, optional): The weight of loss for each
+ prediction. Defaults to None.
+ avg_factor (int, optional): Average factor that is used to average
+ the loss. Defaults to None.
+ reduction_override (str, optional): The reduction method used to
+ override the original reduction method of the loss.
+ Defaults to None.
+ """
+ assert reduction_override in (None, 'none', 'mean', 'sum')
+ reduction = (
+ reduction_override if reduction_override else self.reduction)
+ loss_bbox = self.loss_weight * l1_loss(
+ pred, target, weight, reduction=reduction, avg_factor=avg_factor)
+ return loss_bbox
diff --git a/annotator/uniformer/mmdet_null/models/losses/utils.py b/annotator/uniformer/mmdet_null/models/losses/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..4756d7fcefd7cda1294c2662b4ca3e90c0a8e124
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/losses/utils.py
@@ -0,0 +1,100 @@
+import functools
+
+import mmcv
+import torch.nn.functional as F
+
+
+def reduce_loss(loss, reduction):
+ """Reduce loss as specified.
+
+ Args:
+ loss (Tensor): Elementwise loss tensor.
+ reduction (str): Options are "none", "mean" and "sum".
+
+ Return:
+ Tensor: Reduced loss tensor.
+ """
+ reduction_enum = F._Reduction.get_enum(reduction)
+ # none: 0, elementwise_mean:1, sum: 2
+ if reduction_enum == 0:
+ return loss
+ elif reduction_enum == 1:
+ return loss.mean()
+ elif reduction_enum == 2:
+ return loss.sum()
+
+
+@mmcv.jit(derivate=True, coderize=True)
+def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
+ """Apply element-wise weight and reduce loss.
+
+ Args:
+ loss (Tensor): Element-wise loss.
+ weight (Tensor): Element-wise weights.
+ reduction (str): Same as built-in losses of PyTorch.
+        avg_factor (float): Average factor when computing the mean of losses.
+
+ Returns:
+ Tensor: Processed loss values.
+ """
+ # if weight is specified, apply element-wise weight
+ if weight is not None:
+ loss = loss * weight
+
+ # if avg_factor is not specified, just reduce the loss
+ if avg_factor is None:
+ loss = reduce_loss(loss, reduction)
+ else:
+ # if reduction is mean, then average the loss by avg_factor
+ if reduction == 'mean':
+ loss = loss.sum() / avg_factor
+ # if reduction is 'none', then do nothing, otherwise raise an error
+ elif reduction != 'none':
+ raise ValueError('avg_factor can not be used with reduction="sum"')
+ return loss
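+
+# Example (illustrative): weight_reduce_loss(loss, weight, 'mean', avg_factor=N)
+# returns (loss * weight).sum() / N, which is how the detection losses average
+# over the number of positive samples rather than over all elements.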
+
+
+def weighted_loss(loss_func):
+ """Create a weighted version of a given loss function.
+
+ To use this decorator, the loss function must have the signature like
+ `loss_func(pred, target, **kwargs)`. The function only needs to compute
+ element-wise loss without any reduction. This decorator will add weight
+ and reduction arguments to the function. The decorated function will have
+ the signature like `loss_func(pred, target, weight=None, reduction='mean',
+ avg_factor=None, **kwargs)`.
+
+ :Example:
+
+ >>> import torch
+ >>> @weighted_loss
+ >>> def l1_loss(pred, target):
+ >>> return (pred - target).abs()
+
+ >>> pred = torch.Tensor([0, 2, 3])
+ >>> target = torch.Tensor([1, 1, 1])
+ >>> weight = torch.Tensor([1, 0, 1])
+
+ >>> l1_loss(pred, target)
+ tensor(1.3333)
+ >>> l1_loss(pred, target, weight)
+ tensor(1.)
+ >>> l1_loss(pred, target, reduction='none')
+ tensor([1., 1., 2.])
+ >>> l1_loss(pred, target, weight, avg_factor=2)
+ tensor(1.5000)
+ """
+
+ @functools.wraps(loss_func)
+ def wrapper(pred,
+ target,
+ weight=None,
+ reduction='mean',
+ avg_factor=None,
+ **kwargs):
+ # get element-wise loss
+ loss = loss_func(pred, target, **kwargs)
+ loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
+ return loss
+
+ return wrapper
diff --git a/annotator/uniformer/mmdet_null/models/losses/varifocal_loss.py b/annotator/uniformer/mmdet_null/models/losses/varifocal_loss.py
new file mode 100644
index 0000000000000000000000000000000000000000..7f00bd6916c04fef45a9aeecb50888266420daf9
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/losses/varifocal_loss.py
@@ -0,0 +1,133 @@
+import mmcv
+import torch.nn as nn
+import torch.nn.functional as F
+
+from ..builder import LOSSES
+from .utils import weight_reduce_loss
+
+
+@mmcv.jit(derivate=True, coderize=True)
+def varifocal_loss(pred,
+ target,
+ weight=None,
+ alpha=0.75,
+ gamma=2.0,
+ iou_weighted=True,
+ reduction='mean',
+ avg_factor=None):
+    """`Varifocal Loss <https://arxiv.org/abs/2008.13367>`_
+
+ Args:
+ pred (torch.Tensor): The prediction with shape (N, C), C is the
+ number of classes
+ target (torch.Tensor): The learning target of the iou-aware
+ classification score with shape (N, C), C is the number of classes.
+ weight (torch.Tensor, optional): The weight of loss for each
+ prediction. Defaults to None.
+ alpha (float, optional): A balance factor for the negative part of
+ Varifocal Loss, which is different from the alpha of Focal Loss.
+ Defaults to 0.75.
+ gamma (float, optional): The gamma for calculating the modulating
+ factor. Defaults to 2.0.
+ iou_weighted (bool, optional): Whether to weight the loss of the
+ positive example with the iou target. Defaults to True.
+ reduction (str, optional): The method used to reduce the loss into
+ a scalar. Defaults to 'mean'. Options are "none", "mean" and
+ "sum".
+ avg_factor (int, optional): Average factor that is used to average
+ the loss. Defaults to None.
+ """
+ # pred and target should be of the same size
+ assert pred.size() == target.size()
+ pred_sigmoid = pred.sigmoid()
+ target = target.type_as(pred)
+ if iou_weighted:
+ focal_weight = target * (target > 0.0).float() + \
+ alpha * (pred_sigmoid - target).abs().pow(gamma) * \
+ (target <= 0.0).float()
+ else:
+ focal_weight = (target > 0.0).float() + \
+ alpha * (pred_sigmoid - target).abs().pow(gamma) * \
+ (target <= 0.0).float()
+ loss = F.binary_cross_entropy_with_logits(
+ pred, target, reduction='none') * focal_weight
+ loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
+ return loss
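+
+# Note (illustrative): for a negative location (target == 0) the weight
+# reduces to alpha * p**gamma, the usual focal down-weighting, while with
+# iou_weighted=True a positive location is weighted by its IoU-aware target q.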
+
+
+@LOSSES.register_module()
+class VarifocalLoss(nn.Module):
+
+ def __init__(self,
+ use_sigmoid=True,
+ alpha=0.75,
+ gamma=2.0,
+ iou_weighted=True,
+ reduction='mean',
+ loss_weight=1.0):
+        """`Varifocal Loss <https://arxiv.org/abs/2008.13367>`_
+
+ Args:
+ use_sigmoid (bool, optional): Whether the prediction is
+ used for sigmoid or softmax. Defaults to True.
+ alpha (float, optional): A balance factor for the negative part of
+ Varifocal Loss, which is different from the alpha of Focal
+ Loss. Defaults to 0.75.
+ gamma (float, optional): The gamma for calculating the modulating
+ factor. Defaults to 2.0.
+ iou_weighted (bool, optional): Whether to weight the loss of the
+ positive examples with the iou target. Defaults to True.
+ reduction (str, optional): The method used to reduce the loss into
+ a scalar. Defaults to 'mean'. Options are "none", "mean" and
+ "sum".
+ loss_weight (float, optional): Weight of loss. Defaults to 1.0.
+ """
+ super(VarifocalLoss, self).__init__()
+ assert use_sigmoid is True, \
+ 'Only sigmoid varifocal loss supported now.'
+ assert alpha >= 0.0
+ self.use_sigmoid = use_sigmoid
+ self.alpha = alpha
+ self.gamma = gamma
+ self.iou_weighted = iou_weighted
+ self.reduction = reduction
+ self.loss_weight = loss_weight
+
+ def forward(self,
+ pred,
+ target,
+ weight=None,
+ avg_factor=None,
+ reduction_override=None):
+ """Forward function.
+
+ Args:
+ pred (torch.Tensor): The prediction.
+ target (torch.Tensor): The learning target of the prediction.
+ weight (torch.Tensor, optional): The weight of loss for each
+ prediction. Defaults to None.
+ avg_factor (int, optional): Average factor that is used to average
+ the loss. Defaults to None.
+ reduction_override (str, optional): The reduction method used to
+ override the original reduction method of the loss.
+ Options are "none", "mean" and "sum".
+
+ Returns:
+ torch.Tensor: The calculated loss
+ """
+ assert reduction_override in (None, 'none', 'mean', 'sum')
+ reduction = (
+ reduction_override if reduction_override else self.reduction)
+ if self.use_sigmoid:
+ loss_cls = self.loss_weight * varifocal_loss(
+ pred,
+ target,
+ weight,
+ alpha=self.alpha,
+ gamma=self.gamma,
+ iou_weighted=self.iou_weighted,
+ reduction=reduction,
+ avg_factor=avg_factor)
+ else:
+ raise NotImplementedError
+ return loss_cls
diff --git a/annotator/uniformer/mmdet_null/models/necks/__init__.py b/annotator/uniformer/mmdet_null/models/necks/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..02f833a8a0f538a8c06fef622d1cadc1a1b66ea2
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/necks/__init__.py
@@ -0,0 +1,16 @@
+from .bfp import BFP
+from .channel_mapper import ChannelMapper
+from .fpg import FPG
+from .fpn import FPN
+from .fpn_carafe import FPN_CARAFE
+from .hrfpn import HRFPN
+from .nas_fpn import NASFPN
+from .nasfcos_fpn import NASFCOS_FPN
+from .pafpn import PAFPN
+from .rfp import RFP
+from .yolo_neck import YOLOV3Neck
+
+__all__ = [
+ 'FPN', 'BFP', 'ChannelMapper', 'HRFPN', 'NASFPN', 'FPN_CARAFE', 'PAFPN',
+ 'NASFCOS_FPN', 'RFP', 'YOLOV3Neck', 'FPG'
+]
diff --git a/annotator/uniformer/mmdet_null/models/necks/bfp.py b/annotator/uniformer/mmdet_null/models/necks/bfp.py
new file mode 100644
index 0000000000000000000000000000000000000000..123f5515ab6b51867d5781aa1572a0810670235f
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/necks/bfp.py
@@ -0,0 +1,104 @@
+import torch.nn as nn
+import torch.nn.functional as F
+from mmcv.cnn import ConvModule, xavier_init
+from mmcv.cnn.bricks import NonLocal2d
+
+from ..builder import NECKS
+
+
+@NECKS.register_module()
+class BFP(nn.Module):
+ """BFP (Balanced Feature Pyramids)
+
+    BFP takes multi-level features as inputs, gathers them into a single one,
+    then refines the gathered feature and scatters the refined results back to
+    the multi-level features. This module is used in Libra R-CNN (CVPR 2019);
+    see the paper `Libra R-CNN: Towards Balanced Learning for Object Detection
+    <https://arxiv.org/abs/1904.02701>`_ for details.
+
+ Args:
+ in_channels (int): Number of input channels (feature maps of all levels
+ should have the same channels).
+ num_levels (int): Number of input feature levels.
+ conv_cfg (dict): The config dict for convolution layers.
+ norm_cfg (dict): The config dict for normalization layers.
+ refine_level (int): Index of integration and refine level of BSF in
+ multi-level features from bottom to top.
+ refine_type (str): Type of the refine op, currently support
+ [None, 'conv', 'non_local'].
+ """
+
+ def __init__(self,
+ in_channels,
+ num_levels,
+ refine_level=2,
+ refine_type=None,
+ conv_cfg=None,
+ norm_cfg=None):
+ super(BFP, self).__init__()
+ assert refine_type in [None, 'conv', 'non_local']
+
+ self.in_channels = in_channels
+ self.num_levels = num_levels
+ self.conv_cfg = conv_cfg
+ self.norm_cfg = norm_cfg
+
+ self.refine_level = refine_level
+ self.refine_type = refine_type
+ assert 0 <= self.refine_level < self.num_levels
+
+ if self.refine_type == 'conv':
+ self.refine = ConvModule(
+ self.in_channels,
+ self.in_channels,
+ 3,
+ padding=1,
+ conv_cfg=self.conv_cfg,
+ norm_cfg=self.norm_cfg)
+ elif self.refine_type == 'non_local':
+ self.refine = NonLocal2d(
+ self.in_channels,
+ reduction=1,
+ use_scale=False,
+ conv_cfg=self.conv_cfg,
+ norm_cfg=self.norm_cfg)
+
+ def init_weights(self):
+ """Initialize the weights of FPN module."""
+ for m in self.modules():
+ if isinstance(m, nn.Conv2d):
+ xavier_init(m, distribution='uniform')
+
+ def forward(self, inputs):
+ """Forward function."""
+ assert len(inputs) == self.num_levels
+
+ # step 1: gather multi-level features by resize and average
+ feats = []
+ gather_size = inputs[self.refine_level].size()[2:]
+ for i in range(self.num_levels):
+ if i < self.refine_level:
+ gathered = F.adaptive_max_pool2d(
+ inputs[i], output_size=gather_size)
+ else:
+ gathered = F.interpolate(
+ inputs[i], size=gather_size, mode='nearest')
+ feats.append(gathered)
+
+ bsf = sum(feats) / len(feats)
+
+ # step 2: refine gathered features
+ if self.refine_type is not None:
+ bsf = self.refine(bsf)
+
+ # step 3: scatter refined features to multi-levels by a residual path
+ outs = []
+ for i in range(self.num_levels):
+ out_size = inputs[i].size()[2:]
+ if i < self.refine_level:
+ residual = F.interpolate(bsf, size=out_size, mode='nearest')
+ else:
+ residual = F.adaptive_max_pool2d(bsf, output_size=out_size)
+ outs.append(residual + inputs[i])
+
+ return tuple(outs)
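+
+# Minimal usage sketch (illustrative; channel and level numbers are arbitrary):
+#   neck = BFP(in_channels=256, num_levels=5, refine_level=2,
+#              refine_type='non_local')
+#   outs = neck([torch.rand(1, 256, s, s) for s in (64, 32, 16, 8, 4)])
+# Each output keeps its own resolution; only the balanced semantic feature is
+# shared across levels through the residual path.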
diff --git a/annotator/uniformer/mmdet_null/models/necks/channel_mapper.py b/annotator/uniformer/mmdet_null/models/necks/channel_mapper.py
new file mode 100644
index 0000000000000000000000000000000000000000..a4f5ed44caefb1612df67785b1f4f0d9ec46ee93
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/necks/channel_mapper.py
@@ -0,0 +1,74 @@
+import torch.nn as nn
+from mmcv.cnn import ConvModule, xavier_init
+
+from ..builder import NECKS
+
+
+@NECKS.register_module()
+class ChannelMapper(nn.Module):
+ r"""Channel Mapper to reduce/increase channels of backbone features.
+
+ This is used to reduce/increase channels of backbone features.
+
+ Args:
+ in_channels (List[int]): Number of input channels per scale.
+ out_channels (int): Number of output channels (used at each scale).
+ kernel_size (int, optional): kernel_size for reducing channels (used
+ at each scale). Default: 3.
+ conv_cfg (dict, optional): Config dict for convolution layer.
+ Default: None.
+ norm_cfg (dict, optional): Config dict for normalization layer.
+ Default: None.
+ act_cfg (dict, optional): Config dict for activation layer in
+ ConvModule. Default: dict(type='ReLU').
+
+ Example:
+ >>> import torch
+ >>> in_channels = [2, 3, 5, 7]
+ >>> scales = [340, 170, 84, 43]
+ >>> inputs = [torch.rand(1, c, s, s)
+ ... for c, s in zip(in_channels, scales)]
+ >>> self = ChannelMapper(in_channels, 11, 3).eval()
+ >>> outputs = self.forward(inputs)
+ >>> for i in range(len(outputs)):
+ ... print(f'outputs[{i}].shape = {outputs[i].shape}')
+ outputs[0].shape = torch.Size([1, 11, 340, 340])
+ outputs[1].shape = torch.Size([1, 11, 170, 170])
+ outputs[2].shape = torch.Size([1, 11, 84, 84])
+ outputs[3].shape = torch.Size([1, 11, 43, 43])
+ """
+
+ def __init__(self,
+ in_channels,
+ out_channels,
+ kernel_size=3,
+ conv_cfg=None,
+ norm_cfg=None,
+ act_cfg=dict(type='ReLU')):
+ super(ChannelMapper, self).__init__()
+ assert isinstance(in_channels, list)
+
+ self.convs = nn.ModuleList()
+ for in_channel in in_channels:
+ self.convs.append(
+ ConvModule(
+ in_channel,
+ out_channels,
+ kernel_size,
+ padding=(kernel_size - 1) // 2,
+ conv_cfg=conv_cfg,
+ norm_cfg=norm_cfg,
+ act_cfg=act_cfg))
+
+ # default init_weights for conv(msra) and norm in ConvModule
+ def init_weights(self):
+ """Initialize the weights of ChannelMapper module."""
+ for m in self.modules():
+ if isinstance(m, nn.Conv2d):
+ xavier_init(m, distribution='uniform')
+
+ def forward(self, inputs):
+ """Forward function."""
+ assert len(inputs) == len(self.convs)
+ outs = [self.convs[i](inputs[i]) for i in range(len(inputs))]
+ return tuple(outs)
diff --git a/annotator/uniformer/mmdet_null/models/necks/fpg.py b/annotator/uniformer/mmdet_null/models/necks/fpg.py
new file mode 100644
index 0000000000000000000000000000000000000000..c8e0d163ccf8cef6211530ba6c1b4d558ff6403f
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/necks/fpg.py
@@ -0,0 +1,398 @@
+import torch.nn as nn
+import torch.nn.functional as F
+from mmcv.cnn import ConvModule, caffe2_xavier_init, constant_init, is_norm
+
+from ..builder import NECKS
+
+
+class Transition(nn.Module):
+ """Base class for transition.
+
+ Args:
+ in_channels (int): Number of input channels.
+ out_channels (int): Number of output channels.
+ """
+
+ def __init__(self, in_channels, out_channels):
+ super().__init__()
+ self.in_channels = in_channels
+ self.out_channels = out_channels
+
+    def forward(self, x):
+        pass
+
+
+class UpInterpolationConv(Transition):
+ """A transition used for up-sampling.
+
+ Up-sample the input by interpolation then refines the feature by
+ a convolution layer.
+
+ Args:
+ in_channels (int): Number of input channels.
+ out_channels (int): Number of output channels.
+ scale_factor (int): Up-sampling factor. Default: 2.
+ mode (int): Interpolation mode. Default: nearest.
+ align_corners (bool): Whether align corners when interpolation.
+ Default: None.
+ kernel_size (int): Kernel size for the conv. Default: 3.
+ """
+
+ def __init__(self,
+ in_channels,
+ out_channels,
+ scale_factor=2,
+ mode='nearest',
+ align_corners=None,
+ kernel_size=3,
+ **kwargs):
+ super().__init__(in_channels, out_channels)
+ self.mode = mode
+ self.scale_factor = scale_factor
+ self.align_corners = align_corners
+ self.conv = ConvModule(
+ in_channels,
+ out_channels,
+ kernel_size,
+ padding=(kernel_size - 1) // 2,
+ **kwargs)
+
+ def forward(self, x):
+ x = F.interpolate(
+ x,
+ scale_factor=self.scale_factor,
+ mode=self.mode,
+ align_corners=self.align_corners)
+ x = self.conv(x)
+ return x
+
+
+class LastConv(Transition):
+ """A transition used for refining the output of the last stage.
+
+ Args:
+ in_channels (int): Number of input channels.
+ out_channels (int): Number of output channels.
+ num_inputs (int): Number of inputs of the FPN features.
+ kernel_size (int): Kernel size for the conv. Default: 3.
+ """
+
+ def __init__(self,
+ in_channels,
+ out_channels,
+ num_inputs,
+ kernel_size=3,
+ **kwargs):
+ super().__init__(in_channels, out_channels)
+ self.num_inputs = num_inputs
+ self.conv_out = ConvModule(
+ in_channels,
+ out_channels,
+ kernel_size,
+ padding=(kernel_size - 1) // 2,
+ **kwargs)
+
+ def forward(self, inputs):
+ assert len(inputs) == self.num_inputs
+ return self.conv_out(inputs[-1])
+
+
+@NECKS.register_module()
+class FPG(nn.Module):
+ """FPG.
+
+    Implementation of `Feature Pyramid Grids (FPG)
+    <https://arxiv.org/abs/2004.03580>`_.
+ This implementation only gives the basic structure stated in the paper.
+    But users can implement different types of transitions to fully explore
+    the potential power of the structure of FPG.
+
+ Args:
+ in_channels (int): Number of input channels (feature maps of all levels
+ should have the same channels).
+ out_channels (int): Number of output channels (used at each scale)
+ num_outs (int): Number of output scales.
+ stack_times (int): The number of times the pyramid architecture will
+ be stacked.
+ paths (list[str]): Specify the path order of each stack level.
+ Each element in the list should be either 'bu' (bottom-up) or
+ 'td' (top-down).
+ inter_channels (int): Number of inter channels.
+ same_up_trans (dict): Transition that goes down at the same stage.
+ same_down_trans (dict): Transition that goes up at the same stage.
+ across_lateral_trans (dict): Across-pathway same-stage
+ across_down_trans (dict): Across-pathway bottom-up connection.
+ across_up_trans (dict): Across-pathway top-down connection.
+ across_skip_trans (dict): Across-pathway skip connection.
+ output_trans (dict): Transition that trans the output of the
+ last stage.
+ start_level (int): Index of the start input backbone level used to
+ build the feature pyramid. Default: 0.
+ end_level (int): Index of the end input backbone level (exclusive) to
+ build the feature pyramid. Default: -1, which means the last level.
+        add_extra_convs (bool): It decides whether to add conv
+            layers on top of the original feature maps. Default to False.
+ norm_cfg (dict): Config dict for normalization layer. Default: None.
+ """
+
+ transition_types = {
+ 'conv': ConvModule,
+ 'interpolation_conv': UpInterpolationConv,
+ 'last_conv': LastConv,
+ }
+
+ def __init__(self,
+ in_channels,
+ out_channels,
+ num_outs,
+ stack_times,
+ paths,
+ inter_channels=None,
+ same_down_trans=None,
+ same_up_trans=dict(
+ type='conv', kernel_size=3, stride=2, padding=1),
+ across_lateral_trans=dict(type='conv', kernel_size=1),
+ across_down_trans=dict(type='conv', kernel_size=3),
+ across_up_trans=None,
+ across_skip_trans=dict(type='identity'),
+ output_trans=dict(type='last_conv', kernel_size=3),
+ start_level=0,
+ end_level=-1,
+ add_extra_convs=False,
+ norm_cfg=None,
+ skip_inds=None):
+ super(FPG, self).__init__()
+ assert isinstance(in_channels, list)
+ self.in_channels = in_channels
+ self.out_channels = out_channels
+ self.num_ins = len(in_channels)
+ self.num_outs = num_outs
+ if inter_channels is None:
+ self.inter_channels = [out_channels for _ in range(num_outs)]
+ elif isinstance(inter_channels, int):
+ self.inter_channels = [inter_channels for _ in range(num_outs)]
+ else:
+ assert isinstance(inter_channels, list)
+ assert len(inter_channels) == num_outs
+ self.inter_channels = inter_channels
+ self.stack_times = stack_times
+ self.paths = paths
+ assert isinstance(paths, list) and len(paths) == stack_times
+ for d in paths:
+ assert d in ('bu', 'td')
+
+ self.same_down_trans = same_down_trans
+ self.same_up_trans = same_up_trans
+ self.across_lateral_trans = across_lateral_trans
+ self.across_down_trans = across_down_trans
+ self.across_up_trans = across_up_trans
+ self.output_trans = output_trans
+ self.across_skip_trans = across_skip_trans
+
+ self.with_bias = norm_cfg is None
+ # skip inds must be specified if across skip trans is not None
+ if self.across_skip_trans is not None:
+            assert skip_inds is not None
+ self.skip_inds = skip_inds
+ assert len(self.skip_inds[0]) <= self.stack_times
+
+ if end_level == -1:
+ self.backbone_end_level = self.num_ins
+ assert num_outs >= self.num_ins - start_level
+ else:
+ # if end_level < inputs, no extra level is allowed
+ self.backbone_end_level = end_level
+ assert end_level <= len(in_channels)
+ assert num_outs == end_level - start_level
+ self.start_level = start_level
+ self.end_level = end_level
+ self.add_extra_convs = add_extra_convs
+
+ # build lateral 1x1 convs to reduce channels
+ self.lateral_convs = nn.ModuleList()
+ for i in range(self.start_level, self.backbone_end_level):
+ l_conv = nn.Conv2d(self.in_channels[i],
+ self.inter_channels[i - self.start_level], 1)
+ self.lateral_convs.append(l_conv)
+
+ extra_levels = num_outs - self.backbone_end_level + self.start_level
+ self.extra_downsamples = nn.ModuleList()
+ for i in range(extra_levels):
+ if self.add_extra_convs:
+ fpn_idx = self.backbone_end_level - self.start_level + i
+ extra_conv = nn.Conv2d(
+ self.inter_channels[fpn_idx - 1],
+ self.inter_channels[fpn_idx],
+ 3,
+ stride=2,
+ padding=1)
+ self.extra_downsamples.append(extra_conv)
+ else:
+ self.extra_downsamples.append(nn.MaxPool2d(1, stride=2))
+
+ self.fpn_transitions = nn.ModuleList() # stack times
+ for s in range(self.stack_times):
+ stage_trans = nn.ModuleList() # num of feature levels
+ for i in range(self.num_outs):
+ # same, across_lateral, across_down, across_up
+ trans = nn.ModuleDict()
+ if s in self.skip_inds[i]:
+ stage_trans.append(trans)
+ continue
+ # build same-stage down trans (used in bottom-up paths)
+ if i == 0 or self.same_up_trans is None:
+ same_up_trans = None
+ else:
+ same_up_trans = self.build_trans(
+ self.same_up_trans, self.inter_channels[i - 1],
+ self.inter_channels[i])
+ trans['same_up'] = same_up_trans
+ # build same-stage up trans (used in top-down paths)
+ if i == self.num_outs - 1 or self.same_down_trans is None:
+ same_down_trans = None
+ else:
+ same_down_trans = self.build_trans(
+ self.same_down_trans, self.inter_channels[i + 1],
+ self.inter_channels[i])
+ trans['same_down'] = same_down_trans
+ # build across lateral trans
+ across_lateral_trans = self.build_trans(
+ self.across_lateral_trans, self.inter_channels[i],
+ self.inter_channels[i])
+ trans['across_lateral'] = across_lateral_trans
+ # build across down trans
+ if i == self.num_outs - 1 or self.across_down_trans is None:
+ across_down_trans = None
+ else:
+ across_down_trans = self.build_trans(
+ self.across_down_trans, self.inter_channels[i + 1],
+ self.inter_channels[i])
+ trans['across_down'] = across_down_trans
+ # build across up trans
+ if i == 0 or self.across_up_trans is None:
+ across_up_trans = None
+ else:
+ across_up_trans = self.build_trans(
+ self.across_up_trans, self.inter_channels[i - 1],
+ self.inter_channels[i])
+ trans['across_up'] = across_up_trans
+                # build across skip trans
+                if self.across_skip_trans is None:
+                    across_skip_trans = None
+                else:
+                    across_skip_trans = self.build_trans(
+                        self.across_skip_trans, self.inter_channels[i - 1],
+                        self.inter_channels[i])
+                trans['across_skip'] = across_skip_trans
+ stage_trans.append(trans)
+ self.fpn_transitions.append(stage_trans)
+
+ self.output_transition = nn.ModuleList() # output levels
+ for i in range(self.num_outs):
+ trans = self.build_trans(
+ self.output_trans,
+ self.inter_channels[i],
+ self.out_channels,
+ num_inputs=self.stack_times + 1)
+ self.output_transition.append(trans)
+
+ self.relu = nn.ReLU(inplace=True)
+
+ def build_trans(self, cfg, in_channels, out_channels, **extra_args):
+ cfg_ = cfg.copy()
+ trans_type = cfg_.pop('type')
+ trans_cls = self.transition_types[trans_type]
+ return trans_cls(in_channels, out_channels, **cfg_, **extra_args)
+
+ def init_weights(self):
+ for m in self.modules():
+ if isinstance(m, nn.Conv2d):
+ caffe2_xavier_init(m)
+ elif is_norm(m):
+ constant_init(m, 1.0)
+
+ def fuse(self, fuse_dict):
+ out = None
+ for item in fuse_dict.values():
+ if item is not None:
+ if out is None:
+ out = item
+ else:
+ out = out + item
+ return out
+
+ def forward(self, inputs):
+ assert len(inputs) == len(self.in_channels)
+
+ # build all levels from original feature maps
+ feats = [
+ lateral_conv(inputs[i + self.start_level])
+ for i, lateral_conv in enumerate(self.lateral_convs)
+ ]
+ for downsample in self.extra_downsamples:
+ feats.append(downsample(feats[-1]))
+
+ outs = [feats]
+
+ for i in range(self.stack_times):
+ current_outs = outs[-1]
+ next_outs = []
+ direction = self.paths[i]
+ for j in range(self.num_outs):
+ if i in self.skip_inds[j]:
+ next_outs.append(outs[-1][j])
+ continue
+ # feature level
+ if direction == 'td':
+ lvl = self.num_outs - j - 1
+ else:
+ lvl = j
+ # get transitions
+ if direction == 'td':
+ same_trans = self.fpn_transitions[i][lvl]['same_down']
+ else:
+ same_trans = self.fpn_transitions[i][lvl]['same_up']
+ across_lateral_trans = self.fpn_transitions[i][lvl][
+ 'across_lateral']
+ across_down_trans = self.fpn_transitions[i][lvl]['across_down']
+ across_up_trans = self.fpn_transitions[i][lvl]['across_up']
+ across_skip_trans = self.fpn_transitions[i][lvl]['across_skip']
+ # init output
+ to_fuse = dict(
+ same=None, lateral=None, across_up=None, across_down=None)
+ # same downsample/upsample
+ if same_trans is not None:
+ to_fuse['same'] = same_trans(next_outs[-1])
+ # across lateral
+ if across_lateral_trans is not None:
+ to_fuse['lateral'] = across_lateral_trans(
+ current_outs[lvl])
+ # across downsample
+ if lvl > 0 and across_up_trans is not None:
+ to_fuse['across_up'] = across_up_trans(current_outs[lvl -
+ 1])
+ # across upsample
+ if (lvl < self.num_outs - 1 and across_down_trans is not None):
+ to_fuse['across_down'] = across_down_trans(
+ current_outs[lvl + 1])
+ if across_skip_trans is not None:
+ to_fuse['across_skip'] = across_skip_trans(outs[0][lvl])
+ x = self.fuse(to_fuse)
+ next_outs.append(x)
+
+ if direction == 'td':
+ outs.append(next_outs[::-1])
+ else:
+ outs.append(next_outs)
+
+ # output trans
+ final_outs = []
+ for i in range(self.num_outs):
+ lvl_out_list = []
+ for s in range(len(outs)):
+ lvl_out_list.append(outs[s][i])
+ lvl_out = self.output_transition[i](lvl_out_list)
+ final_outs.append(lvl_out)
+
+ return final_outs
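+
+# Note (illustrative): a stack_times=3 grid with paths=['bu', 'td', 'bu']
+# alternates bottom-up and top-down passes; at every stage each level fuses
+# its same-pathway feature, a lateral feature, its neighbouring levels and an
+# optional skip from the original laterals, and the LastConv output transition
+# receives the stack_times + 1 per-level snapshots and refines the last one.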
diff --git a/annotator/uniformer/mmdet_null/models/necks/fpn.py b/annotator/uniformer/mmdet_null/models/necks/fpn.py
new file mode 100644
index 0000000000000000000000000000000000000000..5e5dfe685964f06e7a66b63a13e66162e63fcafd
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/necks/fpn.py
@@ -0,0 +1,221 @@
+import warnings
+
+import torch.nn as nn
+import torch.nn.functional as F
+from mmcv.cnn import ConvModule, xavier_init
+from mmcv.runner import auto_fp16
+
+from ..builder import NECKS
+
+
+@NECKS.register_module()
+class FPN(nn.Module):
+ r"""Feature Pyramid Network.
+
+    This is an implementation of paper `Feature Pyramid Networks for Object
+    Detection <https://arxiv.org/abs/1612.03144>`_.
+
+ Args:
+ in_channels (List[int]): Number of input channels per scale.
+ out_channels (int): Number of output channels (used at each scale)
+ num_outs (int): Number of output scales.
+ start_level (int): Index of the start input backbone level used to
+ build the feature pyramid. Default: 0.
+ end_level (int): Index of the end input backbone level (exclusive) to
+ build the feature pyramid. Default: -1, which means the last level.
+ add_extra_convs (bool | str): If bool, it decides whether to add conv
+ layers on top of the original feature maps. Default to False.
+ If True, its actual mode is specified by `extra_convs_on_inputs`.
+ If str, it specifies the source feature map of the extra convs.
+ Only the following options are allowed
+
+ - 'on_input': Last feat map of neck inputs (i.e. backbone feature).
+ - 'on_lateral': Last feature map after lateral convs.
+ - 'on_output': The last output feature map after fpn convs.
+ extra_convs_on_inputs (bool, deprecated): Whether to apply extra convs
+ on the original feature from the backbone. If True,
+ it is equivalent to `add_extra_convs='on_input'`. If False, it is
+ equivalent to set `add_extra_convs='on_output'`. Default to True.
+ relu_before_extra_convs (bool): Whether to apply relu before the extra
+ conv. Default: False.
+ no_norm_on_lateral (bool): Whether to apply norm on lateral.
+ Default: False.
+ conv_cfg (dict): Config dict for convolution layer. Default: None.
+ norm_cfg (dict): Config dict for normalization layer. Default: None.
+ act_cfg (str): Config dict for activation layer in ConvModule.
+ Default: None.
+ upsample_cfg (dict): Config dict for interpolate layer.
+ Default: `dict(mode='nearest')`
+
+ Example:
+ >>> import torch
+        >>> in_channels = [2, 3, 5, 7]
+        >>> scales = [340, 170, 84, 43]
+        >>> inputs = [torch.rand(1, c, s, s)
+        ...           for c, s in zip(in_channels, scales)]
+        >>> self = FPN(in_channels, 11, len(in_channels)).eval()
+        >>> outputs = self.forward(inputs)
+        >>> for i in range(len(outputs)):
+        ...     print(f'outputs[{i}].shape = {outputs[i].shape}')
+        outputs[0].shape = torch.Size([1, 11, 340, 340])
+        outputs[1].shape = torch.Size([1, 11, 170, 170])
+        outputs[2].shape = torch.Size([1, 11, 84, 84])
+        outputs[3].shape = torch.Size([1, 11, 43, 43])
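+
+        The following is an illustrative sketch (not from the upstream
+        docstring) of the ``add_extra_convs='on_input'`` mode, which builds
+        the extra levels from the last backbone feature; the channel counts
+        and power-of-two scales below are arbitrary assumptions:
+
+        >>> self = FPN([8, 16, 32], 8, num_outs=5,
+        ...            add_extra_convs='on_input',
+        ...            relu_before_extra_convs=True).eval()
+        >>> inputs = [torch.rand(1, c, s, s)
+        ...           for c, s in zip([8, 16, 32], [64, 32, 16])]
+        >>> outputs = self.forward(inputs)
+        >>> [tuple(o.shape[-2:]) for o in outputs]
+        [(64, 64), (32, 32), (16, 16), (8, 8), (4, 4)]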
+ """
+
+ def __init__(self,
+ in_channels,
+ out_channels,
+ num_outs,
+ start_level=0,
+ end_level=-1,
+ add_extra_convs=False,
+ extra_convs_on_inputs=True,
+ relu_before_extra_convs=False,
+ no_norm_on_lateral=False,
+ conv_cfg=None,
+ norm_cfg=None,
+ act_cfg=None,
+ upsample_cfg=dict(mode='nearest')):
+ super(FPN, self).__init__()
+ assert isinstance(in_channels, list)
+ self.in_channels = in_channels
+ self.out_channels = out_channels
+ self.num_ins = len(in_channels)
+ self.num_outs = num_outs
+ self.relu_before_extra_convs = relu_before_extra_convs
+ self.no_norm_on_lateral = no_norm_on_lateral
+ self.fp16_enabled = False
+ self.upsample_cfg = upsample_cfg.copy()
+
+ if end_level == -1:
+ self.backbone_end_level = self.num_ins
+ assert num_outs >= self.num_ins - start_level
+ else:
+ # if end_level < inputs, no extra level is allowed
+ self.backbone_end_level = end_level
+ assert end_level <= len(in_channels)
+ assert num_outs == end_level - start_level
+ self.start_level = start_level
+ self.end_level = end_level
+ self.add_extra_convs = add_extra_convs
+ assert isinstance(add_extra_convs, (str, bool))
+ if isinstance(add_extra_convs, str):
+ # Extra_convs_source choices: 'on_input', 'on_lateral', 'on_output'
+ assert add_extra_convs in ('on_input', 'on_lateral', 'on_output')
+ elif add_extra_convs: # True
+ if extra_convs_on_inputs:
+ # TODO: deprecate `extra_convs_on_inputs`
+ warnings.simplefilter('once')
+ warnings.warn(
+ '"extra_convs_on_inputs" will be deprecated in v2.9.0,'
+ 'Please use "add_extra_convs"', DeprecationWarning)
+ self.add_extra_convs = 'on_input'
+ else:
+ self.add_extra_convs = 'on_output'
+
+ self.lateral_convs = nn.ModuleList()
+ self.fpn_convs = nn.ModuleList()
+
+ for i in range(self.start_level, self.backbone_end_level):
+ l_conv = ConvModule(
+ in_channels[i],
+ out_channels,
+ 1,
+ conv_cfg=conv_cfg,
+ norm_cfg=norm_cfg if not self.no_norm_on_lateral else None,
+ act_cfg=act_cfg,
+ inplace=False)
+ fpn_conv = ConvModule(
+ out_channels,
+ out_channels,
+ 3,
+ padding=1,
+ conv_cfg=conv_cfg,
+ norm_cfg=norm_cfg,
+ act_cfg=act_cfg,
+ inplace=False)
+
+ self.lateral_convs.append(l_conv)
+ self.fpn_convs.append(fpn_conv)
+
+ # add extra conv layers (e.g., RetinaNet)
+ extra_levels = num_outs - self.backbone_end_level + self.start_level
+ if self.add_extra_convs and extra_levels >= 1:
+ for i in range(extra_levels):
+ if i == 0 and self.add_extra_convs == 'on_input':
+ in_channels = self.in_channels[self.backbone_end_level - 1]
+ else:
+ in_channels = out_channels
+ extra_fpn_conv = ConvModule(
+ in_channels,
+ out_channels,
+ 3,
+ stride=2,
+ padding=1,
+ conv_cfg=conv_cfg,
+ norm_cfg=norm_cfg,
+ act_cfg=act_cfg,
+ inplace=False)
+ self.fpn_convs.append(extra_fpn_conv)
+
+ # default init_weights for conv(msra) and norm in ConvModule
+ def init_weights(self):
+ """Initialize the weights of FPN module."""
+ for m in self.modules():
+ if isinstance(m, nn.Conv2d):
+ xavier_init(m, distribution='uniform')
+
+ @auto_fp16()
+ def forward(self, inputs):
+ """Forward function."""
+ assert len(inputs) == len(self.in_channels)
+
+ # build laterals
+ laterals = [
+ lateral_conv(inputs[i + self.start_level])
+ for i, lateral_conv in enumerate(self.lateral_convs)
+ ]
+
+ # build top-down path
+ used_backbone_levels = len(laterals)
+ for i in range(used_backbone_levels - 1, 0, -1):
+ # In some cases, fixing `scale factor` (e.g. 2) is preferred, but
+ # it cannot co-exist with `size` in `F.interpolate`.
+ if 'scale_factor' in self.upsample_cfg:
+ laterals[i - 1] += F.interpolate(laterals[i],
+ **self.upsample_cfg)
+ else:
+ prev_shape = laterals[i - 1].shape[2:]
+ laterals[i - 1] += F.interpolate(
+ laterals[i], size=prev_shape, **self.upsample_cfg)
+
+ # build outputs
+ # part 1: from original levels
+ outs = [
+ self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels)
+ ]
+ # part 2: add extra levels
+ if self.num_outs > len(outs):
+ # use max pool to get more levels on top of outputs
+ # (e.g., Faster R-CNN, Mask R-CNN)
+ if not self.add_extra_convs:
+ for i in range(self.num_outs - used_backbone_levels):
+ outs.append(F.max_pool2d(outs[-1], 1, stride=2))
+ # add conv layers on top of original feature maps (RetinaNet)
+ else:
+ if self.add_extra_convs == 'on_input':
+ extra_source = inputs[self.backbone_end_level - 1]
+ elif self.add_extra_convs == 'on_lateral':
+ extra_source = laterals[-1]
+ elif self.add_extra_convs == 'on_output':
+ extra_source = outs[-1]
+ else:
+ raise NotImplementedError
+ outs.append(self.fpn_convs[used_backbone_levels](extra_source))
+ for i in range(used_backbone_levels + 1, self.num_outs):
+ if self.relu_before_extra_convs:
+ outs.append(self.fpn_convs[i](F.relu(outs[-1])))
+ else:
+ outs.append(self.fpn_convs[i](outs[-1]))
+ return tuple(outs)
diff --git a/annotator/uniformer/mmdet_null/models/necks/fpn_carafe.py b/annotator/uniformer/mmdet_null/models/necks/fpn_carafe.py
new file mode 100644
index 0000000000000000000000000000000000000000..302e6576df9914e49166539108d6048b78c1fe71
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/necks/fpn_carafe.py
@@ -0,0 +1,267 @@
+import torch.nn as nn
+from mmcv.cnn import ConvModule, build_upsample_layer, xavier_init
+from mmcv.ops.carafe import CARAFEPack
+
+from ..builder import NECKS
+
+
+@NECKS.register_module()
+class FPN_CARAFE(nn.Module):
+ """FPN_CARAFE is a more flexible implementation of FPN. It allows more
+ choice for upsample methods during the top-down pathway.
+
+    It can reproduce the performance of the ICCV 2019 paper
+    *CARAFE: Content-Aware ReAssembly of FEatures*.
+    Please refer to https://arxiv.org/abs/1905.02188 for more details.
+
+ Args:
+ in_channels (list[int]): Number of channels for each input feature map.
+ out_channels (int): Output channels of feature pyramids.
+ num_outs (int): Number of output stages.
+ start_level (int): Start level of feature pyramids.
+ (Default: 0)
+ end_level (int): End level of feature pyramids.
+ (Default: -1 indicates the last level).
+ norm_cfg (dict): Dictionary to construct and config norm layer.
+ activate (str): Type of activation function in ConvModule
+ (Default: None indicates w/o activation).
+ order (dict): Order of components in ConvModule.
+ upsample (str): Type of upsample layer.
+ upsample_cfg (dict): Dictionary to construct and config upsample layer.
+ """
+
+ def __init__(self,
+ in_channels,
+ out_channels,
+ num_outs,
+ start_level=0,
+ end_level=-1,
+ norm_cfg=None,
+ act_cfg=None,
+ order=('conv', 'norm', 'act'),
+ upsample_cfg=dict(
+ type='carafe',
+ up_kernel=5,
+ up_group=1,
+ encoder_kernel=3,
+ encoder_dilation=1)):
+ super(FPN_CARAFE, self).__init__()
+ assert isinstance(in_channels, list)
+ self.in_channels = in_channels
+ self.out_channels = out_channels
+ self.num_ins = len(in_channels)
+ self.num_outs = num_outs
+ self.norm_cfg = norm_cfg
+ self.act_cfg = act_cfg
+ self.with_bias = norm_cfg is None
+ self.upsample_cfg = upsample_cfg.copy()
+ self.upsample = self.upsample_cfg.get('type')
+ self.relu = nn.ReLU(inplace=False)
+
+ self.order = order
+ assert order in [('conv', 'norm', 'act'), ('act', 'conv', 'norm')]
+
+ assert self.upsample in [
+ 'nearest', 'bilinear', 'deconv', 'pixel_shuffle', 'carafe', None
+ ]
+ if self.upsample in ['deconv', 'pixel_shuffle']:
+ assert hasattr(
+ self.upsample_cfg,
+ 'upsample_kernel') and self.upsample_cfg.upsample_kernel > 0
+ self.upsample_kernel = self.upsample_cfg.pop('upsample_kernel')
+
+ if end_level == -1:
+ self.backbone_end_level = self.num_ins
+ assert num_outs >= self.num_ins - start_level
+ else:
+ # if end_level < inputs, no extra level is allowed
+ self.backbone_end_level = end_level
+ assert end_level <= len(in_channels)
+ assert num_outs == end_level - start_level
+ self.start_level = start_level
+ self.end_level = end_level
+
+ self.lateral_convs = nn.ModuleList()
+ self.fpn_convs = nn.ModuleList()
+ self.upsample_modules = nn.ModuleList()
+
+ for i in range(self.start_level, self.backbone_end_level):
+ l_conv = ConvModule(
+ in_channels[i],
+ out_channels,
+ 1,
+ norm_cfg=norm_cfg,
+ bias=self.with_bias,
+ act_cfg=act_cfg,
+ inplace=False,
+ order=self.order)
+ fpn_conv = ConvModule(
+ out_channels,
+ out_channels,
+ 3,
+ padding=1,
+ norm_cfg=self.norm_cfg,
+ bias=self.with_bias,
+ act_cfg=act_cfg,
+ inplace=False,
+ order=self.order)
+ if i != self.backbone_end_level - 1:
+ upsample_cfg_ = self.upsample_cfg.copy()
+ if self.upsample == 'deconv':
+ upsample_cfg_.update(
+ in_channels=out_channels,
+ out_channels=out_channels,
+ kernel_size=self.upsample_kernel,
+ stride=2,
+ padding=(self.upsample_kernel - 1) // 2,
+ output_padding=(self.upsample_kernel - 1) // 2)
+ elif self.upsample == 'pixel_shuffle':
+ upsample_cfg_.update(
+ in_channels=out_channels,
+ out_channels=out_channels,
+ scale_factor=2,
+ upsample_kernel=self.upsample_kernel)
+ elif self.upsample == 'carafe':
+ upsample_cfg_.update(channels=out_channels, scale_factor=2)
+ else:
+ # suppress warnings
+ align_corners = (None
+ if self.upsample == 'nearest' else False)
+ upsample_cfg_.update(
+ scale_factor=2,
+ mode=self.upsample,
+ align_corners=align_corners)
+ upsample_module = build_upsample_layer(upsample_cfg_)
+ self.upsample_modules.append(upsample_module)
+ self.lateral_convs.append(l_conv)
+ self.fpn_convs.append(fpn_conv)
+
+ # add extra conv layers (e.g., RetinaNet)
+ extra_out_levels = (
+ num_outs - self.backbone_end_level + self.start_level)
+ if extra_out_levels >= 1:
+ for i in range(extra_out_levels):
+ in_channels = (
+ self.in_channels[self.backbone_end_level -
+ 1] if i == 0 else out_channels)
+ extra_l_conv = ConvModule(
+ in_channels,
+ out_channels,
+ 3,
+ stride=2,
+ padding=1,
+ norm_cfg=norm_cfg,
+ bias=self.with_bias,
+ act_cfg=act_cfg,
+ inplace=False,
+ order=self.order)
+ if self.upsample == 'deconv':
+ upsampler_cfg_ = dict(
+ in_channels=out_channels,
+ out_channels=out_channels,
+ kernel_size=self.upsample_kernel,
+ stride=2,
+ padding=(self.upsample_kernel - 1) // 2,
+ output_padding=(self.upsample_kernel - 1) // 2)
+ elif self.upsample == 'pixel_shuffle':
+ upsampler_cfg_ = dict(
+ in_channels=out_channels,
+ out_channels=out_channels,
+ scale_factor=2,
+ upsample_kernel=self.upsample_kernel)
+ elif self.upsample == 'carafe':
+ upsampler_cfg_ = dict(
+ channels=out_channels,
+ scale_factor=2,
+ **self.upsample_cfg)
+ else:
+ # suppress warnings
+ align_corners = (None
+ if self.upsample == 'nearest' else False)
+ upsampler_cfg_ = dict(
+ scale_factor=2,
+ mode=self.upsample,
+ align_corners=align_corners)
+ upsampler_cfg_['type'] = self.upsample
+ upsample_module = build_upsample_layer(upsampler_cfg_)
+ extra_fpn_conv = ConvModule(
+ out_channels,
+ out_channels,
+ 3,
+ padding=1,
+ norm_cfg=self.norm_cfg,
+ bias=self.with_bias,
+ act_cfg=act_cfg,
+ inplace=False,
+ order=self.order)
+ self.upsample_modules.append(upsample_module)
+ self.fpn_convs.append(extra_fpn_conv)
+ self.lateral_convs.append(extra_l_conv)
+
+ # default init_weights for conv(msra) and norm in ConvModule
+ def init_weights(self):
+ """Initialize the weights of module."""
+ for m in self.modules():
+ if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
+ xavier_init(m, distribution='uniform')
+ for m in self.modules():
+ if isinstance(m, CARAFEPack):
+ m.init_weights()
+
+ def slice_as(self, src, dst):
+ """Slice ``src`` as ``dst``
+
+ Note:
+ ``src`` should have the same or larger size than ``dst``.
+
+ Args:
+ src (torch.Tensor): Tensors to be sliced.
+ dst (torch.Tensor): ``src`` will be sliced to have the same
+ size as ``dst``.
+
+ Returns:
+ torch.Tensor: Sliced tensor.
+ """
+ assert (src.size(2) >= dst.size(2)) and (src.size(3) >= dst.size(3))
+ if src.size(2) == dst.size(2) and src.size(3) == dst.size(3):
+ return src
+ else:
+ return src[:, :, :dst.size(2), :dst.size(3)]
+
+ def tensor_add(self, a, b):
+ """Add tensors ``a`` and ``b`` that might have different sizes."""
+ if a.size() == b.size():
+ c = a + b
+ else:
+ c = a + self.slice_as(b, a)
+ return c
+
+ def forward(self, inputs):
+ """Forward function."""
+ assert len(inputs) == len(self.in_channels)
+
+ # build laterals
+ laterals = []
+ for i, lateral_conv in enumerate(self.lateral_convs):
+ if i <= self.backbone_end_level - self.start_level:
+ input = inputs[min(i + self.start_level, len(inputs) - 1)]
+ else:
+ input = laterals[-1]
+ lateral = lateral_conv(input)
+ laterals.append(lateral)
+
+ # build top-down path
+ for i in range(len(laterals) - 1, 0, -1):
+ if self.upsample is not None:
+ upsample_feat = self.upsample_modules[i - 1](laterals[i])
+ else:
+ upsample_feat = laterals[i]
+ laterals[i - 1] = self.tensor_add(laterals[i - 1], upsample_feat)
+
+ # build outputs
+ num_conv_outs = len(self.fpn_convs)
+ outs = []
+ for i in range(num_conv_outs):
+ out = self.fpn_convs[i](laterals[i])
+ outs.append(out)
+ return tuple(outs)
diff --git a/annotator/uniformer/mmdet_null/models/necks/hrfpn.py b/annotator/uniformer/mmdet_null/models/necks/hrfpn.py
new file mode 100644
index 0000000000000000000000000000000000000000..ed4f194832fc4b6ea77ce54262fb8ffa8675fc4e
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/necks/hrfpn.py
@@ -0,0 +1,102 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from mmcv.cnn import ConvModule, caffe2_xavier_init
+from torch.utils.checkpoint import checkpoint
+
+from ..builder import NECKS
+
+
+@NECKS.register_module()
+class HRFPN(nn.Module):
+ """HRFPN (High Resolution Feature Pyramids)
+
+ paper: `High-Resolution Representations for Labeling Pixels and Regions
+    <https://arxiv.org/abs/1904.04514>`_.
+
+ Args:
+ in_channels (list): number of channels for each branch.
+ out_channels (int): output channels of feature pyramids.
+ num_outs (int): number of output stages.
+ pooling_type (str): pooling for generating feature pyramids
+ from {MAX, AVG}.
+ conv_cfg (dict): dictionary to construct and config conv layer.
+ norm_cfg (dict): dictionary to construct and config norm layer.
+ with_cp (bool): Use checkpoint or not. Using checkpoint will save some
+ memory while slowing down the training speed.
+ stride (int): stride of 3x3 convolutional layers
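+
+    Example:
+        Illustrative sketch (not from the upstream docstring); the branch
+        inputs are assumed to halve in resolution at each level so that the
+        ``scale_factor=2**i`` upsampling matches the first branch:
+
+        >>> import torch
+        >>> in_channels = [18, 36, 72, 144]
+        >>> inputs = [torch.rand(1, c, 64 // 2**i, 64 // 2**i)
+        ...           for i, c in enumerate(in_channels)]
+        >>> self = HRFPN(in_channels, 11).eval()
+        >>> outputs = self.forward(inputs)
+        >>> [tuple(o.shape[-2:]) for o in outputs]
+        [(64, 64), (32, 32), (16, 16), (8, 8), (4, 4)]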
+ """
+
+ def __init__(self,
+ in_channels,
+ out_channels,
+ num_outs=5,
+ pooling_type='AVG',
+ conv_cfg=None,
+ norm_cfg=None,
+ with_cp=False,
+ stride=1):
+ super(HRFPN, self).__init__()
+ assert isinstance(in_channels, list)
+ self.in_channels = in_channels
+ self.out_channels = out_channels
+ self.num_ins = len(in_channels)
+ self.num_outs = num_outs
+ self.with_cp = with_cp
+ self.conv_cfg = conv_cfg
+ self.norm_cfg = norm_cfg
+
+ self.reduction_conv = ConvModule(
+ sum(in_channels),
+ out_channels,
+ kernel_size=1,
+ conv_cfg=self.conv_cfg,
+ act_cfg=None)
+
+ self.fpn_convs = nn.ModuleList()
+ for i in range(self.num_outs):
+ self.fpn_convs.append(
+ ConvModule(
+ out_channels,
+ out_channels,
+ kernel_size=3,
+ padding=1,
+ stride=stride,
+ conv_cfg=self.conv_cfg,
+ act_cfg=None))
+
+ if pooling_type == 'MAX':
+ self.pooling = F.max_pool2d
+ else:
+ self.pooling = F.avg_pool2d
+
+ def init_weights(self):
+ """Initialize the weights of module."""
+ for m in self.modules():
+ if isinstance(m, nn.Conv2d):
+ caffe2_xavier_init(m)
+
+ def forward(self, inputs):
+ """Forward function."""
+ assert len(inputs) == self.num_ins
+ outs = [inputs[0]]
+ for i in range(1, self.num_ins):
+ outs.append(
+ F.interpolate(inputs[i], scale_factor=2**i, mode='bilinear'))
+ out = torch.cat(outs, dim=1)
+ if out.requires_grad and self.with_cp:
+ out = checkpoint(self.reduction_conv, out)
+ else:
+ out = self.reduction_conv(out)
+ outs = [out]
+ for i in range(1, self.num_outs):
+ outs.append(self.pooling(out, kernel_size=2**i, stride=2**i))
+ outputs = []
+
+ for i in range(self.num_outs):
+ if outs[i].requires_grad and self.with_cp:
+ tmp_out = checkpoint(self.fpn_convs[i], outs[i])
+ else:
+ tmp_out = self.fpn_convs[i](outs[i])
+ outputs.append(tmp_out)
+ return tuple(outputs)
diff --git a/annotator/uniformer/mmdet_null/models/necks/nas_fpn.py b/annotator/uniformer/mmdet_null/models/necks/nas_fpn.py
new file mode 100644
index 0000000000000000000000000000000000000000..8e333ce65d4d06c47c29af489526ba3142736ad7
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/necks/nas_fpn.py
@@ -0,0 +1,160 @@
+import torch.nn as nn
+from mmcv.cnn import ConvModule, caffe2_xavier_init
+from mmcv.ops.merge_cells import GlobalPoolingCell, SumCell
+
+from ..builder import NECKS
+
+
+@NECKS.register_module()
+class NASFPN(nn.Module):
+ """NAS-FPN.
+
+ Implementation of `NAS-FPN: Learning Scalable Feature Pyramid Architecture
+    for Object Detection <https://arxiv.org/abs/1904.07392>`_
+
+ Args:
+ in_channels (List[int]): Number of input channels per scale.
+ out_channels (int): Number of output channels (used at each scale)
+ num_outs (int): Number of output scales.
+ stack_times (int): The number of times the pyramid architecture will
+ be stacked.
+ start_level (int): Index of the start input backbone level used to
+ build the feature pyramid. Default: 0.
+ end_level (int): Index of the end input backbone level (exclusive) to
+ build the feature pyramid. Default: -1, which means the last level.
+ add_extra_convs (bool): It decides whether to add conv
+            layers on top of the original feature maps. Default: False.
+ """
+
+ def __init__(self,
+ in_channels,
+ out_channels,
+ num_outs,
+ stack_times,
+ start_level=0,
+ end_level=-1,
+ add_extra_convs=False,
+ norm_cfg=None):
+ super(NASFPN, self).__init__()
+ assert isinstance(in_channels, list)
+ self.in_channels = in_channels
+ self.out_channels = out_channels
+ self.num_ins = len(in_channels) # num of input feature levels
+ self.num_outs = num_outs # num of output feature levels
+ self.stack_times = stack_times
+ self.norm_cfg = norm_cfg
+
+ if end_level == -1:
+ self.backbone_end_level = self.num_ins
+ assert num_outs >= self.num_ins - start_level
+ else:
+ # if end_level < inputs, no extra level is allowed
+ self.backbone_end_level = end_level
+ assert end_level <= len(in_channels)
+ assert num_outs == end_level - start_level
+ self.start_level = start_level
+ self.end_level = end_level
+ self.add_extra_convs = add_extra_convs
+
+ # add lateral connections
+ self.lateral_convs = nn.ModuleList()
+ for i in range(self.start_level, self.backbone_end_level):
+ l_conv = ConvModule(
+ in_channels[i],
+ out_channels,
+ 1,
+ norm_cfg=norm_cfg,
+ act_cfg=None)
+ self.lateral_convs.append(l_conv)
+
+ # add extra downsample layers (stride-2 pooling or conv)
+ extra_levels = num_outs - self.backbone_end_level + self.start_level
+ self.extra_downsamples = nn.ModuleList()
+ for i in range(extra_levels):
+ extra_conv = ConvModule(
+ out_channels, out_channels, 1, norm_cfg=norm_cfg, act_cfg=None)
+ self.extra_downsamples.append(
+ nn.Sequential(extra_conv, nn.MaxPool2d(2, 2)))
+
+ # add NAS FPN connections
+ self.fpn_stages = nn.ModuleList()
+ for _ in range(self.stack_times):
+ stage = nn.ModuleDict()
+ # gp(p6, p4) -> p4_1
+ stage['gp_64_4'] = GlobalPoolingCell(
+ in_channels=out_channels,
+ out_channels=out_channels,
+ out_norm_cfg=norm_cfg)
+ # sum(p4_1, p4) -> p4_2
+ stage['sum_44_4'] = SumCell(
+ in_channels=out_channels,
+ out_channels=out_channels,
+ out_norm_cfg=norm_cfg)
+ # sum(p4_2, p3) -> p3_out
+ stage['sum_43_3'] = SumCell(
+ in_channels=out_channels,
+ out_channels=out_channels,
+ out_norm_cfg=norm_cfg)
+ # sum(p3_out, p4_2) -> p4_out
+ stage['sum_34_4'] = SumCell(
+ in_channels=out_channels,
+ out_channels=out_channels,
+ out_norm_cfg=norm_cfg)
+ # sum(p5, gp(p4_out, p3_out)) -> p5_out
+ stage['gp_43_5'] = GlobalPoolingCell(with_out_conv=False)
+ stage['sum_55_5'] = SumCell(
+ in_channels=out_channels,
+ out_channels=out_channels,
+ out_norm_cfg=norm_cfg)
+ # sum(p7, gp(p5_out, p4_2)) -> p7_out
+ stage['gp_54_7'] = GlobalPoolingCell(with_out_conv=False)
+ stage['sum_77_7'] = SumCell(
+ in_channels=out_channels,
+ out_channels=out_channels,
+ out_norm_cfg=norm_cfg)
+ # gp(p7_out, p5_out) -> p6_out
+ stage['gp_75_6'] = GlobalPoolingCell(
+ in_channels=out_channels,
+ out_channels=out_channels,
+ out_norm_cfg=norm_cfg)
+ self.fpn_stages.append(stage)
+
+ def init_weights(self):
+ """Initialize the weights of module."""
+ for m in self.modules():
+ if isinstance(m, nn.Conv2d):
+ caffe2_xavier_init(m)
+
+ def forward(self, inputs):
+ """Forward function."""
+ # build P3-P5
+ feats = [
+ lateral_conv(inputs[i + self.start_level])
+ for i, lateral_conv in enumerate(self.lateral_convs)
+ ]
+ # build P6-P7 on top of P5
+ for downsample in self.extra_downsamples:
+ feats.append(downsample(feats[-1]))
+
+ p3, p4, p5, p6, p7 = feats
+
+ for stage in self.fpn_stages:
+ # gp(p6, p4) -> p4_1
+ p4_1 = stage['gp_64_4'](p6, p4, out_size=p4.shape[-2:])
+ # sum(p4_1, p4) -> p4_2
+ p4_2 = stage['sum_44_4'](p4_1, p4, out_size=p4.shape[-2:])
+ # sum(p4_2, p3) -> p3_out
+ p3 = stage['sum_43_3'](p4_2, p3, out_size=p3.shape[-2:])
+ # sum(p3_out, p4_2) -> p4_out
+ p4 = stage['sum_34_4'](p3, p4_2, out_size=p4.shape[-2:])
+ # sum(p5, gp(p4_out, p3_out)) -> p5_out
+ p5_tmp = stage['gp_43_5'](p4, p3, out_size=p5.shape[-2:])
+ p5 = stage['sum_55_5'](p5, p5_tmp, out_size=p5.shape[-2:])
+ # sum(p7, gp(p5_out, p4_2)) -> p7_out
+ p7_tmp = stage['gp_54_7'](p5, p4_2, out_size=p7.shape[-2:])
+ p7 = stage['sum_77_7'](p7, p7_tmp, out_size=p7.shape[-2:])
+ # gp(p7_out, p5_out) -> p6_out
+ p6 = stage['gp_75_6'](p7, p5, out_size=p6.shape[-2:])
+
+ return p3, p4, p5, p6, p7
diff --git a/annotator/uniformer/mmdet_null/models/necks/nasfcos_fpn.py b/annotator/uniformer/mmdet_null/models/necks/nasfcos_fpn.py
new file mode 100644
index 0000000000000000000000000000000000000000..2daf79ef591373499184c624ccd27fb7456dec06
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/necks/nasfcos_fpn.py
@@ -0,0 +1,161 @@
+import torch.nn as nn
+import torch.nn.functional as F
+from mmcv.cnn import ConvModule, caffe2_xavier_init
+from mmcv.ops.merge_cells import ConcatCell
+
+from ..builder import NECKS
+
+
+@NECKS.register_module()
+class NASFCOS_FPN(nn.Module):
+ """FPN structure in NASFPN.
+
+ Implementation of paper `NAS-FCOS: Fast Neural Architecture Search for
+    Object Detection <https://arxiv.org/abs/1906.04423>`_
+
+ Args:
+ in_channels (List[int]): Number of input channels per scale.
+ out_channels (int): Number of output channels (used at each scale)
+ num_outs (int): Number of output scales.
+ start_level (int): Index of the start input backbone level used to
+ build the feature pyramid. Default: 0.
+ end_level (int): Index of the end input backbone level (exclusive) to
+ build the feature pyramid. Default: -1, which means the last level.
+ add_extra_convs (bool): It decides whether to add conv
+            layers on top of the original feature maps. Default: False.
+ conv_cfg (dict): dictionary to construct and config conv layer.
+ norm_cfg (dict): dictionary to construct and config norm layer.
+ """
+
+ def __init__(self,
+ in_channels,
+ out_channels,
+ num_outs,
+ start_level=1,
+ end_level=-1,
+ add_extra_convs=False,
+ conv_cfg=None,
+ norm_cfg=None):
+ super(NASFCOS_FPN, self).__init__()
+ assert isinstance(in_channels, list)
+ self.in_channels = in_channels
+ self.out_channels = out_channels
+ self.num_ins = len(in_channels)
+ self.num_outs = num_outs
+ self.norm_cfg = norm_cfg
+ self.conv_cfg = conv_cfg
+
+ if end_level == -1:
+ self.backbone_end_level = self.num_ins
+ assert num_outs >= self.num_ins - start_level
+ else:
+ self.backbone_end_level = end_level
+ assert end_level <= len(in_channels)
+ assert num_outs == end_level - start_level
+ self.start_level = start_level
+ self.end_level = end_level
+ self.add_extra_convs = add_extra_convs
+
+ self.adapt_convs = nn.ModuleList()
+ for i in range(self.start_level, self.backbone_end_level):
+ adapt_conv = ConvModule(
+ in_channels[i],
+ out_channels,
+ 1,
+ stride=1,
+ padding=0,
+ bias=False,
+ norm_cfg=dict(type='BN'),
+ act_cfg=dict(type='ReLU', inplace=False))
+ self.adapt_convs.append(adapt_conv)
+
+ # C2 is omitted according to the paper
+ extra_levels = num_outs - self.backbone_end_level + self.start_level
+
+ def build_concat_cell(with_input1_conv, with_input2_conv):
+ cell_conv_cfg = dict(
+ kernel_size=1, padding=0, bias=False, groups=out_channels)
+ return ConcatCell(
+ in_channels=out_channels,
+ out_channels=out_channels,
+ with_out_conv=True,
+ out_conv_cfg=cell_conv_cfg,
+ out_norm_cfg=dict(type='BN'),
+ out_conv_order=('norm', 'act', 'conv'),
+ with_input1_conv=with_input1_conv,
+ with_input2_conv=with_input2_conv,
+ input_conv_cfg=conv_cfg,
+ input_norm_cfg=norm_cfg,
+ upsample_mode='nearest')
+
+        # Denote c3=f0, c4=f1, c5=f2 for convenience
+ self.fpn = nn.ModuleDict()
+ self.fpn['c22_1'] = build_concat_cell(True, True)
+ self.fpn['c22_2'] = build_concat_cell(True, True)
+ self.fpn['c32'] = build_concat_cell(True, False)
+ self.fpn['c02'] = build_concat_cell(True, False)
+ self.fpn['c42'] = build_concat_cell(True, True)
+ self.fpn['c36'] = build_concat_cell(True, True)
+ self.fpn['c61'] = build_concat_cell(True, True) # f9
+ self.extra_downsamples = nn.ModuleList()
+ for i in range(extra_levels):
+ extra_act_cfg = None if i == 0 \
+ else dict(type='ReLU', inplace=False)
+ self.extra_downsamples.append(
+ ConvModule(
+ out_channels,
+ out_channels,
+ 3,
+ stride=2,
+ padding=1,
+ act_cfg=extra_act_cfg,
+ order=('act', 'norm', 'conv')))
+
+ def forward(self, inputs):
+ """Forward function."""
+ feats = [
+ adapt_conv(inputs[i + self.start_level])
+ for i, adapt_conv in enumerate(self.adapt_convs)
+ ]
+
+ for (i, module_name) in enumerate(self.fpn):
+ idx_1, idx_2 = int(module_name[1]), int(module_name[2])
+ res = self.fpn[module_name](feats[idx_1], feats[idx_2])
+ feats.append(res)
+
+ ret = []
+ for (idx, input_idx) in zip([9, 8, 7], [1, 2, 3]): # add P3, P4, P5
+ feats1, feats2 = feats[idx], feats[5]
+ feats2_resize = F.interpolate(
+ feats2,
+ size=feats1.size()[2:],
+ mode='bilinear',
+ align_corners=False)
+
+ feats_sum = feats1 + feats2_resize
+ ret.append(
+ F.interpolate(
+ feats_sum,
+ size=inputs[input_idx].size()[2:],
+ mode='bilinear',
+ align_corners=False))
+
+ for submodule in self.extra_downsamples:
+ ret.append(submodule(ret[-1]))
+
+ return tuple(ret)
+
+ def init_weights(self):
+ """Initialize the weights of module."""
+ for module in self.fpn.values():
+            # ConcatCell stores its output conv as `out_conv`
+            if hasattr(module, 'out_conv'):
+                caffe2_xavier_init(module.out_conv.conv)
+
+ for modules in [
+ self.adapt_convs.modules(),
+ self.extra_downsamples.modules()
+ ]:
+ for module in modules:
+ if isinstance(module, nn.Conv2d):
+ caffe2_xavier_init(module)
diff --git a/annotator/uniformer/mmdet_null/models/necks/pafpn.py b/annotator/uniformer/mmdet_null/models/necks/pafpn.py
new file mode 100644
index 0000000000000000000000000000000000000000..d7c0b50f29e882aacb5158b33ead3d4566d0ce0b
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/necks/pafpn.py
@@ -0,0 +1,142 @@
+import torch.nn as nn
+import torch.nn.functional as F
+from mmcv.cnn import ConvModule
+from mmcv.runner import auto_fp16
+
+from ..builder import NECKS
+from .fpn import FPN
+
+
+@NECKS.register_module()
+class PAFPN(FPN):
+ """Path Aggregation Network for Instance Segmentation.
+
+ This is an implementation of the `PAFPN in Path Aggregation Network
+    <https://arxiv.org/abs/1803.01534>`_.
+
+ Args:
+ in_channels (List[int]): Number of input channels per scale.
+ out_channels (int): Number of output channels (used at each scale)
+ num_outs (int): Number of output scales.
+ start_level (int): Index of the start input backbone level used to
+ build the feature pyramid. Default: 0.
+ end_level (int): Index of the end input backbone level (exclusive) to
+ build the feature pyramid. Default: -1, which means the last level.
+ add_extra_convs (bool): Whether to add conv layers on top of the
+ original feature maps. Default: False.
+ extra_convs_on_inputs (bool): Whether to apply extra conv on
+            the original feature from the backbone. Default: True.
+ relu_before_extra_convs (bool): Whether to apply relu before the extra
+ conv. Default: False.
+ no_norm_on_lateral (bool): Whether to apply norm on lateral.
+ Default: False.
+ conv_cfg (dict): Config dict for convolution layer. Default: None.
+ norm_cfg (dict): Config dict for normalization layer. Default: None.
+ act_cfg (str): Config dict for activation layer in ConvModule.
+ Default: None.
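+
+    Example:
+        Illustrative sketch (not from the upstream docstring); power-of-two
+        scales are assumed so the stride-2 bottom-up convs line up with the
+        lateral feature sizes:
+
+        >>> import torch
+        >>> in_channels = [2, 3, 5, 7]
+        >>> scales = [256, 128, 64, 32]
+        >>> inputs = [torch.rand(1, c, s, s)
+        ...           for c, s in zip(in_channels, scales)]
+        >>> self = PAFPN(in_channels, 11, len(in_channels)).eval()
+        >>> outputs = self.forward(inputs)
+        >>> for i in range(len(outputs)):
+        ...     print(f'outputs[{i}].shape = {outputs[i].shape}')
+        outputs[0].shape = torch.Size([1, 11, 256, 256])
+        outputs[1].shape = torch.Size([1, 11, 128, 128])
+        outputs[2].shape = torch.Size([1, 11, 64, 64])
+        outputs[3].shape = torch.Size([1, 11, 32, 32])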
+ """
+
+ def __init__(self,
+ in_channels,
+ out_channels,
+ num_outs,
+ start_level=0,
+ end_level=-1,
+ add_extra_convs=False,
+ extra_convs_on_inputs=True,
+ relu_before_extra_convs=False,
+ no_norm_on_lateral=False,
+ conv_cfg=None,
+ norm_cfg=None,
+ act_cfg=None):
+ super(PAFPN,
+ self).__init__(in_channels, out_channels, num_outs, start_level,
+ end_level, add_extra_convs, extra_convs_on_inputs,
+ relu_before_extra_convs, no_norm_on_lateral,
+ conv_cfg, norm_cfg, act_cfg)
+ # add extra bottom up pathway
+ self.downsample_convs = nn.ModuleList()
+ self.pafpn_convs = nn.ModuleList()
+ for i in range(self.start_level + 1, self.backbone_end_level):
+ d_conv = ConvModule(
+ out_channels,
+ out_channels,
+ 3,
+ stride=2,
+ padding=1,
+ conv_cfg=conv_cfg,
+ norm_cfg=norm_cfg,
+ act_cfg=act_cfg,
+ inplace=False)
+ pafpn_conv = ConvModule(
+ out_channels,
+ out_channels,
+ 3,
+ padding=1,
+ conv_cfg=conv_cfg,
+ norm_cfg=norm_cfg,
+ act_cfg=act_cfg,
+ inplace=False)
+ self.downsample_convs.append(d_conv)
+ self.pafpn_convs.append(pafpn_conv)
+
+ @auto_fp16()
+ def forward(self, inputs):
+ """Forward function."""
+ assert len(inputs) == len(self.in_channels)
+
+ # build laterals
+ laterals = [
+ lateral_conv(inputs[i + self.start_level])
+ for i, lateral_conv in enumerate(self.lateral_convs)
+ ]
+
+ # build top-down path
+ used_backbone_levels = len(laterals)
+ for i in range(used_backbone_levels - 1, 0, -1):
+ prev_shape = laterals[i - 1].shape[2:]
+ laterals[i - 1] += F.interpolate(
+ laterals[i], size=prev_shape, mode='nearest')
+
+ # build outputs
+ # part 1: from original levels
+ inter_outs = [
+ self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels)
+ ]
+
+ # part 2: add bottom-up path
+ for i in range(0, used_backbone_levels - 1):
+ inter_outs[i + 1] += self.downsample_convs[i](inter_outs[i])
+
+ outs = []
+ outs.append(inter_outs[0])
+ outs.extend([
+ self.pafpn_convs[i - 1](inter_outs[i])
+ for i in range(1, used_backbone_levels)
+ ])
+
+ # part 3: add extra levels
+ if self.num_outs > len(outs):
+ # use max pool to get more levels on top of outputs
+ # (e.g., Faster R-CNN, Mask R-CNN)
+ if not self.add_extra_convs:
+ for i in range(self.num_outs - used_backbone_levels):
+ outs.append(F.max_pool2d(outs[-1], 1, stride=2))
+ # add conv layers on top of original feature maps (RetinaNet)
+ else:
+ if self.add_extra_convs == 'on_input':
+ orig = inputs[self.backbone_end_level - 1]
+ outs.append(self.fpn_convs[used_backbone_levels](orig))
+ elif self.add_extra_convs == 'on_lateral':
+ outs.append(self.fpn_convs[used_backbone_levels](
+ laterals[-1]))
+ elif self.add_extra_convs == 'on_output':
+ outs.append(self.fpn_convs[used_backbone_levels](outs[-1]))
+ else:
+ raise NotImplementedError
+ for i in range(used_backbone_levels + 1, self.num_outs):
+ if self.relu_before_extra_convs:
+ outs.append(self.fpn_convs[i](F.relu(outs[-1])))
+ else:
+ outs.append(self.fpn_convs[i](outs[-1]))
+ return tuple(outs)
diff --git a/annotator/uniformer/mmdet_null/models/necks/rfp.py b/annotator/uniformer/mmdet_null/models/necks/rfp.py
new file mode 100644
index 0000000000000000000000000000000000000000..8a63e63bdef0094c26c17526d5ddde75bd309cea
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/necks/rfp.py
@@ -0,0 +1,128 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from mmcv.cnn import constant_init, kaiming_init, xavier_init
+
+from ..builder import NECKS, build_backbone
+from .fpn import FPN
+
+
+class ASPP(nn.Module):
+ """ASPP (Atrous Spatial Pyramid Pooling)
+
+ This is an implementation of the ASPP module used in DetectoRS
+ (https://arxiv.org/pdf/2006.02334.pdf)
+
+ Args:
+ in_channels (int): Number of input channels.
+        out_channels (int): Number of output channels of each branch; the
+            concatenated output has ``len(dilations) * out_channels``
+            channels.
+ dilations (tuple[int]): Dilations of the four branches.
+ Default: (1, 3, 6, 1)
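+
+    Example:
+        Illustrative sketch (not from the upstream docstring); the four
+        default branches are concatenated, so the output has
+        ``4 * out_channels`` channels:
+
+        >>> import torch
+        >>> aspp = ASPP(16, 8)
+        >>> aspp(torch.rand(1, 16, 32, 32)).shape
+        torch.Size([1, 32, 32, 32])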
+ """
+
+ def __init__(self, in_channels, out_channels, dilations=(1, 3, 6, 1)):
+ super().__init__()
+ assert dilations[-1] == 1
+ self.aspp = nn.ModuleList()
+ for dilation in dilations:
+ kernel_size = 3 if dilation > 1 else 1
+ padding = dilation if dilation > 1 else 0
+ conv = nn.Conv2d(
+ in_channels,
+ out_channels,
+ kernel_size=kernel_size,
+ stride=1,
+ dilation=dilation,
+ padding=padding,
+ bias=True)
+ self.aspp.append(conv)
+ self.gap = nn.AdaptiveAvgPool2d(1)
+ self.init_weights()
+
+ def init_weights(self):
+ for m in self.modules():
+ if isinstance(m, nn.Conv2d):
+ kaiming_init(m)
+
+ def forward(self, x):
+ avg_x = self.gap(x)
+ out = []
+ for aspp_idx in range(len(self.aspp)):
+ inp = avg_x if (aspp_idx == len(self.aspp) - 1) else x
+ out.append(F.relu_(self.aspp[aspp_idx](inp)))
+ out[-1] = out[-1].expand_as(out[-2])
+ out = torch.cat(out, dim=1)
+ return out
+
+
+@NECKS.register_module()
+class RFP(FPN):
+ """RFP (Recursive Feature Pyramid)
+
+ This is an implementation of RFP in `DetectoRS
+    <https://arxiv.org/abs/2006.02334>`_. Different from standard FPN, the
+ input of RFP should be multi level features along with origin input image
+ of backbone.
+
+ Args:
+ rfp_steps (int): Number of unrolled steps of RFP.
+ rfp_backbone (dict): Configuration of the backbone for RFP.
+ aspp_out_channels (int): Number of output channels of ASPP module.
+ aspp_dilations (tuple[int]): Dilation rates of four branches.
+ Default: (1, 3, 6, 1)
+ """
+
+ def __init__(self,
+ rfp_steps,
+ rfp_backbone,
+ aspp_out_channels,
+ aspp_dilations=(1, 3, 6, 1),
+ **kwargs):
+ super().__init__(**kwargs)
+ self.rfp_steps = rfp_steps
+ self.rfp_modules = nn.ModuleList()
+ for rfp_idx in range(1, rfp_steps):
+ rfp_module = build_backbone(rfp_backbone)
+ self.rfp_modules.append(rfp_module)
+ self.rfp_aspp = ASPP(self.out_channels, aspp_out_channels,
+ aspp_dilations)
+ self.rfp_weight = nn.Conv2d(
+ self.out_channels,
+ 1,
+ kernel_size=1,
+ stride=1,
+ padding=0,
+ bias=True)
+
+ def init_weights(self):
+ # Avoid using super().init_weights(), which may alter the default
+ # initialization of the modules in self.rfp_modules that have missing
+ # keys in the pretrained checkpoint.
+ for convs in [self.lateral_convs, self.fpn_convs]:
+ for m in convs.modules():
+ if isinstance(m, nn.Conv2d):
+ xavier_init(m, distribution='uniform')
+ for rfp_idx in range(self.rfp_steps - 1):
+ self.rfp_modules[rfp_idx].init_weights(
+ self.rfp_modules[rfp_idx].pretrained)
+ constant_init(self.rfp_weight, 0)
+
+ def forward(self, inputs):
+ inputs = list(inputs)
+ assert len(inputs) == len(self.in_channels) + 1 # +1 for input image
+ img = inputs.pop(0)
+ # FPN forward
+ x = super().forward(tuple(inputs))
+ for rfp_idx in range(self.rfp_steps - 1):
+ rfp_feats = [x[0]] + list(
+ self.rfp_aspp(x[i]) for i in range(1, len(x)))
+ x_idx = self.rfp_modules[rfp_idx].rfp_forward(img, rfp_feats)
+ # FPN forward
+ x_idx = super().forward(x_idx)
+ x_new = []
+ for ft_idx in range(len(x_idx)):
+ add_weight = torch.sigmoid(self.rfp_weight(x_idx[ft_idx]))
+ x_new.append(add_weight * x_idx[ft_idx] +
+ (1 - add_weight) * x[ft_idx])
+ x = x_new
+ return x
diff --git a/annotator/uniformer/mmdet_null/models/necks/yolo_neck.py b/annotator/uniformer/mmdet_null/models/necks/yolo_neck.py
new file mode 100644
index 0000000000000000000000000000000000000000..c2f9b9ef3859796c284c16ad1a92fe41ecbed613
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/necks/yolo_neck.py
@@ -0,0 +1,136 @@
+# Copyright (c) 2019 Western Digital Corporation or its affiliates.
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from mmcv.cnn import ConvModule
+
+from ..builder import NECKS
+
+
+class DetectionBlock(nn.Module):
+ """Detection block in YOLO neck.
+
+    Let out_channels = n; the DetectionBlock contains
+    six ConvLayers, one Conv2D layer and one YoloLayer.
+    The first six ConvLayers are formed the following way:
+    1x1xn, 3x3x2n, 1x1xn, 3x3x2n, 1x1xn, 3x3x2n.
+    The Conv2D layer is 1x1x255.
+    Some blocks will have a branch after the fifth ConvLayer.
+    Only the first five ConvLayers are implemented in this module; the
+    remaining layers belong to the YOLOV3 head.
+    The number of input channels is arbitrary (in_channels).
+
+ Args:
+ in_channels (int): The number of input channels.
+ out_channels (int): The number of output channels.
+ conv_cfg (dict): Config dict for convolution layer. Default: None.
+ norm_cfg (dict): Dictionary to construct and config norm layer.
+ Default: dict(type='BN', requires_grad=True)
+ act_cfg (dict): Config dict for activation layer.
+ Default: dict(type='LeakyReLU', negative_slope=0.1).
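+
+    Example:
+        Illustrative sketch (not from the upstream docstring); channel
+        counts are arbitrary:
+
+        >>> import torch
+        >>> block = DetectionBlock(32, 16)
+        >>> block(torch.rand(1, 32, 8, 8)).shape
+        torch.Size([1, 16, 8, 8])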
+ """
+
+ def __init__(self,
+ in_channels,
+ out_channels,
+ conv_cfg=None,
+ norm_cfg=dict(type='BN', requires_grad=True),
+ act_cfg=dict(type='LeakyReLU', negative_slope=0.1)):
+ super(DetectionBlock, self).__init__()
+ double_out_channels = out_channels * 2
+
+ # shortcut
+ cfg = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)
+ self.conv1 = ConvModule(in_channels, out_channels, 1, **cfg)
+ self.conv2 = ConvModule(
+ out_channels, double_out_channels, 3, padding=1, **cfg)
+ self.conv3 = ConvModule(double_out_channels, out_channels, 1, **cfg)
+ self.conv4 = ConvModule(
+ out_channels, double_out_channels, 3, padding=1, **cfg)
+ self.conv5 = ConvModule(double_out_channels, out_channels, 1, **cfg)
+
+ def forward(self, x):
+ tmp = self.conv1(x)
+ tmp = self.conv2(tmp)
+ tmp = self.conv3(tmp)
+ tmp = self.conv4(tmp)
+ out = self.conv5(tmp)
+ return out
+
+
+@NECKS.register_module()
+class YOLOV3Neck(nn.Module):
+ """The neck of YOLOV3.
+
+    It can be treated as a simplified version of FPN. It takes the feature
+    maps from the Darknet backbone, performs upsampling and concatenation,
+    and outputs the multi-scale feature maps consumed by the YOLOV3 head.
+
+    Note:
+        The input feats should be ordered from low level to high level
+        (large resolution to small resolution), as produced by the Darknet
+        backbone; ``in_channels[0]`` corresponds to the last (deepest) feat.
+        YOLOV3Neck processes them in reversed order, i.e., starting from
+        the deepest feature map.
+
+ Args:
+ num_scales (int): The number of scales / stages.
+ in_channels (int): The number of input channels.
+ out_channels (int): The number of output channels.
+ conv_cfg (dict): Config dict for convolution layer. Default: None.
+ norm_cfg (dict): Dictionary to construct and config norm layer.
+ Default: dict(type='BN', requires_grad=True)
+ act_cfg (dict): Config dict for activation layer.
+ Default: dict(type='LeakyReLU', negative_slope=0.1).
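+
+    Example:
+        Illustrative sketch (not from the upstream docstring); the channel
+        counts and resolutions are arbitrary, but ``in_channels[0]`` must
+        match the last (deepest) input feature map:
+
+        >>> import torch
+        >>> self = YOLOV3Neck(3, [64, 32, 16], [32, 16, 8])
+        >>> feats = (torch.rand(1, 16, 32, 32),
+        ...          torch.rand(1, 32, 16, 16),
+        ...          torch.rand(1, 64, 8, 8))
+        >>> outs = self.forward(feats)
+        >>> [tuple(o.shape[1:]) for o in outs]
+        [(32, 8, 8), (16, 16, 16), (8, 32, 32)]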
+ """
+
+ def __init__(self,
+ num_scales,
+ in_channels,
+ out_channels,
+ conv_cfg=None,
+ norm_cfg=dict(type='BN', requires_grad=True),
+ act_cfg=dict(type='LeakyReLU', negative_slope=0.1)):
+ super(YOLOV3Neck, self).__init__()
+ assert (num_scales == len(in_channels) == len(out_channels))
+ self.num_scales = num_scales
+ self.in_channels = in_channels
+ self.out_channels = out_channels
+
+ # shortcut
+ cfg = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)
+
+ # To support arbitrary scales, the code looks awful, but it works.
+ # Better solution is welcomed.
+ self.detect1 = DetectionBlock(in_channels[0], out_channels[0], **cfg)
+ for i in range(1, self.num_scales):
+ in_c, out_c = self.in_channels[i], self.out_channels[i]
+ self.add_module(f'conv{i}', ConvModule(in_c, out_c, 1, **cfg))
+ # in_c + out_c : High-lvl feats will be cat with low-lvl feats
+ self.add_module(f'detect{i+1}',
+ DetectionBlock(in_c + out_c, out_c, **cfg))
+
+ def forward(self, feats):
+ assert len(feats) == self.num_scales
+
+ # processed from bottom (high-lvl) to top (low-lvl)
+ outs = []
+ out = self.detect1(feats[-1])
+ outs.append(out)
+
+ for i, x in enumerate(reversed(feats[:-1])):
+ conv = getattr(self, f'conv{i+1}')
+ tmp = conv(out)
+
+ # Cat with low-lvl feats
+ tmp = F.interpolate(tmp, scale_factor=2)
+ tmp = torch.cat((tmp, x), 1)
+
+ detect = getattr(self, f'detect{i+2}')
+ out = detect(tmp)
+ outs.append(out)
+
+ return tuple(outs)
+
+ def init_weights(self):
+ """Initialize the weights of module."""
+ # init is done in ConvModule
+ pass
diff --git a/annotator/uniformer/mmdet_null/models/roi_heads/__init__.py b/annotator/uniformer/mmdet_null/models/roi_heads/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..ca0a38ec42cd41fbd97e07589a13d1af46f47f2f
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/roi_heads/__init__.py
@@ -0,0 +1,34 @@
+from .base_roi_head import BaseRoIHead
+from .bbox_heads import (BBoxHead, ConvFCBBoxHead, DoubleConvFCBBoxHead,
+ SCNetBBoxHead, Shared2FCBBoxHead,
+ Shared4Conv1FCBBoxHead)
+from .cascade_roi_head import CascadeRoIHead
+from .double_roi_head import DoubleHeadRoIHead
+from .dynamic_roi_head import DynamicRoIHead
+from .grid_roi_head import GridRoIHead
+from .htc_roi_head import HybridTaskCascadeRoIHead
+from .mask_heads import (CoarseMaskHead, FCNMaskHead, FeatureRelayHead,
+ FusedSemanticHead, GlobalContextHead, GridHead,
+ HTCMaskHead, MaskIoUHead, MaskPointHead,
+ SCNetMaskHead, SCNetSemanticHead)
+from .mask_scoring_roi_head import MaskScoringRoIHead
+from .pisa_roi_head import PISARoIHead
+from .point_rend_roi_head import PointRendRoIHead
+from .roi_extractors import SingleRoIExtractor
+from .scnet_roi_head import SCNetRoIHead
+from .shared_heads import ResLayer
+from .sparse_roi_head import SparseRoIHead
+from .standard_roi_head import StandardRoIHead
+from .trident_roi_head import TridentRoIHead
+
+__all__ = [
+ 'BaseRoIHead', 'CascadeRoIHead', 'DoubleHeadRoIHead', 'MaskScoringRoIHead',
+ 'HybridTaskCascadeRoIHead', 'GridRoIHead', 'ResLayer', 'BBoxHead',
+ 'ConvFCBBoxHead', 'Shared2FCBBoxHead', 'StandardRoIHead',
+ 'Shared4Conv1FCBBoxHead', 'DoubleConvFCBBoxHead', 'FCNMaskHead',
+ 'HTCMaskHead', 'FusedSemanticHead', 'GridHead', 'MaskIoUHead',
+ 'SingleRoIExtractor', 'PISARoIHead', 'PointRendRoIHead', 'MaskPointHead',
+ 'CoarseMaskHead', 'DynamicRoIHead', 'SparseRoIHead', 'TridentRoIHead',
+ 'SCNetRoIHead', 'SCNetMaskHead', 'SCNetSemanticHead', 'SCNetBBoxHead',
+ 'FeatureRelayHead', 'GlobalContextHead'
+]
diff --git a/annotator/uniformer/mmdet_null/models/roi_heads/base_roi_head.py b/annotator/uniformer/mmdet_null/models/roi_heads/base_roi_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..2d61cc08007924c61b4a53d7fbc6e6fedfd68f08
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/roi_heads/base_roi_head.py
@@ -0,0 +1,103 @@
+from abc import ABCMeta, abstractmethod
+
+import torch.nn as nn
+
+from ..builder import build_shared_head
+
+
+class BaseRoIHead(nn.Module, metaclass=ABCMeta):
+ """Base class for RoIHeads."""
+
+ def __init__(self,
+ bbox_roi_extractor=None,
+ bbox_head=None,
+ mask_roi_extractor=None,
+ mask_head=None,
+ shared_head=None,
+ train_cfg=None,
+ test_cfg=None):
+ super(BaseRoIHead, self).__init__()
+ self.train_cfg = train_cfg
+ self.test_cfg = test_cfg
+ if shared_head is not None:
+ self.shared_head = build_shared_head(shared_head)
+
+ if bbox_head is not None:
+ self.init_bbox_head(bbox_roi_extractor, bbox_head)
+
+ if mask_head is not None:
+ self.init_mask_head(mask_roi_extractor, mask_head)
+
+ self.init_assigner_sampler()
+
+ @property
+ def with_bbox(self):
+ """bool: whether the RoI head contains a `bbox_head`"""
+ return hasattr(self, 'bbox_head') and self.bbox_head is not None
+
+ @property
+ def with_mask(self):
+ """bool: whether the RoI head contains a `mask_head`"""
+ return hasattr(self, 'mask_head') and self.mask_head is not None
+
+ @property
+ def with_shared_head(self):
+ """bool: whether the RoI head contains a `shared_head`"""
+ return hasattr(self, 'shared_head') and self.shared_head is not None
+
+ @abstractmethod
+ def init_weights(self, pretrained):
+ """Initialize the weights in head.
+
+ Args:
+ pretrained (str, optional): Path to pre-trained weights.
+ Defaults to None.
+ """
+ pass
+
+ @abstractmethod
+ def init_bbox_head(self):
+ """Initialize ``bbox_head``"""
+ pass
+
+ @abstractmethod
+ def init_mask_head(self):
+ """Initialize ``mask_head``"""
+ pass
+
+ @abstractmethod
+ def init_assigner_sampler(self):
+ """Initialize assigner and sampler."""
+ pass
+
+ @abstractmethod
+ def forward_train(self,
+ x,
+ img_meta,
+ proposal_list,
+ gt_bboxes,
+ gt_labels,
+ gt_bboxes_ignore=None,
+ gt_masks=None,
+ **kwargs):
+ """Forward function during training."""
+
+ async def async_simple_test(self, x, img_meta, **kwargs):
+ """Asynchronized test function."""
+ raise NotImplementedError
+
+ def simple_test(self,
+ x,
+ proposal_list,
+ img_meta,
+ proposals=None,
+ rescale=False,
+ **kwargs):
+ """Test without augmentation."""
+
+ def aug_test(self, x, proposal_list, img_metas, rescale=False, **kwargs):
+ """Test with augmentations.
+
+ If rescale is False, then returned bboxes and masks will fit the scale
+ of imgs[0].
+ """
diff --git a/annotator/uniformer/mmdet_null/models/roi_heads/bbox_heads/__init__.py b/annotator/uniformer/mmdet_null/models/roi_heads/bbox_heads/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..bc5d29ece5bbf2f168f538f151f06d1b263a5153
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/roi_heads/bbox_heads/__init__.py
@@ -0,0 +1,13 @@
+from .bbox_head import BBoxHead
+from .convfc_bbox_head import (ConvFCBBoxHead, Shared2FCBBoxHead,
+ Shared4Conv1FCBBoxHead)
+from .dii_head import DIIHead
+from .double_bbox_head import DoubleConvFCBBoxHead
+from .sabl_head import SABLHead
+from .scnet_bbox_head import SCNetBBoxHead
+
+__all__ = [
+ 'BBoxHead', 'ConvFCBBoxHead', 'Shared2FCBBoxHead',
+ 'Shared4Conv1FCBBoxHead', 'DoubleConvFCBBoxHead', 'SABLHead', 'DIIHead',
+ 'SCNetBBoxHead'
+]
diff --git a/annotator/uniformer/mmdet_null/models/roi_heads/bbox_heads/bbox_head.py b/annotator/uniformer/mmdet_null/models/roi_heads/bbox_heads/bbox_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..408abef3a244115b4e73748049a228e37ad0665c
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/roi_heads/bbox_heads/bbox_head.py
@@ -0,0 +1,483 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from mmcv.runner import auto_fp16, force_fp32
+from torch.nn.modules.utils import _pair
+
+from mmdet.core import build_bbox_coder, multi_apply, multiclass_nms
+from mmdet.models.builder import HEADS, build_loss
+from mmdet.models.losses import accuracy
+
+
+@HEADS.register_module()
+class BBoxHead(nn.Module):
+ """Simplest RoI head, with only two fc layers for classification and
+ regression respectively."""
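+    # Illustrative usage sketch (not part of the upstream code); the
+    # numbers below are arbitrary:
+    #   head = BBoxHead(in_channels=8, roi_feat_size=3, num_classes=4)
+    #   cls_score, bbox_pred = head(torch.rand(2, 8, 3, 3))
+    #   cls_score.shape -> (2, num_classes + 1) == (2, 5)
+    #   bbox_pred.shape -> (2, 4 * num_classes) == (2, 16)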
+
+ def __init__(self,
+ with_avg_pool=False,
+ with_cls=True,
+ with_reg=True,
+ roi_feat_size=7,
+ in_channels=256,
+ num_classes=80,
+ bbox_coder=dict(
+ type='DeltaXYWHBBoxCoder',
+ clip_border=True,
+ target_means=[0., 0., 0., 0.],
+ target_stds=[0.1, 0.1, 0.2, 0.2]),
+ reg_class_agnostic=False,
+ reg_decoded_bbox=False,
+ loss_cls=dict(
+ type='CrossEntropyLoss',
+ use_sigmoid=False,
+ loss_weight=1.0),
+ loss_bbox=dict(
+ type='SmoothL1Loss', beta=1.0, loss_weight=1.0)):
+ super(BBoxHead, self).__init__()
+ assert with_cls or with_reg
+ self.with_avg_pool = with_avg_pool
+ self.with_cls = with_cls
+ self.with_reg = with_reg
+ self.roi_feat_size = _pair(roi_feat_size)
+ self.roi_feat_area = self.roi_feat_size[0] * self.roi_feat_size[1]
+ self.in_channels = in_channels
+ self.num_classes = num_classes
+ self.reg_class_agnostic = reg_class_agnostic
+ self.reg_decoded_bbox = reg_decoded_bbox
+ self.fp16_enabled = False
+
+ self.bbox_coder = build_bbox_coder(bbox_coder)
+ self.loss_cls = build_loss(loss_cls)
+ self.loss_bbox = build_loss(loss_bbox)
+
+ in_channels = self.in_channels
+ if self.with_avg_pool:
+ self.avg_pool = nn.AvgPool2d(self.roi_feat_size)
+ else:
+ in_channels *= self.roi_feat_area
+ if self.with_cls:
+ # need to add background class
+ self.fc_cls = nn.Linear(in_channels, num_classes + 1)
+ if self.with_reg:
+ out_dim_reg = 4 if reg_class_agnostic else 4 * num_classes
+ self.fc_reg = nn.Linear(in_channels, out_dim_reg)
+ self.debug_imgs = None
+
+ def init_weights(self):
+ # conv layers are already initialized by ConvModule
+ if self.with_cls:
+ nn.init.normal_(self.fc_cls.weight, 0, 0.01)
+ nn.init.constant_(self.fc_cls.bias, 0)
+ if self.with_reg:
+ nn.init.normal_(self.fc_reg.weight, 0, 0.001)
+ nn.init.constant_(self.fc_reg.bias, 0)
+
+ @auto_fp16()
+ def forward(self, x):
+ if self.with_avg_pool:
+ x = self.avg_pool(x)
+ x = x.view(x.size(0), -1)
+ cls_score = self.fc_cls(x) if self.with_cls else None
+ bbox_pred = self.fc_reg(x) if self.with_reg else None
+ return cls_score, bbox_pred
+
+ def _get_target_single(self, pos_bboxes, neg_bboxes, pos_gt_bboxes,
+ pos_gt_labels, cfg):
+ """Calculate the ground truth for proposals in the single image
+ according to the sampling results.
+
+ Args:
+ pos_bboxes (Tensor): Contains all the positive boxes,
+ has shape (num_pos, 4), the last dimension 4
+ represents [tl_x, tl_y, br_x, br_y].
+ neg_bboxes (Tensor): Contains all the negative boxes,
+ has shape (num_neg, 4), the last dimension 4
+ represents [tl_x, tl_y, br_x, br_y].
+ pos_gt_bboxes (Tensor): Contains all the gt_boxes,
+ has shape (num_gt, 4), the last dimension 4
+ represents [tl_x, tl_y, br_x, br_y].
+ pos_gt_labels (Tensor): Contains all the gt_labels,
+ has shape (num_gt).
+ cfg (obj:`ConfigDict`): `train_cfg` of R-CNN.
+
+ Returns:
+ Tuple[Tensor]: Ground truth for proposals
+ in a single image. Containing the following Tensors:
+
+ - labels(Tensor): Gt_labels for all proposals, has
+ shape (num_proposals,).
+ - label_weights(Tensor): Labels_weights for all
+ proposals, has shape (num_proposals,).
+            - bbox_targets(Tensor): Regression target for all
+ proposals, has shape (num_proposals, 4), the
+ last dimension 4 represents [tl_x, tl_y, br_x, br_y].
+            - bbox_weights(Tensor): Regression weights for all
+ proposals, has shape (num_proposals, 4).
+ """
+ num_pos = pos_bboxes.size(0)
+ num_neg = neg_bboxes.size(0)
+ num_samples = num_pos + num_neg
+
+ # original implementation uses new_zeros since BG are set to be 0
+ # now use empty & fill because BG cat_id = num_classes,
+ # FG cat_id = [0, num_classes-1]
+ labels = pos_bboxes.new_full((num_samples, ),
+ self.num_classes,
+ dtype=torch.long)
+ label_weights = pos_bboxes.new_zeros(num_samples)
+ bbox_targets = pos_bboxes.new_zeros(num_samples, 4)
+ bbox_weights = pos_bboxes.new_zeros(num_samples, 4)
+ if num_pos > 0:
+ labels[:num_pos] = pos_gt_labels
+ pos_weight = 1.0 if cfg.pos_weight <= 0 else cfg.pos_weight
+ label_weights[:num_pos] = pos_weight
+ if not self.reg_decoded_bbox:
+ pos_bbox_targets = self.bbox_coder.encode(
+ pos_bboxes, pos_gt_bboxes)
+ else:
+ # When the regression loss (e.g. `IouLoss`, `GIouLoss`)
+ # is applied directly on the decoded bounding boxes, both
+ # the predicted boxes and regression targets should be with
+ # absolute coordinate format.
+ pos_bbox_targets = pos_gt_bboxes
+ bbox_targets[:num_pos, :] = pos_bbox_targets
+ bbox_weights[:num_pos, :] = 1
+ if num_neg > 0:
+ label_weights[-num_neg:] = 1.0
+
+ return labels, label_weights, bbox_targets, bbox_weights
+
+ def get_targets(self,
+ sampling_results,
+ gt_bboxes,
+ gt_labels,
+ rcnn_train_cfg,
+ concat=True):
+ """Calculate the ground truth for all samples in a batch according to
+ the sampling_results.
+
+        This applies `_get_target_single` to every image in the batch via
+        `multi_apply` and optionally concatenates the per-image results.
+
+ Args:
+ sampling_results (List[obj:SamplingResults]): Assign results of
+ all images in a batch after sampling.
+ gt_bboxes (list[Tensor]): Gt_bboxes of all images in a batch,
+ each tensor has shape (num_gt, 4), the last dimension 4
+ represents [tl_x, tl_y, br_x, br_y].
+ gt_labels (list[Tensor]): Gt_labels of all images in a batch,
+ each tensor has shape (num_gt,).
+ rcnn_train_cfg (obj:ConfigDict): `train_cfg` of RCNN.
+ concat (bool): Whether to concatenate the results of all
+ the images in a single batch.
+
+ Returns:
+ Tuple[Tensor]: Ground truth for proposals in a single image.
+ Containing the following list of Tensors:
+
+ - labels (list[Tensor],Tensor): Gt_labels for all
+ proposals in a batch, each tensor in list has
+ shape (num_proposals,) when `concat=False`, otherwise
+ just a single tensor has shape (num_all_proposals,).
+ - label_weights (list[Tensor]): Labels_weights for
+ all proposals in a batch, each tensor in list has
+ shape (num_proposals,) when `concat=False`, otherwise
+ just a single tensor has shape (num_all_proposals,).
+ - bbox_targets (list[Tensor],Tensor): Regression target
+ for all proposals in a batch, each tensor in list
+ has shape (num_proposals, 4) when `concat=False`,
+ otherwise just a single tensor has shape
+ (num_all_proposals, 4), the last dimension 4 represents
+ [tl_x, tl_y, br_x, br_y].
+ - bbox_weights (list[tensor],Tensor): Regression weights for
+ all proposals in a batch, each tensor in list has shape
+ (num_proposals, 4) when `concat=False`, otherwise just a
+ single tensor has shape (num_all_proposals, 4).
+ """
+ pos_bboxes_list = [res.pos_bboxes for res in sampling_results]
+ neg_bboxes_list = [res.neg_bboxes for res in sampling_results]
+ pos_gt_bboxes_list = [res.pos_gt_bboxes for res in sampling_results]
+ pos_gt_labels_list = [res.pos_gt_labels for res in sampling_results]
+ labels, label_weights, bbox_targets, bbox_weights = multi_apply(
+ self._get_target_single,
+ pos_bboxes_list,
+ neg_bboxes_list,
+ pos_gt_bboxes_list,
+ pos_gt_labels_list,
+ cfg=rcnn_train_cfg)
+
+ if concat:
+ labels = torch.cat(labels, 0)
+ label_weights = torch.cat(label_weights, 0)
+ bbox_targets = torch.cat(bbox_targets, 0)
+ bbox_weights = torch.cat(bbox_weights, 0)
+ return labels, label_weights, bbox_targets, bbox_weights
+
+ @force_fp32(apply_to=('cls_score', 'bbox_pred'))
+ def loss(self,
+ cls_score,
+ bbox_pred,
+ rois,
+ labels,
+ label_weights,
+ bbox_targets,
+ bbox_weights,
+ reduction_override=None):
+ losses = dict()
+ if cls_score is not None:
+ avg_factor = max(torch.sum(label_weights > 0).float().item(), 1.)
+ if cls_score.numel() > 0:
+ losses['loss_cls'] = self.loss_cls(
+ cls_score,
+ labels,
+ label_weights,
+ avg_factor=avg_factor,
+ reduction_override=reduction_override)
+ losses['acc'] = accuracy(cls_score, labels)
+ if bbox_pred is not None:
+ bg_class_ind = self.num_classes
+ # 0~self.num_classes-1 are FG, self.num_classes is BG
+ pos_inds = (labels >= 0) & (labels < bg_class_ind)
+ # do not perform bounding box regression for BG anymore.
+ if pos_inds.any():
+ if self.reg_decoded_bbox:
+ # When the regression loss (e.g. `IouLoss`,
+ # `GIouLoss`, `DIouLoss`) is applied directly on
+ # the decoded bounding boxes, it decodes the
+ # already encoded coordinates to absolute format.
+ bbox_pred = self.bbox_coder.decode(rois[:, 1:], bbox_pred)
+ if self.reg_class_agnostic:
+ pos_bbox_pred = bbox_pred.view(
+ bbox_pred.size(0), 4)[pos_inds.type(torch.bool)]
+ else:
+ pos_bbox_pred = bbox_pred.view(
+ bbox_pred.size(0), -1,
+ 4)[pos_inds.type(torch.bool),
+ labels[pos_inds.type(torch.bool)]]
+ losses['loss_bbox'] = self.loss_bbox(
+ pos_bbox_pred,
+ bbox_targets[pos_inds.type(torch.bool)],
+ bbox_weights[pos_inds.type(torch.bool)],
+ avg_factor=bbox_targets.size(0),
+ reduction_override=reduction_override)
+ else:
+ losses['loss_bbox'] = bbox_pred[pos_inds].sum()
+ return losses
+
+ @force_fp32(apply_to=('cls_score', 'bbox_pred'))
+ def get_bboxes(self,
+ rois,
+ cls_score,
+ bbox_pred,
+ img_shape,
+ scale_factor,
+ rescale=False,
+ cfg=None):
+ """Transform network output for a batch into bbox predictions.
+
+ If the input rois has batch dimension, the function would be in
+ `batch_mode` and return is a tuple[list[Tensor], list[Tensor]],
+ otherwise, the return is a tuple[Tensor, Tensor].
+
+ Args:
+ rois (Tensor): Boxes to be transformed. Has shape (num_boxes, 5)
+ or (B, num_boxes, 5)
+ cls_score (list[Tensor] or Tensor): Box scores for
+ each scale level, each is a 4D-tensor, the channel number is
+ num_points * num_classes.
+ bbox_pred (Tensor, optional): Box energies / deltas for each scale
+ level, each is a 4D-tensor, the channel number is
+ num_classes * 4.
+ img_shape (Sequence[int] or torch.Tensor or Sequence[
+ Sequence[int]], optional): Maximum bounds for boxes, specifies
+ (H, W, C) or (H, W). If rois shape is (B, num_boxes, 4), then
+ the max_shape should be a Sequence[Sequence[int]]
+ and the length of max_shape should also be B.
+ scale_factor (tuple[ndarray] or ndarray): Scale factor of the
+                image, arranged as (w_scale, h_scale, w_scale, h_scale). In
+ `batch_mode`, the scale_factor shape is tuple[ndarray].
+ rescale (bool): If True, return boxes in original image space.
+ Default: False.
+ cfg (obj:`ConfigDict`): `test_cfg` of Bbox Head. Default: None
+
+ Returns:
+ tuple[list[Tensor], list[Tensor]] or tuple[Tensor, Tensor]:
+ If the input has a batch dimension, the return value is
+ a tuple of the list. The first list contains the boxes of
+ the corresponding image in a batch, each tensor has the
+ shape (num_boxes, 5) and last dimension 5 represent
+ (tl_x, tl_y, br_x, br_y, score). Each Tensor in the second
+ list is the labels with shape (num_boxes, ). The length of
+ both lists should be equal to batch_size. Otherwise return
+ value is a tuple of two tensors, the first tensor is the
+ boxes with scores, the second tensor is the labels, both
+ have the same shape as the first case.
+ """
+ if isinstance(cls_score, list):
+ cls_score = sum(cls_score) / float(len(cls_score))
+
+ scores = F.softmax(
+ cls_score, dim=-1) if cls_score is not None else None
+
+ batch_mode = True
+ if rois.ndim == 2:
+ # e.g. AugTest, Cascade R-CNN, HTC, SCNet...
+ batch_mode = False
+
+ # add batch dimension
+ if scores is not None:
+ scores = scores.unsqueeze(0)
+ if bbox_pred is not None:
+ bbox_pred = bbox_pred.unsqueeze(0)
+ rois = rois.unsqueeze(0)
+
+ if bbox_pred is not None:
+ bboxes = self.bbox_coder.decode(
+ rois[..., 1:], bbox_pred, max_shape=img_shape)
+ else:
+ bboxes = rois[..., 1:].clone()
+ if img_shape is not None:
+ max_shape = bboxes.new_tensor(img_shape)[..., :2]
+ min_xy = bboxes.new_tensor(0)
+ max_xy = torch.cat(
+ [max_shape] * 2, dim=-1).flip(-1).unsqueeze(-2)
+ bboxes = torch.where(bboxes < min_xy, min_xy, bboxes)
+ bboxes = torch.where(bboxes > max_xy, max_xy, bboxes)
+
+ if rescale and bboxes.size(-2) > 0:
+ if not isinstance(scale_factor, tuple):
+ scale_factor = tuple([scale_factor])
+ # B, 1, bboxes.size(-1)
+ scale_factor = bboxes.new_tensor(scale_factor).unsqueeze(1).repeat(
+ 1, 1,
+ bboxes.size(-1) // 4)
+ bboxes /= scale_factor
+
+ det_bboxes = []
+ det_labels = []
+ for (bbox, score) in zip(bboxes, scores):
+ if cfg is not None:
+ det_bbox, det_label = multiclass_nms(bbox, score,
+ cfg.score_thr, cfg.nms,
+ cfg.max_per_img)
+ else:
+ det_bbox, det_label = bbox, score
+ det_bboxes.append(det_bbox)
+ det_labels.append(det_label)
+
+ if not batch_mode:
+ det_bboxes = det_bboxes[0]
+ det_labels = det_labels[0]
+ return det_bboxes, det_labels
+
+ @force_fp32(apply_to=('bbox_preds', ))
+ def refine_bboxes(self, rois, labels, bbox_preds, pos_is_gts, img_metas):
+ """Refine bboxes during training.
+
+ Args:
+            rois (Tensor): Shape (n*bs, 5), where n is the number of images
+                per GPU and bs is the number of sampled RoIs per image. The
+                first column is
+ the image id and the next 4 columns are x1, y1, x2, y2.
+ labels (Tensor): Shape (n*bs, ).
+ bbox_preds (Tensor): Shape (n*bs, 4) or (n*bs, 4*#class).
+ pos_is_gts (list[Tensor]): Flags indicating if each positive bbox
+ is a gt bbox.
+ img_metas (list[dict]): Meta info of each image.
+
+ Returns:
+ list[Tensor]: Refined bboxes of each image in a mini-batch.
+
+ Example:
+ >>> # xdoctest: +REQUIRES(module:kwarray)
+ >>> import kwarray
+ >>> import numpy as np
+ >>> from mmdet.core.bbox.demodata import random_boxes
+ >>> self = BBoxHead(reg_class_agnostic=True)
+ >>> n_roi = 2
+ >>> n_img = 4
+ >>> scale = 512
+ >>> rng = np.random.RandomState(0)
+ >>> img_metas = [{'img_shape': (scale, scale)}
+ ... for _ in range(n_img)]
+ >>> # Create rois in the expected format
+ >>> roi_boxes = random_boxes(n_roi, scale=scale, rng=rng)
+ >>> img_ids = torch.randint(0, n_img, (n_roi,))
+ >>> img_ids = img_ids.float()
+ >>> rois = torch.cat([img_ids[:, None], roi_boxes], dim=1)
+ >>> # Create other args
+ >>> labels = torch.randint(0, 2, (n_roi,)).long()
+ >>> bbox_preds = random_boxes(n_roi, scale=scale, rng=rng)
+ >>> # For each image, pretend random positive boxes are gts
+        >>> is_label_pos = (labels.numpy() > 0).astype(int)
+ >>> lbl_per_img = kwarray.group_items(is_label_pos,
+ ... img_ids.numpy())
+ >>> pos_per_img = [sum(lbl_per_img.get(gid, []))
+ ... for gid in range(n_img)]
+ >>> pos_is_gts = [
+ >>> torch.randint(0, 2, (npos,)).byte().sort(
+ >>> descending=True)[0]
+ >>> for npos in pos_per_img
+ >>> ]
+ >>> bboxes_list = self.refine_bboxes(rois, labels, bbox_preds,
+ >>> pos_is_gts, img_metas)
+ >>> print(bboxes_list)
+ """
+ img_ids = rois[:, 0].long().unique(sorted=True)
+ assert img_ids.numel() <= len(img_metas)
+
+ bboxes_list = []
+ for i in range(len(img_metas)):
+ inds = torch.nonzero(
+ rois[:, 0] == i, as_tuple=False).squeeze(dim=1)
+ num_rois = inds.numel()
+
+ bboxes_ = rois[inds, 1:]
+ label_ = labels[inds]
+ bbox_pred_ = bbox_preds[inds]
+ img_meta_ = img_metas[i]
+ pos_is_gts_ = pos_is_gts[i]
+
+ bboxes = self.regress_by_class(bboxes_, label_, bbox_pred_,
+ img_meta_)
+
+ # filter gt bboxes
+ pos_keep = 1 - pos_is_gts_
+ keep_inds = pos_is_gts_.new_ones(num_rois)
+ keep_inds[:len(pos_is_gts_)] = pos_keep
+
+ bboxes_list.append(bboxes[keep_inds.type(torch.bool)])
+
+ return bboxes_list
+
+ @force_fp32(apply_to=('bbox_pred', ))
+ def regress_by_class(self, rois, label, bbox_pred, img_meta):
+ """Regress the bbox for the predicted class. Used in Cascade R-CNN.
+
+ Args:
+ rois (Tensor): shape (n, 4) or (n, 5)
+ label (Tensor): shape (n, )
+ bbox_pred (Tensor): shape (n, 4*(#class)) or (n, 4)
+ img_meta (dict): Image meta info.
+
+ Returns:
+ Tensor: Regressed bboxes, the same shape as input rois.
+ """
+ assert rois.size(1) == 4 or rois.size(1) == 5, repr(rois.shape)
+
+ if not self.reg_class_agnostic:
+ label = label * 4
+ inds = torch.stack((label, label + 1, label + 2, label + 3), 1)
+ bbox_pred = torch.gather(bbox_pred, 1, inds)
+ assert bbox_pred.size(1) == 4
+
+ if rois.size(1) == 4:
+ new_rois = self.bbox_coder.decode(
+ rois, bbox_pred, max_shape=img_meta['img_shape'])
+ else:
+ bboxes = self.bbox_coder.decode(
+ rois[:, 1:], bbox_pred, max_shape=img_meta['img_shape'])
+ new_rois = torch.cat((rois[:, [0]], bboxes), dim=1)
+
+ return new_rois
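+
+
+# --- Hedged illustrative sketch (not part of the original mmdet module): how
+# the class-specific delta gather in `regress_by_class` works when
+# `reg_class_agnostic=False`. Class count and deltas are made-up values.
+if __name__ == '__main__':
+    import torch
+    n, num_classes = 2, 3
+    label = torch.tensor([0, 2])
+    bbox_pred = torch.arange(n * num_classes * 4, dtype=torch.float32).view(n, -1)
+    label4 = label * 4
+    inds = torch.stack((label4, label4 + 1, label4 + 2, label4 + 3), 1)
+    # each row keeps only the 4 deltas of that roi's predicted class
+    print(torch.gather(bbox_pred, 1, inds))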
diff --git a/annotator/uniformer/mmdet_null/models/roi_heads/bbox_heads/convfc_bbox_head.py b/annotator/uniformer/mmdet_null/models/roi_heads/bbox_heads/convfc_bbox_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..0e86d2ea67e154fae18dbf9d2bfde6d0a70e582c
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/roi_heads/bbox_heads/convfc_bbox_head.py
@@ -0,0 +1,205 @@
+import torch.nn as nn
+from mmcv.cnn import ConvModule
+
+from mmdet.models.builder import HEADS
+from .bbox_head import BBoxHead
+
+
+@HEADS.register_module()
+class ConvFCBBoxHead(BBoxHead):
+ r"""More general bbox head, with shared conv and fc layers and two optional
+ separated branches.
+
+ .. code-block:: none
+
+ /-> cls convs -> cls fcs -> cls
+ shared convs -> shared fcs
+ \-> reg convs -> reg fcs -> reg
+ """ # noqa: W605
+
+ def __init__(self,
+ num_shared_convs=0,
+ num_shared_fcs=0,
+ num_cls_convs=0,
+ num_cls_fcs=0,
+ num_reg_convs=0,
+ num_reg_fcs=0,
+ conv_out_channels=256,
+ fc_out_channels=1024,
+ conv_cfg=None,
+ norm_cfg=None,
+ *args,
+ **kwargs):
+ super(ConvFCBBoxHead, self).__init__(*args, **kwargs)
+ assert (num_shared_convs + num_shared_fcs + num_cls_convs +
+ num_cls_fcs + num_reg_convs + num_reg_fcs > 0)
+ if num_cls_convs > 0 or num_reg_convs > 0:
+ assert num_shared_fcs == 0
+ if not self.with_cls:
+ assert num_cls_convs == 0 and num_cls_fcs == 0
+ if not self.with_reg:
+ assert num_reg_convs == 0 and num_reg_fcs == 0
+ self.num_shared_convs = num_shared_convs
+ self.num_shared_fcs = num_shared_fcs
+ self.num_cls_convs = num_cls_convs
+ self.num_cls_fcs = num_cls_fcs
+ self.num_reg_convs = num_reg_convs
+ self.num_reg_fcs = num_reg_fcs
+ self.conv_out_channels = conv_out_channels
+ self.fc_out_channels = fc_out_channels
+ self.conv_cfg = conv_cfg
+ self.norm_cfg = norm_cfg
+
+ # add shared convs and fcs
+ self.shared_convs, self.shared_fcs, last_layer_dim = \
+ self._add_conv_fc_branch(
+ self.num_shared_convs, self.num_shared_fcs, self.in_channels,
+ True)
+ self.shared_out_channels = last_layer_dim
+
+ # add cls specific branch
+ self.cls_convs, self.cls_fcs, self.cls_last_dim = \
+ self._add_conv_fc_branch(
+ self.num_cls_convs, self.num_cls_fcs, self.shared_out_channels)
+
+ # add reg specific branch
+ self.reg_convs, self.reg_fcs, self.reg_last_dim = \
+ self._add_conv_fc_branch(
+ self.num_reg_convs, self.num_reg_fcs, self.shared_out_channels)
+
+ if self.num_shared_fcs == 0 and not self.with_avg_pool:
+ if self.num_cls_fcs == 0:
+ self.cls_last_dim *= self.roi_feat_area
+ if self.num_reg_fcs == 0:
+ self.reg_last_dim *= self.roi_feat_area
+
+ self.relu = nn.ReLU(inplace=True)
+ # reconstruct fc_cls and fc_reg since input channels are changed
+ if self.with_cls:
+ self.fc_cls = nn.Linear(self.cls_last_dim, self.num_classes + 1)
+ if self.with_reg:
+ out_dim_reg = (4 if self.reg_class_agnostic else 4 *
+ self.num_classes)
+ self.fc_reg = nn.Linear(self.reg_last_dim, out_dim_reg)
+
+ def _add_conv_fc_branch(self,
+ num_branch_convs,
+ num_branch_fcs,
+ in_channels,
+ is_shared=False):
+ """Add shared or separable branch.
+
+ convs -> avg pool (optional) -> fcs
+ """
+ last_layer_dim = in_channels
+ # add branch specific conv layers
+ branch_convs = nn.ModuleList()
+ if num_branch_convs > 0:
+ for i in range(num_branch_convs):
+ conv_in_channels = (
+ last_layer_dim if i == 0 else self.conv_out_channels)
+ branch_convs.append(
+ ConvModule(
+ conv_in_channels,
+ self.conv_out_channels,
+ 3,
+ padding=1,
+ conv_cfg=self.conv_cfg,
+ norm_cfg=self.norm_cfg))
+ last_layer_dim = self.conv_out_channels
+ # add branch specific fc layers
+ branch_fcs = nn.ModuleList()
+ if num_branch_fcs > 0:
+ # for shared branch, only consider self.with_avg_pool
+ # for separated branches, also consider self.num_shared_fcs
+ if (is_shared
+ or self.num_shared_fcs == 0) and not self.with_avg_pool:
+ last_layer_dim *= self.roi_feat_area
+ for i in range(num_branch_fcs):
+ fc_in_channels = (
+ last_layer_dim if i == 0 else self.fc_out_channels)
+ branch_fcs.append(
+ nn.Linear(fc_in_channels, self.fc_out_channels))
+ last_layer_dim = self.fc_out_channels
+ return branch_convs, branch_fcs, last_layer_dim
+
+ def init_weights(self):
+ super(ConvFCBBoxHead, self).init_weights()
+ # conv layers are already initialized by ConvModule
+ for module_list in [self.shared_fcs, self.cls_fcs, self.reg_fcs]:
+ for m in module_list.modules():
+ if isinstance(m, nn.Linear):
+ nn.init.xavier_uniform_(m.weight)
+ nn.init.constant_(m.bias, 0)
+
+ def forward(self, x):
+ # shared part
+ if self.num_shared_convs > 0:
+ for conv in self.shared_convs:
+ x = conv(x)
+
+ if self.num_shared_fcs > 0:
+ if self.with_avg_pool:
+ x = self.avg_pool(x)
+
+ x = x.flatten(1)
+
+ for fc in self.shared_fcs:
+ x = self.relu(fc(x))
+ # separate branches
+ x_cls = x
+ x_reg = x
+
+ for conv in self.cls_convs:
+ x_cls = conv(x_cls)
+ if x_cls.dim() > 2:
+ if self.with_avg_pool:
+ x_cls = self.avg_pool(x_cls)
+ x_cls = x_cls.flatten(1)
+ for fc in self.cls_fcs:
+ x_cls = self.relu(fc(x_cls))
+
+ for conv in self.reg_convs:
+ x_reg = conv(x_reg)
+ if x_reg.dim() > 2:
+ if self.with_avg_pool:
+ x_reg = self.avg_pool(x_reg)
+ x_reg = x_reg.flatten(1)
+ for fc in self.reg_fcs:
+ x_reg = self.relu(fc(x_reg))
+
+ cls_score = self.fc_cls(x_cls) if self.with_cls else None
+ bbox_pred = self.fc_reg(x_reg) if self.with_reg else None
+ return cls_score, bbox_pred
+
+
+@HEADS.register_module()
+class Shared2FCBBoxHead(ConvFCBBoxHead):
+
+ def __init__(self, fc_out_channels=1024, *args, **kwargs):
+ super(Shared2FCBBoxHead, self).__init__(
+ num_shared_convs=0,
+ num_shared_fcs=2,
+ num_cls_convs=0,
+ num_cls_fcs=0,
+ num_reg_convs=0,
+ num_reg_fcs=0,
+ fc_out_channels=fc_out_channels,
+ *args,
+ **kwargs)
+
+
+@HEADS.register_module()
+class Shared4Conv1FCBBoxHead(ConvFCBBoxHead):
+
+ def __init__(self, fc_out_channels=1024, *args, **kwargs):
+ super(Shared4Conv1FCBBoxHead, self).__init__(
+ num_shared_convs=4,
+ num_shared_fcs=1,
+ num_cls_convs=0,
+ num_cls_fcs=0,
+ num_reg_convs=0,
+ num_reg_fcs=0,
+ fc_out_channels=fc_out_channels,
+ *args,
+ **kwargs)
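+
+
+# --- Hedged illustrative sketch (not part of the original mmdet module): the
+# dimension bookkeeping of the shared branch in Shared2FCBBoxHead
+# (num_shared_fcs=2). in_channels / roi_feat_size / fc_out are assumed example
+# values, not taken from any real config.
+if __name__ == '__main__':
+    import torch
+    in_channels, roi_feat_size, fc_out = 256, 7, 1024
+    roi_feat_area = roi_feat_size * roi_feat_size
+    shared_fcs = nn.Sequential(
+        nn.Flatten(1),                                   # (N, 256*7*7)
+        nn.Linear(in_channels * roi_feat_area, fc_out),  # first shared fc
+        nn.ReLU(inplace=True),
+        nn.Linear(fc_out, fc_out),                       # second shared fc
+        nn.ReLU(inplace=True))
+    x = torch.randn(4, in_channels, roi_feat_size, roi_feat_size)
+    print(shared_fcs(x).shape)  # torch.Size([4, 1024])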
diff --git a/annotator/uniformer/mmdet_null/models/roi_heads/bbox_heads/dii_head.py b/annotator/uniformer/mmdet_null/models/roi_heads/bbox_heads/dii_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..8c970a78184672aaaa95edcdaecec03a26604390
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/roi_heads/bbox_heads/dii_head.py
@@ -0,0 +1,415 @@
+import torch
+import torch.nn as nn
+from mmcv.cnn import (bias_init_with_prob, build_activation_layer,
+ build_norm_layer)
+from mmcv.runner import auto_fp16, force_fp32
+
+from mmdet.core import multi_apply
+from mmdet.models.builder import HEADS, build_loss
+from mmdet.models.dense_heads.atss_head import reduce_mean
+from mmdet.models.losses import accuracy
+from mmdet.models.utils import FFN, MultiheadAttention, build_transformer
+from .bbox_head import BBoxHead
+
+
+@HEADS.register_module()
+class DIIHead(BBoxHead):
+ r"""Dynamic Instance Interactive Head for `Sparse R-CNN: End-to-End Object
+    Detection with Learnable Proposals <https://arxiv.org/abs/2011.12450>`_
+
+ Args:
+        num_classes (int): Number of classes in the dataset.
+ Defaults to 80.
+ num_ffn_fcs (int): The number of fully-connected
+ layers in FFNs. Defaults to 2.
+        num_heads (int): Number of attention heads in MultiheadAttention.
+ Defaults to 8.
+ num_cls_fcs (int): The number of fully-connected
+ layers in classification subnet. Defaults to 1.
+ num_reg_fcs (int): The number of fully-connected
+ layers in regression subnet. Defaults to 3.
+ feedforward_channels (int): The hidden dimension
+            of FFNs. Defaults to 2048.
+        in_channels (int): Hidden channels of MultiheadAttention.
+            Defaults to 256.
+        dropout (float): Probability of dropping the channel.
+            Defaults to 0.0.
+ ffn_act_cfg (dict): The activation config for FFNs.
+ dynamic_conv_cfg (dict): The convolution config
+ for DynamicConv.
+ loss_iou (dict): The config for iou or giou loss.
+
+ """
+
+ def __init__(self,
+ num_classes=80,
+ num_ffn_fcs=2,
+ num_heads=8,
+ num_cls_fcs=1,
+ num_reg_fcs=3,
+ feedforward_channels=2048,
+ in_channels=256,
+ dropout=0.0,
+ ffn_act_cfg=dict(type='ReLU', inplace=True),
+ dynamic_conv_cfg=dict(
+ type='DynamicConv',
+ in_channels=256,
+ feat_channels=64,
+ out_channels=256,
+ input_feat_shape=7,
+ act_cfg=dict(type='ReLU', inplace=True),
+ norm_cfg=dict(type='LN')),
+ loss_iou=dict(type='GIoULoss', loss_weight=2.0),
+ **kwargs):
+ super(DIIHead, self).__init__(
+ num_classes=num_classes,
+ reg_decoded_bbox=True,
+ reg_class_agnostic=True,
+ **kwargs)
+ self.loss_iou = build_loss(loss_iou)
+ self.in_channels = in_channels
+ self.fp16_enabled = False
+ self.attention = MultiheadAttention(in_channels, num_heads, dropout)
+ self.attention_norm = build_norm_layer(dict(type='LN'), in_channels)[1]
+
+ self.instance_interactive_conv = build_transformer(dynamic_conv_cfg)
+ self.instance_interactive_conv_dropout = nn.Dropout(dropout)
+ self.instance_interactive_conv_norm = build_norm_layer(
+ dict(type='LN'), in_channels)[1]
+
+ self.ffn = FFN(
+ in_channels,
+ feedforward_channels,
+ num_ffn_fcs,
+ act_cfg=ffn_act_cfg,
+ dropout=dropout)
+ self.ffn_norm = build_norm_layer(dict(type='LN'), in_channels)[1]
+
+ self.cls_fcs = nn.ModuleList()
+ for _ in range(num_cls_fcs):
+ self.cls_fcs.append(
+ nn.Linear(in_channels, in_channels, bias=False))
+ self.cls_fcs.append(
+ build_norm_layer(dict(type='LN'), in_channels)[1])
+ self.cls_fcs.append(
+ build_activation_layer(dict(type='ReLU', inplace=True)))
+
+        # overload the self.fc_cls in BBoxHead
+ if self.loss_cls.use_sigmoid:
+ self.fc_cls = nn.Linear(in_channels, self.num_classes)
+ else:
+ self.fc_cls = nn.Linear(in_channels, self.num_classes + 1)
+
+ self.reg_fcs = nn.ModuleList()
+ for _ in range(num_reg_fcs):
+ self.reg_fcs.append(
+ nn.Linear(in_channels, in_channels, bias=False))
+ self.reg_fcs.append(
+ build_norm_layer(dict(type='LN'), in_channels)[1])
+ self.reg_fcs.append(
+ build_activation_layer(dict(type='ReLU', inplace=True)))
+        # overload the self.fc_reg in BBoxHead
+ self.fc_reg = nn.Linear(in_channels, 4)
+
+        assert self.reg_class_agnostic, 'DIIHead only ' \
+            'supports `reg_class_agnostic=True` '
+        assert self.reg_decoded_bbox, 'DIIHead only ' \
+            'supports `reg_decoded_bbox=True`'
+
+ def init_weights(self):
+ """Use xavier initialization for all weight parameter and set
+ classification head bias as a specific value when use focal loss."""
+ for p in self.parameters():
+ if p.dim() > 1:
+ nn.init.xavier_uniform_(p)
+ else:
+ # adopt the default initialization for
+ # the weight and bias of the layer norm
+ pass
+ if self.loss_cls.use_sigmoid:
+ bias_init = bias_init_with_prob(0.01)
+ nn.init.constant_(self.fc_cls.bias, bias_init)
+
+ @auto_fp16()
+ def forward(self, roi_feat, proposal_feat):
+ """Forward function of Dynamic Instance Interactive Head.
+
+ Args:
+ roi_feat (Tensor): Roi-pooling features with shape
+ (batch_size*num_proposals, feature_dimensions,
+ pooling_h , pooling_w).
+            proposal_feat (Tensor): Intermediate feature obtained from
+                the DIIHead of the last stage, has shape
+ (batch_size, num_proposals, feature_dimensions)
+
+ Returns:
+ tuple[Tensor]: Usually a tuple of classification scores
+            and bbox prediction and an intermediate feature.
+
+ - cls_scores (Tensor): Classification scores for
+ all proposals, has shape
+ (batch_size, num_proposals, num_classes).
+ - bbox_preds (Tensor): Box energies / deltas for
+ all proposals, has shape
+ (batch_size, num_proposals, 4).
+ - obj_feat (Tensor): Object feature before classification
+ and regression subnet, has shape
+ (batch_size, num_proposal, feature_dimensions).
+ """
+ N, num_proposals = proposal_feat.shape[:2]
+
+ # Self attention
+ proposal_feat = proposal_feat.permute(1, 0, 2)
+ proposal_feat = self.attention_norm(self.attention(proposal_feat))
+
+ # instance interactive
+ proposal_feat = proposal_feat.permute(1, 0,
+ 2).reshape(-1, self.in_channels)
+ proposal_feat_iic = self.instance_interactive_conv(
+ proposal_feat, roi_feat)
+ proposal_feat = proposal_feat + self.instance_interactive_conv_dropout(
+ proposal_feat_iic)
+ obj_feat = self.instance_interactive_conv_norm(proposal_feat)
+
+ # FFN
+ obj_feat = self.ffn_norm(self.ffn(obj_feat))
+
+ cls_feat = obj_feat
+ reg_feat = obj_feat
+
+ for cls_layer in self.cls_fcs:
+ cls_feat = cls_layer(cls_feat)
+ for reg_layer in self.reg_fcs:
+ reg_feat = reg_layer(reg_feat)
+
+ cls_score = self.fc_cls(cls_feat).view(N, num_proposals, -1)
+ bbox_delta = self.fc_reg(reg_feat).view(N, num_proposals, -1)
+
+ return cls_score, bbox_delta, obj_feat.view(N, num_proposals, -1)
+
+ @force_fp32(apply_to=('cls_score', 'bbox_pred'))
+ def loss(self,
+ cls_score,
+ bbox_pred,
+ labels,
+ label_weights,
+ bbox_targets,
+ bbox_weights,
+ imgs_whwh=None,
+ reduction_override=None,
+ **kwargs):
+ """"Loss function of DIIHead, get loss of all images.
+
+ Args:
+ cls_score (Tensor): Classification prediction
+ results of all class, has shape
+ (batch_size * num_proposals_single_image, num_classes)
+ bbox_pred (Tensor): Regression prediction results,
+ has shape
+ (batch_size * num_proposals_single_image, 4), the last
+ dimension 4 represents [tl_x, tl_y, br_x, br_y].
+            labels (Tensor): Label of each proposal, has shape
+                (batch_size * num_proposals_single_image, ).
+            label_weights (Tensor): Classification loss
+                weight of each proposal, has shape
+                (batch_size * num_proposals_single_image, ).
+ bbox_targets (Tensor): Regression targets of each
+ proposals, has shape
+ (batch_size * num_proposals_single_image, 4),
+ the last dimension 4 represents
+ [tl_x, tl_y, br_x, br_y].
+            bbox_weights (Tensor): Regression loss weight of each
+                proposal's coordinates, has shape
+                (batch_size * num_proposals_single_image, 4).
+            imgs_whwh (Tensor): Tensor with shape
+                (batch_size, num_proposals, 4), the last
+                dimension means
+                [img_width, img_height, img_width, img_height].
+ reduction_override (str, optional): The reduction
+ method used to override the original reduction
+ method of the loss. Options are "none",
+ "mean" and "sum". Defaults to None,
+
+ Returns:
+ dict[str, Tensor]: Dictionary of loss components
+ """
+ losses = dict()
+ bg_class_ind = self.num_classes
+        # note: in Sparse R-CNN, num_gt == num_pos
+ pos_inds = (labels >= 0) & (labels < bg_class_ind)
+ num_pos = pos_inds.sum().float()
+ avg_factor = reduce_mean(num_pos)
+ if cls_score is not None:
+ if cls_score.numel() > 0:
+ losses['loss_cls'] = self.loss_cls(
+ cls_score,
+ labels,
+ label_weights,
+ avg_factor=avg_factor,
+ reduction_override=reduction_override)
+ losses['pos_acc'] = accuracy(cls_score[pos_inds],
+ labels[pos_inds])
+ if bbox_pred is not None:
+ # 0~self.num_classes-1 are FG, self.num_classes is BG
+ # do not perform bounding box regression for BG anymore.
+ if pos_inds.any():
+ pos_bbox_pred = bbox_pred.reshape(bbox_pred.size(0),
+ 4)[pos_inds.type(torch.bool)]
+ imgs_whwh = imgs_whwh.reshape(bbox_pred.size(0),
+ 4)[pos_inds.type(torch.bool)]
+ losses['loss_bbox'] = self.loss_bbox(
+ pos_bbox_pred / imgs_whwh,
+ bbox_targets[pos_inds.type(torch.bool)] / imgs_whwh,
+ bbox_weights[pos_inds.type(torch.bool)],
+ avg_factor=avg_factor)
+ losses['loss_iou'] = self.loss_iou(
+ pos_bbox_pred,
+ bbox_targets[pos_inds.type(torch.bool)],
+ bbox_weights[pos_inds.type(torch.bool)],
+ avg_factor=avg_factor)
+ else:
+ losses['loss_bbox'] = bbox_pred.sum() * 0
+ losses['loss_iou'] = bbox_pred.sum() * 0
+ return losses
+
+ def _get_target_single(self, pos_inds, neg_inds, pos_bboxes, neg_bboxes,
+ pos_gt_bboxes, pos_gt_labels, cfg):
+ """Calculate the ground truth for proposals in the single image
+ according to the sampling results.
+
+        Almost the same as the implementation in `bbox_head`, except that
+        pos_inds and neg_inds are used to select positive and negative
+        samples instead of taking the first num_pos proposals as
+        positive samples.
+
+ Args:
+            pos_inds (Tensor): Indices of the positive samples in the
+                original proposal set; its length equals the number of
+                positive samples.
+            neg_inds (Tensor): Indices of the negative samples in the
+                original proposal set; its length equals the number of
+                negative samples.
+ pos_bboxes (Tensor): Contains all the positive boxes,
+ has shape (num_pos, 4), the last dimension 4
+ represents [tl_x, tl_y, br_x, br_y].
+ neg_bboxes (Tensor): Contains all the negative boxes,
+ has shape (num_neg, 4), the last dimension 4
+ represents [tl_x, tl_y, br_x, br_y].
+ pos_gt_bboxes (Tensor): Contains all the gt_boxes,
+ has shape (num_gt, 4), the last dimension 4
+ represents [tl_x, tl_y, br_x, br_y].
+ pos_gt_labels (Tensor): Contains all the gt_labels,
+ has shape (num_gt).
+ cfg (obj:`ConfigDict`): `train_cfg` of R-CNN.
+
+ Returns:
+ Tuple[Tensor]: Ground truth for proposals in a single image.
+ Containing the following Tensors:
+
+ - labels(Tensor): Gt_labels for all proposals, has
+ shape (num_proposals,).
+ - label_weights(Tensor): Labels_weights for all proposals, has
+ shape (num_proposals,).
+ - bbox_targets(Tensor):Regression target for all proposals, has
+ shape (num_proposals, 4), the last dimension 4
+ represents [tl_x, tl_y, br_x, br_y].
+ - bbox_weights(Tensor):Regression weights for all proposals,
+ has shape (num_proposals, 4).
+ """
+ num_pos = pos_bboxes.size(0)
+ num_neg = neg_bboxes.size(0)
+ num_samples = num_pos + num_neg
+
+ # original implementation uses new_zeros since BG are set to be 0
+ # now use empty & fill because BG cat_id = num_classes,
+ # FG cat_id = [0, num_classes-1]
+ labels = pos_bboxes.new_full((num_samples, ),
+ self.num_classes,
+ dtype=torch.long)
+ label_weights = pos_bboxes.new_zeros(num_samples)
+ bbox_targets = pos_bboxes.new_zeros(num_samples, 4)
+ bbox_weights = pos_bboxes.new_zeros(num_samples, 4)
+ if num_pos > 0:
+ labels[pos_inds] = pos_gt_labels
+ pos_weight = 1.0 if cfg.pos_weight <= 0 else cfg.pos_weight
+ label_weights[pos_inds] = pos_weight
+ if not self.reg_decoded_bbox:
+ pos_bbox_targets = self.bbox_coder.encode(
+ pos_bboxes, pos_gt_bboxes)
+ else:
+ pos_bbox_targets = pos_gt_bboxes
+ bbox_targets[pos_inds, :] = pos_bbox_targets
+ bbox_weights[pos_inds, :] = 1
+ if num_neg > 0:
+ label_weights[neg_inds] = 1.0
+
+ return labels, label_weights, bbox_targets, bbox_weights
+
+ def get_targets(self,
+ sampling_results,
+ gt_bboxes,
+ gt_labels,
+ rcnn_train_cfg,
+ concat=True):
+ """Calculate the ground truth for all samples in a batch according to
+ the sampling_results.
+
+        Almost the same as the implementation in bbox_head, except that the
+        additional parameters pos_inds_list and neg_inds_list are passed to
+        the `_get_target_single` function.
+
+ Args:
+ sampling_results (List[obj:SamplingResults]): Assign results of
+ all images in a batch after sampling.
+ gt_bboxes (list[Tensor]): Gt_bboxes of all images in a batch,
+ each tensor has shape (num_gt, 4), the last dimension 4
+ represents [tl_x, tl_y, br_x, br_y].
+ gt_labels (list[Tensor]): Gt_labels of all images in a batch,
+ each tensor has shape (num_gt,).
+ rcnn_train_cfg (obj:`ConfigDict`): `train_cfg` of RCNN.
+ concat (bool): Whether to concatenate the results of all
+ the images in a single batch.
+
+ Returns:
+            Tuple[Tensor]: Ground truth for all proposals in a batch.
+ Containing the following list of Tensors:
+
+ - labels (list[Tensor],Tensor): Gt_labels for all
+ proposals in a batch, each tensor in list has
+ shape (num_proposals,) when `concat=False`, otherwise just
+ a single tensor has shape (num_all_proposals,).
+ - label_weights (list[Tensor]): Labels_weights for
+ all proposals in a batch, each tensor in list has shape
+ (num_proposals,) when `concat=False`, otherwise just a
+ single tensor has shape (num_all_proposals,).
+ - bbox_targets (list[Tensor],Tensor): Regression target
+ for all proposals in a batch, each tensor in list has
+ shape (num_proposals, 4) when `concat=False`, otherwise
+ just a single tensor has shape (num_all_proposals, 4),
+ the last dimension 4 represents [tl_x, tl_y, br_x, br_y].
+            - bbox_weights (list[Tensor], Tensor): Regression weights for
+ all proposals in a batch, each tensor in list has shape
+ (num_proposals, 4) when `concat=False`, otherwise just a
+ single tensor has shape (num_all_proposals, 4).
+ """
+ pos_inds_list = [res.pos_inds for res in sampling_results]
+ neg_inds_list = [res.neg_inds for res in sampling_results]
+ pos_bboxes_list = [res.pos_bboxes for res in sampling_results]
+ neg_bboxes_list = [res.neg_bboxes for res in sampling_results]
+ pos_gt_bboxes_list = [res.pos_gt_bboxes for res in sampling_results]
+ pos_gt_labels_list = [res.pos_gt_labels for res in sampling_results]
+ labels, label_weights, bbox_targets, bbox_weights = multi_apply(
+ self._get_target_single,
+ pos_inds_list,
+ neg_inds_list,
+ pos_bboxes_list,
+ neg_bboxes_list,
+ pos_gt_bboxes_list,
+ pos_gt_labels_list,
+ cfg=rcnn_train_cfg)
+ if concat:
+ labels = torch.cat(labels, 0)
+ label_weights = torch.cat(label_weights, 0)
+ bbox_targets = torch.cat(bbox_targets, 0)
+ bbox_weights = torch.cat(bbox_weights, 0)
+ return labels, label_weights, bbox_targets, bbox_weights
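+
+
+# --- Hedged illustrative sketch (not part of the original mmdet module): the
+# label layout produced by `_get_target_single`, where background proposals
+# get cat_id == num_classes. The numbers are made-up example values.
+if __name__ == '__main__':
+    num_classes, num_pos, num_neg = 3, 2, 3
+    pos_inds = torch.tensor([0, 4])         # positions of the positive samples
+    pos_gt_labels = torch.tensor([1, 2])
+    labels = torch.full((num_pos + num_neg, ), num_classes, dtype=torch.long)
+    labels[pos_inds] = pos_gt_labels
+    print(labels)  # tensor([1, 3, 3, 3, 2]) -> 3 marks background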
diff --git a/annotator/uniformer/mmdet_null/models/roi_heads/bbox_heads/double_bbox_head.py b/annotator/uniformer/mmdet_null/models/roi_heads/bbox_heads/double_bbox_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..6c154cb3c0d9d7639c3d4a2a1272406d3fab8acd
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/roi_heads/bbox_heads/double_bbox_head.py
@@ -0,0 +1,172 @@
+import torch.nn as nn
+from mmcv.cnn import ConvModule, normal_init, xavier_init
+
+from mmdet.models.backbones.resnet import Bottleneck
+from mmdet.models.builder import HEADS
+from .bbox_head import BBoxHead
+
+
+class BasicResBlock(nn.Module):
+ """Basic residual block.
+
+ This block is a little different from the block in the ResNet backbone.
+ The kernel size of conv1 is 1 in this block while 3 in ResNet BasicBlock.
+
+ Args:
+ in_channels (int): Channels of the input feature map.
+ out_channels (int): Channels of the output feature map.
+ conv_cfg (dict): The config dict for convolution layers.
+ norm_cfg (dict): The config dict for normalization layers.
+ """
+
+ def __init__(self,
+ in_channels,
+ out_channels,
+ conv_cfg=None,
+ norm_cfg=dict(type='BN')):
+ super(BasicResBlock, self).__init__()
+
+ # main path
+ self.conv1 = ConvModule(
+ in_channels,
+ in_channels,
+ kernel_size=3,
+ padding=1,
+ bias=False,
+ conv_cfg=conv_cfg,
+ norm_cfg=norm_cfg)
+ self.conv2 = ConvModule(
+ in_channels,
+ out_channels,
+ kernel_size=1,
+ bias=False,
+ conv_cfg=conv_cfg,
+ norm_cfg=norm_cfg,
+ act_cfg=None)
+
+ # identity path
+ self.conv_identity = ConvModule(
+ in_channels,
+ out_channels,
+ kernel_size=1,
+ conv_cfg=conv_cfg,
+ norm_cfg=norm_cfg,
+ act_cfg=None)
+
+ self.relu = nn.ReLU(inplace=True)
+
+ def forward(self, x):
+ identity = x
+
+ x = self.conv1(x)
+ x = self.conv2(x)
+
+ identity = self.conv_identity(identity)
+ out = x + identity
+
+ out = self.relu(out)
+ return out
+
+
+@HEADS.register_module()
+class DoubleConvFCBBoxHead(BBoxHead):
+ r"""Bbox head used in Double-Head R-CNN
+
+ .. code-block:: none
+
+ /-> cls
+ /-> shared convs ->
+ \-> reg
+ roi features
+ /-> cls
+ \-> shared fc ->
+ \-> reg
+ """ # noqa: W605
+
+ def __init__(self,
+ num_convs=0,
+ num_fcs=0,
+ conv_out_channels=1024,
+ fc_out_channels=1024,
+ conv_cfg=None,
+ norm_cfg=dict(type='BN'),
+ **kwargs):
+ kwargs.setdefault('with_avg_pool', True)
+ super(DoubleConvFCBBoxHead, self).__init__(**kwargs)
+ assert self.with_avg_pool
+ assert num_convs > 0
+ assert num_fcs > 0
+ self.num_convs = num_convs
+ self.num_fcs = num_fcs
+ self.conv_out_channels = conv_out_channels
+ self.fc_out_channels = fc_out_channels
+ self.conv_cfg = conv_cfg
+ self.norm_cfg = norm_cfg
+
+ # increase the channel of input features
+ self.res_block = BasicResBlock(self.in_channels,
+ self.conv_out_channels)
+
+ # add conv heads
+ self.conv_branch = self._add_conv_branch()
+ # add fc heads
+ self.fc_branch = self._add_fc_branch()
+
+ out_dim_reg = 4 if self.reg_class_agnostic else 4 * self.num_classes
+ self.fc_reg = nn.Linear(self.conv_out_channels, out_dim_reg)
+
+ self.fc_cls = nn.Linear(self.fc_out_channels, self.num_classes + 1)
+ self.relu = nn.ReLU(inplace=True)
+
+ def _add_conv_branch(self):
+ """Add the fc branch which consists of a sequential of conv layers."""
+ branch_convs = nn.ModuleList()
+ for i in range(self.num_convs):
+ branch_convs.append(
+ Bottleneck(
+ inplanes=self.conv_out_channels,
+ planes=self.conv_out_channels // 4,
+ conv_cfg=self.conv_cfg,
+ norm_cfg=self.norm_cfg))
+ return branch_convs
+
+ def _add_fc_branch(self):
+ """Add the fc branch which consists of a sequential of fc layers."""
+ branch_fcs = nn.ModuleList()
+ for i in range(self.num_fcs):
+ fc_in_channels = (
+ self.in_channels *
+ self.roi_feat_area if i == 0 else self.fc_out_channels)
+ branch_fcs.append(nn.Linear(fc_in_channels, self.fc_out_channels))
+ return branch_fcs
+
+ def init_weights(self):
+ # conv layers are already initialized by ConvModule
+ normal_init(self.fc_cls, std=0.01)
+ normal_init(self.fc_reg, std=0.001)
+
+ for m in self.fc_branch.modules():
+ if isinstance(m, nn.Linear):
+ xavier_init(m, distribution='uniform')
+
+ def forward(self, x_cls, x_reg):
+ # conv head
+ x_conv = self.res_block(x_reg)
+
+ for conv in self.conv_branch:
+ x_conv = conv(x_conv)
+
+ if self.with_avg_pool:
+ x_conv = self.avg_pool(x_conv)
+
+ x_conv = x_conv.view(x_conv.size(0), -1)
+ bbox_pred = self.fc_reg(x_conv)
+
+ # fc head
+ x_fc = x_cls.view(x_cls.size(0), -1)
+ for fc in self.fc_branch:
+ x_fc = self.relu(fc(x_fc))
+
+ cls_score = self.fc_cls(x_fc)
+
+ return cls_score, bbox_pred
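+
+
+# --- Hedged illustrative sketch (not part of the original mmdet module): a
+# plain-PyTorch stand-in for BasicResBlock (ConvModule replaced by
+# Conv2d + BatchNorm) showing the 3x3 -> 1x1 main path plus the 1x1 identity
+# path. Channel sizes and the 7x7 input are assumed example values.
+if __name__ == '__main__':
+    import torch
+    cin, cout = 256, 1024
+    conv1 = nn.Sequential(nn.Conv2d(cin, cin, 3, padding=1, bias=False),
+                          nn.BatchNorm2d(cin), nn.ReLU(inplace=True))
+    conv2 = nn.Sequential(nn.Conv2d(cin, cout, 1, bias=False),
+                          nn.BatchNorm2d(cout))
+    conv_identity = nn.Sequential(nn.Conv2d(cin, cout, 1, bias=False),
+                                  nn.BatchNorm2d(cout))
+    x = torch.randn(2, cin, 7, 7)
+    out = torch.relu(conv2(conv1(x)) + conv_identity(x))
+    print(out.shape)  # torch.Size([2, 1024, 7, 7])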
diff --git a/annotator/uniformer/mmdet_null/models/roi_heads/bbox_heads/sabl_head.py b/annotator/uniformer/mmdet_null/models/roi_heads/bbox_heads/sabl_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..5153996aeb706d103d1ad14b61734914eddb7693
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/roi_heads/bbox_heads/sabl_head.py
@@ -0,0 +1,572 @@
+import numpy as np
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from mmcv.cnn import ConvModule, kaiming_init, normal_init, xavier_init
+from mmcv.runner import force_fp32
+
+from mmdet.core import build_bbox_coder, multi_apply, multiclass_nms
+from mmdet.models.builder import HEADS, build_loss
+from mmdet.models.losses import accuracy
+
+
+@HEADS.register_module()
+class SABLHead(nn.Module):
+ """Side-Aware Boundary Localization (SABL) for RoI-Head.
+
+ Side-Aware features are extracted by conv layers
+ with an attention mechanism.
+ Boundary Localization with Bucketing and Bucketing Guided Rescoring
+ are implemented in BucketingBBoxCoder.
+
+ Please refer to https://arxiv.org/abs/1912.04260 for more details.
+
+ Args:
+ cls_in_channels (int): Input channels of cls RoI feature. \
+ Defaults to 256.
+ reg_in_channels (int): Input channels of reg RoI feature. \
+ Defaults to 256.
+ roi_feat_size (int): Size of RoI features. Defaults to 7.
+ reg_feat_up_ratio (int): Upsample ratio of reg features. \
+ Defaults to 2.
+ reg_pre_kernel (int): Kernel of 2D conv layers before \
+ attention pooling. Defaults to 3.
+ reg_post_kernel (int): Kernel of 1D conv layers after \
+ attention pooling. Defaults to 3.
+ reg_pre_num (int): Number of pre convs. Defaults to 2.
+ reg_post_num (int): Number of post convs. Defaults to 1.
+ num_classes (int): Number of classes in dataset. Defaults to 80.
+ cls_out_channels (int): Hidden channels in cls fcs. Defaults to 1024.
+ reg_offset_out_channels (int): Hidden and output channel \
+ of reg offset branch. Defaults to 256.
+ reg_cls_out_channels (int): Hidden and output channel \
+ of reg cls branch. Defaults to 256.
+ num_cls_fcs (int): Number of fcs for cls branch. Defaults to 1.
+        num_reg_fcs (int): Number of fcs for reg branch. Defaults to 0.
+        reg_class_agnostic (bool): Class agnostic regression or not. \
+ Defaults to True.
+ norm_cfg (dict): Config of norm layers. Defaults to None.
+        bbox_coder (dict): Config of bbox coder. Defaults to 'BucketingBBoxCoder'.
+ loss_cls (dict): Config of classification loss.
+ loss_bbox_cls (dict): Config of classification loss for bbox branch.
+ loss_bbox_reg (dict): Config of regression loss for bbox branch.
+ """
+
+ def __init__(self,
+ num_classes,
+ cls_in_channels=256,
+ reg_in_channels=256,
+ roi_feat_size=7,
+ reg_feat_up_ratio=2,
+ reg_pre_kernel=3,
+ reg_post_kernel=3,
+ reg_pre_num=2,
+ reg_post_num=1,
+ cls_out_channels=1024,
+ reg_offset_out_channels=256,
+ reg_cls_out_channels=256,
+ num_cls_fcs=1,
+ num_reg_fcs=0,
+ reg_class_agnostic=True,
+ norm_cfg=None,
+ bbox_coder=dict(
+ type='BucketingBBoxCoder',
+ num_buckets=14,
+ scale_factor=1.7),
+ loss_cls=dict(
+ type='CrossEntropyLoss',
+ use_sigmoid=False,
+ loss_weight=1.0),
+ loss_bbox_cls=dict(
+ type='CrossEntropyLoss',
+ use_sigmoid=True,
+ loss_weight=1.0),
+ loss_bbox_reg=dict(
+ type='SmoothL1Loss', beta=0.1, loss_weight=1.0)):
+ super(SABLHead, self).__init__()
+ self.cls_in_channels = cls_in_channels
+ self.reg_in_channels = reg_in_channels
+ self.roi_feat_size = roi_feat_size
+ self.reg_feat_up_ratio = int(reg_feat_up_ratio)
+ self.num_buckets = bbox_coder['num_buckets']
+ assert self.reg_feat_up_ratio // 2 >= 1
+ self.up_reg_feat_size = roi_feat_size * self.reg_feat_up_ratio
+ assert self.up_reg_feat_size == bbox_coder['num_buckets']
+ self.reg_pre_kernel = reg_pre_kernel
+ self.reg_post_kernel = reg_post_kernel
+ self.reg_pre_num = reg_pre_num
+ self.reg_post_num = reg_post_num
+ self.num_classes = num_classes
+ self.cls_out_channels = cls_out_channels
+ self.reg_offset_out_channels = reg_offset_out_channels
+ self.reg_cls_out_channels = reg_cls_out_channels
+ self.num_cls_fcs = num_cls_fcs
+ self.num_reg_fcs = num_reg_fcs
+ self.reg_class_agnostic = reg_class_agnostic
+ assert self.reg_class_agnostic
+ self.norm_cfg = norm_cfg
+
+ self.bbox_coder = build_bbox_coder(bbox_coder)
+ self.loss_cls = build_loss(loss_cls)
+ self.loss_bbox_cls = build_loss(loss_bbox_cls)
+ self.loss_bbox_reg = build_loss(loss_bbox_reg)
+
+ self.cls_fcs = self._add_fc_branch(self.num_cls_fcs,
+ self.cls_in_channels,
+ self.roi_feat_size,
+ self.cls_out_channels)
+
+ self.side_num = int(np.ceil(self.num_buckets / 2))
+
+ if self.reg_feat_up_ratio > 1:
+ self.upsample_x = nn.ConvTranspose1d(
+ reg_in_channels,
+ reg_in_channels,
+ self.reg_feat_up_ratio,
+ stride=self.reg_feat_up_ratio)
+ self.upsample_y = nn.ConvTranspose1d(
+ reg_in_channels,
+ reg_in_channels,
+ self.reg_feat_up_ratio,
+ stride=self.reg_feat_up_ratio)
+
+ self.reg_pre_convs = nn.ModuleList()
+ for i in range(self.reg_pre_num):
+ reg_pre_conv = ConvModule(
+ reg_in_channels,
+ reg_in_channels,
+ kernel_size=reg_pre_kernel,
+ padding=reg_pre_kernel // 2,
+ norm_cfg=norm_cfg,
+ act_cfg=dict(type='ReLU'))
+ self.reg_pre_convs.append(reg_pre_conv)
+
+ self.reg_post_conv_xs = nn.ModuleList()
+ for i in range(self.reg_post_num):
+ reg_post_conv_x = ConvModule(
+ reg_in_channels,
+ reg_in_channels,
+ kernel_size=(1, reg_post_kernel),
+ padding=(0, reg_post_kernel // 2),
+ norm_cfg=norm_cfg,
+ act_cfg=dict(type='ReLU'))
+ self.reg_post_conv_xs.append(reg_post_conv_x)
+ self.reg_post_conv_ys = nn.ModuleList()
+ for i in range(self.reg_post_num):
+ reg_post_conv_y = ConvModule(
+ reg_in_channels,
+ reg_in_channels,
+ kernel_size=(reg_post_kernel, 1),
+ padding=(reg_post_kernel // 2, 0),
+ norm_cfg=norm_cfg,
+ act_cfg=dict(type='ReLU'))
+ self.reg_post_conv_ys.append(reg_post_conv_y)
+
+ self.reg_conv_att_x = nn.Conv2d(reg_in_channels, 1, 1)
+ self.reg_conv_att_y = nn.Conv2d(reg_in_channels, 1, 1)
+
+ self.fc_cls = nn.Linear(self.cls_out_channels, self.num_classes + 1)
+ self.relu = nn.ReLU(inplace=True)
+
+ self.reg_cls_fcs = self._add_fc_branch(self.num_reg_fcs,
+ self.reg_in_channels, 1,
+ self.reg_cls_out_channels)
+ self.reg_offset_fcs = self._add_fc_branch(self.num_reg_fcs,
+ self.reg_in_channels, 1,
+ self.reg_offset_out_channels)
+ self.fc_reg_cls = nn.Linear(self.reg_cls_out_channels, 1)
+ self.fc_reg_offset = nn.Linear(self.reg_offset_out_channels, 1)
+
+ def _add_fc_branch(self, num_branch_fcs, in_channels, roi_feat_size,
+ fc_out_channels):
+ in_channels = in_channels * roi_feat_size * roi_feat_size
+ branch_fcs = nn.ModuleList()
+ for i in range(num_branch_fcs):
+ fc_in_channels = (in_channels if i == 0 else fc_out_channels)
+ branch_fcs.append(nn.Linear(fc_in_channels, fc_out_channels))
+ return branch_fcs
+
+ def init_weights(self):
+ for module_list in [
+ self.reg_cls_fcs, self.reg_offset_fcs, self.cls_fcs
+ ]:
+ for m in module_list.modules():
+ if isinstance(m, nn.Linear):
+ xavier_init(m, distribution='uniform')
+ if self.reg_feat_up_ratio > 1:
+ kaiming_init(self.upsample_x, distribution='normal')
+ kaiming_init(self.upsample_y, distribution='normal')
+
+ normal_init(self.reg_conv_att_x, 0, 0.01)
+ normal_init(self.reg_conv_att_y, 0, 0.01)
+ normal_init(self.fc_reg_offset, 0, 0.001)
+ normal_init(self.fc_reg_cls, 0, 0.01)
+ normal_init(self.fc_cls, 0, 0.01)
+
+ def cls_forward(self, cls_x):
+ cls_x = cls_x.view(cls_x.size(0), -1)
+ for fc in self.cls_fcs:
+ cls_x = self.relu(fc(cls_x))
+ cls_score = self.fc_cls(cls_x)
+ return cls_score
+
+ def attention_pool(self, reg_x):
+ """Extract direction-specific features fx and fy with attention
+ methanism."""
+ reg_fx = reg_x
+ reg_fy = reg_x
+ reg_fx_att = self.reg_conv_att_x(reg_fx).sigmoid()
+ reg_fy_att = self.reg_conv_att_y(reg_fy).sigmoid()
+ reg_fx_att = reg_fx_att / reg_fx_att.sum(dim=2).unsqueeze(2)
+ reg_fy_att = reg_fy_att / reg_fy_att.sum(dim=3).unsqueeze(3)
+ reg_fx = (reg_fx * reg_fx_att).sum(dim=2)
+ reg_fy = (reg_fy * reg_fy_att).sum(dim=3)
+ return reg_fx, reg_fy
+
+ def side_aware_feature_extractor(self, reg_x):
+ """Refine and extract side-aware features without split them."""
+ for reg_pre_conv in self.reg_pre_convs:
+ reg_x = reg_pre_conv(reg_x)
+ reg_fx, reg_fy = self.attention_pool(reg_x)
+
+ if self.reg_post_num > 0:
+ reg_fx = reg_fx.unsqueeze(2)
+ reg_fy = reg_fy.unsqueeze(3)
+ for i in range(self.reg_post_num):
+ reg_fx = self.reg_post_conv_xs[i](reg_fx)
+ reg_fy = self.reg_post_conv_ys[i](reg_fy)
+ reg_fx = reg_fx.squeeze(2)
+ reg_fy = reg_fy.squeeze(3)
+ if self.reg_feat_up_ratio > 1:
+ reg_fx = self.relu(self.upsample_x(reg_fx))
+ reg_fy = self.relu(self.upsample_y(reg_fy))
+ reg_fx = torch.transpose(reg_fx, 1, 2)
+ reg_fy = torch.transpose(reg_fy, 1, 2)
+ return reg_fx.contiguous(), reg_fy.contiguous()
+
+ def reg_pred(self, x, offset_fcs, cls_fcs):
+ """Predict bucketing estimation (cls_pred) and fine regression (offset
+ pred) with side-aware features."""
+ x_offset = x.view(-1, self.reg_in_channels)
+ x_cls = x.view(-1, self.reg_in_channels)
+
+ for fc in offset_fcs:
+ x_offset = self.relu(fc(x_offset))
+ for fc in cls_fcs:
+ x_cls = self.relu(fc(x_cls))
+ offset_pred = self.fc_reg_offset(x_offset)
+ cls_pred = self.fc_reg_cls(x_cls)
+
+ offset_pred = offset_pred.view(x.size(0), -1)
+ cls_pred = cls_pred.view(x.size(0), -1)
+
+ return offset_pred, cls_pred
+
+ def side_aware_split(self, feat):
+ """Split side-aware features aligned with orders of bucketing
+ targets."""
+ l_end = int(np.ceil(self.up_reg_feat_size / 2))
+ r_start = int(np.floor(self.up_reg_feat_size / 2))
+ feat_fl = feat[:, :l_end]
+ feat_fr = feat[:, r_start:].flip(dims=(1, ))
+ feat_fl = feat_fl.contiguous()
+ feat_fr = feat_fr.contiguous()
+ feat = torch.cat([feat_fl, feat_fr], dim=-1)
+ return feat
+
+ def bbox_pred_split(self, bbox_pred, num_proposals_per_img):
+ """Split batch bbox prediction back to each image."""
+ bucket_cls_preds, bucket_offset_preds = bbox_pred
+ bucket_cls_preds = bucket_cls_preds.split(num_proposals_per_img, 0)
+ bucket_offset_preds = bucket_offset_preds.split(
+ num_proposals_per_img, 0)
+ bbox_pred = tuple(zip(bucket_cls_preds, bucket_offset_preds))
+ return bbox_pred
+
+ def reg_forward(self, reg_x):
+ outs = self.side_aware_feature_extractor(reg_x)
+ edge_offset_preds = []
+ edge_cls_preds = []
+ reg_fx = outs[0]
+ reg_fy = outs[1]
+ offset_pred_x, cls_pred_x = self.reg_pred(reg_fx, self.reg_offset_fcs,
+ self.reg_cls_fcs)
+ offset_pred_y, cls_pred_y = self.reg_pred(reg_fy, self.reg_offset_fcs,
+ self.reg_cls_fcs)
+ offset_pred_x = self.side_aware_split(offset_pred_x)
+ offset_pred_y = self.side_aware_split(offset_pred_y)
+ cls_pred_x = self.side_aware_split(cls_pred_x)
+ cls_pred_y = self.side_aware_split(cls_pred_y)
+ edge_offset_preds = torch.cat([offset_pred_x, offset_pred_y], dim=-1)
+ edge_cls_preds = torch.cat([cls_pred_x, cls_pred_y], dim=-1)
+
+ return (edge_cls_preds, edge_offset_preds)
+
+ def forward(self, x):
+
+ bbox_pred = self.reg_forward(x)
+ cls_score = self.cls_forward(x)
+
+ return cls_score, bbox_pred
+
+ def get_targets(self, sampling_results, gt_bboxes, gt_labels,
+ rcnn_train_cfg):
+ pos_proposals = [res.pos_bboxes for res in sampling_results]
+ neg_proposals = [res.neg_bboxes for res in sampling_results]
+ pos_gt_bboxes = [res.pos_gt_bboxes for res in sampling_results]
+ pos_gt_labels = [res.pos_gt_labels for res in sampling_results]
+ cls_reg_targets = self.bucket_target(pos_proposals, neg_proposals,
+ pos_gt_bboxes, pos_gt_labels,
+ rcnn_train_cfg)
+ (labels, label_weights, bucket_cls_targets, bucket_cls_weights,
+ bucket_offset_targets, bucket_offset_weights) = cls_reg_targets
+ return (labels, label_weights, (bucket_cls_targets,
+ bucket_offset_targets),
+ (bucket_cls_weights, bucket_offset_weights))
+
+ def bucket_target(self,
+ pos_proposals_list,
+ neg_proposals_list,
+ pos_gt_bboxes_list,
+ pos_gt_labels_list,
+ rcnn_train_cfg,
+ concat=True):
+ (labels, label_weights, bucket_cls_targets, bucket_cls_weights,
+ bucket_offset_targets, bucket_offset_weights) = multi_apply(
+ self._bucket_target_single,
+ pos_proposals_list,
+ neg_proposals_list,
+ pos_gt_bboxes_list,
+ pos_gt_labels_list,
+ cfg=rcnn_train_cfg)
+
+ if concat:
+ labels = torch.cat(labels, 0)
+ label_weights = torch.cat(label_weights, 0)
+ bucket_cls_targets = torch.cat(bucket_cls_targets, 0)
+ bucket_cls_weights = torch.cat(bucket_cls_weights, 0)
+ bucket_offset_targets = torch.cat(bucket_offset_targets, 0)
+ bucket_offset_weights = torch.cat(bucket_offset_weights, 0)
+ return (labels, label_weights, bucket_cls_targets, bucket_cls_weights,
+ bucket_offset_targets, bucket_offset_weights)
+
+ def _bucket_target_single(self, pos_proposals, neg_proposals,
+ pos_gt_bboxes, pos_gt_labels, cfg):
+ """Compute bucketing estimation targets and fine regression targets for
+ a single image.
+
+ Args:
+ pos_proposals (Tensor): positive proposals of a single image,
+ Shape (n_pos, 4)
+ neg_proposals (Tensor): negative proposals of a single image,
+ Shape (n_neg, 4).
+ pos_gt_bboxes (Tensor): gt bboxes assigned to positive proposals
+ of a single image, Shape (n_pos, 4).
+ pos_gt_labels (Tensor): gt labels assigned to positive proposals
+ of a single image, Shape (n_pos, ).
+ cfg (dict): Config of calculating targets
+
+ Returns:
+ tuple:
+
+ - labels (Tensor): Labels in a single image. \
+ Shape (n,).
+ - label_weights (Tensor): Label weights in a single image.\
+ Shape (n,)
+ - bucket_cls_targets (Tensor): Bucket cls targets in \
+ a single image. Shape (n, num_buckets*2).
+ - bucket_cls_weights (Tensor): Bucket cls weights in \
+ a single image. Shape (n, num_buckets*2).
+ - bucket_offset_targets (Tensor): Bucket offset targets \
+ in a single image. Shape (n, num_buckets*2).
+            - bucket_offset_weights (Tensor): Bucket offset weights \
+ in a single image. Shape (n, num_buckets*2).
+ """
+ num_pos = pos_proposals.size(0)
+ num_neg = neg_proposals.size(0)
+ num_samples = num_pos + num_neg
+ labels = pos_gt_bboxes.new_full((num_samples, ),
+ self.num_classes,
+ dtype=torch.long)
+ label_weights = pos_proposals.new_zeros(num_samples)
+ bucket_cls_targets = pos_proposals.new_zeros(num_samples,
+ 4 * self.side_num)
+ bucket_cls_weights = pos_proposals.new_zeros(num_samples,
+ 4 * self.side_num)
+ bucket_offset_targets = pos_proposals.new_zeros(
+ num_samples, 4 * self.side_num)
+ bucket_offset_weights = pos_proposals.new_zeros(
+ num_samples, 4 * self.side_num)
+ if num_pos > 0:
+ labels[:num_pos] = pos_gt_labels
+ label_weights[:num_pos] = 1.0
+ (pos_bucket_offset_targets, pos_bucket_offset_weights,
+ pos_bucket_cls_targets,
+ pos_bucket_cls_weights) = self.bbox_coder.encode(
+ pos_proposals, pos_gt_bboxes)
+ bucket_cls_targets[:num_pos, :] = pos_bucket_cls_targets
+ bucket_cls_weights[:num_pos, :] = pos_bucket_cls_weights
+ bucket_offset_targets[:num_pos, :] = pos_bucket_offset_targets
+ bucket_offset_weights[:num_pos, :] = pos_bucket_offset_weights
+ if num_neg > 0:
+ label_weights[-num_neg:] = 1.0
+ return (labels, label_weights, bucket_cls_targets, bucket_cls_weights,
+ bucket_offset_targets, bucket_offset_weights)
+
+ def loss(self,
+ cls_score,
+ bbox_pred,
+ rois,
+ labels,
+ label_weights,
+ bbox_targets,
+ bbox_weights,
+ reduction_override=None):
+ losses = dict()
+ if cls_score is not None:
+ avg_factor = max(torch.sum(label_weights > 0).float().item(), 1.)
+ losses['loss_cls'] = self.loss_cls(
+ cls_score,
+ labels,
+ label_weights,
+ avg_factor=avg_factor,
+ reduction_override=reduction_override)
+ losses['acc'] = accuracy(cls_score, labels)
+
+ if bbox_pred is not None:
+ bucket_cls_preds, bucket_offset_preds = bbox_pred
+ bucket_cls_targets, bucket_offset_targets = bbox_targets
+ bucket_cls_weights, bucket_offset_weights = bbox_weights
+ # edge cls
+ bucket_cls_preds = bucket_cls_preds.view(-1, self.side_num)
+ bucket_cls_targets = bucket_cls_targets.view(-1, self.side_num)
+ bucket_cls_weights = bucket_cls_weights.view(-1, self.side_num)
+ losses['loss_bbox_cls'] = self.loss_bbox_cls(
+ bucket_cls_preds,
+ bucket_cls_targets,
+ bucket_cls_weights,
+ avg_factor=bucket_cls_targets.size(0),
+ reduction_override=reduction_override)
+
+ losses['loss_bbox_reg'] = self.loss_bbox_reg(
+ bucket_offset_preds,
+ bucket_offset_targets,
+ bucket_offset_weights,
+ avg_factor=bucket_offset_targets.size(0),
+ reduction_override=reduction_override)
+
+ return losses
+
+ @force_fp32(apply_to=('cls_score', 'bbox_pred'))
+ def get_bboxes(self,
+ rois,
+ cls_score,
+ bbox_pred,
+ img_shape,
+ scale_factor,
+ rescale=False,
+ cfg=None):
+ if isinstance(cls_score, list):
+ cls_score = sum(cls_score) / float(len(cls_score))
+ scores = F.softmax(cls_score, dim=1) if cls_score is not None else None
+
+ if bbox_pred is not None:
+ bboxes, confids = self.bbox_coder.decode(rois[:, 1:], bbox_pred,
+ img_shape)
+ else:
+ bboxes = rois[:, 1:].clone()
+ confids = None
+ if img_shape is not None:
+ bboxes[:, [0, 2]].clamp_(min=0, max=img_shape[1] - 1)
+ bboxes[:, [1, 3]].clamp_(min=0, max=img_shape[0] - 1)
+
+ if rescale and bboxes.size(0) > 0:
+ if isinstance(scale_factor, float):
+ bboxes /= scale_factor
+ else:
+ bboxes /= torch.from_numpy(scale_factor).to(bboxes.device)
+
+ if cfg is None:
+ return bboxes, scores
+ else:
+ det_bboxes, det_labels = multiclass_nms(
+ bboxes,
+ scores,
+ cfg.score_thr,
+ cfg.nms,
+ cfg.max_per_img,
+ score_factors=confids)
+
+ return det_bboxes, det_labels
+
+ @force_fp32(apply_to=('bbox_preds', ))
+ def refine_bboxes(self, rois, labels, bbox_preds, pos_is_gts, img_metas):
+ """Refine bboxes during training.
+
+ Args:
+            rois (Tensor): Shape (n*bs, 5), where n is the number of images
+                per GPU and bs is the number of sampled RoIs per image.
+ labels (Tensor): Shape (n*bs, ).
+ bbox_preds (list[Tensor]): Shape [(n*bs, num_buckets*2), \
+ (n*bs, num_buckets*2)].
+ pos_is_gts (list[Tensor]): Flags indicating if each positive bbox
+ is a gt bbox.
+ img_metas (list[dict]): Meta info of each image.
+
+ Returns:
+ list[Tensor]: Refined bboxes of each image in a mini-batch.
+ """
+ img_ids = rois[:, 0].long().unique(sorted=True)
+ assert img_ids.numel() == len(img_metas)
+
+ bboxes_list = []
+ for i in range(len(img_metas)):
+ inds = torch.nonzero(
+ rois[:, 0] == i, as_tuple=False).squeeze(dim=1)
+ num_rois = inds.numel()
+
+ bboxes_ = rois[inds, 1:]
+ label_ = labels[inds]
+ edge_cls_preds, edge_offset_preds = bbox_preds
+ edge_cls_preds_ = edge_cls_preds[inds]
+ edge_offset_preds_ = edge_offset_preds[inds]
+ bbox_pred_ = [edge_cls_preds_, edge_offset_preds_]
+ img_meta_ = img_metas[i]
+ pos_is_gts_ = pos_is_gts[i]
+
+ bboxes = self.regress_by_class(bboxes_, label_, bbox_pred_,
+ img_meta_)
+ # filter gt bboxes
+ pos_keep = 1 - pos_is_gts_
+ keep_inds = pos_is_gts_.new_ones(num_rois)
+ keep_inds[:len(pos_is_gts_)] = pos_keep
+
+ bboxes_list.append(bboxes[keep_inds.type(torch.bool)])
+
+ return bboxes_list
+
+ @force_fp32(apply_to=('bbox_pred', ))
+ def regress_by_class(self, rois, label, bbox_pred, img_meta):
+ """Regress the bbox for the predicted class. Used in Cascade R-CNN.
+
+ Args:
+ rois (Tensor): shape (n, 4) or (n, 5)
+ label (Tensor): shape (n, )
+ bbox_pred (list[Tensor]): shape [(n, num_buckets *2), \
+ (n, num_buckets *2)]
+ img_meta (dict): Image meta info.
+
+ Returns:
+ Tensor: Regressed bboxes, the same shape as input rois.
+ """
+ assert rois.size(1) == 4 or rois.size(1) == 5
+
+ if rois.size(1) == 4:
+ new_rois, _ = self.bbox_coder.decode(rois, bbox_pred,
+ img_meta['img_shape'])
+ else:
+ bboxes, _ = self.bbox_coder.decode(rois[:, 1:], bbox_pred,
+ img_meta['img_shape'])
+ new_rois = torch.cat((rois[:, [0]], bboxes), dim=1)
+
+ return new_rois
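+
+
+# --- Hedged illustrative sketch (not part of the original mmdet module): what
+# `side_aware_split` does to one row of upsampled features. up_reg_feat_size
+# is assumed to be 14 (roi_feat_size 7 * reg_feat_up_ratio 2 == num_buckets).
+if __name__ == '__main__':
+    up_reg_feat_size = 14
+    feat = torch.arange(up_reg_feat_size, dtype=torch.float32).unsqueeze(0)
+    l_end = int(np.ceil(up_reg_feat_size / 2))     # 7
+    r_start = int(np.floor(up_reg_feat_size / 2))  # 7
+    feat_fl = feat[:, :l_end]                      # left side, read left-to-right
+    feat_fr = feat[:, r_start:].flip(dims=(1, ))   # right side, read outwards-in
+    print(torch.cat([feat_fl, feat_fr], dim=-1))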
diff --git a/annotator/uniformer/mmdet_null/models/roi_heads/bbox_heads/scnet_bbox_head.py b/annotator/uniformer/mmdet_null/models/roi_heads/bbox_heads/scnet_bbox_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..35758f4f4e3b2bddd460edb8a7f482b3a9da2919
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/roi_heads/bbox_heads/scnet_bbox_head.py
@@ -0,0 +1,76 @@
+from mmdet.models.builder import HEADS
+from .convfc_bbox_head import ConvFCBBoxHead
+
+
+@HEADS.register_module()
+class SCNetBBoxHead(ConvFCBBoxHead):
+ """BBox head for `SCNet `_.
+
+    This inherits ``ConvFCBBoxHead`` with a modified forward() function,
+    allowing us to get the intermediate shared feature.
+ """
+
+ def _forward_shared(self, x):
+ """Forward function for shared part."""
+ if self.num_shared_convs > 0:
+ for conv in self.shared_convs:
+ x = conv(x)
+
+ if self.num_shared_fcs > 0:
+ if self.with_avg_pool:
+ x = self.avg_pool(x)
+
+ x = x.flatten(1)
+
+ for fc in self.shared_fcs:
+ x = self.relu(fc(x))
+
+ return x
+
+ def _forward_cls_reg(self, x):
+ """Forward function for classification and regression parts."""
+ x_cls = x
+ x_reg = x
+
+ for conv in self.cls_convs:
+ x_cls = conv(x_cls)
+ if x_cls.dim() > 2:
+ if self.with_avg_pool:
+ x_cls = self.avg_pool(x_cls)
+ x_cls = x_cls.flatten(1)
+ for fc in self.cls_fcs:
+ x_cls = self.relu(fc(x_cls))
+
+ for conv in self.reg_convs:
+ x_reg = conv(x_reg)
+ if x_reg.dim() > 2:
+ if self.with_avg_pool:
+ x_reg = self.avg_pool(x_reg)
+ x_reg = x_reg.flatten(1)
+ for fc in self.reg_fcs:
+ x_reg = self.relu(fc(x_reg))
+
+ cls_score = self.fc_cls(x_cls) if self.with_cls else None
+ bbox_pred = self.fc_reg(x_reg) if self.with_reg else None
+
+ return cls_score, bbox_pred
+
+ def forward(self, x, return_shared_feat=False):
+ """Forward function.
+
+ Args:
+ x (Tensor): input features
+ return_shared_feat (bool): If True, return cls-reg-shared feature.
+
+        Returns:
+            out (tuple[Tensor]): contains ``cls_score`` and ``bbox_pred``;
+                if ``return_shared_feat`` is True, ``x_shared`` is appended
+                to the returned tuple.
+ """
+ x_shared = self._forward_shared(x)
+ out = self._forward_cls_reg(x_shared)
+
+ if return_shared_feat:
+ out += (x_shared, )
+
+ return out
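+
+
+# --- Hedged illustrative sketch (not part of the original mmdet module): the
+# tuple-growing pattern used by `forward` when `return_shared_feat=True`.
+# `toy_forward` is a made-up stand-in, not the real head.
+if __name__ == '__main__':
+    import torch
+
+    def toy_forward(x, return_shared_feat=False):
+        x_shared = x * 2                         # stands in for _forward_shared
+        out = (x_shared.sum(), x_shared.mean())  # stands in for (cls_score, bbox_pred)
+        if return_shared_feat:
+            out += (x_shared, )
+        return out
+
+    print(len(toy_forward(torch.ones(3))))        # 2
+    print(len(toy_forward(torch.ones(3), True)))  # 3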
diff --git a/annotator/uniformer/mmdet_null/models/roi_heads/cascade_roi_head.py b/annotator/uniformer/mmdet_null/models/roi_heads/cascade_roi_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..45b6f36a386cd37c50cc43666fcc516f2e14d868
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/roi_heads/cascade_roi_head.py
@@ -0,0 +1,507 @@
+import torch
+import torch.nn as nn
+
+from mmdet.core import (bbox2result, bbox2roi, bbox_mapping, build_assigner,
+ build_sampler, merge_aug_bboxes, merge_aug_masks,
+ multiclass_nms)
+from ..builder import HEADS, build_head, build_roi_extractor
+from .base_roi_head import BaseRoIHead
+from .test_mixins import BBoxTestMixin, MaskTestMixin
+
+
+@HEADS.register_module()
+class CascadeRoIHead(BaseRoIHead, BBoxTestMixin, MaskTestMixin):
+ """Cascade roi head including one bbox head and one mask head.
+
+ https://arxiv.org/abs/1712.00726
+ """
+
+ def __init__(self,
+ num_stages,
+ stage_loss_weights,
+ bbox_roi_extractor=None,
+ bbox_head=None,
+ mask_roi_extractor=None,
+ mask_head=None,
+ shared_head=None,
+ train_cfg=None,
+ test_cfg=None):
+ assert bbox_roi_extractor is not None
+ assert bbox_head is not None
+ assert shared_head is None, \
+ 'Shared head is not supported in Cascade RCNN anymore'
+ self.num_stages = num_stages
+ self.stage_loss_weights = stage_loss_weights
+ super(CascadeRoIHead, self).__init__(
+ bbox_roi_extractor=bbox_roi_extractor,
+ bbox_head=bbox_head,
+ mask_roi_extractor=mask_roi_extractor,
+ mask_head=mask_head,
+ shared_head=shared_head,
+ train_cfg=train_cfg,
+ test_cfg=test_cfg)
+
+ def init_bbox_head(self, bbox_roi_extractor, bbox_head):
+ """Initialize box head and box roi extractor.
+
+ Args:
+ bbox_roi_extractor (dict): Config of box roi extractor.
+ bbox_head (dict): Config of box in box head.
+ """
+ self.bbox_roi_extractor = nn.ModuleList()
+ self.bbox_head = nn.ModuleList()
+ if not isinstance(bbox_roi_extractor, list):
+ bbox_roi_extractor = [
+ bbox_roi_extractor for _ in range(self.num_stages)
+ ]
+ if not isinstance(bbox_head, list):
+ bbox_head = [bbox_head for _ in range(self.num_stages)]
+ assert len(bbox_roi_extractor) == len(bbox_head) == self.num_stages
+ for roi_extractor, head in zip(bbox_roi_extractor, bbox_head):
+ self.bbox_roi_extractor.append(build_roi_extractor(roi_extractor))
+ self.bbox_head.append(build_head(head))
+
+ def init_mask_head(self, mask_roi_extractor, mask_head):
+ """Initialize mask head and mask roi extractor.
+
+ Args:
+ mask_roi_extractor (dict): Config of mask roi extractor.
+ mask_head (dict): Config of mask in mask head.
+ """
+ self.mask_head = nn.ModuleList()
+ if not isinstance(mask_head, list):
+ mask_head = [mask_head for _ in range(self.num_stages)]
+ assert len(mask_head) == self.num_stages
+ for head in mask_head:
+ self.mask_head.append(build_head(head))
+ if mask_roi_extractor is not None:
+ self.share_roi_extractor = False
+ self.mask_roi_extractor = nn.ModuleList()
+ if not isinstance(mask_roi_extractor, list):
+ mask_roi_extractor = [
+ mask_roi_extractor for _ in range(self.num_stages)
+ ]
+ assert len(mask_roi_extractor) == self.num_stages
+ for roi_extractor in mask_roi_extractor:
+ self.mask_roi_extractor.append(
+ build_roi_extractor(roi_extractor))
+ else:
+ self.share_roi_extractor = True
+ self.mask_roi_extractor = self.bbox_roi_extractor
+
+ def init_assigner_sampler(self):
+ """Initialize assigner and sampler for each stage."""
+ self.bbox_assigner = []
+ self.bbox_sampler = []
+ if self.train_cfg is not None:
+ for idx, rcnn_train_cfg in enumerate(self.train_cfg):
+ self.bbox_assigner.append(
+ build_assigner(rcnn_train_cfg.assigner))
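+                # Record the stage index so that samplers built with
+                # ``context=self`` (e.g. an OHEM sampler) can query the
+                # matching per-stage bbox head.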
+ self.current_stage = idx
+ self.bbox_sampler.append(
+ build_sampler(rcnn_train_cfg.sampler, context=self))
+
+ def init_weights(self, pretrained):
+ """Initialize the weights in head.
+
+ Args:
+ pretrained (str, optional): Path to pre-trained weights.
+ Defaults to None.
+ """
+ if self.with_shared_head:
+ self.shared_head.init_weights(pretrained=pretrained)
+ for i in range(self.num_stages):
+ if self.with_bbox:
+ self.bbox_roi_extractor[i].init_weights()
+ self.bbox_head[i].init_weights()
+ if self.with_mask:
+ if not self.share_roi_extractor:
+ self.mask_roi_extractor[i].init_weights()
+ self.mask_head[i].init_weights()
+
+ def forward_dummy(self, x, proposals):
+ """Dummy forward function."""
+ # bbox head
+ outs = ()
+ rois = bbox2roi([proposals])
+ if self.with_bbox:
+ for i in range(self.num_stages):
+ bbox_results = self._bbox_forward(i, x, rois)
+ outs = outs + (bbox_results['cls_score'],
+ bbox_results['bbox_pred'])
+ # mask heads
+ if self.with_mask:
+ mask_rois = rois[:100]
+ for i in range(self.num_stages):
+ mask_results = self._mask_forward(i, x, mask_rois)
+ outs = outs + (mask_results['mask_pred'], )
+ return outs
+
+ def _bbox_forward(self, stage, x, rois):
+ """Box head forward function used in both training and testing."""
+ bbox_roi_extractor = self.bbox_roi_extractor[stage]
+ bbox_head = self.bbox_head[stage]
+ bbox_feats = bbox_roi_extractor(x[:bbox_roi_extractor.num_inputs],
+ rois)
+ # do not support caffe_c4 model anymore
+ cls_score, bbox_pred = bbox_head(bbox_feats)
+
+ bbox_results = dict(
+ cls_score=cls_score, bbox_pred=bbox_pred, bbox_feats=bbox_feats)
+ return bbox_results
+
+ def _bbox_forward_train(self, stage, x, sampling_results, gt_bboxes,
+ gt_labels, rcnn_train_cfg):
+ """Run forward function and calculate loss for box head in training."""
+ rois = bbox2roi([res.bboxes for res in sampling_results])
+ bbox_results = self._bbox_forward(stage, x, rois)
+ bbox_targets = self.bbox_head[stage].get_targets(
+ sampling_results, gt_bboxes, gt_labels, rcnn_train_cfg)
+ loss_bbox = self.bbox_head[stage].loss(bbox_results['cls_score'],
+ bbox_results['bbox_pred'], rois,
+ *bbox_targets)
+
+ bbox_results.update(
+ loss_bbox=loss_bbox, rois=rois, bbox_targets=bbox_targets)
+ return bbox_results
+
+ def _mask_forward(self, stage, x, rois):
+ """Mask head forward function used in both training and testing."""
+ mask_roi_extractor = self.mask_roi_extractor[stage]
+ mask_head = self.mask_head[stage]
+ mask_feats = mask_roi_extractor(x[:mask_roi_extractor.num_inputs],
+ rois)
+ # do not support caffe_c4 model anymore
+ mask_pred = mask_head(mask_feats)
+
+ mask_results = dict(mask_pred=mask_pred)
+ return mask_results
+
+ def _mask_forward_train(self,
+ stage,
+ x,
+ sampling_results,
+ gt_masks,
+ rcnn_train_cfg,
+ bbox_feats=None):
+ """Run forward function and calculate loss for mask head in
+ training."""
+ pos_rois = bbox2roi([res.pos_bboxes for res in sampling_results])
+ mask_results = self._mask_forward(stage, x, pos_rois)
+
+ mask_targets = self.mask_head[stage].get_targets(
+ sampling_results, gt_masks, rcnn_train_cfg)
+ pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results])
+ loss_mask = self.mask_head[stage].loss(mask_results['mask_pred'],
+ mask_targets, pos_labels)
+
+ mask_results.update(loss_mask=loss_mask)
+ return mask_results
+
+ def forward_train(self,
+ x,
+ img_metas,
+ proposal_list,
+ gt_bboxes,
+ gt_labels,
+ gt_bboxes_ignore=None,
+ gt_masks=None):
+ """
+ Args:
+ x (list[Tensor]): list of multi-level img features.
+ img_metas (list[dict]): list of image info dict where each dict
+ has: 'img_shape', 'scale_factor', 'flip', and may also contain
+ 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
+ For details on the values of these keys see
+ `mmdet/datasets/pipelines/formatting.py:Collect`.
+            proposal_list (list[Tensor]): list of region proposals.
+ gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
+ shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
+ gt_labels (list[Tensor]): class indices corresponding to each box
+ gt_bboxes_ignore (None | list[Tensor]): specify which bounding
+ boxes can be ignored when computing the loss.
+ gt_masks (None | Tensor) : true segmentation masks for each box
+ used if the architecture supports a segmentation task.
+
+ Returns:
+ dict[str, Tensor]: a dictionary of loss components
+ """
+ losses = dict()
+ for i in range(self.num_stages):
+ self.current_stage = i
+ rcnn_train_cfg = self.train_cfg[i]
+ lw = self.stage_loss_weights[i]
+
+ # assign gts and sample proposals
+ sampling_results = []
+ if self.with_bbox or self.with_mask:
+ bbox_assigner = self.bbox_assigner[i]
+ bbox_sampler = self.bbox_sampler[i]
+ num_imgs = len(img_metas)
+ if gt_bboxes_ignore is None:
+ gt_bboxes_ignore = [None for _ in range(num_imgs)]
+
+ for j in range(num_imgs):
+ assign_result = bbox_assigner.assign(
+ proposal_list[j], gt_bboxes[j], gt_bboxes_ignore[j],
+ gt_labels[j])
+ sampling_result = bbox_sampler.sample(
+ assign_result,
+ proposal_list[j],
+ gt_bboxes[j],
+ gt_labels[j],
+ feats=[lvl_feat[j][None] for lvl_feat in x])
+ sampling_results.append(sampling_result)
+
+ # bbox head forward and loss
+ bbox_results = self._bbox_forward_train(i, x, sampling_results,
+ gt_bboxes, gt_labels,
+ rcnn_train_cfg)
+
+ for name, value in bbox_results['loss_bbox'].items():
+ losses[f's{i}.{name}'] = (
+ value * lw if 'loss' in name else value)
+
+ # mask head forward and loss
+ if self.with_mask:
+ mask_results = self._mask_forward_train(
+ i, x, sampling_results, gt_masks, rcnn_train_cfg,
+ bbox_results['bbox_feats'])
+ for name, value in mask_results['loss_mask'].items():
+ losses[f's{i}.{name}'] = (
+ value * lw if 'loss' in name else value)
+
+ # refine bboxes
+ if i < self.num_stages - 1:
+ pos_is_gts = [res.pos_is_gt for res in sampling_results]
+ # bbox_targets is a tuple
+ roi_labels = bbox_results['bbox_targets'][0]
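+                # RoIs labelled as background (label == num_classes) are given
+                # the most confident foreground class so that class-specific
+                # box regression can still refine them for the next stage.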
+ with torch.no_grad():
+ roi_labels = torch.where(
+ roi_labels == self.bbox_head[i].num_classes,
+ bbox_results['cls_score'][:, :-1].argmax(1),
+ roi_labels)
+ proposal_list = self.bbox_head[i].refine_bboxes(
+ bbox_results['rois'], roi_labels,
+ bbox_results['bbox_pred'], pos_is_gts, img_metas)
+
+ return losses
+
+ def simple_test(self, x, proposal_list, img_metas, rescale=False):
+ """Test without augmentation."""
+ assert self.with_bbox, 'Bbox head must be implemented.'
+ num_imgs = len(proposal_list)
+ img_shapes = tuple(meta['img_shape'] for meta in img_metas)
+ ori_shapes = tuple(meta['ori_shape'] for meta in img_metas)
+ scale_factors = tuple(meta['scale_factor'] for meta in img_metas)
+
+ # "ms" in variable names means multi-stage
+ ms_bbox_result = {}
+ ms_segm_result = {}
+ ms_scores = []
+ rcnn_test_cfg = self.test_cfg
+
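+        # Proposals are refined stage by stage; classification scores are
+        # averaged over all stages while the final boxes are decoded by the
+        # last stage's bbox head.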
+ rois = bbox2roi(proposal_list)
+ for i in range(self.num_stages):
+ bbox_results = self._bbox_forward(i, x, rois)
+
+ # split batch bbox prediction back to each image
+ cls_score = bbox_results['cls_score']
+ bbox_pred = bbox_results['bbox_pred']
+ num_proposals_per_img = tuple(
+ len(proposals) for proposals in proposal_list)
+ rois = rois.split(num_proposals_per_img, 0)
+ cls_score = cls_score.split(num_proposals_per_img, 0)
+ if isinstance(bbox_pred, torch.Tensor):
+ bbox_pred = bbox_pred.split(num_proposals_per_img, 0)
+ else:
+ bbox_pred = self.bbox_head[i].bbox_pred_split(
+ bbox_pred, num_proposals_per_img)
+ ms_scores.append(cls_score)
+
+ if i < self.num_stages - 1:
+ bbox_label = [s[:, :-1].argmax(dim=1) for s in cls_score]
+ rois = torch.cat([
+ self.bbox_head[i].regress_by_class(rois[j], bbox_label[j],
+ bbox_pred[j],
+ img_metas[j])
+ for j in range(num_imgs)
+ ])
+
+ # average scores of each image by stages
+ cls_score = [
+ sum([score[i] for score in ms_scores]) / float(len(ms_scores))
+ for i in range(num_imgs)
+ ]
+
+ # apply bbox post-processing to each image individually
+ det_bboxes = []
+ det_labels = []
+ for i in range(num_imgs):
+ det_bbox, det_label = self.bbox_head[-1].get_bboxes(
+ rois[i],
+ cls_score[i],
+ bbox_pred[i],
+ img_shapes[i],
+ scale_factors[i],
+ rescale=rescale,
+ cfg=rcnn_test_cfg)
+ det_bboxes.append(det_bbox)
+ det_labels.append(det_label)
+
+ if torch.onnx.is_in_onnx_export():
+ return det_bboxes, det_labels
+ bbox_results = [
+ bbox2result(det_bboxes[i], det_labels[i],
+ self.bbox_head[-1].num_classes)
+ for i in range(num_imgs)
+ ]
+ ms_bbox_result['ensemble'] = bbox_results
+
+ if self.with_mask:
+ if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes):
+ mask_classes = self.mask_head[-1].num_classes
+ segm_results = [[[] for _ in range(mask_classes)]
+ for _ in range(num_imgs)]
+ else:
+ if rescale and not isinstance(scale_factors[0], float):
+ scale_factors = [
+ torch.from_numpy(scale_factor).to(det_bboxes[0].device)
+ for scale_factor in scale_factors
+ ]
+ _bboxes = [
+ det_bboxes[i][:, :4] *
+ scale_factors[i] if rescale else det_bboxes[i][:, :4]
+ for i in range(len(det_bboxes))
+ ]
+ mask_rois = bbox2roi(_bboxes)
+ num_mask_rois_per_img = tuple(
+ _bbox.size(0) for _bbox in _bboxes)
+ aug_masks = []
+ for i in range(self.num_stages):
+ mask_results = self._mask_forward(i, x, mask_rois)
+ mask_pred = mask_results['mask_pred']
+ # split batch mask prediction back to each image
+ mask_pred = mask_pred.split(num_mask_rois_per_img, 0)
+ aug_masks.append(
+ [m.sigmoid().cpu().numpy() for m in mask_pred])
+
+ # apply mask post-processing to each image individually
+ segm_results = []
+ for i in range(num_imgs):
+ if det_bboxes[i].shape[0] == 0:
+ segm_results.append(
+ [[]
+ for _ in range(self.mask_head[-1].num_classes)])
+ else:
+ aug_mask = [mask[i] for mask in aug_masks]
+ merged_masks = merge_aug_masks(
+ aug_mask, [[img_metas[i]]] * self.num_stages,
+ rcnn_test_cfg)
+ segm_result = self.mask_head[-1].get_seg_masks(
+ merged_masks, _bboxes[i], det_labels[i],
+ rcnn_test_cfg, ori_shapes[i], scale_factors[i],
+ rescale)
+ segm_results.append(segm_result)
+ ms_segm_result['ensemble'] = segm_results
+
+ if self.with_mask:
+ results = list(
+ zip(ms_bbox_result['ensemble'], ms_segm_result['ensemble']))
+ else:
+ results = ms_bbox_result['ensemble']
+
+ return results
+
+ def aug_test(self, features, proposal_list, img_metas, rescale=False):
+ """Test with augmentations.
+
+ If rescale is False, then returned bboxes and masks will fit the scale
+ of imgs[0].
+ """
+ rcnn_test_cfg = self.test_cfg
+ aug_bboxes = []
+ aug_scores = []
+ for x, img_meta in zip(features, img_metas):
+ # only one image in the batch
+ img_shape = img_meta[0]['img_shape']
+ scale_factor = img_meta[0]['scale_factor']
+ flip = img_meta[0]['flip']
+ flip_direction = img_meta[0]['flip_direction']
+
+ proposals = bbox_mapping(proposal_list[0][:, :4], img_shape,
+ scale_factor, flip, flip_direction)
+ # "ms" in variable names means multi-stage
+ ms_scores = []
+
+ rois = bbox2roi([proposals])
+ for i in range(self.num_stages):
+ bbox_results = self._bbox_forward(i, x, rois)
+ ms_scores.append(bbox_results['cls_score'])
+
+ if i < self.num_stages - 1:
+ bbox_label = bbox_results['cls_score'][:, :-1].argmax(
+ dim=1)
+ rois = self.bbox_head[i].regress_by_class(
+ rois, bbox_label, bbox_results['bbox_pred'],
+ img_meta[0])
+
+ cls_score = sum(ms_scores) / float(len(ms_scores))
+ bboxes, scores = self.bbox_head[-1].get_bboxes(
+ rois,
+ cls_score,
+ bbox_results['bbox_pred'],
+ img_shape,
+ scale_factor,
+ rescale=False,
+ cfg=None)
+ aug_bboxes.append(bboxes)
+ aug_scores.append(scores)
+
+ # after merging, bboxes will be rescaled to the original image size
+ merged_bboxes, merged_scores = merge_aug_bboxes(
+ aug_bboxes, aug_scores, img_metas, rcnn_test_cfg)
+ det_bboxes, det_labels = multiclass_nms(merged_bboxes, merged_scores,
+ rcnn_test_cfg.score_thr,
+ rcnn_test_cfg.nms,
+ rcnn_test_cfg.max_per_img)
+
+ bbox_result = bbox2result(det_bboxes, det_labels,
+ self.bbox_head[-1].num_classes)
+
+ if self.with_mask:
+ if det_bboxes.shape[0] == 0:
+ segm_result = [[[]
+ for _ in range(self.mask_head[-1].num_classes)]
+ ]
+ else:
+ aug_masks = []
+ aug_img_metas = []
+ for x, img_meta in zip(features, img_metas):
+ img_shape = img_meta[0]['img_shape']
+ scale_factor = img_meta[0]['scale_factor']
+ flip = img_meta[0]['flip']
+ flip_direction = img_meta[0]['flip_direction']
+ _bboxes = bbox_mapping(det_bboxes[:, :4], img_shape,
+ scale_factor, flip, flip_direction)
+ mask_rois = bbox2roi([_bboxes])
+ for i in range(self.num_stages):
+ mask_results = self._mask_forward(i, x, mask_rois)
+ aug_masks.append(
+ mask_results['mask_pred'].sigmoid().cpu().numpy())
+ aug_img_metas.append(img_meta)
+ merged_masks = merge_aug_masks(aug_masks, aug_img_metas,
+ self.test_cfg)
+
+ ori_shape = img_metas[0][0]['ori_shape']
+ segm_result = self.mask_head[-1].get_seg_masks(
+ merged_masks,
+ det_bboxes,
+ det_labels,
+ rcnn_test_cfg,
+ ori_shape,
+ scale_factor=1.0,
+ rescale=False)
+ return [(bbox_result, segm_result)]
+ else:
+ return [bbox_result]
diff --git a/annotator/uniformer/mmdet_null/models/roi_heads/double_roi_head.py b/annotator/uniformer/mmdet_null/models/roi_heads/double_roi_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..a1aa6c8244a889fbbed312a89574c3e11be294f0
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/roi_heads/double_roi_head.py
@@ -0,0 +1,33 @@
+from ..builder import HEADS
+from .standard_roi_head import StandardRoIHead
+
+
+@HEADS.register_module()
+class DoubleHeadRoIHead(StandardRoIHead):
+ """RoI head for Double Head RCNN.
+
+ https://arxiv.org/abs/1904.06493
+ """
+
+ def __init__(self, reg_roi_scale_factor, **kwargs):
+ super(DoubleHeadRoIHead, self).__init__(**kwargs)
+ self.reg_roi_scale_factor = reg_roi_scale_factor
+
+ def _bbox_forward(self, x, rois):
+ """Box head forward function used in both training and testing time."""
+ bbox_cls_feats = self.bbox_roi_extractor(
+ x[:self.bbox_roi_extractor.num_inputs], rois)
+ bbox_reg_feats = self.bbox_roi_extractor(
+ x[:self.bbox_roi_extractor.num_inputs],
+ rois,
+ roi_scale_factor=self.reg_roi_scale_factor)
+ if self.with_shared_head:
+ bbox_cls_feats = self.shared_head(bbox_cls_feats)
+ bbox_reg_feats = self.shared_head(bbox_reg_feats)
+ cls_score, bbox_pred = self.bbox_head(bbox_cls_feats, bbox_reg_feats)
+
+ bbox_results = dict(
+ cls_score=cls_score,
+ bbox_pred=bbox_pred,
+ bbox_feats=bbox_cls_feats)
+ return bbox_results
diff --git a/annotator/uniformer/mmdet_null/models/roi_heads/dynamic_roi_head.py b/annotator/uniformer/mmdet_null/models/roi_heads/dynamic_roi_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..89427a931f45f5a920c0e66fd88058bf9fa05f5c
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/roi_heads/dynamic_roi_head.py
@@ -0,0 +1,154 @@
+import numpy as np
+import torch
+
+from mmdet.core import bbox2roi
+from mmdet.models.losses import SmoothL1Loss
+from ..builder import HEADS
+from .standard_roi_head import StandardRoIHead
+
+EPS = 1e-15
+
+
+@HEADS.register_module()
+class DynamicRoIHead(StandardRoIHead):
+ """RoI head for `Dynamic R-CNN `_."""
+
+ def __init__(self, **kwargs):
+ super(DynamicRoIHead, self).__init__(**kwargs)
+ assert isinstance(self.bbox_head.loss_bbox, SmoothL1Loss)
+ # the IoU history of the past `update_iter_interval` iterations
+ self.iou_history = []
+ # the beta history of the past `update_iter_interval` iterations
+ self.beta_history = []
+
+ def forward_train(self,
+ x,
+ img_metas,
+ proposal_list,
+ gt_bboxes,
+ gt_labels,
+ gt_bboxes_ignore=None,
+ gt_masks=None):
+ """Forward function for training.
+
+ Args:
+ x (list[Tensor]): list of multi-level img features.
+
+ img_metas (list[dict]): list of image info dict where each dict
+ has: 'img_shape', 'scale_factor', 'flip', and may also contain
+ 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
+ For details on the values of these keys see
+ `mmdet/datasets/pipelines/formatting.py:Collect`.
+
+            proposal_list (list[Tensor]): list of region proposals.
+
+            gt_bboxes (list[Tensor]): each item contains the ground-truth
+                boxes of the corresponding image in [tl_x, tl_y, br_x, br_y]
+                format.
+
+ gt_labels (list[Tensor]): class indices corresponding to each box
+
+ gt_bboxes_ignore (None | list[Tensor]): specify which bounding
+ boxes can be ignored when computing the loss.
+
+ gt_masks (None | Tensor) : true segmentation masks for each box
+ used if the architecture supports a segmentation task.
+
+ Returns:
+ dict[str, Tensor]: a dictionary of loss components
+ """
+ # assign gts and sample proposals
+ if self.with_bbox or self.with_mask:
+ num_imgs = len(img_metas)
+ if gt_bboxes_ignore is None:
+ gt_bboxes_ignore = [None for _ in range(num_imgs)]
+ sampling_results = []
+ cur_iou = []
+ for i in range(num_imgs):
+ assign_result = self.bbox_assigner.assign(
+ proposal_list[i], gt_bboxes[i], gt_bboxes_ignore[i],
+ gt_labels[i])
+ sampling_result = self.bbox_sampler.sample(
+ assign_result,
+ proposal_list[i],
+ gt_bboxes[i],
+ gt_labels[i],
+ feats=[lvl_feat[i][None] for lvl_feat in x])
+ # record the `iou_topk`-th largest IoU in an image
+ iou_topk = min(self.train_cfg.dynamic_rcnn.iou_topk,
+ len(assign_result.max_overlaps))
+ ious, _ = torch.topk(assign_result.max_overlaps, iou_topk)
+ cur_iou.append(ious[-1].item())
+ sampling_results.append(sampling_result)
+ # average the current IoUs over images
+ cur_iou = np.mean(cur_iou)
+ self.iou_history.append(cur_iou)
+
+ losses = dict()
+ # bbox head forward and loss
+ if self.with_bbox:
+ bbox_results = self._bbox_forward_train(x, sampling_results,
+ gt_bboxes, gt_labels,
+ img_metas)
+ losses.update(bbox_results['loss_bbox'])
+
+ # mask head forward and loss
+ if self.with_mask:
+ mask_results = self._mask_forward_train(x, sampling_results,
+ bbox_results['bbox_feats'],
+ gt_masks, img_metas)
+ losses.update(mask_results['loss_mask'])
+
+ # update IoU threshold and SmoothL1 beta
+ update_iter_interval = self.train_cfg.dynamic_rcnn.update_iter_interval
+ if len(self.iou_history) % update_iter_interval == 0:
+ new_iou_thr, new_beta = self.update_hyperparameters()
+
+ return losses
+
+ def _bbox_forward_train(self, x, sampling_results, gt_bboxes, gt_labels,
+ img_metas):
+ num_imgs = len(img_metas)
+ rois = bbox2roi([res.bboxes for res in sampling_results])
+ bbox_results = self._bbox_forward(x, rois)
+
+ bbox_targets = self.bbox_head.get_targets(sampling_results, gt_bboxes,
+ gt_labels, self.train_cfg)
+ # record the `beta_topk`-th smallest target
+ # `bbox_targets[2]` and `bbox_targets[3]` stand for bbox_targets
+ # and bbox_weights, respectively
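+        # Only the (dx, dy) part of each positive target is used; the
+        # ``beta_topk``-th smallest mean offset is recorded and later becomes
+        # a candidate for the new SmoothL1 ``beta``.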
+ pos_inds = bbox_targets[3][:, 0].nonzero().squeeze(1)
+ num_pos = len(pos_inds)
+ cur_target = bbox_targets[2][pos_inds, :2].abs().mean(dim=1)
+ beta_topk = min(self.train_cfg.dynamic_rcnn.beta_topk * num_imgs,
+ num_pos)
+ cur_target = torch.kthvalue(cur_target, beta_topk)[0].item()
+ self.beta_history.append(cur_target)
+ loss_bbox = self.bbox_head.loss(bbox_results['cls_score'],
+ bbox_results['bbox_pred'], rois,
+ *bbox_targets)
+
+ bbox_results.update(loss_bbox=loss_bbox)
+ return bbox_results
+
+ def update_hyperparameters(self):
+ """Update hyperparameters like IoU thresholds for assigner and beta for
+ SmoothL1 loss based on the training statistics.
+
+ Returns:
+ tuple[float]: the updated ``iou_thr`` and ``beta``.
+ """
+ new_iou_thr = max(self.train_cfg.dynamic_rcnn.initial_iou,
+ np.mean(self.iou_history))
+ self.iou_history = []
+ self.bbox_assigner.pos_iou_thr = new_iou_thr
+ self.bbox_assigner.neg_iou_thr = new_iou_thr
+ self.bbox_assigner.min_pos_iou = new_iou_thr
+ if (np.median(self.beta_history) < EPS):
+ # avoid 0 or too small value for new_beta
+ new_beta = self.bbox_head.loss_bbox.beta
+ else:
+ new_beta = min(self.train_cfg.dynamic_rcnn.initial_beta,
+ np.median(self.beta_history))
+ self.beta_history = []
+ self.bbox_head.loss_bbox.beta = new_beta
+ return new_iou_thr, new_beta
diff --git a/annotator/uniformer/mmdet_null/models/roi_heads/grid_roi_head.py b/annotator/uniformer/mmdet_null/models/roi_heads/grid_roi_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..4c52c79863ebaf17bd023382c7e5d4c237b4da77
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/roi_heads/grid_roi_head.py
@@ -0,0 +1,176 @@
+import torch
+
+from mmdet.core import bbox2result, bbox2roi
+from ..builder import HEADS, build_head, build_roi_extractor
+from .standard_roi_head import StandardRoIHead
+
+
+@HEADS.register_module()
+class GridRoIHead(StandardRoIHead):
+ """Grid roi head for Grid R-CNN.
+
+ https://arxiv.org/abs/1811.12030
+ """
+
+ def __init__(self, grid_roi_extractor, grid_head, **kwargs):
+ assert grid_head is not None
+ super(GridRoIHead, self).__init__(**kwargs)
+ if grid_roi_extractor is not None:
+ self.grid_roi_extractor = build_roi_extractor(grid_roi_extractor)
+ self.share_roi_extractor = False
+ else:
+ self.share_roi_extractor = True
+ self.grid_roi_extractor = self.bbox_roi_extractor
+ self.grid_head = build_head(grid_head)
+
+ def init_weights(self, pretrained):
+ """Initialize the weights in head.
+
+ Args:
+ pretrained (str, optional): Path to pre-trained weights.
+ Defaults to None.
+ """
+ super(GridRoIHead, self).init_weights(pretrained)
+ self.grid_head.init_weights()
+ if not self.share_roi_extractor:
+ self.grid_roi_extractor.init_weights()
+
+ def _random_jitter(self, sampling_results, img_metas, amplitude=0.15):
+ """Ramdom jitter positive proposals for training."""
+ for sampling_result, img_meta in zip(sampling_results, img_metas):
+ bboxes = sampling_result.pos_bboxes
+ random_offsets = bboxes.new_empty(bboxes.shape[0], 4).uniform_(
+ -amplitude, amplitude)
+ # before jittering
+ cxcy = (bboxes[:, 2:4] + bboxes[:, :2]) / 2
+ wh = (bboxes[:, 2:4] - bboxes[:, :2]).abs()
+ # after jittering
+ new_cxcy = cxcy + wh * random_offsets[:, :2]
+ new_wh = wh * (1 + random_offsets[:, 2:])
+ # xywh to xyxy
+ new_x1y1 = (new_cxcy - new_wh / 2)
+ new_x2y2 = (new_cxcy + new_wh / 2)
+ new_bboxes = torch.cat([new_x1y1, new_x2y2], dim=1)
+ # clip bboxes
+ max_shape = img_meta['img_shape']
+ if max_shape is not None:
+ new_bboxes[:, 0::2].clamp_(min=0, max=max_shape[1] - 1)
+ new_bboxes[:, 1::2].clamp_(min=0, max=max_shape[0] - 1)
+
+ sampling_result.pos_bboxes = new_bboxes
+ return sampling_results
+
+ def forward_dummy(self, x, proposals):
+ """Dummy forward function."""
+ # bbox head
+ outs = ()
+ rois = bbox2roi([proposals])
+ if self.with_bbox:
+ bbox_results = self._bbox_forward(x, rois)
+ outs = outs + (bbox_results['cls_score'],
+ bbox_results['bbox_pred'])
+
+ # grid head
+ grid_rois = rois[:100]
+ grid_feats = self.grid_roi_extractor(
+ x[:self.grid_roi_extractor.num_inputs], grid_rois)
+ if self.with_shared_head:
+ grid_feats = self.shared_head(grid_feats)
+ grid_pred = self.grid_head(grid_feats)
+ outs = outs + (grid_pred, )
+
+ # mask head
+ if self.with_mask:
+ mask_rois = rois[:100]
+ mask_results = self._mask_forward(x, mask_rois)
+ outs = outs + (mask_results['mask_pred'], )
+ return outs
+
+ def _bbox_forward_train(self, x, sampling_results, gt_bboxes, gt_labels,
+ img_metas):
+ """Run forward function and calculate loss for box head in training."""
+ bbox_results = super(GridRoIHead,
+ self)._bbox_forward_train(x, sampling_results,
+ gt_bboxes, gt_labels,
+ img_metas)
+
+ # Grid head forward and loss
+ sampling_results = self._random_jitter(sampling_results, img_metas)
+ pos_rois = bbox2roi([res.pos_bboxes for res in sampling_results])
+
+ # GN in head does not support zero shape input
+ if pos_rois.shape[0] == 0:
+ return bbox_results
+
+ grid_feats = self.grid_roi_extractor(
+ x[:self.grid_roi_extractor.num_inputs], pos_rois)
+ if self.with_shared_head:
+ grid_feats = self.shared_head(grid_feats)
+        # Accelerate training: randomly sample at most ``max_num_grid``
+        # positive RoIs (default 192) for the grid branch.
+ max_sample_num_grid = self.train_cfg.get('max_num_grid', 192)
+ sample_idx = torch.randperm(
+ grid_feats.shape[0])[:min(grid_feats.shape[0], max_sample_num_grid
+ )]
+ grid_feats = grid_feats[sample_idx]
+
+ grid_pred = self.grid_head(grid_feats)
+
+ grid_targets = self.grid_head.get_targets(sampling_results,
+ self.train_cfg)
+ grid_targets = grid_targets[sample_idx]
+
+ loss_grid = self.grid_head.loss(grid_pred, grid_targets)
+
+ bbox_results['loss_bbox'].update(loss_grid)
+ return bbox_results
+
+ def simple_test(self,
+ x,
+ proposal_list,
+ img_metas,
+ proposals=None,
+ rescale=False):
+ """Test without augmentation."""
+ assert self.with_bbox, 'Bbox head must be implemented.'
+
+ det_bboxes, det_labels = self.simple_test_bboxes(
+ x, img_metas, proposal_list, self.test_cfg, rescale=False)
+ # pack rois into bboxes
+ grid_rois = bbox2roi([det_bbox[:, :4] for det_bbox in det_bboxes])
+ if grid_rois.shape[0] != 0:
+ grid_feats = self.grid_roi_extractor(
+ x[:len(self.grid_roi_extractor.featmap_strides)], grid_rois)
+ self.grid_head.test_mode = True
+ grid_pred = self.grid_head(grid_feats)
+ # split batch grid head prediction back to each image
+ num_roi_per_img = tuple(len(det_bbox) for det_bbox in det_bboxes)
+ grid_pred = {
+ k: v.split(num_roi_per_img, 0)
+ for k, v in grid_pred.items()
+ }
+
+ # apply bbox post-processing to each image individually
+ bbox_results = []
+ num_imgs = len(det_bboxes)
+ for i in range(num_imgs):
+ if det_bboxes[i].shape[0] == 0:
+ bbox_results.append(grid_rois.new_tensor([]))
+ else:
+ det_bbox = self.grid_head.get_bboxes(
+ det_bboxes[i], grid_pred['fused'][i], [img_metas[i]])
+ if rescale:
+ det_bbox[:, :4] /= img_metas[i]['scale_factor']
+ bbox_results.append(
+ bbox2result(det_bbox, det_labels[i],
+ self.bbox_head.num_classes))
+ else:
+ bbox_results = [
+ grid_rois.new_tensor([]) for _ in range(len(det_bboxes))
+ ]
+
+ if not self.with_mask:
+ return bbox_results
+ else:
+ segm_results = self.simple_test_mask(
+ x, img_metas, det_bboxes, det_labels, rescale=rescale)
+ return list(zip(bbox_results, segm_results))
diff --git a/annotator/uniformer/mmdet_null/models/roi_heads/htc_roi_head.py b/annotator/uniformer/mmdet_null/models/roi_heads/htc_roi_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..5b5c2ec3bc9d579061fbd89f8b320e6e59909143
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/roi_heads/htc_roi_head.py
@@ -0,0 +1,589 @@
+import torch
+import torch.nn.functional as F
+
+from mmdet.core import (bbox2result, bbox2roi, bbox_mapping, merge_aug_bboxes,
+ merge_aug_masks, multiclass_nms)
+from ..builder import HEADS, build_head, build_roi_extractor
+from .cascade_roi_head import CascadeRoIHead
+
+
+@HEADS.register_module()
+class HybridTaskCascadeRoIHead(CascadeRoIHead):
+ """Hybrid task cascade roi head including one bbox head and one mask head.
+
+ https://arxiv.org/abs/1901.07518
+ """
+
+ def __init__(self,
+ num_stages,
+ stage_loss_weights,
+ semantic_roi_extractor=None,
+ semantic_head=None,
+ semantic_fusion=('bbox', 'mask'),
+ interleaved=True,
+ mask_info_flow=True,
+ **kwargs):
+ super(HybridTaskCascadeRoIHead,
+ self).__init__(num_stages, stage_loss_weights, **kwargs)
+ assert self.with_bbox and self.with_mask
+ assert not self.with_shared_head # shared head is not supported
+
+ if semantic_head is not None:
+ self.semantic_roi_extractor = build_roi_extractor(
+ semantic_roi_extractor)
+ self.semantic_head = build_head(semantic_head)
+
+ self.semantic_fusion = semantic_fusion
+ self.interleaved = interleaved
+ self.mask_info_flow = mask_info_flow
+
+ def init_weights(self, pretrained):
+ """Initialize the weights in head.
+
+ Args:
+ pretrained (str, optional): Path to pre-trained weights.
+ Defaults to None.
+ """
+ super(HybridTaskCascadeRoIHead, self).init_weights(pretrained)
+ if self.with_semantic:
+ self.semantic_head.init_weights()
+
+ @property
+ def with_semantic(self):
+ """bool: whether the head has semantic head"""
+ if hasattr(self, 'semantic_head') and self.semantic_head is not None:
+ return True
+ else:
+ return False
+
+ def forward_dummy(self, x, proposals):
+ """Dummy forward function."""
+ outs = ()
+ # semantic head
+ if self.with_semantic:
+ _, semantic_feat = self.semantic_head(x)
+ else:
+ semantic_feat = None
+ # bbox heads
+ rois = bbox2roi([proposals])
+ for i in range(self.num_stages):
+ bbox_results = self._bbox_forward(
+ i, x, rois, semantic_feat=semantic_feat)
+ outs = outs + (bbox_results['cls_score'],
+ bbox_results['bbox_pred'])
+ # mask heads
+ if self.with_mask:
+ mask_rois = rois[:100]
+ mask_roi_extractor = self.mask_roi_extractor[-1]
+ mask_feats = mask_roi_extractor(
+ x[:len(mask_roi_extractor.featmap_strides)], mask_rois)
+ if self.with_semantic and 'mask' in self.semantic_fusion:
+ mask_semantic_feat = self.semantic_roi_extractor(
+ [semantic_feat], mask_rois)
+ mask_feats += mask_semantic_feat
+ last_feat = None
+ for i in range(self.num_stages):
+ mask_head = self.mask_head[i]
+ if self.mask_info_flow:
+ mask_pred, last_feat = mask_head(mask_feats, last_feat)
+ else:
+ mask_pred = mask_head(mask_feats)
+ outs = outs + (mask_pred, )
+ return outs
+
+ def _bbox_forward_train(self,
+ stage,
+ x,
+ sampling_results,
+ gt_bboxes,
+ gt_labels,
+ rcnn_train_cfg,
+ semantic_feat=None):
+ """Run forward function and calculate loss for box head in training."""
+ bbox_head = self.bbox_head[stage]
+ rois = bbox2roi([res.bboxes for res in sampling_results])
+ bbox_results = self._bbox_forward(
+ stage, x, rois, semantic_feat=semantic_feat)
+
+ bbox_targets = bbox_head.get_targets(sampling_results, gt_bboxes,
+ gt_labels, rcnn_train_cfg)
+ loss_bbox = bbox_head.loss(bbox_results['cls_score'],
+ bbox_results['bbox_pred'], rois,
+ *bbox_targets)
+
+ bbox_results.update(
+ loss_bbox=loss_bbox,
+ rois=rois,
+ bbox_targets=bbox_targets,
+ )
+ return bbox_results
+
+ def _mask_forward_train(self,
+ stage,
+ x,
+ sampling_results,
+ gt_masks,
+ rcnn_train_cfg,
+ semantic_feat=None):
+ """Run forward function and calculate loss for mask head in
+ training."""
+ mask_roi_extractor = self.mask_roi_extractor[stage]
+ mask_head = self.mask_head[stage]
+ pos_rois = bbox2roi([res.pos_bboxes for res in sampling_results])
+ mask_feats = mask_roi_extractor(x[:mask_roi_extractor.num_inputs],
+ pos_rois)
+
+ # semantic feature fusion
+ # element-wise sum for original features and pooled semantic features
+ if self.with_semantic and 'mask' in self.semantic_fusion:
+ mask_semantic_feat = self.semantic_roi_extractor([semantic_feat],
+ pos_rois)
+ if mask_semantic_feat.shape[-2:] != mask_feats.shape[-2:]:
+ mask_semantic_feat = F.adaptive_avg_pool2d(
+ mask_semantic_feat, mask_feats.shape[-2:])
+ mask_feats += mask_semantic_feat
+
+ # mask information flow
+ # forward all previous mask heads to obtain last_feat, and fuse it
+ # with the normal mask feature
+ if self.mask_info_flow:
+ last_feat = None
+ for i in range(stage):
+ last_feat = self.mask_head[i](
+ mask_feats, last_feat, return_logits=False)
+ mask_pred = mask_head(mask_feats, last_feat, return_feat=False)
+ else:
+ mask_pred = mask_head(mask_feats, return_feat=False)
+
+ mask_targets = mask_head.get_targets(sampling_results, gt_masks,
+ rcnn_train_cfg)
+ pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results])
+ loss_mask = mask_head.loss(mask_pred, mask_targets, pos_labels)
+
+ mask_results = dict(loss_mask=loss_mask)
+ return mask_results
+
+ def _bbox_forward(self, stage, x, rois, semantic_feat=None):
+ """Box head forward function used in both training and testing."""
+ bbox_roi_extractor = self.bbox_roi_extractor[stage]
+ bbox_head = self.bbox_head[stage]
+ bbox_feats = bbox_roi_extractor(
+ x[:len(bbox_roi_extractor.featmap_strides)], rois)
+ if self.with_semantic and 'bbox' in self.semantic_fusion:
+ bbox_semantic_feat = self.semantic_roi_extractor([semantic_feat],
+ rois)
+ if bbox_semantic_feat.shape[-2:] != bbox_feats.shape[-2:]:
+ bbox_semantic_feat = F.adaptive_avg_pool2d(
+ bbox_semantic_feat, bbox_feats.shape[-2:])
+ bbox_feats += bbox_semantic_feat
+ cls_score, bbox_pred = bbox_head(bbox_feats)
+
+ bbox_results = dict(cls_score=cls_score, bbox_pred=bbox_pred)
+ return bbox_results
+
+ def _mask_forward_test(self, stage, x, bboxes, semantic_feat=None):
+ """Mask head forward function for testing."""
+ mask_roi_extractor = self.mask_roi_extractor[stage]
+ mask_head = self.mask_head[stage]
+ mask_rois = bbox2roi([bboxes])
+ mask_feats = mask_roi_extractor(
+ x[:len(mask_roi_extractor.featmap_strides)], mask_rois)
+ if self.with_semantic and 'mask' in self.semantic_fusion:
+ mask_semantic_feat = self.semantic_roi_extractor([semantic_feat],
+ mask_rois)
+ if mask_semantic_feat.shape[-2:] != mask_feats.shape[-2:]:
+ mask_semantic_feat = F.adaptive_avg_pool2d(
+ mask_semantic_feat, mask_feats.shape[-2:])
+ mask_feats += mask_semantic_feat
+ if self.mask_info_flow:
+ last_feat = None
+ last_pred = None
+ for i in range(stage):
+ mask_pred, last_feat = self.mask_head[i](mask_feats, last_feat)
+ if last_pred is not None:
+ mask_pred = mask_pred + last_pred
+ last_pred = mask_pred
+ mask_pred = mask_head(mask_feats, last_feat, return_feat=False)
+ if last_pred is not None:
+ mask_pred = mask_pred + last_pred
+ else:
+ mask_pred = mask_head(mask_feats)
+ return mask_pred
+
+ def forward_train(self,
+ x,
+ img_metas,
+ proposal_list,
+ gt_bboxes,
+ gt_labels,
+ gt_bboxes_ignore=None,
+ gt_masks=None,
+ gt_semantic_seg=None):
+ """
+ Args:
+ x (list[Tensor]): list of multi-level img features.
+
+ img_metas (list[dict]): list of image info dict where each dict
+ has: 'img_shape', 'scale_factor', 'flip', and may also contain
+ 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
+ For details on the values of these keys see
+ `mmdet/datasets/pipelines/formatting.py:Collect`.
+
+            proposal_list (list[Tensor]): list of region proposals.
+
+ gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
+ shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
+
+ gt_labels (list[Tensor]): class indices corresponding to each box
+
+ gt_bboxes_ignore (None, list[Tensor]): specify which bounding
+ boxes can be ignored when computing the loss.
+
+ gt_masks (None, Tensor) : true segmentation masks for each box
+ used if the architecture supports a segmentation task.
+
+ gt_semantic_seg (None, list[Tensor]): semantic segmentation masks
+ used if the architecture supports semantic segmentation task.
+
+ Returns:
+ dict[str, Tensor]: a dictionary of loss components
+ """
+ # semantic segmentation part
+ # 2 outputs: segmentation prediction and embedded features
+ losses = dict()
+ if self.with_semantic:
+ semantic_pred, semantic_feat = self.semantic_head(x)
+ loss_seg = self.semantic_head.loss(semantic_pred, gt_semantic_seg)
+ losses['loss_semantic_seg'] = loss_seg
+ else:
+ semantic_feat = None
+
+ for i in range(self.num_stages):
+ self.current_stage = i
+ rcnn_train_cfg = self.train_cfg[i]
+ lw = self.stage_loss_weights[i]
+
+ # assign gts and sample proposals
+ sampling_results = []
+ bbox_assigner = self.bbox_assigner[i]
+ bbox_sampler = self.bbox_sampler[i]
+ num_imgs = len(img_metas)
+ if gt_bboxes_ignore is None:
+ gt_bboxes_ignore = [None for _ in range(num_imgs)]
+
+ for j in range(num_imgs):
+ assign_result = bbox_assigner.assign(proposal_list[j],
+ gt_bboxes[j],
+ gt_bboxes_ignore[j],
+ gt_labels[j])
+ sampling_result = bbox_sampler.sample(
+ assign_result,
+ proposal_list[j],
+ gt_bboxes[j],
+ gt_labels[j],
+ feats=[lvl_feat[j][None] for lvl_feat in x])
+ sampling_results.append(sampling_result)
+
+ # bbox head forward and loss
+ bbox_results = \
+ self._bbox_forward_train(
+ i, x, sampling_results, gt_bboxes, gt_labels,
+ rcnn_train_cfg, semantic_feat)
+ roi_labels = bbox_results['bbox_targets'][0]
+
+ for name, value in bbox_results['loss_bbox'].items():
+ losses[f's{i}.{name}'] = (
+ value * lw if 'loss' in name else value)
+
+ # mask head forward and loss
+ if self.with_mask:
+ # interleaved execution: use regressed bboxes by the box branch
+ # to train the mask branch
+ if self.interleaved:
+ pos_is_gts = [res.pos_is_gt for res in sampling_results]
+ with torch.no_grad():
+ proposal_list = self.bbox_head[i].refine_bboxes(
+ bbox_results['rois'], roi_labels,
+ bbox_results['bbox_pred'], pos_is_gts, img_metas)
+ # re-assign and sample 512 RoIs from 512 RoIs
+ sampling_results = []
+ for j in range(num_imgs):
+ assign_result = bbox_assigner.assign(
+ proposal_list[j], gt_bboxes[j],
+ gt_bboxes_ignore[j], gt_labels[j])
+ sampling_result = bbox_sampler.sample(
+ assign_result,
+ proposal_list[j],
+ gt_bboxes[j],
+ gt_labels[j],
+ feats=[lvl_feat[j][None] for lvl_feat in x])
+ sampling_results.append(sampling_result)
+ mask_results = self._mask_forward_train(
+ i, x, sampling_results, gt_masks, rcnn_train_cfg,
+ semantic_feat)
+ for name, value in mask_results['loss_mask'].items():
+ losses[f's{i}.{name}'] = (
+ value * lw if 'loss' in name else value)
+
+ # refine bboxes (same as Cascade R-CNN)
+ if i < self.num_stages - 1 and not self.interleaved:
+ pos_is_gts = [res.pos_is_gt for res in sampling_results]
+ with torch.no_grad():
+ proposal_list = self.bbox_head[i].refine_bboxes(
+ bbox_results['rois'], roi_labels,
+ bbox_results['bbox_pred'], pos_is_gts, img_metas)
+
+ return losses
+
+ def simple_test(self, x, proposal_list, img_metas, rescale=False):
+ """Test without augmentation."""
+ if self.with_semantic:
+ _, semantic_feat = self.semantic_head(x)
+ else:
+ semantic_feat = None
+
+ num_imgs = len(proposal_list)
+ img_shapes = tuple(meta['img_shape'] for meta in img_metas)
+ ori_shapes = tuple(meta['ori_shape'] for meta in img_metas)
+ scale_factors = tuple(meta['scale_factor'] for meta in img_metas)
+
+ # "ms" in variable names means multi-stage
+ ms_bbox_result = {}
+ ms_segm_result = {}
+ ms_scores = []
+ rcnn_test_cfg = self.test_cfg
+
+ rois = bbox2roi(proposal_list)
+ for i in range(self.num_stages):
+ bbox_head = self.bbox_head[i]
+ bbox_results = self._bbox_forward(
+ i, x, rois, semantic_feat=semantic_feat)
+ # split batch bbox prediction back to each image
+ cls_score = bbox_results['cls_score']
+ bbox_pred = bbox_results['bbox_pred']
+ num_proposals_per_img = tuple(len(p) for p in proposal_list)
+ rois = rois.split(num_proposals_per_img, 0)
+ cls_score = cls_score.split(num_proposals_per_img, 0)
+ bbox_pred = bbox_pred.split(num_proposals_per_img, 0)
+ ms_scores.append(cls_score)
+
+ if i < self.num_stages - 1:
+ bbox_label = [s[:, :-1].argmax(dim=1) for s in cls_score]
+ rois = torch.cat([
+ bbox_head.regress_by_class(rois[i], bbox_label[i],
+ bbox_pred[i], img_metas[i])
+ for i in range(num_imgs)
+ ])
+
+ # average scores of each image by stages
+ cls_score = [
+ sum([score[i] for score in ms_scores]) / float(len(ms_scores))
+ for i in range(num_imgs)
+ ]
+
+ # apply bbox post-processing to each image individually
+ det_bboxes = []
+ det_labels = []
+ for i in range(num_imgs):
+ det_bbox, det_label = self.bbox_head[-1].get_bboxes(
+ rois[i],
+ cls_score[i],
+ bbox_pred[i],
+ img_shapes[i],
+ scale_factors[i],
+ rescale=rescale,
+ cfg=rcnn_test_cfg)
+ det_bboxes.append(det_bbox)
+ det_labels.append(det_label)
+ bbox_result = [
+ bbox2result(det_bboxes[i], det_labels[i],
+ self.bbox_head[-1].num_classes)
+ for i in range(num_imgs)
+ ]
+ ms_bbox_result['ensemble'] = bbox_result
+
+ if self.with_mask:
+ if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes):
+ mask_classes = self.mask_head[-1].num_classes
+ segm_results = [[[] for _ in range(mask_classes)]
+ for _ in range(num_imgs)]
+ else:
+ if rescale and not isinstance(scale_factors[0], float):
+ scale_factors = [
+ torch.from_numpy(scale_factor).to(det_bboxes[0].device)
+ for scale_factor in scale_factors
+ ]
+ _bboxes = [
+ det_bboxes[i][:, :4] *
+ scale_factors[i] if rescale else det_bboxes[i]
+ for i in range(num_imgs)
+ ]
+ mask_rois = bbox2roi(_bboxes)
+ aug_masks = []
+ mask_roi_extractor = self.mask_roi_extractor[-1]
+ mask_feats = mask_roi_extractor(
+ x[:len(mask_roi_extractor.featmap_strides)], mask_rois)
+ if self.with_semantic and 'mask' in self.semantic_fusion:
+ mask_semantic_feat = self.semantic_roi_extractor(
+ [semantic_feat], mask_rois)
+ mask_feats += mask_semantic_feat
+ last_feat = None
+
+ num_bbox_per_img = tuple(len(_bbox) for _bbox in _bboxes)
+ for i in range(self.num_stages):
+ mask_head = self.mask_head[i]
+ if self.mask_info_flow:
+ mask_pred, last_feat = mask_head(mask_feats, last_feat)
+ else:
+ mask_pred = mask_head(mask_feats)
+
+ # split batch mask prediction back to each image
+ mask_pred = mask_pred.split(num_bbox_per_img, 0)
+ aug_masks.append(
+ [mask.sigmoid().cpu().numpy() for mask in mask_pred])
+
+ # apply mask post-processing to each image individually
+ segm_results = []
+ for i in range(num_imgs):
+ if det_bboxes[i].shape[0] == 0:
+ segm_results.append(
+ [[]
+ for _ in range(self.mask_head[-1].num_classes)])
+ else:
+ aug_mask = [mask[i] for mask in aug_masks]
+ merged_mask = merge_aug_masks(
+ aug_mask, [[img_metas[i]]] * self.num_stages,
+ rcnn_test_cfg)
+ segm_result = self.mask_head[-1].get_seg_masks(
+ merged_mask, _bboxes[i], det_labels[i],
+ rcnn_test_cfg, ori_shapes[i], scale_factors[i],
+ rescale)
+ segm_results.append(segm_result)
+ ms_segm_result['ensemble'] = segm_results
+
+ if self.with_mask:
+ results = list(
+ zip(ms_bbox_result['ensemble'], ms_segm_result['ensemble']))
+ else:
+ results = ms_bbox_result['ensemble']
+
+ return results
+
+ def aug_test(self, img_feats, proposal_list, img_metas, rescale=False):
+ """Test with augmentations.
+
+ If rescale is False, then returned bboxes and masks will fit the scale
+ of imgs[0].
+ """
+ if self.with_semantic:
+ semantic_feats = [
+ self.semantic_head(feat)[1] for feat in img_feats
+ ]
+ else:
+ semantic_feats = [None] * len(img_metas)
+
+ rcnn_test_cfg = self.test_cfg
+ aug_bboxes = []
+ aug_scores = []
+ for x, img_meta, semantic in zip(img_feats, img_metas, semantic_feats):
+ # only one image in the batch
+ img_shape = img_meta[0]['img_shape']
+ scale_factor = img_meta[0]['scale_factor']
+ flip = img_meta[0]['flip']
+ flip_direction = img_meta[0]['flip_direction']
+
+ proposals = bbox_mapping(proposal_list[0][:, :4], img_shape,
+ scale_factor, flip, flip_direction)
+ # "ms" in variable names means multi-stage
+ ms_scores = []
+
+ rois = bbox2roi([proposals])
+ for i in range(self.num_stages):
+ bbox_head = self.bbox_head[i]
+ bbox_results = self._bbox_forward(
+ i, x, rois, semantic_feat=semantic)
+ ms_scores.append(bbox_results['cls_score'])
+
+ if i < self.num_stages - 1:
+ bbox_label = bbox_results['cls_score'].argmax(dim=1)
+ rois = bbox_head.regress_by_class(
+ rois, bbox_label, bbox_results['bbox_pred'],
+ img_meta[0])
+
+ cls_score = sum(ms_scores) / float(len(ms_scores))
+ bboxes, scores = self.bbox_head[-1].get_bboxes(
+ rois,
+ cls_score,
+ bbox_results['bbox_pred'],
+ img_shape,
+ scale_factor,
+ rescale=False,
+ cfg=None)
+ aug_bboxes.append(bboxes)
+ aug_scores.append(scores)
+
+ # after merging, bboxes will be rescaled to the original image size
+ merged_bboxes, merged_scores = merge_aug_bboxes(
+ aug_bboxes, aug_scores, img_metas, rcnn_test_cfg)
+ det_bboxes, det_labels = multiclass_nms(merged_bboxes, merged_scores,
+ rcnn_test_cfg.score_thr,
+ rcnn_test_cfg.nms,
+ rcnn_test_cfg.max_per_img)
+
+ bbox_result = bbox2result(det_bboxes, det_labels,
+ self.bbox_head[-1].num_classes)
+
+ if self.with_mask:
+ if det_bboxes.shape[0] == 0:
+ segm_result = [[[]
+ for _ in range(self.mask_head[-1].num_classes)]
+ ]
+ else:
+ aug_masks = []
+ aug_img_metas = []
+ for x, img_meta, semantic in zip(img_feats, img_metas,
+ semantic_feats):
+ img_shape = img_meta[0]['img_shape']
+ scale_factor = img_meta[0]['scale_factor']
+ flip = img_meta[0]['flip']
+ flip_direction = img_meta[0]['flip_direction']
+ _bboxes = bbox_mapping(det_bboxes[:, :4], img_shape,
+ scale_factor, flip, flip_direction)
+ mask_rois = bbox2roi([_bboxes])
+ mask_feats = self.mask_roi_extractor[-1](
+ x[:len(self.mask_roi_extractor[-1].featmap_strides)],
+ mask_rois)
+ if self.with_semantic:
+ semantic_feat = semantic
+ mask_semantic_feat = self.semantic_roi_extractor(
+ [semantic_feat], mask_rois)
+ if mask_semantic_feat.shape[-2:] != mask_feats.shape[
+ -2:]:
+ mask_semantic_feat = F.adaptive_avg_pool2d(
+ mask_semantic_feat, mask_feats.shape[-2:])
+ mask_feats += mask_semantic_feat
+ last_feat = None
+ for i in range(self.num_stages):
+ mask_head = self.mask_head[i]
+ if self.mask_info_flow:
+ mask_pred, last_feat = mask_head(
+ mask_feats, last_feat)
+ else:
+ mask_pred = mask_head(mask_feats)
+ aug_masks.append(mask_pred.sigmoid().cpu().numpy())
+ aug_img_metas.append(img_meta)
+ merged_masks = merge_aug_masks(aug_masks, aug_img_metas,
+ self.test_cfg)
+
+ ori_shape = img_metas[0][0]['ori_shape']
+ segm_result = self.mask_head[-1].get_seg_masks(
+ merged_masks,
+ det_bboxes,
+ det_labels,
+ rcnn_test_cfg,
+ ori_shape,
+ scale_factor=1.0,
+ rescale=False)
+ return [(bbox_result, segm_result)]
+ else:
+ return [bbox_result]
diff --git a/annotator/uniformer/mmdet_null/models/roi_heads/mask_heads/__init__.py b/annotator/uniformer/mmdet_null/models/roi_heads/mask_heads/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..abfbe2624eecb73b029e9bcb7e2283bbf2a744ea
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/roi_heads/mask_heads/__init__.py
@@ -0,0 +1,17 @@
+from .coarse_mask_head import CoarseMaskHead
+from .fcn_mask_head import FCNMaskHead
+from .feature_relay_head import FeatureRelayHead
+from .fused_semantic_head import FusedSemanticHead
+from .global_context_head import GlobalContextHead
+from .grid_head import GridHead
+from .htc_mask_head import HTCMaskHead
+from .mask_point_head import MaskPointHead
+from .maskiou_head import MaskIoUHead
+from .scnet_mask_head import SCNetMaskHead
+from .scnet_semantic_head import SCNetSemanticHead
+
+__all__ = [
+ 'FCNMaskHead', 'HTCMaskHead', 'FusedSemanticHead', 'GridHead',
+ 'MaskIoUHead', 'CoarseMaskHead', 'MaskPointHead', 'SCNetMaskHead',
+ 'SCNetSemanticHead', 'GlobalContextHead', 'FeatureRelayHead'
+]
diff --git a/annotator/uniformer/mmdet_null/models/roi_heads/mask_heads/coarse_mask_head.py b/annotator/uniformer/mmdet_null/models/roi_heads/mask_heads/coarse_mask_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..d665dfff83855e6db3866c681559ccdef09f9999
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/roi_heads/mask_heads/coarse_mask_head.py
@@ -0,0 +1,91 @@
+import torch.nn as nn
+from mmcv.cnn import ConvModule, Linear, constant_init, xavier_init
+from mmcv.runner import auto_fp16
+
+from mmdet.models.builder import HEADS
+from .fcn_mask_head import FCNMaskHead
+
+
+@HEADS.register_module()
+class CoarseMaskHead(FCNMaskHead):
+ """Coarse mask head used in PointRend.
+
+ Compared with standard ``FCNMaskHead``, ``CoarseMaskHead`` will downsample
+ the input feature map instead of upsample it.
+
+ Args:
+ num_convs (int): Number of conv layers in the head. Default: 0.
+ num_fcs (int): Number of fc layers in the head. Default: 2.
+ fc_out_channels (int): Number of output channels of fc layer.
+ Default: 1024.
+ downsample_factor (int): The factor that feature map is downsampled by.
+ Default: 2.
+ """
+
+ def __init__(self,
+ num_convs=0,
+ num_fcs=2,
+ fc_out_channels=1024,
+ downsample_factor=2,
+ *arg,
+ **kwarg):
+ super(CoarseMaskHead, self).__init__(
+ *arg, num_convs=num_convs, upsample_cfg=dict(type=None), **kwarg)
+ self.num_fcs = num_fcs
+ assert self.num_fcs > 0
+ self.fc_out_channels = fc_out_channels
+ self.downsample_factor = downsample_factor
+ assert self.downsample_factor >= 1
+ # remove conv_logit
+ delattr(self, 'conv_logits')
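+        # The convolutional logits of ``FCNMaskHead`` are replaced by fc layers
+        # that predict a coarse mask on a grid of
+        # ``roi_feat_size // downsample_factor`` (see ``self.output_size``).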
+
+ if downsample_factor > 1:
+ downsample_in_channels = (
+ self.conv_out_channels
+ if self.num_convs > 0 else self.in_channels)
+ self.downsample_conv = ConvModule(
+ downsample_in_channels,
+ self.conv_out_channels,
+ kernel_size=downsample_factor,
+ stride=downsample_factor,
+ padding=0,
+ conv_cfg=self.conv_cfg,
+ norm_cfg=self.norm_cfg)
+ else:
+ self.downsample_conv = None
+
+ self.output_size = (self.roi_feat_size[0] // downsample_factor,
+ self.roi_feat_size[1] // downsample_factor)
+ self.output_area = self.output_size[0] * self.output_size[1]
+
+ last_layer_dim = self.conv_out_channels * self.output_area
+
+ self.fcs = nn.ModuleList()
+ for i in range(num_fcs):
+ fc_in_channels = (
+ last_layer_dim if i == 0 else self.fc_out_channels)
+ self.fcs.append(Linear(fc_in_channels, self.fc_out_channels))
+ last_layer_dim = self.fc_out_channels
+ output_channels = self.num_classes * self.output_area
+ self.fc_logits = Linear(last_layer_dim, output_channels)
+
+ def init_weights(self):
+ for m in self.fcs.modules():
+ if isinstance(m, nn.Linear):
+ xavier_init(m)
+ constant_init(self.fc_logits, 0.001)
+
+ @auto_fp16()
+ def forward(self, x):
+ for conv in self.convs:
+ x = conv(x)
+
+ if self.downsample_conv is not None:
+ x = self.downsample_conv(x)
+
+ x = x.flatten(1)
+ for fc in self.fcs:
+ x = self.relu(fc(x))
+ mask_pred = self.fc_logits(x).view(
+ x.size(0), self.num_classes, *self.output_size)
+ return mask_pred
diff --git a/annotator/uniformer/mmdet_null/models/roi_heads/mask_heads/fcn_mask_head.py b/annotator/uniformer/mmdet_null/models/roi_heads/mask_heads/fcn_mask_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..be6772fa6c471a7a65b77f2f18dfd217f4bd3289
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/roi_heads/mask_heads/fcn_mask_head.py
@@ -0,0 +1,377 @@
+import numpy as np
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from mmcv.cnn import Conv2d, ConvModule, build_upsample_layer
+from mmcv.ops.carafe import CARAFEPack
+from mmcv.runner import auto_fp16, force_fp32
+from torch.nn.modules.utils import _pair
+
+from mmdet.core import mask_target
+from mmdet.models.builder import HEADS, build_loss
+
+BYTES_PER_FLOAT = 4
+# TODO: This memory limit may be too much or too little. It would be better to
+# determine it based on available resources.
+GPU_MEM_LIMIT = 1024**3 # 1 GB memory limit
+
+
+@HEADS.register_module()
+class FCNMaskHead(nn.Module):
+
+ def __init__(self,
+ num_convs=4,
+ roi_feat_size=14,
+ in_channels=256,
+ conv_kernel_size=3,
+ conv_out_channels=256,
+ num_classes=80,
+ class_agnostic=False,
+ upsample_cfg=dict(type='deconv', scale_factor=2),
+ conv_cfg=None,
+ norm_cfg=None,
+ loss_mask=dict(
+ type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)):
+ super(FCNMaskHead, self).__init__()
+ self.upsample_cfg = upsample_cfg.copy()
+ if self.upsample_cfg['type'] not in [
+ None, 'deconv', 'nearest', 'bilinear', 'carafe'
+ ]:
+ raise ValueError(
+ f'Invalid upsample method {self.upsample_cfg["type"]}, '
+ 'accepted methods are "deconv", "nearest", "bilinear", '
+ '"carafe"')
+ self.num_convs = num_convs
+ # WARN: roi_feat_size is reserved and not used
+ self.roi_feat_size = _pair(roi_feat_size)
+ self.in_channels = in_channels
+ self.conv_kernel_size = conv_kernel_size
+ self.conv_out_channels = conv_out_channels
+ self.upsample_method = self.upsample_cfg.get('type')
+ self.scale_factor = self.upsample_cfg.pop('scale_factor', None)
+ self.num_classes = num_classes
+ self.class_agnostic = class_agnostic
+ self.conv_cfg = conv_cfg
+ self.norm_cfg = norm_cfg
+ self.fp16_enabled = False
+ self.loss_mask = build_loss(loss_mask)
+
+ self.convs = nn.ModuleList()
+ for i in range(self.num_convs):
+ in_channels = (
+ self.in_channels if i == 0 else self.conv_out_channels)
+ padding = (self.conv_kernel_size - 1) // 2
+ self.convs.append(
+ ConvModule(
+ in_channels,
+ self.conv_out_channels,
+ self.conv_kernel_size,
+ padding=padding,
+ conv_cfg=conv_cfg,
+ norm_cfg=norm_cfg))
+ upsample_in_channels = (
+ self.conv_out_channels if self.num_convs > 0 else in_channels)
+ upsample_cfg_ = self.upsample_cfg.copy()
+ if self.upsample_method is None:
+ self.upsample = None
+ elif self.upsample_method == 'deconv':
+ upsample_cfg_.update(
+ in_channels=upsample_in_channels,
+ out_channels=self.conv_out_channels,
+ kernel_size=self.scale_factor,
+ stride=self.scale_factor)
+ self.upsample = build_upsample_layer(upsample_cfg_)
+ elif self.upsample_method == 'carafe':
+ upsample_cfg_.update(
+ channels=upsample_in_channels, scale_factor=self.scale_factor)
+ self.upsample = build_upsample_layer(upsample_cfg_)
+ else:
+ # suppress warnings
+ align_corners = (None
+ if self.upsample_method == 'nearest' else False)
+ upsample_cfg_.update(
+ scale_factor=self.scale_factor,
+ mode=self.upsample_method,
+ align_corners=align_corners)
+ self.upsample = build_upsample_layer(upsample_cfg_)
+
+ out_channels = 1 if self.class_agnostic else self.num_classes
+ logits_in_channel = (
+ self.conv_out_channels
+ if self.upsample_method == 'deconv' else upsample_in_channels)
+ self.conv_logits = Conv2d(logits_in_channel, out_channels, 1)
+ self.relu = nn.ReLU(inplace=True)
+ self.debug_imgs = None
+
+ def init_weights(self):
+ for m in [self.upsample, self.conv_logits]:
+ if m is None:
+ continue
+ elif isinstance(m, CARAFEPack):
+ m.init_weights()
+ else:
+ nn.init.kaiming_normal_(
+ m.weight, mode='fan_out', nonlinearity='relu')
+ nn.init.constant_(m.bias, 0)
+
+ @auto_fp16()
+ def forward(self, x):
+ for conv in self.convs:
+ x = conv(x)
+ if self.upsample is not None:
+ x = self.upsample(x)
+ if self.upsample_method == 'deconv':
+ x = self.relu(x)
+ mask_pred = self.conv_logits(x)
+ return mask_pred
+
+ def get_targets(self, sampling_results, gt_masks, rcnn_train_cfg):
+ pos_proposals = [res.pos_bboxes for res in sampling_results]
+ pos_assigned_gt_inds = [
+ res.pos_assigned_gt_inds for res in sampling_results
+ ]
+ mask_targets = mask_target(pos_proposals, pos_assigned_gt_inds,
+ gt_masks, rcnn_train_cfg)
+ return mask_targets
+
+ @force_fp32(apply_to=('mask_pred', ))
+ def loss(self, mask_pred, mask_targets, labels):
+ """
+ Example:
+ >>> from mmdet.models.roi_heads.mask_heads.fcn_mask_head import * # NOQA
+ >>> N = 7 # N = number of extracted ROIs
+ >>> C, H, W = 11, 32, 32
+ >>> # Create example instance of FCN Mask Head.
+ >>> # There are lots of variations depending on the configuration
+ >>> self = FCNMaskHead(num_classes=C, num_convs=1)
+ >>> inputs = torch.rand(N, self.in_channels, H, W)
+ >>> mask_pred = self.forward(inputs)
+ >>> sf = self.scale_factor
+ >>> labels = torch.randint(0, C, size=(N,))
+ >>> # With the default properties the mask targets should indicate
+ >>> # a (potentially soft) single-class label
+ >>> mask_targets = torch.rand(N, H * sf, W * sf)
+ >>> loss = self.loss(mask_pred, mask_targets, labels)
+ >>> print('loss = {!r}'.format(loss))
+ """
+ loss = dict()
+ if mask_pred.size(0) == 0:
+ loss_mask = mask_pred.sum()
+ else:
+ if self.class_agnostic:
+ loss_mask = self.loss_mask(mask_pred, mask_targets,
+ torch.zeros_like(labels))
+ else:
+ loss_mask = self.loss_mask(mask_pred, mask_targets, labels)
+ loss['loss_mask'] = loss_mask
+ return loss
+
+ def get_seg_masks(self, mask_pred, det_bboxes, det_labels, rcnn_test_cfg,
+ ori_shape, scale_factor, rescale):
+ """Get segmentation masks from mask_pred and bboxes.
+
+ Args:
+ mask_pred (Tensor or ndarray): shape (n, #class, h, w).
+ For single-scale testing, mask_pred is the direct output of
+ model, whose type is Tensor, while for multi-scale testing,
+ it will be converted to numpy array outside of this method.
+ det_bboxes (Tensor): shape (n, 4/5)
+ det_labels (Tensor): shape (n, )
+ rcnn_test_cfg (dict): rcnn testing config
+ ori_shape (Tuple): original image height and width, shape (2,)
+            scale_factor (float | Tensor): If ``rescale is True``, box
+ coordinates are divided by this scale factor to fit
+ ``ori_shape``.
+ rescale (bool): If True, the resulting masks will be rescaled to
+ ``ori_shape``.
+
+ Returns:
+ list[list]: encoded masks. The c-th item in the outer list
+ corresponds to the c-th class. Given the c-th outer list, the
+ i-th item in that inner list is the mask for the i-th box with
+ class label c.
+
+ Example:
+ >>> import mmcv
+ >>> from mmdet.models.roi_heads.mask_heads.fcn_mask_head import * # NOQA
+ >>> N = 7 # N = number of extracted ROIs
+ >>> C, H, W = 11, 32, 32
+ >>> # Create example instance of FCN Mask Head.
+ >>> self = FCNMaskHead(num_classes=C, num_convs=0)
+ >>> inputs = torch.rand(N, self.in_channels, H, W)
+ >>> mask_pred = self.forward(inputs)
+ >>> # Each input is associated with some bounding box
+ >>> det_bboxes = torch.Tensor([[1, 1, 42, 42 ]] * N)
+ >>> det_labels = torch.randint(0, C, size=(N,))
+ >>> rcnn_test_cfg = mmcv.Config({'mask_thr_binary': 0, })
+ >>> ori_shape = (H * 4, W * 4)
+ >>> scale_factor = torch.FloatTensor((1, 1))
+ >>> rescale = False
+ >>> # Encoded masks are a list for each category.
+ >>> encoded_masks = self.get_seg_masks(
+ >>> mask_pred, det_bboxes, det_labels, rcnn_test_cfg, ori_shape,
+ >>> scale_factor, rescale
+ >>> )
+ >>> assert len(encoded_masks) == C
+ >>> assert sum(list(map(len, encoded_masks))) == N
+ """
+ if isinstance(mask_pred, torch.Tensor):
+ mask_pred = mask_pred.sigmoid()
+ else:
+ mask_pred = det_bboxes.new_tensor(mask_pred)
+
+ device = mask_pred.device
+ cls_segms = [[] for _ in range(self.num_classes)
+ ] # BG is not included in num_classes
+ bboxes = det_bboxes[:, :4]
+ labels = det_labels
+
+ if rescale:
+ img_h, img_w = ori_shape[:2]
+ else:
+ if isinstance(scale_factor, float):
+ img_h = np.round(ori_shape[0] * scale_factor).astype(np.int32)
+ img_w = np.round(ori_shape[1] * scale_factor).astype(np.int32)
+ else:
+ w_scale, h_scale = scale_factor[0], scale_factor[1]
+ img_h = np.round(ori_shape[0] * h_scale.item()).astype(
+ np.int32)
+ img_w = np.round(ori_shape[1] * w_scale.item()).astype(
+ np.int32)
+ scale_factor = 1.0
+
+ if not isinstance(scale_factor, (float, torch.Tensor)):
+ scale_factor = bboxes.new_tensor(scale_factor)
+ bboxes = bboxes / scale_factor
+
+ if torch.onnx.is_in_onnx_export():
+ # TODO: Remove after F.grid_sample is supported.
+ from torchvision.models.detection.roi_heads \
+ import paste_masks_in_image
+ masks = paste_masks_in_image(mask_pred, bboxes, ori_shape[:2])
+ thr = rcnn_test_cfg.get('mask_thr_binary', 0)
+ if thr > 0:
+ masks = masks >= thr
+ return masks
+
+ N = len(mask_pred)
+        # The actual implementation splits the input into chunks
+        # and pastes them chunk by chunk.
+ if device.type == 'cpu':
+            # CPU is most efficient when masks are pasted one by one with
+            # skip_empty=True, so that the minimal number of operations
+            # is performed.
+ num_chunks = N
+ else:
+ # GPU benefits from parallelism for larger chunks,
+            # but may run into memory issues.
+ num_chunks = int(
+ np.ceil(N * img_h * img_w * BYTES_PER_FLOAT / GPU_MEM_LIMIT))
+ assert (num_chunks <=
+ N), 'Default GPU_MEM_LIMIT is too small; try increasing it'
+ chunks = torch.chunk(torch.arange(N, device=device), num_chunks)
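+        # e.g. if BYTES_PER_FLOAT is 4 and GPU_MEM_LIMIT is 1 GB, pasting 100
+        # masks onto a 1333x800 image needs
+        # ceil(100 * 1333 * 800 * 4 / 2**30) = 1 chunk.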
+
+ threshold = rcnn_test_cfg.mask_thr_binary
+ im_mask = torch.zeros(
+ N,
+ img_h,
+ img_w,
+ device=device,
+ dtype=torch.bool if threshold >= 0 else torch.uint8)
+
+ if not self.class_agnostic:
+ mask_pred = mask_pred[range(N), labels][:, None]
+
+ for inds in chunks:
+ masks_chunk, spatial_inds = _do_paste_mask(
+ mask_pred[inds],
+ bboxes[inds],
+ img_h,
+ img_w,
+ skip_empty=device.type == 'cpu')
+
+ if threshold >= 0:
+ masks_chunk = (masks_chunk >= threshold).to(dtype=torch.bool)
+ else:
+ # for visualization and debugging
+ masks_chunk = (masks_chunk * 255).to(dtype=torch.uint8)
+
+ im_mask[(inds, ) + spatial_inds] = masks_chunk
+
+ for i in range(N):
+ cls_segms[labels[i]].append(im_mask[i].detach().cpu().numpy())
+ return cls_segms
+
+
+def _do_paste_mask(masks, boxes, img_h, img_w, skip_empty=True):
+ """Paste instance masks according to boxes.
+
+ This implementation is modified from
+ https://github.com/facebookresearch/detectron2/
+
+ Args:
+ masks (Tensor): N, 1, H, W
+ boxes (Tensor): N, 4
+ img_h (int): Height of the image to be pasted.
+ img_w (int): Width of the image to be pasted.
+        skip_empty (bool): Only paste masks within the region that
+            tightly bounds all boxes, and return the results for this
+            region only. This is an important optimization for CPU.
+
+ Returns:
+ tuple: (Tensor, tuple). The first item is mask tensor, the second one
+ is the slice object.
+ If skip_empty == False, the whole image will be pasted. It will
+ return a mask of shape (N, img_h, img_w) and an empty tuple.
+ If skip_empty == True, only area around the mask will be pasted.
+ A mask of shape (N, h', w') and its start and end coordinates
+ in the original image will be returned.
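+
+    Example:
+        >>> # Illustrative sketch: paste two 28x28 masks onto a 32x32 canvas
+        >>> # without cropping (skip_empty=False).
+        >>> masks = torch.rand(2, 1, 28, 28)
+        >>> boxes = torch.tensor([[0., 0., 10., 10.], [4., 4., 20., 20.]])
+        >>> pasted, inds = _do_paste_mask(
+        ...     masks, boxes, 32, 32, skip_empty=False)
+        >>> assert pasted.shape == (2, 32, 32) and inds == ()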
+ """
+ # On GPU, paste all masks together (up to chunk size)
+ # by using the entire image to sample the masks
+ # Compared to pasting them one by one,
+ # this has more operations but is faster on COCO-scale dataset.
+ device = masks.device
+ if skip_empty:
+ x0_int, y0_int = torch.clamp(
+ boxes.min(dim=0).values.floor()[:2] - 1,
+ min=0).to(dtype=torch.int32)
+ x1_int = torch.clamp(
+ boxes[:, 2].max().ceil() + 1, max=img_w).to(dtype=torch.int32)
+ y1_int = torch.clamp(
+ boxes[:, 3].max().ceil() + 1, max=img_h).to(dtype=torch.int32)
+ else:
+ x0_int, y0_int = 0, 0
+ x1_int, y1_int = img_w, img_h
+ x0, y0, x1, y1 = torch.split(boxes, 1, dim=1) # each is Nx1
+
+ N = masks.shape[0]
+
+ img_y = torch.arange(
+ y0_int, y1_int, device=device, dtype=torch.float32) + 0.5
+ img_x = torch.arange(
+ x0_int, x1_int, device=device, dtype=torch.float32) + 0.5
+ img_y = (img_y - y0) / (y1 - y0) * 2 - 1
+ img_x = (img_x - x0) / (x1 - x0) * 2 - 1
+ # img_x, img_y have shapes (N, w), (N, h)
+ if torch.isinf(img_x).any():
+ inds = torch.where(torch.isinf(img_x))
+ img_x[inds] = 0
+ if torch.isinf(img_y).any():
+ inds = torch.where(torch.isinf(img_y))
+ img_y[inds] = 0
+
+ gx = img_x[:, None, :].expand(N, img_y.size(1), img_x.size(1))
+ gy = img_y[:, :, None].expand(N, img_y.size(1), img_x.size(1))
+ grid = torch.stack([gx, gy], dim=3)
+
+ if torch.onnx.is_in_onnx_export():
+ raise RuntimeError(
+ 'Exporting F.grid_sample from Pytorch to ONNX is not supported.')
+ img_masks = F.grid_sample(
+ masks.to(dtype=torch.float32), grid, align_corners=False)
+
+ if skip_empty:
+ return img_masks[:, 0], (slice(y0_int, y1_int), slice(x0_int, x1_int))
+ else:
+ return img_masks[:, 0], ()
diff --git a/annotator/uniformer/mmdet_null/models/roi_heads/mask_heads/feature_relay_head.py b/annotator/uniformer/mmdet_null/models/roi_heads/mask_heads/feature_relay_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..a1cfb2ce8631d51e5c465f9bbc4164a37acc4782
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/roi_heads/mask_heads/feature_relay_head.py
@@ -0,0 +1,55 @@
+import torch.nn as nn
+from mmcv.cnn import kaiming_init
+from mmcv.runner import auto_fp16
+
+from mmdet.models.builder import HEADS
+
+
+@HEADS.register_module()
+class FeatureRelayHead(nn.Module):
+    """Feature Relay Head used in `SCNet <https://arxiv.org/abs/2012.10150>`_.
+
+    Args:
+        in_channels (int, optional): number of input channels. Default: 1024.
+        out_conv_channels (int, optional): number of output channels before
+            classification layer. Default: 256.
+        roi_feat_size (int, optional): roi feat size at box head. Default: 7.
+        scale_factor (int, optional): scale factor to match roi feat size
+            at mask head. Default: 2.
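+
+    Example:
+        >>> # Illustrative sketch with the default configuration: a flattened
+        >>> # box-head feature is relayed back to a 14x14 spatial map.
+        >>> import torch
+        >>> self = FeatureRelayHead()
+        >>> x = torch.rand(4, 1024)
+        >>> out = self.forward(x)
+        >>> assert out.shape == (4, 256, 14, 14)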
+ """
+
+ def __init__(self,
+ in_channels=1024,
+ out_conv_channels=256,
+ roi_feat_size=7,
+ scale_factor=2):
+ super(FeatureRelayHead, self).__init__()
+ assert isinstance(roi_feat_size, int)
+
+ self.in_channels = in_channels
+ self.out_conv_channels = out_conv_channels
+ self.roi_feat_size = roi_feat_size
+ self.out_channels = (roi_feat_size**2) * out_conv_channels
+ self.scale_factor = scale_factor
+ self.fp16_enabled = False
+
+ self.fc = nn.Linear(self.in_channels, self.out_channels)
+ self.upsample = nn.Upsample(
+ scale_factor=scale_factor, mode='bilinear', align_corners=True)
+
+ def init_weights(self):
+ """Init weights for the head."""
+ kaiming_init(self.fc)
+
+ @auto_fp16()
+ def forward(self, x):
+ """Forward function."""
+ N, in_C = x.shape
+ if N > 0:
+ out_C = self.out_conv_channels
+ out_HW = self.roi_feat_size
+ x = self.fc(x)
+ x = x.reshape(N, out_C, out_HW, out_HW)
+ x = self.upsample(x)
+ return x
+ return None
diff --git a/annotator/uniformer/mmdet_null/models/roi_heads/mask_heads/fused_semantic_head.py b/annotator/uniformer/mmdet_null/models/roi_heads/mask_heads/fused_semantic_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..2aa6033eec17a30aeb68c0fdd218d8f0d41157e8
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/roi_heads/mask_heads/fused_semantic_head.py
@@ -0,0 +1,107 @@
+import torch.nn as nn
+import torch.nn.functional as F
+from mmcv.cnn import ConvModule, kaiming_init
+from mmcv.runner import auto_fp16, force_fp32
+
+from mmdet.models.builder import HEADS
+
+
+@HEADS.register_module()
+class FusedSemanticHead(nn.Module):
+ r"""Multi-level fused semantic segmentation head.
+
+ .. code-block:: none
+
+ in_1 -> 1x1 conv ---
+ |
+ in_2 -> 1x1 conv -- |
+ ||
+ in_3 -> 1x1 conv - ||
+ ||| /-> 1x1 conv (mask prediction)
+ in_4 -> 1x1 conv -----> 3x3 convs (*4)
+ | \-> 1x1 conv (feature)
+ in_5 -> 1x1 conv ---
+ """ # noqa: W605
+
+ def __init__(self,
+ num_ins,
+ fusion_level,
+ num_convs=4,
+ in_channels=256,
+ conv_out_channels=256,
+ num_classes=183,
+ ignore_label=255,
+ loss_weight=0.2,
+ conv_cfg=None,
+ norm_cfg=None):
+ super(FusedSemanticHead, self).__init__()
+ self.num_ins = num_ins
+ self.fusion_level = fusion_level
+ self.num_convs = num_convs
+ self.in_channels = in_channels
+ self.conv_out_channels = conv_out_channels
+ self.num_classes = num_classes
+ self.ignore_label = ignore_label
+ self.loss_weight = loss_weight
+ self.conv_cfg = conv_cfg
+ self.norm_cfg = norm_cfg
+ self.fp16_enabled = False
+
+ self.lateral_convs = nn.ModuleList()
+ for i in range(self.num_ins):
+ self.lateral_convs.append(
+ ConvModule(
+ self.in_channels,
+ self.in_channels,
+ 1,
+ conv_cfg=self.conv_cfg,
+ norm_cfg=self.norm_cfg,
+ inplace=False))
+
+ self.convs = nn.ModuleList()
+ for i in range(self.num_convs):
+ in_channels = self.in_channels if i == 0 else conv_out_channels
+ self.convs.append(
+ ConvModule(
+ in_channels,
+ conv_out_channels,
+ 3,
+ padding=1,
+ conv_cfg=self.conv_cfg,
+ norm_cfg=self.norm_cfg))
+ self.conv_embedding = ConvModule(
+ conv_out_channels,
+ conv_out_channels,
+ 1,
+ conv_cfg=self.conv_cfg,
+ norm_cfg=self.norm_cfg)
+ self.conv_logits = nn.Conv2d(conv_out_channels, self.num_classes, 1)
+
+ self.criterion = nn.CrossEntropyLoss(ignore_index=ignore_label)
+
+ def init_weights(self):
+ kaiming_init(self.conv_logits)
+
+ @auto_fp16()
+ def forward(self, feats):
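+        # Fusion sketch: the level at ``fusion_level`` sets the output
+        # resolution; every other input level is resized to it, passed
+        # through its own 1x1 lateral conv and summed in; the shared 3x3
+        # convs then feed both the 1x1 logits conv and the 1x1 embedding conv.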
+ x = self.lateral_convs[self.fusion_level](feats[self.fusion_level])
+ fused_size = tuple(x.shape[-2:])
+ for i, feat in enumerate(feats):
+ if i != self.fusion_level:
+ feat = F.interpolate(
+ feat, size=fused_size, mode='bilinear', align_corners=True)
+ x += self.lateral_convs[i](feat)
+
+ for i in range(self.num_convs):
+ x = self.convs[i](x)
+
+ mask_pred = self.conv_logits(x)
+ x = self.conv_embedding(x)
+ return mask_pred, x
+
+ @force_fp32(apply_to=('mask_pred', ))
+ def loss(self, mask_pred, labels):
+ labels = labels.squeeze(1).long()
+ loss_semantic_seg = self.criterion(mask_pred, labels)
+ loss_semantic_seg *= self.loss_weight
+ return loss_semantic_seg
diff --git a/annotator/uniformer/mmdet_null/models/roi_heads/mask_heads/global_context_head.py b/annotator/uniformer/mmdet_null/models/roi_heads/mask_heads/global_context_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..d8e8cbca95d69e86ec7a2a1e7ed7f158be1b5753
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/roi_heads/mask_heads/global_context_head.py
@@ -0,0 +1,102 @@
+import torch.nn as nn
+from mmcv.cnn import ConvModule
+from mmcv.runner import auto_fp16, force_fp32
+
+from mmdet.models.builder import HEADS
+from mmdet.models.utils import ResLayer, SimplifiedBasicBlock
+
+
+@HEADS.register_module()
+class GlobalContextHead(nn.Module):
+    """Global context head used in `SCNet <https://arxiv.org/abs/2012.10150>`_.
+
+ Args:
+        num_convs (int, optional): number of convolutional layers in GlbCtxHead.
+ Default: 4.
+ in_channels (int, optional): number of input channels. Default: 256.
+ conv_out_channels (int, optional): number of output channels before
+ classification layer. Default: 256.
+ num_classes (int, optional): number of classes. Default: 80.
+ loss_weight (float, optional): global context loss weight. Default: 1.
+ conv_cfg (dict, optional): config to init conv layer. Default: None.
+ norm_cfg (dict, optional): config to init norm layer. Default: None.
+ conv_to_res (bool, optional): if True, 2 convs will be grouped into
+ 1 `SimplifiedBasicBlock` using a skip connection. Default: False.
+ """
+
+ def __init__(self,
+ num_convs=4,
+ in_channels=256,
+ conv_out_channels=256,
+ num_classes=80,
+ loss_weight=1.0,
+ conv_cfg=None,
+ norm_cfg=None,
+ conv_to_res=False):
+ super(GlobalContextHead, self).__init__()
+ self.num_convs = num_convs
+ self.in_channels = in_channels
+ self.conv_out_channels = conv_out_channels
+ self.num_classes = num_classes
+ self.loss_weight = loss_weight
+ self.conv_cfg = conv_cfg
+ self.norm_cfg = norm_cfg
+ self.conv_to_res = conv_to_res
+ self.fp16_enabled = False
+
+ if self.conv_to_res:
+ num_res_blocks = num_convs // 2
+ self.convs = ResLayer(
+ SimplifiedBasicBlock,
+ in_channels,
+ self.conv_out_channels,
+ num_res_blocks,
+ conv_cfg=self.conv_cfg,
+ norm_cfg=self.norm_cfg)
+ self.num_convs = num_res_blocks
+ else:
+ self.convs = nn.ModuleList()
+ for i in range(self.num_convs):
+ in_channels = self.in_channels if i == 0 else conv_out_channels
+ self.convs.append(
+ ConvModule(
+ in_channels,
+ conv_out_channels,
+ 3,
+ padding=1,
+ conv_cfg=self.conv_cfg,
+ norm_cfg=self.norm_cfg))
+
+ self.pool = nn.AdaptiveAvgPool2d(1)
+ self.fc = nn.Linear(conv_out_channels, num_classes)
+
+ self.criterion = nn.BCEWithLogitsLoss()
+
+ def init_weights(self):
+ """Init weights for the head."""
+ nn.init.normal_(self.fc.weight, 0, 0.01)
+ nn.init.constant_(self.fc.bias, 0)
+
+ @auto_fp16()
+ def forward(self, feats):
+ """Forward function."""
+ x = feats[-1]
+ for i in range(self.num_convs):
+ x = self.convs[i](x)
+ x = self.pool(x)
+
+ # multi-class prediction
+ mc_pred = x.reshape(x.size(0), -1)
+ mc_pred = self.fc(mc_pred)
+
+ return mc_pred, x
+
+ @force_fp32(apply_to=('pred', ))
+ def loss(self, pred, labels):
+ """Loss function."""
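+        # Build multi-hot image-level targets from the per-image gt labels,
+        # e.g. gt labels [2, 5] for image i set targets[i, 2] and
+        # targets[i, 5] to 1.0 and leave the rest at 0.0.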
+ labels = [lbl.unique() for lbl in labels]
+ targets = pred.new_zeros(pred.size())
+ for i, label in enumerate(labels):
+ targets[i, label] = 1.0
+ loss = self.loss_weight * self.criterion(pred, targets)
+ return loss
diff --git a/annotator/uniformer/mmdet_null/models/roi_heads/mask_heads/grid_head.py b/annotator/uniformer/mmdet_null/models/roi_heads/mask_heads/grid_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..83058cbdda934ebfc3a76088e1820848ac01b78b
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/roi_heads/mask_heads/grid_head.py
@@ -0,0 +1,359 @@
+import numpy as np
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from mmcv.cnn import ConvModule, kaiming_init, normal_init
+
+from mmdet.models.builder import HEADS, build_loss
+
+
+@HEADS.register_module()
+class GridHead(nn.Module):
+
+ def __init__(self,
+ grid_points=9,
+ num_convs=8,
+ roi_feat_size=14,
+ in_channels=256,
+ conv_kernel_size=3,
+ point_feat_channels=64,
+ deconv_kernel_size=4,
+ class_agnostic=False,
+ loss_grid=dict(
+ type='CrossEntropyLoss', use_sigmoid=True,
+ loss_weight=15),
+ conv_cfg=None,
+ norm_cfg=dict(type='GN', num_groups=36)):
+ super(GridHead, self).__init__()
+ self.grid_points = grid_points
+ self.num_convs = num_convs
+ self.roi_feat_size = roi_feat_size
+ self.in_channels = in_channels
+ self.conv_kernel_size = conv_kernel_size
+ self.point_feat_channels = point_feat_channels
+ self.conv_out_channels = self.point_feat_channels * self.grid_points
+ self.class_agnostic = class_agnostic
+ self.conv_cfg = conv_cfg
+ self.norm_cfg = norm_cfg
+ if isinstance(norm_cfg, dict) and norm_cfg['type'] == 'GN':
+ assert self.conv_out_channels % norm_cfg['num_groups'] == 0
+
+ assert self.grid_points >= 4
+ self.grid_size = int(np.sqrt(self.grid_points))
+ if self.grid_size * self.grid_size != self.grid_points:
+ raise ValueError('grid_points must be a square number')
+
+ # the predicted heatmap is half of whole_map_size
+ if not isinstance(self.roi_feat_size, int):
+            raise ValueError('Only square RoIs are supported in Grid R-CNN')
+ self.whole_map_size = self.roi_feat_size * 4
+
+ # compute point-wise sub-regions
+ self.sub_regions = self.calc_sub_regions()
+
+ self.convs = []
+ for i in range(self.num_convs):
+ in_channels = (
+ self.in_channels if i == 0 else self.conv_out_channels)
+ stride = 2 if i == 0 else 1
+ padding = (self.conv_kernel_size - 1) // 2
+ self.convs.append(
+ ConvModule(
+ in_channels,
+ self.conv_out_channels,
+ self.conv_kernel_size,
+ stride=stride,
+ padding=padding,
+ conv_cfg=self.conv_cfg,
+ norm_cfg=self.norm_cfg,
+ bias=True))
+ self.convs = nn.Sequential(*self.convs)
+
+ self.deconv1 = nn.ConvTranspose2d(
+ self.conv_out_channels,
+ self.conv_out_channels,
+ kernel_size=deconv_kernel_size,
+ stride=2,
+ padding=(deconv_kernel_size - 2) // 2,
+ groups=grid_points)
+ self.norm1 = nn.GroupNorm(grid_points, self.conv_out_channels)
+ self.deconv2 = nn.ConvTranspose2d(
+ self.conv_out_channels,
+ grid_points,
+ kernel_size=deconv_kernel_size,
+ stride=2,
+ padding=(deconv_kernel_size - 2) // 2,
+ groups=grid_points)
+
+ # find the 4-neighbor of each grid point
+ self.neighbor_points = []
+ grid_size = self.grid_size
+ for i in range(grid_size): # i-th column
+ for j in range(grid_size): # j-th row
+ neighbors = []
+ if i > 0: # left: (i - 1, j)
+ neighbors.append((i - 1) * grid_size + j)
+ if j > 0: # up: (i, j - 1)
+ neighbors.append(i * grid_size + j - 1)
+ if j < grid_size - 1: # down: (i, j + 1)
+ neighbors.append(i * grid_size + j + 1)
+ if i < grid_size - 1: # right: (i + 1, j)
+ neighbors.append((i + 1) * grid_size + j)
+ self.neighbor_points.append(tuple(neighbors))
+ # total edges in the grid
+ self.num_edges = sum([len(p) for p in self.neighbor_points])
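+        # e.g. for the default 3x3 grid (grid_points=9), the center point
+        # (index 4) has neighbors (1, 3, 5, 7) and num_edges == 24.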
+
+ self.forder_trans = nn.ModuleList() # first-order feature transition
+ self.sorder_trans = nn.ModuleList() # second-order feature transition
+ for neighbors in self.neighbor_points:
+ fo_trans = nn.ModuleList()
+ so_trans = nn.ModuleList()
+ for _ in range(len(neighbors)):
+ # each transition module consists of a 5x5 depth-wise conv and
+ # 1x1 conv.
+ fo_trans.append(
+ nn.Sequential(
+ nn.Conv2d(
+ self.point_feat_channels,
+ self.point_feat_channels,
+ 5,
+ stride=1,
+ padding=2,
+ groups=self.point_feat_channels),
+ nn.Conv2d(self.point_feat_channels,
+ self.point_feat_channels, 1)))
+ so_trans.append(
+ nn.Sequential(
+ nn.Conv2d(
+ self.point_feat_channels,
+ self.point_feat_channels,
+ 5,
+ 1,
+ 2,
+ groups=self.point_feat_channels),
+ nn.Conv2d(self.point_feat_channels,
+ self.point_feat_channels, 1)))
+ self.forder_trans.append(fo_trans)
+ self.sorder_trans.append(so_trans)
+
+ self.loss_grid = build_loss(loss_grid)
+
+ def init_weights(self):
+ for m in self.modules():
+ if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
+ # TODO: compare mode = "fan_in" or "fan_out"
+ kaiming_init(m)
+ for m in self.modules():
+ if isinstance(m, nn.ConvTranspose2d):
+ normal_init(m, std=0.001)
+ nn.init.constant_(self.deconv2.bias, -np.log(0.99 / 0.01))
+
+ def forward(self, x):
+ assert x.shape[-1] == x.shape[-2] == self.roi_feat_size
+ # RoI feature transformation, downsample 2x
+ x = self.convs(x)
+
+ c = self.point_feat_channels
+ # first-order fusion
+ x_fo = [None for _ in range(self.grid_points)]
+ for i, points in enumerate(self.neighbor_points):
+ x_fo[i] = x[:, i * c:(i + 1) * c]
+ for j, point_idx in enumerate(points):
+ x_fo[i] = x_fo[i] + self.forder_trans[i][j](
+ x[:, point_idx * c:(point_idx + 1) * c])
+
+ # second-order fusion
+ x_so = [None for _ in range(self.grid_points)]
+ for i, points in enumerate(self.neighbor_points):
+ x_so[i] = x[:, i * c:(i + 1) * c]
+ for j, point_idx in enumerate(points):
+ x_so[i] = x_so[i] + self.sorder_trans[i][j](x_fo[point_idx])
+
+ # predicted heatmap with fused features
+ x2 = torch.cat(x_so, dim=1)
+ x2 = self.deconv1(x2)
+ x2 = F.relu(self.norm1(x2), inplace=True)
+ heatmap = self.deconv2(x2)
+
+ # predicted heatmap with original features (applicable during training)
+ if self.training:
+ x1 = x
+ x1 = self.deconv1(x1)
+ x1 = F.relu(self.norm1(x1), inplace=True)
+ heatmap_unfused = self.deconv2(x1)
+ else:
+ heatmap_unfused = heatmap
+
+ return dict(fused=heatmap, unfused=heatmap_unfused)
+
+ def calc_sub_regions(self):
+ """Compute point specific representation regions.
+
+ See Grid R-CNN Plus (https://arxiv.org/abs/1906.05688) for details.
+ """
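+        # With the default roi_feat_size=14, whole_map_size is 56 and each
+        # point-specific sub-region is a 28x28 window of the full heatmap.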
+ # to make it consistent with the original implementation, half_size
+ # is computed as 2 * quarter_size, which is smaller
+ half_size = self.whole_map_size // 4 * 2
+ sub_regions = []
+ for i in range(self.grid_points):
+ x_idx = i // self.grid_size
+ y_idx = i % self.grid_size
+ if x_idx == 0:
+ sub_x1 = 0
+ elif x_idx == self.grid_size - 1:
+ sub_x1 = half_size
+ else:
+ ratio = x_idx / (self.grid_size - 1) - 0.25
+ sub_x1 = max(int(ratio * self.whole_map_size), 0)
+
+ if y_idx == 0:
+ sub_y1 = 0
+ elif y_idx == self.grid_size - 1:
+ sub_y1 = half_size
+ else:
+ ratio = y_idx / (self.grid_size - 1) - 0.25
+ sub_y1 = max(int(ratio * self.whole_map_size), 0)
+ sub_regions.append(
+ (sub_x1, sub_y1, sub_x1 + half_size, sub_y1 + half_size))
+ return sub_regions
+
+ def get_targets(self, sampling_results, rcnn_train_cfg):
+ # mix all samples (across images) together.
+ pos_bboxes = torch.cat([res.pos_bboxes for res in sampling_results],
+ dim=0).cpu()
+ pos_gt_bboxes = torch.cat(
+ [res.pos_gt_bboxes for res in sampling_results], dim=0).cpu()
+ assert pos_bboxes.shape == pos_gt_bboxes.shape
+
+ # expand pos_bboxes to 2x of original size
+ x1 = pos_bboxes[:, 0] - (pos_bboxes[:, 2] - pos_bboxes[:, 0]) / 2
+ y1 = pos_bboxes[:, 1] - (pos_bboxes[:, 3] - pos_bboxes[:, 1]) / 2
+ x2 = pos_bboxes[:, 2] + (pos_bboxes[:, 2] - pos_bboxes[:, 0]) / 2
+ y2 = pos_bboxes[:, 3] + (pos_bboxes[:, 3] - pos_bboxes[:, 1]) / 2
+ pos_bboxes = torch.stack([x1, y1, x2, y2], dim=-1)
+ pos_bbox_ws = (pos_bboxes[:, 2] - pos_bboxes[:, 0]).unsqueeze(-1)
+ pos_bbox_hs = (pos_bboxes[:, 3] - pos_bboxes[:, 1]).unsqueeze(-1)
+
+ num_rois = pos_bboxes.shape[0]
+ map_size = self.whole_map_size
+ # this is not the final target shape
+ targets = torch.zeros((num_rois, self.grid_points, map_size, map_size),
+ dtype=torch.float)
+
+ # pre-compute interpolation factors for all grid points.
+ # the first item is the factor of x-dim, and the second is y-dim.
+ # for a 9-point grid, factors are like (1, 0), (0.5, 0.5), (0, 1)
+ factors = []
+ for j in range(self.grid_points):
+ x_idx = j // self.grid_size
+ y_idx = j % self.grid_size
+ factors.append((1 - x_idx / (self.grid_size - 1),
+ 1 - y_idx / (self.grid_size - 1)))
+
+ radius = rcnn_train_cfg.pos_radius
+ radius2 = radius**2
+ for i in range(num_rois):
+ # ignore small bboxes
+ if (pos_bbox_ws[i] <= self.grid_size
+ or pos_bbox_hs[i] <= self.grid_size):
+ continue
+ # for each grid point, mark a small circle as positive
+ for j in range(self.grid_points):
+ factor_x, factor_y = factors[j]
+ gridpoint_x = factor_x * pos_gt_bboxes[i, 0] + (
+ 1 - factor_x) * pos_gt_bboxes[i, 2]
+ gridpoint_y = factor_y * pos_gt_bboxes[i, 1] + (
+ 1 - factor_y) * pos_gt_bboxes[i, 3]
+
+ cx = int((gridpoint_x - pos_bboxes[i, 0]) / pos_bbox_ws[i] *
+ map_size)
+ cy = int((gridpoint_y - pos_bboxes[i, 1]) / pos_bbox_hs[i] *
+ map_size)
+
+ for x in range(cx - radius, cx + radius + 1):
+ for y in range(cy - radius, cy + radius + 1):
+ if x >= 0 and x < map_size and y >= 0 and y < map_size:
+ if (x - cx)**2 + (y - cy)**2 <= radius2:
+ targets[i, j, y, x] = 1
+ # reduce the target heatmap size by a half
+ # proposed in Grid R-CNN Plus (https://arxiv.org/abs/1906.05688).
+ sub_targets = []
+ for i in range(self.grid_points):
+ sub_x1, sub_y1, sub_x2, sub_y2 = self.sub_regions[i]
+ sub_targets.append(targets[:, [i], sub_y1:sub_y2, sub_x1:sub_x2])
+ sub_targets = torch.cat(sub_targets, dim=1)
+ sub_targets = sub_targets.to(sampling_results[0].pos_bboxes.device)
+ return sub_targets
+
+ def loss(self, grid_pred, grid_targets):
+ loss_fused = self.loss_grid(grid_pred['fused'], grid_targets)
+ loss_unfused = self.loss_grid(grid_pred['unfused'], grid_targets)
+ loss_grid = loss_fused + loss_unfused
+ return dict(loss_grid=loss_grid)
+
+ def get_bboxes(self, det_bboxes, grid_pred, img_metas):
+ # TODO: refactoring
+ assert det_bboxes.shape[0] == grid_pred.shape[0]
+ det_bboxes = det_bboxes.cpu()
+ cls_scores = det_bboxes[:, [4]]
+ det_bboxes = det_bboxes[:, :4]
+ grid_pred = grid_pred.sigmoid().cpu()
+
+ R, c, h, w = grid_pred.shape
+ half_size = self.whole_map_size // 4 * 2
+ assert h == w == half_size
+ assert c == self.grid_points
+
+ # find the point with max scores in the half-sized heatmap
+ grid_pred = grid_pred.view(R * c, h * w)
+ pred_scores, pred_position = grid_pred.max(dim=1)
+ xs = pred_position % w
+ ys = pred_position // w
+
+ # get the position in the whole heatmap instead of half-sized heatmap
+ for i in range(self.grid_points):
+ xs[i::self.grid_points] += self.sub_regions[i][0]
+ ys[i::self.grid_points] += self.sub_regions[i][1]
+
+ # reshape to (num_rois, grid_points)
+ pred_scores, xs, ys = tuple(
+ map(lambda x: x.view(R, c), [pred_scores, xs, ys]))
+
+ # get expanded pos_bboxes
+ widths = (det_bboxes[:, 2] - det_bboxes[:, 0]).unsqueeze(-1)
+ heights = (det_bboxes[:, 3] - det_bboxes[:, 1]).unsqueeze(-1)
+ x1 = (det_bboxes[:, 0, None] - widths / 2)
+ y1 = (det_bboxes[:, 1, None] - heights / 2)
+ # map the grid point to the absolute coordinates
+ abs_xs = (xs.float() + 0.5) / w * widths + x1
+ abs_ys = (ys.float() + 0.5) / h * heights + y1
+
+ # get the grid points indices that fall on the bbox boundaries
+ x1_inds = [i for i in range(self.grid_size)]
+ y1_inds = [i * self.grid_size for i in range(self.grid_size)]
+ x2_inds = [
+ self.grid_points - self.grid_size + i
+ for i in range(self.grid_size)
+ ]
+ y2_inds = [(i + 1) * self.grid_size - 1 for i in range(self.grid_size)]
+
+ # voting of all grid points on some boundary
+ bboxes_x1 = (abs_xs[:, x1_inds] * pred_scores[:, x1_inds]).sum(
+ dim=1, keepdim=True) / (
+ pred_scores[:, x1_inds].sum(dim=1, keepdim=True))
+ bboxes_y1 = (abs_ys[:, y1_inds] * pred_scores[:, y1_inds]).sum(
+ dim=1, keepdim=True) / (
+ pred_scores[:, y1_inds].sum(dim=1, keepdim=True))
+ bboxes_x2 = (abs_xs[:, x2_inds] * pred_scores[:, x2_inds]).sum(
+ dim=1, keepdim=True) / (
+ pred_scores[:, x2_inds].sum(dim=1, keepdim=True))
+ bboxes_y2 = (abs_ys[:, y2_inds] * pred_scores[:, y2_inds]).sum(
+ dim=1, keepdim=True) / (
+ pred_scores[:, y2_inds].sum(dim=1, keepdim=True))
+
+ bbox_res = torch.cat(
+ [bboxes_x1, bboxes_y1, bboxes_x2, bboxes_y2, cls_scores], dim=1)
+ bbox_res[:, [0, 2]].clamp_(min=0, max=img_metas[0]['img_shape'][1])
+ bbox_res[:, [1, 3]].clamp_(min=0, max=img_metas[0]['img_shape'][0])
+
+ return bbox_res
diff --git a/annotator/uniformer/mmdet_null/models/roi_heads/mask_heads/htc_mask_head.py b/annotator/uniformer/mmdet_null/models/roi_heads/mask_heads/htc_mask_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..330b778ebad8d48d55d09ddd42baa70ec10ae463
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/roi_heads/mask_heads/htc_mask_head.py
@@ -0,0 +1,43 @@
+from mmcv.cnn import ConvModule
+
+from mmdet.models.builder import HEADS
+from .fcn_mask_head import FCNMaskHead
+
+
+@HEADS.register_module()
+class HTCMaskHead(FCNMaskHead):
+
+ def __init__(self, with_conv_res=True, *args, **kwargs):
+ super(HTCMaskHead, self).__init__(*args, **kwargs)
+ self.with_conv_res = with_conv_res
+ if self.with_conv_res:
+ self.conv_res = ConvModule(
+ self.conv_out_channels,
+ self.conv_out_channels,
+ 1,
+ conv_cfg=self.conv_cfg,
+ norm_cfg=self.norm_cfg)
+
+ def init_weights(self):
+ super(HTCMaskHead, self).init_weights()
+ if self.with_conv_res:
+ self.conv_res.init_weights()
+
+ def forward(self, x, res_feat=None, return_logits=True, return_feat=True):
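+        # If a relayed feature from the previous HTC stage is provided,
+        # project it with the 1x1 conv_res and add it to x before the
+        # mask convs.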
+ if res_feat is not None:
+ assert self.with_conv_res
+ res_feat = self.conv_res(res_feat)
+ x = x + res_feat
+ for conv in self.convs:
+ x = conv(x)
+ res_feat = x
+ outs = []
+ if return_logits:
+ x = self.upsample(x)
+ if self.upsample_method == 'deconv':
+ x = self.relu(x)
+ mask_pred = self.conv_logits(x)
+ outs.append(mask_pred)
+ if return_feat:
+ outs.append(res_feat)
+ return outs if len(outs) > 1 else outs[0]
diff --git a/annotator/uniformer/mmdet_null/models/roi_heads/mask_heads/mask_point_head.py b/annotator/uniformer/mmdet_null/models/roi_heads/mask_heads/mask_point_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..fb92903a9488a44b984a489a354d838cc88f8ad4
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/roi_heads/mask_heads/mask_point_head.py
@@ -0,0 +1,300 @@
+# Modified from https://github.com/facebookresearch/detectron2/tree/master/projects/PointRend/point_head/point_head.py # noqa
+
+import torch
+import torch.nn as nn
+from mmcv.cnn import ConvModule, normal_init
+from mmcv.ops import point_sample, rel_roi_point_to_rel_img_point
+
+from mmdet.models.builder import HEADS, build_loss
+
+
+@HEADS.register_module()
+class MaskPointHead(nn.Module):
+    """A mask point head used in PointRend.
+
+    ``MaskPointHead`` uses a shared multi-layer perceptron (equivalent to
+    nn.Conv1d) to predict the logits of input points. The fine-grained
+    features and coarse features are concatenated together for prediction.
+
+ Args:
+ num_fcs (int): Number of fc layers in the head. Default: 3.
+ in_channels (int): Number of input channels. Default: 256.
+ fc_channels (int): Number of fc channels. Default: 256.
+ num_classes (int): Number of classes for logits. Default: 80.
+        class_agnostic (bool): Whether to use class-agnostic classification.
+            If so, the output channels of logits will be 1. Default: False.
+        coarse_pred_each_layer (bool): Whether to concatenate the coarse
+            feature with the output of each fc layer. Default: True.
+ conv_cfg (dict | None): Dictionary to construct and config conv layer.
+ Default: dict(type='Conv1d'))
+ norm_cfg (dict | None): Dictionary to construct and config norm layer.
+ Default: None.
+ loss_point (dict): Dictionary to construct and config loss layer of
+ point head. Default: dict(type='CrossEntropyLoss', use_mask=True,
+ loss_weight=1.0).
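+
+    Example:
+        >>> # Illustrative sketch of the point-wise classification (shapes
+        >>> # only; assumes mmcv is available).
+        >>> import torch
+        >>> self = MaskPointHead(num_classes=5)
+        >>> fine_grained_feats = torch.rand(3, 256, 20)
+        >>> coarse_feats = torch.rand(3, 5, 20)
+        >>> out = self.forward(fine_grained_feats, coarse_feats)
+        >>> assert out.shape == (3, 5, 20)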
+ """
+
+ def __init__(self,
+ num_classes,
+ num_fcs=3,
+ in_channels=256,
+ fc_channels=256,
+ class_agnostic=False,
+ coarse_pred_each_layer=True,
+ conv_cfg=dict(type='Conv1d'),
+ norm_cfg=None,
+ act_cfg=dict(type='ReLU'),
+ loss_point=dict(
+ type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)):
+ super().__init__()
+ self.num_fcs = num_fcs
+ self.in_channels = in_channels
+ self.fc_channels = fc_channels
+ self.num_classes = num_classes
+ self.class_agnostic = class_agnostic
+ self.coarse_pred_each_layer = coarse_pred_each_layer
+ self.conv_cfg = conv_cfg
+ self.norm_cfg = norm_cfg
+ self.loss_point = build_loss(loss_point)
+
+ fc_in_channels = in_channels + num_classes
+ self.fcs = nn.ModuleList()
+ for _ in range(num_fcs):
+ fc = ConvModule(
+ fc_in_channels,
+ fc_channels,
+ kernel_size=1,
+ stride=1,
+ padding=0,
+ conv_cfg=conv_cfg,
+ norm_cfg=norm_cfg,
+ act_cfg=act_cfg)
+ self.fcs.append(fc)
+ fc_in_channels = fc_channels
+ fc_in_channels += num_classes if self.coarse_pred_each_layer else 0
+
+ out_channels = 1 if self.class_agnostic else self.num_classes
+ self.fc_logits = nn.Conv1d(
+ fc_in_channels, out_channels, kernel_size=1, stride=1, padding=0)
+
+ def init_weights(self):
+ """Initialize last classification layer of MaskPointHead, conv layers
+ are already initialized by ConvModule."""
+ normal_init(self.fc_logits, std=0.001)
+
+ def forward(self, fine_grained_feats, coarse_feats):
+        """Classify each point based on fine-grained and coarse feats.
+
+ Args:
+ fine_grained_feats (Tensor): Fine grained feature sampled from FPN,
+ shape (num_rois, in_channels, num_points).
+ coarse_feats (Tensor): Coarse feature sampled from CoarseMaskHead,
+ shape (num_rois, num_classes, num_points).
+
+ Returns:
+ Tensor: Point classification results,
+ shape (num_rois, num_class, num_points).
+ """
+
+ x = torch.cat([fine_grained_feats, coarse_feats], dim=1)
+ for fc in self.fcs:
+ x = fc(x)
+ if self.coarse_pred_each_layer:
+ x = torch.cat((x, coarse_feats), dim=1)
+ return self.fc_logits(x)
+
+ def get_targets(self, rois, rel_roi_points, sampling_results, gt_masks,
+ cfg):
+ """Get training targets of MaskPointHead for all images.
+
+ Args:
+ rois (Tensor): Region of Interest, shape (num_rois, 5).
+            rel_roi_points (Tensor): Point coordinates relative to RoI, shape
+                (num_rois, num_points, 2).
+ sampling_results (:obj:`SamplingResult`): Sampling result after
+ sampling and assignment.
+            gt_masks (Tensor): Ground truth segmentation masks of
+ corresponding boxes, shape (num_rois, height, width).
+ cfg (dict): Training cfg.
+
+ Returns:
+ Tensor: Point target, shape (num_rois, num_points).
+ """
+
+ num_imgs = len(sampling_results)
+ rois_list = []
+ rel_roi_points_list = []
+ for batch_ind in range(num_imgs):
+ inds = (rois[:, 0] == batch_ind)
+ rois_list.append(rois[inds])
+ rel_roi_points_list.append(rel_roi_points[inds])
+ pos_assigned_gt_inds_list = [
+ res.pos_assigned_gt_inds for res in sampling_results
+ ]
+ cfg_list = [cfg for _ in range(num_imgs)]
+
+ point_targets = map(self._get_target_single, rois_list,
+ rel_roi_points_list, pos_assigned_gt_inds_list,
+ gt_masks, cfg_list)
+ point_targets = list(point_targets)
+
+ if len(point_targets) > 0:
+ point_targets = torch.cat(point_targets)
+
+ return point_targets
+
+ def _get_target_single(self, rois, rel_roi_points, pos_assigned_gt_inds,
+ gt_masks, cfg):
+ """Get training target of MaskPointHead for each image."""
+ num_pos = rois.size(0)
+ num_points = cfg.num_points
+ if num_pos > 0:
+ gt_masks_th = (
+ gt_masks.to_tensor(rois.dtype, rois.device).index_select(
+ 0, pos_assigned_gt_inds))
+ gt_masks_th = gt_masks_th.unsqueeze(1)
+ rel_img_points = rel_roi_point_to_rel_img_point(
+ rois, rel_roi_points, gt_masks_th.shape[2:])
+ point_targets = point_sample(gt_masks_th,
+ rel_img_points).squeeze(1)
+ else:
+ point_targets = rois.new_zeros((0, num_points))
+ return point_targets
+
+ def loss(self, point_pred, point_targets, labels):
+ """Calculate loss for MaskPointHead.
+
+ Args:
+            point_pred (Tensor): Point prediction result, shape
+                (num_rois, num_classes, num_points).
+            point_targets (Tensor): Point targets, shape (num_rois, num_points).
+ labels (Tensor): Class label of corresponding boxes,
+ shape (num_rois, )
+
+ Returns:
+ dict[str, Tensor]: a dictionary of point loss components
+ """
+
+ loss = dict()
+ if self.class_agnostic:
+ loss_point = self.loss_point(point_pred, point_targets,
+ torch.zeros_like(labels))
+ else:
+ loss_point = self.loss_point(point_pred, point_targets, labels)
+ loss['loss_point'] = loss_point
+ return loss
+
+ def _get_uncertainty(self, mask_pred, labels):
+ """Estimate uncertainty based on pred logits.
+
+        We estimate uncertainty as the L1 distance between 0.0 and the logit
+        prediction in 'mask_pred' for the foreground class given by ``labels``.
+
+ Args:
+ mask_pred (Tensor): mask predication logits, shape (num_rois,
+ num_classes, mask_height, mask_width).
+
+ labels (list[Tensor]): Either predicted or ground truth label for
+ each predicted mask, of length num_rois.
+
+ Returns:
+ scores (Tensor): Uncertainty scores with the most uncertain
+ locations having the highest uncertainty score,
+ shape (num_rois, 1, mask_height, mask_width)
+ """
+ if mask_pred.shape[1] == 1:
+ gt_class_logits = mask_pred.clone()
+ else:
+ inds = torch.arange(mask_pred.shape[0], device=mask_pred.device)
+ gt_class_logits = mask_pred[inds, labels].unsqueeze(1)
+ return -torch.abs(gt_class_logits)
+
+ def get_roi_rel_points_train(self, mask_pred, labels, cfg):
+ """Get ``num_points`` most uncertain points with random points during
+ train.
+
+ Sample points in [0, 1] x [0, 1] coordinate space based on their
+ uncertainty. The uncertainties are calculated for each point using
+ '_get_uncertainty()' function that takes point's logit prediction as
+ input.
+
+ Args:
+ mask_pred (Tensor): A tensor of shape (num_rois, num_classes,
+ mask_height, mask_width) for class-specific or class-agnostic
+ prediction.
+ labels (list): The ground truth class for each instance.
+ cfg (dict): Training config of point head.
+
+ Returns:
+            point_coords (Tensor): A tensor of shape (num_rois, num_points, 2)
+                that contains the coordinates of the sampled points.
+ """
+ num_points = cfg.num_points
+ oversample_ratio = cfg.oversample_ratio
+ importance_sample_ratio = cfg.importance_sample_ratio
+ assert oversample_ratio >= 1
+ assert 0 <= importance_sample_ratio <= 1
+ batch_size = mask_pred.shape[0]
+ num_sampled = int(num_points * oversample_ratio)
+ point_coords = torch.rand(
+ batch_size, num_sampled, 2, device=mask_pred.device)
+ point_logits = point_sample(mask_pred, point_coords)
+ # It is crucial to calculate uncertainty based on the sampled
+ # prediction value for the points. Calculating uncertainties of the
+ # coarse predictions first and sampling them for points leads to
+ # incorrect results. To illustrate this: assume uncertainty func(
+ # logits)=-abs(logits), a sampled point between two coarse
+ # predictions with -1 and 1 logits has 0 logits, and therefore 0
+ # uncertainty value. However, if we calculate uncertainties for the
+ # coarse predictions first, both will have -1 uncertainty,
+ # and sampled point will get -1 uncertainty.
+ point_uncertainties = self._get_uncertainty(point_logits, labels)
+ num_uncertain_points = int(importance_sample_ratio * num_points)
+ num_random_points = num_points - num_uncertain_points
+ idx = torch.topk(
+ point_uncertainties[:, 0, :], k=num_uncertain_points, dim=1)[1]
+ shift = num_sampled * torch.arange(
+ batch_size, dtype=torch.long, device=mask_pred.device)
+ idx += shift[:, None]
+ point_coords = point_coords.view(-1, 2)[idx.view(-1), :].view(
+ batch_size, num_uncertain_points, 2)
+ if num_random_points > 0:
+ rand_roi_coords = torch.rand(
+ batch_size, num_random_points, 2, device=mask_pred.device)
+ point_coords = torch.cat((point_coords, rand_roi_coords), dim=1)
+ return point_coords
+
+ def get_roi_rel_points_test(self, mask_pred, pred_label, cfg):
+ """Get ``num_points`` most uncertain points during test.
+
+ Args:
+ mask_pred (Tensor): A tensor of shape (num_rois, num_classes,
+ mask_height, mask_width) for class-specific or class-agnostic
+ prediction.
+            pred_label (list): The predicted class for each instance.
+ cfg (dict): Testing config of point head.
+
+ Returns:
+ point_indices (Tensor): A tensor of shape (num_rois, num_points)
+ that contains indices from [0, mask_height x mask_width) of the
+ most uncertain points.
+ point_coords (Tensor): A tensor of shape (num_rois, num_points, 2)
+ that contains [0, 1] x [0, 1] normalized coordinates of the
+                most uncertain points from the [mask_height, mask_width] grid.
+ """
+ num_points = cfg.subdivision_num_points
+ uncertainty_map = self._get_uncertainty(mask_pred, pred_label)
+ num_rois, _, mask_height, mask_width = uncertainty_map.shape
+ h_step = 1.0 / mask_height
+ w_step = 1.0 / mask_width
+
+ uncertainty_map = uncertainty_map.view(num_rois,
+ mask_height * mask_width)
+ num_points = min(mask_height * mask_width, num_points)
+ point_indices = uncertainty_map.topk(num_points, dim=1)[1]
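+        # Convert each flat index into the (x, y) center of its cell, e.g.
+        # with a 4x4 uncertainty map, index 6 maps to (0.625, 0.375) in
+        # normalized [0, 1] coordinates.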
+ point_coords = uncertainty_map.new_zeros(num_rois, num_points, 2)
+ point_coords[:, :, 0] = w_step / 2.0 + (point_indices %
+ mask_width).float() * w_step
+ point_coords[:, :, 1] = h_step / 2.0 + (point_indices //
+ mask_width).float() * h_step
+ return point_indices, point_coords
diff --git a/annotator/uniformer/mmdet_null/models/roi_heads/mask_heads/maskiou_head.py b/annotator/uniformer/mmdet_null/models/roi_heads/mask_heads/maskiou_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..39bcd6a7dbdb089cd19cef811038e0b6a80ab89a
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/roi_heads/mask_heads/maskiou_head.py
@@ -0,0 +1,186 @@
+import numpy as np
+import torch
+import torch.nn as nn
+from mmcv.cnn import Conv2d, Linear, MaxPool2d, kaiming_init, normal_init
+from mmcv.runner import force_fp32
+from torch.nn.modules.utils import _pair
+
+from mmdet.models.builder import HEADS, build_loss
+
+
+@HEADS.register_module()
+class MaskIoUHead(nn.Module):
+ """Mask IoU Head.
+
+    This head predicts the IoU between the predicted masks and the
+    corresponding gt masks.
+ """
+
+ def __init__(self,
+ num_convs=4,
+ num_fcs=2,
+ roi_feat_size=14,
+ in_channels=256,
+ conv_out_channels=256,
+ fc_out_channels=1024,
+ num_classes=80,
+ loss_iou=dict(type='MSELoss', loss_weight=0.5)):
+ super(MaskIoUHead, self).__init__()
+ self.in_channels = in_channels
+ self.conv_out_channels = conv_out_channels
+ self.fc_out_channels = fc_out_channels
+ self.num_classes = num_classes
+ self.fp16_enabled = False
+
+ self.convs = nn.ModuleList()
+ for i in range(num_convs):
+ if i == 0:
+ # concatenation of mask feature and mask prediction
+ in_channels = self.in_channels + 1
+ else:
+ in_channels = self.conv_out_channels
+ stride = 2 if i == num_convs - 1 else 1
+ self.convs.append(
+ Conv2d(
+ in_channels,
+ self.conv_out_channels,
+ 3,
+ stride=stride,
+ padding=1))
+
+ roi_feat_size = _pair(roi_feat_size)
+ pooled_area = (roi_feat_size[0] // 2) * (roi_feat_size[1] // 2)
+ self.fcs = nn.ModuleList()
+ for i in range(num_fcs):
+ in_channels = (
+ self.conv_out_channels *
+ pooled_area if i == 0 else self.fc_out_channels)
+ self.fcs.append(Linear(in_channels, self.fc_out_channels))
+
+ self.fc_mask_iou = Linear(self.fc_out_channels, self.num_classes)
+ self.relu = nn.ReLU()
+ self.max_pool = MaxPool2d(2, 2)
+ self.loss_iou = build_loss(loss_iou)
+
+ def init_weights(self):
+ for conv in self.convs:
+ kaiming_init(conv)
+ for fc in self.fcs:
+ kaiming_init(
+ fc,
+ a=1,
+ mode='fan_in',
+ nonlinearity='leaky_relu',
+ distribution='uniform')
+ normal_init(self.fc_mask_iou, std=0.01)
+
+ def forward(self, mask_feat, mask_pred):
+ mask_pred = mask_pred.sigmoid()
+ mask_pred_pooled = self.max_pool(mask_pred.unsqueeze(1))
+
+ x = torch.cat((mask_feat, mask_pred_pooled), 1)
+
+ for conv in self.convs:
+ x = self.relu(conv(x))
+ x = x.flatten(1)
+ for fc in self.fcs:
+ x = self.relu(fc(x))
+ mask_iou = self.fc_mask_iou(x)
+ return mask_iou
+
+ @force_fp32(apply_to=('mask_iou_pred', ))
+ def loss(self, mask_iou_pred, mask_iou_targets):
+ pos_inds = mask_iou_targets > 0
+ if pos_inds.sum() > 0:
+ loss_mask_iou = self.loss_iou(mask_iou_pred[pos_inds],
+ mask_iou_targets[pos_inds])
+ else:
+ loss_mask_iou = mask_iou_pred.sum() * 0
+ return dict(loss_mask_iou=loss_mask_iou)
+
+ @force_fp32(apply_to=('mask_pred', ))
+ def get_targets(self, sampling_results, gt_masks, mask_pred, mask_targets,
+ rcnn_train_cfg):
+ """Compute target of mask IoU.
+
+        The mask IoU target is the IoU of the predicted mask (inside a bbox)
+        and the gt mask of the corresponding instance (the whole instance).
+        The intersection area is computed inside the bbox, and the gt mask
+        area is computed in two steps: first compute the gt area inside the
+        bbox, then divide it by the ratio of the gt area inside the bbox to
+        the gt area of the whole instance.
+
+ Args:
+ sampling_results (list[:obj:`SamplingResult`]): sampling results.
+            gt_masks (BitmapMask | PolygonMask): Gt masks (the whole instance)
+                of each image, with the same shape as the input image.
+ mask_pred (Tensor): Predicted masks of each positive proposal,
+ shape (num_pos, h, w).
+ mask_targets (Tensor): Gt mask of each positive proposal,
+ binary map of the shape (num_pos, h, w).
+ rcnn_train_cfg (dict): Training config for R-CNN part.
+
+ Returns:
+ Tensor: mask iou target (length == num positive).
+ """
+ pos_proposals = [res.pos_bboxes for res in sampling_results]
+ pos_assigned_gt_inds = [
+ res.pos_assigned_gt_inds for res in sampling_results
+ ]
+
+ # compute the area ratio of gt areas inside the proposals and
+ # the whole instance
+ area_ratios = map(self._get_area_ratio, pos_proposals,
+ pos_assigned_gt_inds, gt_masks)
+ area_ratios = torch.cat(list(area_ratios))
+ assert mask_targets.size(0) == area_ratios.size(0)
+
+ mask_pred = (mask_pred > rcnn_train_cfg.mask_thr_binary).float()
+ mask_pred_areas = mask_pred.sum((-1, -2))
+
+ # mask_pred and mask_targets are binary maps
+ overlap_areas = (mask_pred * mask_targets).sum((-1, -2))
+
+ # compute the mask area of the whole instance
+ gt_full_areas = mask_targets.sum((-1, -2)) / (area_ratios + 1e-7)
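+        # e.g. if the gt area inside the box is 30 px and that is 60% of the
+        # instance (area_ratio = 0.6), gt_full_areas is ~50 px, so the IoU is
+        # computed against the full instance area, not just the cropped part.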
+
+ mask_iou_targets = overlap_areas / (
+ mask_pred_areas + gt_full_areas - overlap_areas)
+ return mask_iou_targets
+
+ def _get_area_ratio(self, pos_proposals, pos_assigned_gt_inds, gt_masks):
+        """Compute the area ratio of the gt mask inside the proposal to the
+        gt mask of the corresponding instance."""
+ num_pos = pos_proposals.size(0)
+ if num_pos > 0:
+ area_ratios = []
+ proposals_np = pos_proposals.cpu().numpy()
+ pos_assigned_gt_inds = pos_assigned_gt_inds.cpu().numpy()
+ # compute mask areas of gt instances (batch processing for speedup)
+ gt_instance_mask_area = gt_masks.areas
+ for i in range(num_pos):
+ gt_mask = gt_masks[pos_assigned_gt_inds[i]]
+
+ # crop the gt mask inside the proposal
+ bbox = proposals_np[i, :].astype(np.int32)
+ gt_mask_in_proposal = gt_mask.crop(bbox)
+
+ ratio = gt_mask_in_proposal.areas[0] / (
+ gt_instance_mask_area[pos_assigned_gt_inds[i]] + 1e-7)
+ area_ratios.append(ratio)
+ area_ratios = torch.from_numpy(np.stack(area_ratios)).float().to(
+ pos_proposals.device)
+ else:
+ area_ratios = pos_proposals.new_zeros((0, ))
+ return area_ratios
+
+ @force_fp32(apply_to=('mask_iou_pred', ))
+ def get_mask_scores(self, mask_iou_pred, det_bboxes, det_labels):
+ """Get the mask scores.
+
+ mask_score = bbox_score * mask_iou
+ """
+ inds = range(det_labels.size(0))
+ mask_scores = mask_iou_pred[inds, det_labels] * det_bboxes[inds, -1]
+ mask_scores = mask_scores.cpu().numpy()
+ det_labels = det_labels.cpu().numpy()
+ return [mask_scores[det_labels == i] for i in range(self.num_classes)]
diff --git a/annotator/uniformer/mmdet_null/models/roi_heads/mask_heads/scnet_mask_head.py b/annotator/uniformer/mmdet_null/models/roi_heads/mask_heads/scnet_mask_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..983a2d9db71a3b2b4980996725fdafb0b412b413
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/roi_heads/mask_heads/scnet_mask_head.py
@@ -0,0 +1,27 @@
+from mmdet.models.builder import HEADS
+from mmdet.models.utils import ResLayer, SimplifiedBasicBlock
+from .fcn_mask_head import FCNMaskHead
+
+
+@HEADS.register_module()
+class SCNetMaskHead(FCNMaskHead):
+    """Mask head for `SCNet <https://arxiv.org/abs/2012.10150>`_.
+
+ Args:
+ conv_to_res (bool, optional): if True, change the conv layers to
+ ``SimplifiedBasicBlock``.
+ """
+
+ def __init__(self, conv_to_res=True, **kwargs):
+ super(SCNetMaskHead, self).__init__(**kwargs)
+ self.conv_to_res = conv_to_res
+ if conv_to_res:
+ assert self.conv_kernel_size == 3
+ self.num_res_blocks = self.num_convs // 2
+ self.convs = ResLayer(
+ SimplifiedBasicBlock,
+ self.in_channels,
+ self.conv_out_channels,
+ self.num_res_blocks,
+ conv_cfg=self.conv_cfg,
+ norm_cfg=self.norm_cfg)
diff --git a/annotator/uniformer/mmdet_null/models/roi_heads/mask_heads/scnet_semantic_head.py b/annotator/uniformer/mmdet_null/models/roi_heads/mask_heads/scnet_semantic_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..df85a0112d27d97301fff56189f99bee0bf8efa5
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/roi_heads/mask_heads/scnet_semantic_head.py
@@ -0,0 +1,27 @@
+from mmdet.models.builder import HEADS
+from mmdet.models.utils import ResLayer, SimplifiedBasicBlock
+from .fused_semantic_head import FusedSemanticHead
+
+
+@HEADS.register_module()
+class SCNetSemanticHead(FusedSemanticHead):
+    """Mask head for `SCNet <https://arxiv.org/abs/2012.10150>`_.
+
+ Args:
+ conv_to_res (bool, optional): if True, change the conv layers to
+ ``SimplifiedBasicBlock``.
+ """
+
+ def __init__(self, conv_to_res=True, **kwargs):
+ super(SCNetSemanticHead, self).__init__(**kwargs)
+ self.conv_to_res = conv_to_res
+ if self.conv_to_res:
+ num_res_blocks = self.num_convs // 2
+ self.convs = ResLayer(
+ SimplifiedBasicBlock,
+ self.in_channels,
+ self.conv_out_channels,
+ num_res_blocks,
+ conv_cfg=self.conv_cfg,
+ norm_cfg=self.norm_cfg)
+ self.num_convs = num_res_blocks
diff --git a/annotator/uniformer/mmdet_null/models/roi_heads/mask_scoring_roi_head.py b/annotator/uniformer/mmdet_null/models/roi_heads/mask_scoring_roi_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..c6e55c7752209cb5c15eab689ad9e8ac1fef1b66
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/roi_heads/mask_scoring_roi_head.py
@@ -0,0 +1,122 @@
+import torch
+
+from mmdet.core import bbox2roi
+from ..builder import HEADS, build_head
+from .standard_roi_head import StandardRoIHead
+
+
+@HEADS.register_module()
+class MaskScoringRoIHead(StandardRoIHead):
+ """Mask Scoring RoIHead for Mask Scoring RCNN.
+
+ https://arxiv.org/abs/1903.00241
+ """
+
+ def __init__(self, mask_iou_head, **kwargs):
+ assert mask_iou_head is not None
+ super(MaskScoringRoIHead, self).__init__(**kwargs)
+ self.mask_iou_head = build_head(mask_iou_head)
+
+ def init_weights(self, pretrained):
+ """Initialize the weights in head.
+
+ Args:
+ pretrained (str, optional): Path to pre-trained weights.
+ Defaults to None.
+ """
+ super(MaskScoringRoIHead, self).init_weights(pretrained)
+ self.mask_iou_head.init_weights()
+
+ def _mask_forward_train(self, x, sampling_results, bbox_feats, gt_masks,
+ img_metas):
+ """Run forward function and calculate loss for Mask head in
+ training."""
+ pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results])
+ mask_results = super(MaskScoringRoIHead,
+ self)._mask_forward_train(x, sampling_results,
+ bbox_feats, gt_masks,
+ img_metas)
+ if mask_results['loss_mask'] is None:
+ return mask_results
+
+ # mask iou head forward and loss
+ pos_mask_pred = mask_results['mask_pred'][
+ range(mask_results['mask_pred'].size(0)), pos_labels]
+ mask_iou_pred = self.mask_iou_head(mask_results['mask_feats'],
+ pos_mask_pred)
+ pos_mask_iou_pred = mask_iou_pred[range(mask_iou_pred.size(0)),
+ pos_labels]
+
+ mask_iou_targets = self.mask_iou_head.get_targets(
+ sampling_results, gt_masks, pos_mask_pred,
+ mask_results['mask_targets'], self.train_cfg)
+ loss_mask_iou = self.mask_iou_head.loss(pos_mask_iou_pred,
+ mask_iou_targets)
+ mask_results['loss_mask'].update(loss_mask_iou)
+ return mask_results
+
+ def simple_test_mask(self,
+ x,
+ img_metas,
+ det_bboxes,
+ det_labels,
+ rescale=False):
+ """Obtain mask prediction without augmentation."""
+ # image shapes of images in the batch
+ ori_shapes = tuple(meta['ori_shape'] for meta in img_metas)
+ scale_factors = tuple(meta['scale_factor'] for meta in img_metas)
+
+ num_imgs = len(det_bboxes)
+ if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes):
+ num_classes = self.mask_head.num_classes
+ segm_results = [[[] for _ in range(num_classes)]
+ for _ in range(num_imgs)]
+ mask_scores = [[[] for _ in range(num_classes)]
+ for _ in range(num_imgs)]
+ else:
+ # if det_bboxes is rescaled to the original image size, we need to
+ # rescale it back to the testing scale to obtain RoIs.
+ if rescale and not isinstance(scale_factors[0], float):
+ scale_factors = [
+ torch.from_numpy(scale_factor).to(det_bboxes[0].device)
+ for scale_factor in scale_factors
+ ]
+ _bboxes = [
+ det_bboxes[i][:, :4] *
+ scale_factors[i] if rescale else det_bboxes[i]
+ for i in range(num_imgs)
+ ]
+ mask_rois = bbox2roi(_bboxes)
+ mask_results = self._mask_forward(x, mask_rois)
+ concat_det_labels = torch.cat(det_labels)
+ # get mask scores with mask iou head
+ mask_feats = mask_results['mask_feats']
+ mask_pred = mask_results['mask_pred']
+ mask_iou_pred = self.mask_iou_head(
+ mask_feats, mask_pred[range(concat_det_labels.size(0)),
+ concat_det_labels])
+ # split batch mask prediction back to each image
+ num_bboxes_per_img = tuple(len(_bbox) for _bbox in _bboxes)
+ mask_preds = mask_pred.split(num_bboxes_per_img, 0)
+ mask_iou_preds = mask_iou_pred.split(num_bboxes_per_img, 0)
+
+ # apply mask post-processing to each image individually
+ segm_results = []
+ mask_scores = []
+ for i in range(num_imgs):
+ if det_bboxes[i].shape[0] == 0:
+ segm_results.append(
+ [[] for _ in range(self.mask_head.num_classes)])
+ mask_scores.append(
+ [[] for _ in range(self.mask_head.num_classes)])
+ else:
+ segm_result = self.mask_head.get_seg_masks(
+ mask_preds[i], _bboxes[i], det_labels[i],
+ self.test_cfg, ori_shapes[i], scale_factors[i],
+ rescale)
+ # get mask scores with mask iou head
+ mask_score = self.mask_iou_head.get_mask_scores(
+ mask_iou_preds[i], det_bboxes[i], det_labels[i])
+ segm_results.append(segm_result)
+ mask_scores.append(mask_score)
+ return list(zip(segm_results, mask_scores))
diff --git a/annotator/uniformer/mmdet_null/models/roi_heads/pisa_roi_head.py b/annotator/uniformer/mmdet_null/models/roi_heads/pisa_roi_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..e01113629837eb9c065ba40cd4025899b7bd0172
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/roi_heads/pisa_roi_head.py
@@ -0,0 +1,159 @@
+from mmdet.core import bbox2roi
+from ..builder import HEADS
+from ..losses.pisa_loss import carl_loss, isr_p
+from .standard_roi_head import StandardRoIHead
+
+
+@HEADS.register_module()
+class PISARoIHead(StandardRoIHead):
+    r"""The RoI head for `Prime Sample Attention in Object Detection
+    <https://arxiv.org/abs/1904.04821>`_."""
+
+ def forward_train(self,
+ x,
+ img_metas,
+ proposal_list,
+ gt_bboxes,
+ gt_labels,
+ gt_bboxes_ignore=None,
+ gt_masks=None):
+ """Forward function for training.
+
+ Args:
+ x (list[Tensor]): List of multi-level img features.
+ img_metas (list[dict]): List of image info dict where each dict
+ has: 'img_shape', 'scale_factor', 'flip', and may also contain
+ 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
+ For details on the values of these keys see
+ `mmdet/datasets/pipelines/formatting.py:Collect`.
+            proposal_list (list[Tensor]): List of region proposals.
+            gt_bboxes (list[Tensor]): Ground truth bboxes for each image in
+                [tl_x, tl_y, br_x, br_y] format.
+            gt_labels (list[Tensor]): Class indices corresponding to each box.
+            gt_bboxes_ignore (list[Tensor], optional): Specify which bounding
+                boxes can be ignored when computing the loss.
+            gt_masks (None | Tensor): True segmentation masks for each box,
+                used if the architecture supports a segmentation task.
+
+ Returns:
+ dict[str, Tensor]: a dictionary of loss components
+ """
+ # assign gts and sample proposals
+ if self.with_bbox or self.with_mask:
+ num_imgs = len(img_metas)
+ if gt_bboxes_ignore is None:
+ gt_bboxes_ignore = [None for _ in range(num_imgs)]
+ sampling_results = []
+ neg_label_weights = []
+ for i in range(num_imgs):
+ assign_result = self.bbox_assigner.assign(
+ proposal_list[i], gt_bboxes[i], gt_bboxes_ignore[i],
+ gt_labels[i])
+ sampling_result = self.bbox_sampler.sample(
+ assign_result,
+ proposal_list[i],
+ gt_bboxes[i],
+ gt_labels[i],
+ feats=[lvl_feat[i][None] for lvl_feat in x])
+ # neg label weight is obtained by sampling when using ISR-N
+ neg_label_weight = None
+ if isinstance(sampling_result, tuple):
+ sampling_result, neg_label_weight = sampling_result
+ sampling_results.append(sampling_result)
+ neg_label_weights.append(neg_label_weight)
+
+ losses = dict()
+ # bbox head forward and loss
+ if self.with_bbox:
+ bbox_results = self._bbox_forward_train(
+ x,
+ sampling_results,
+ gt_bboxes,
+ gt_labels,
+ img_metas,
+ neg_label_weights=neg_label_weights)
+ losses.update(bbox_results['loss_bbox'])
+
+ # mask head forward and loss
+ if self.with_mask:
+ mask_results = self._mask_forward_train(x, sampling_results,
+ bbox_results['bbox_feats'],
+ gt_masks, img_metas)
+ losses.update(mask_results['loss_mask'])
+
+ return losses
+
+ def _bbox_forward(self, x, rois):
+ """Box forward function used in both training and testing."""
+ # TODO: a more flexible way to decide which feature maps to use
+ bbox_feats = self.bbox_roi_extractor(
+ x[:self.bbox_roi_extractor.num_inputs], rois)
+ if self.with_shared_head:
+ bbox_feats = self.shared_head(bbox_feats)
+ cls_score, bbox_pred = self.bbox_head(bbox_feats)
+
+ bbox_results = dict(
+ cls_score=cls_score, bbox_pred=bbox_pred, bbox_feats=bbox_feats)
+ return bbox_results
+
+ def _bbox_forward_train(self,
+ x,
+ sampling_results,
+ gt_bboxes,
+ gt_labels,
+ img_metas,
+ neg_label_weights=None):
+ """Run forward function and calculate loss for box head in training."""
+ rois = bbox2roi([res.bboxes for res in sampling_results])
+
+ bbox_results = self._bbox_forward(x, rois)
+
+ bbox_targets = self.bbox_head.get_targets(sampling_results, gt_bboxes,
+ gt_labels, self.train_cfg)
+
+        # neg_label_weights returned by the sampler are per-image; map them
+        # back to the corresponding locations in the flattened label weights
+        if neg_label_weights and neg_label_weights[0] is not None:
+ label_weights = bbox_targets[1]
+ cur_num_rois = 0
+ for i in range(len(sampling_results)):
+ num_pos = sampling_results[i].pos_inds.size(0)
+ num_neg = sampling_results[i].neg_inds.size(0)
+ label_weights[cur_num_rois + num_pos:cur_num_rois + num_pos +
+ num_neg] = neg_label_weights[i]
+ cur_num_rois += num_pos + num_neg
+
+ cls_score = bbox_results['cls_score']
+ bbox_pred = bbox_results['bbox_pred']
+
+ # Apply ISR-P
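+        # ISR-P (Importance-based Sample Reweighting for positives) re-weights
+        # the classification and regression targets of positive samples by
+        # their estimated importance, so "prime" samples contribute more.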
+ isr_cfg = self.train_cfg.get('isr', None)
+ if isr_cfg is not None:
+ bbox_targets = isr_p(
+ cls_score,
+ bbox_pred,
+ bbox_targets,
+ rois,
+ sampling_results,
+ self.bbox_head.loss_cls,
+ self.bbox_head.bbox_coder,
+ **isr_cfg,
+ num_class=self.bbox_head.num_classes)
+ loss_bbox = self.bbox_head.loss(cls_score, bbox_pred, rois,
+ *bbox_targets)
+
+ # Add CARL Loss
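+        # CARL (Classification-Aware Regression Loss) ties the regression loss
+        # of each positive sample to its classification score, encouraging
+        # well-localized boxes to also receive high classification confidence.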
+ carl_cfg = self.train_cfg.get('carl', None)
+ if carl_cfg is not None:
+ loss_carl = carl_loss(
+ cls_score,
+ bbox_targets[0],
+ bbox_pred,
+ bbox_targets[2],
+ self.bbox_head.loss_bbox,
+ **carl_cfg,
+ num_class=self.bbox_head.num_classes)
+ loss_bbox.update(loss_carl)
+
+ bbox_results.update(loss_bbox=loss_bbox)
+ return bbox_results
diff --git a/annotator/uniformer/mmdet_null/models/roi_heads/point_rend_roi_head.py b/annotator/uniformer/mmdet_null/models/roi_heads/point_rend_roi_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..478cdf5bff6779e9291f94c543205289036ea2c6
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/roi_heads/point_rend_roi_head.py
@@ -0,0 +1,218 @@
+# Modified from https://github.com/facebookresearch/detectron2/tree/master/projects/PointRend # noqa
+
+import torch
+import torch.nn.functional as F
+from mmcv.ops import point_sample, rel_roi_point_to_rel_img_point
+
+from mmdet.core import bbox2roi, bbox_mapping, merge_aug_masks
+from .. import builder
+from ..builder import HEADS
+from .standard_roi_head import StandardRoIHead
+
+
+@HEADS.register_module()
+class PointRendRoIHead(StandardRoIHead):
+ """`PointRend `_."""
+
+ def __init__(self, point_head, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ assert self.with_bbox and self.with_mask
+ self.init_point_head(point_head)
+
+ def init_point_head(self, point_head):
+ """Initialize ``point_head``"""
+ self.point_head = builder.build_head(point_head)
+
+ def init_weights(self, pretrained):
+ """Initialize the weights in head.
+
+ Args:
+ pretrained (str, optional): Path to pre-trained weights.
+ """
+ super().init_weights(pretrained)
+ self.point_head.init_weights()
+
+ def _mask_forward_train(self, x, sampling_results, bbox_feats, gt_masks,
+ img_metas):
+ """Run forward function and calculate loss for mask head and point head
+ in training."""
+ mask_results = super()._mask_forward_train(x, sampling_results,
+ bbox_feats, gt_masks,
+ img_metas)
+ if mask_results['loss_mask'] is not None:
+ loss_point = self._mask_point_forward_train(
+ x, sampling_results, mask_results['mask_pred'], gt_masks,
+ img_metas)
+ mask_results['loss_mask'].update(loss_point)
+
+ return mask_results
+
+ def _mask_point_forward_train(self, x, sampling_results, mask_pred,
+ gt_masks, img_metas):
+ """Run forward function and calculate loss for point head in
+ training."""
+ pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results])
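+        # sample point locations inside each positive RoI, biased towards the
+        # most uncertain locations of the coarse mask prediction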
+ rel_roi_points = self.point_head.get_roi_rel_points_train(
+ mask_pred, pos_labels, cfg=self.train_cfg)
+ rois = bbox2roi([res.pos_bboxes for res in sampling_results])
+
+ fine_grained_point_feats = self._get_fine_grained_point_feats(
+ x, rois, rel_roi_points, img_metas)
+ coarse_point_feats = point_sample(mask_pred, rel_roi_points)
+ mask_point_pred = self.point_head(fine_grained_point_feats,
+ coarse_point_feats)
+ mask_point_target = self.point_head.get_targets(
+ rois, rel_roi_points, sampling_results, gt_masks, self.train_cfg)
+ loss_mask_point = self.point_head.loss(mask_point_pred,
+ mask_point_target, pos_labels)
+
+ return loss_mask_point
+
+ def _get_fine_grained_point_feats(self, x, rois, rel_roi_points,
+ img_metas):
+ """Sample fine grained feats from each level feature map and
+ concatenate them together."""
+ num_imgs = len(img_metas)
+ fine_grained_feats = []
+ for idx in range(self.mask_roi_extractor.num_inputs):
+ feats = x[idx]
+ spatial_scale = 1. / float(
+ self.mask_roi_extractor.featmap_strides[idx])
+ point_feats = []
+ for batch_ind in range(num_imgs):
+ # unravel batch dim
+ feat = feats[batch_ind].unsqueeze(0)
+ inds = (rois[:, 0].long() == batch_ind)
+ if inds.any():
+ rel_img_points = rel_roi_point_to_rel_img_point(
+ rois[inds], rel_roi_points[inds], feat.shape[2:],
+ spatial_scale).unsqueeze(0)
+ point_feat = point_sample(feat, rel_img_points)
+ point_feat = point_feat.squeeze(0).transpose(0, 1)
+ point_feats.append(point_feat)
+ fine_grained_feats.append(torch.cat(point_feats, dim=0))
+ return torch.cat(fine_grained_feats, dim=1)
+
+ def _mask_point_forward_test(self, x, rois, label_pred, mask_pred,
+ img_metas):
+ """Mask refining process with point head in testing."""
+ refined_mask_pred = mask_pred.clone()
+ for subdivision_step in range(self.test_cfg.subdivision_steps):
+ refined_mask_pred = F.interpolate(
+ refined_mask_pred,
+ scale_factor=self.test_cfg.scale_factor,
+ mode='bilinear',
+ align_corners=False)
+            # If `subdivision_num_points` is larger than or equal to the
+            # resolution of the next step, then we can skip this step
+ num_rois, channels, mask_height, mask_width = \
+ refined_mask_pred.shape
+ if (self.test_cfg.subdivision_num_points >=
+ self.test_cfg.scale_factor**2 * mask_height * mask_width
+ and
+ subdivision_step < self.test_cfg.subdivision_steps - 1):
+ continue
+ point_indices, rel_roi_points = \
+ self.point_head.get_roi_rel_points_test(
+ refined_mask_pred, label_pred, cfg=self.test_cfg)
+ fine_grained_point_feats = self._get_fine_grained_point_feats(
+ x, rois, rel_roi_points, img_metas)
+ coarse_point_feats = point_sample(mask_pred, rel_roi_points)
+ mask_point_pred = self.point_head(fine_grained_point_feats,
+ coarse_point_feats)
+
+ point_indices = point_indices.unsqueeze(1).expand(-1, channels, -1)
+ refined_mask_pred = refined_mask_pred.reshape(
+ num_rois, channels, mask_height * mask_width)
+ refined_mask_pred = refined_mask_pred.scatter_(
+ 2, point_indices, mask_point_pred)
+ refined_mask_pred = refined_mask_pred.view(num_rois, channels,
+ mask_height, mask_width)
+
+ return refined_mask_pred
+
+ def simple_test_mask(self,
+ x,
+ img_metas,
+ det_bboxes,
+ det_labels,
+ rescale=False):
+ """Obtain mask prediction without augmentation."""
+ ori_shapes = tuple(meta['ori_shape'] for meta in img_metas)
+ scale_factors = tuple(meta['scale_factor'] for meta in img_metas)
+ num_imgs = len(det_bboxes)
+ if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes):
+ segm_results = [[[] for _ in range(self.mask_head.num_classes)]
+ for _ in range(num_imgs)]
+ else:
+ # if det_bboxes is rescaled to the original image size, we need to
+ # rescale it back to the testing scale to obtain RoIs.
+ if rescale and not isinstance(scale_factors[0], float):
+ scale_factors = [
+ torch.from_numpy(scale_factor).to(det_bboxes[0].device)
+ for scale_factor in scale_factors
+ ]
+ _bboxes = [
+ det_bboxes[i][:, :4] *
+ scale_factors[i] if rescale else det_bboxes[i][:, :4]
+ for i in range(len(det_bboxes))
+ ]
+ mask_rois = bbox2roi(_bboxes)
+ mask_results = self._mask_forward(x, mask_rois)
+ # split batch mask prediction back to each image
+ mask_pred = mask_results['mask_pred']
+ num_mask_roi_per_img = [len(det_bbox) for det_bbox in det_bboxes]
+ mask_preds = mask_pred.split(num_mask_roi_per_img, 0)
+ mask_rois = mask_rois.split(num_mask_roi_per_img, 0)
+
+ # apply mask post-processing to each image individually
+ segm_results = []
+ for i in range(num_imgs):
+ if det_bboxes[i].shape[0] == 0:
+ segm_results.append(
+ [[] for _ in range(self.mask_head.num_classes)])
+ else:
+ x_i = [xx[[i]] for xx in x]
+ mask_rois_i = mask_rois[i]
+ mask_rois_i[:, 0] = 0 # TODO: remove this hack
+ mask_pred_i = self._mask_point_forward_test(
+ x_i, mask_rois_i, det_labels[i], mask_preds[i],
+ [img_metas])
+ segm_result = self.mask_head.get_seg_masks(
+ mask_pred_i, _bboxes[i], det_labels[i], self.test_cfg,
+ ori_shapes[i], scale_factors[i], rescale)
+ segm_results.append(segm_result)
+ return segm_results
+
+ def aug_test_mask(self, feats, img_metas, det_bboxes, det_labels):
+ """Test for mask head with test time augmentation."""
+ if det_bboxes.shape[0] == 0:
+ segm_result = [[] for _ in range(self.mask_head.num_classes)]
+ else:
+ aug_masks = []
+ for x, img_meta in zip(feats, img_metas):
+ img_shape = img_meta[0]['img_shape']
+ scale_factor = img_meta[0]['scale_factor']
+ flip = img_meta[0]['flip']
+ _bboxes = bbox_mapping(det_bboxes[:, :4], img_shape,
+ scale_factor, flip)
+ mask_rois = bbox2roi([_bboxes])
+ mask_results = self._mask_forward(x, mask_rois)
+ mask_results['mask_pred'] = self._mask_point_forward_test(
+ x, mask_rois, det_labels, mask_results['mask_pred'],
+ img_metas)
+ # convert to numpy array to save memory
+ aug_masks.append(
+ mask_results['mask_pred'].sigmoid().cpu().numpy())
+ merged_masks = merge_aug_masks(aug_masks, img_metas, self.test_cfg)
+
+ ori_shape = img_metas[0][0]['ori_shape']
+ segm_result = self.mask_head.get_seg_masks(
+ merged_masks,
+ det_bboxes,
+ det_labels,
+ self.test_cfg,
+ ori_shape,
+ scale_factor=1.0,
+ rescale=False)
+ return segm_result
diff --git a/annotator/uniformer/mmdet_null/models/roi_heads/roi_extractors/__init__.py b/annotator/uniformer/mmdet_null/models/roi_heads/roi_extractors/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..a6ec0ecc3063cd23c2463f2f53f1c2a83b04d43b
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/roi_heads/roi_extractors/__init__.py
@@ -0,0 +1,7 @@
+from .generic_roi_extractor import GenericRoIExtractor
+from .single_level_roi_extractor import SingleRoIExtractor
+
+__all__ = [
+ 'SingleRoIExtractor',
+ 'GenericRoIExtractor',
+]
diff --git a/annotator/uniformer/mmdet_null/models/roi_heads/roi_extractors/base_roi_extractor.py b/annotator/uniformer/mmdet_null/models/roi_heads/roi_extractors/base_roi_extractor.py
new file mode 100644
index 0000000000000000000000000000000000000000..847932547c6c309ae38b45dc43ac0ef8ca66d347
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/roi_heads/roi_extractors/base_roi_extractor.py
@@ -0,0 +1,83 @@
+from abc import ABCMeta, abstractmethod
+
+import torch
+import torch.nn as nn
+from mmcv import ops
+
+
+class BaseRoIExtractor(nn.Module, metaclass=ABCMeta):
+ """Base class for RoI extractor.
+
+ Args:
+ roi_layer (dict): Specify RoI layer type and arguments.
+ out_channels (int): Output channels of RoI layers.
+ featmap_strides (List[int]): Strides of input feature maps.
+ """
+
+ def __init__(self, roi_layer, out_channels, featmap_strides):
+ super(BaseRoIExtractor, self).__init__()
+ self.roi_layers = self.build_roi_layers(roi_layer, featmap_strides)
+ self.out_channels = out_channels
+ self.featmap_strides = featmap_strides
+ self.fp16_enabled = False
+
+ @property
+ def num_inputs(self):
+ """int: Number of input feature maps."""
+ return len(self.featmap_strides)
+
+ def init_weights(self):
+ pass
+
+ def build_roi_layers(self, layer_cfg, featmap_strides):
+ """Build RoI operator to extract feature from each level feature map.
+
+ Args:
+ layer_cfg (dict): Dictionary to construct and config RoI layer
+ operation. Options are modules under ``mmcv/ops`` such as
+ ``RoIAlign``.
+            featmap_strides (List[int]): The stride of each input feature map
+                w.r.t. the original image size, which is used to scale RoI
+ coordinate (original image coordinate system) to feature
+ coordinate system.
+
+ Returns:
+ nn.ModuleList: The RoI extractor modules for each level feature
+ map.
+ """
+
+ cfg = layer_cfg.copy()
+ layer_type = cfg.pop('type')
+ assert hasattr(ops, layer_type)
+ layer_cls = getattr(ops, layer_type)
+ roi_layers = nn.ModuleList(
+ [layer_cls(spatial_scale=1 / s, **cfg) for s in featmap_strides])
+ return roi_layers
+
+ def roi_rescale(self, rois, scale_factor):
+ """Scale RoI coordinates by scale factor.
+
+ Args:
+ rois (torch.Tensor): RoI (Region of Interest), shape (n, 5)
+ scale_factor (float): Scale factor that RoI will be multiplied by.
+
+ Returns:
+ torch.Tensor: Scaled RoI.
+ """
+
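+        # Rescale around the box centre: (cx, cy) stays fixed while width and
+        # height are multiplied by scale_factor (e.g. 2.0 doubles each side).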
+ cx = (rois[:, 1] + rois[:, 3]) * 0.5
+ cy = (rois[:, 2] + rois[:, 4]) * 0.5
+ w = rois[:, 3] - rois[:, 1]
+ h = rois[:, 4] - rois[:, 2]
+ new_w = w * scale_factor
+ new_h = h * scale_factor
+ x1 = cx - new_w * 0.5
+ x2 = cx + new_w * 0.5
+ y1 = cy - new_h * 0.5
+ y2 = cy + new_h * 0.5
+ new_rois = torch.stack((rois[:, 0], x1, y1, x2, y2), dim=-1)
+ return new_rois
+
+ @abstractmethod
+ def forward(self, feats, rois, roi_scale_factor=None):
+ pass
diff --git a/annotator/uniformer/mmdet_null/models/roi_heads/roi_extractors/generic_roi_extractor.py b/annotator/uniformer/mmdet_null/models/roi_heads/roi_extractors/generic_roi_extractor.py
new file mode 100644
index 0000000000000000000000000000000000000000..80c25bb8fde7844c994bfc1f4ae1a2d960cbf3d6
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/roi_heads/roi_extractors/generic_roi_extractor.py
@@ -0,0 +1,83 @@
+from mmcv.cnn.bricks import build_plugin_layer
+from mmcv.runner import force_fp32
+
+from mmdet.models.builder import ROI_EXTRACTORS
+from .base_roi_extractor import BaseRoIExtractor
+
+
+@ROI_EXTRACTORS.register_module()
+class GenericRoIExtractor(BaseRoIExtractor):
+ """Extract RoI features from all level feature maps levels.
+
+    This is the implementation of `A novel Region of Interest Extraction Layer
+    for Instance Segmentation <https://arxiv.org/abs/2004.13665>`_.
+
+ Args:
+ aggregation (str): The method to aggregate multiple feature maps.
+ Options are 'sum', 'concat'. Default: 'sum'.
+ pre_cfg (dict | None): Specify pre-processing modules. Default: None.
+ post_cfg (dict | None): Specify post-processing modules. Default: None.
+ kwargs (keyword arguments): Arguments that are the same
+ as :class:`BaseRoIExtractor`.
+ """
+
+ def __init__(self,
+ aggregation='sum',
+ pre_cfg=None,
+ post_cfg=None,
+ **kwargs):
+ super(GenericRoIExtractor, self).__init__(**kwargs)
+
+ assert aggregation in ['sum', 'concat']
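+        # 'sum' accumulates the per-level RoI features element-wise, while
+        # 'concat' stacks them along the channel dimension (so out_channels
+        # must equal the sum of the per-level channels).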
+
+ self.aggregation = aggregation
+ self.with_post = post_cfg is not None
+ self.with_pre = pre_cfg is not None
+ # build pre/post processing modules
+ if self.with_post:
+ self.post_module = build_plugin_layer(post_cfg, '_post_module')[1]
+ if self.with_pre:
+ self.pre_module = build_plugin_layer(pre_cfg, '_pre_module')[1]
+
+ @force_fp32(apply_to=('feats', ), out_fp16=True)
+ def forward(self, feats, rois, roi_scale_factor=None):
+ """Forward function."""
+ if len(feats) == 1:
+ return self.roi_layers[0](feats[0], rois)
+
+ out_size = self.roi_layers[0].output_size
+ num_levels = len(feats)
+ roi_feats = feats[0].new_zeros(
+ rois.size(0), self.out_channels, *out_size)
+
+        # sometimes rois is an empty tensor
+ if roi_feats.shape[0] == 0:
+ return roi_feats
+
+ if roi_scale_factor is not None:
+ rois = self.roi_rescale(rois, roi_scale_factor)
+
+ # mark the starting channels for concat mode
+ start_channels = 0
+ for i in range(num_levels):
+ roi_feats_t = self.roi_layers[i](feats[i], rois)
+ end_channels = start_channels + roi_feats_t.size(1)
+ if self.with_pre:
+ # apply pre-processing to a RoI extracted from each layer
+ roi_feats_t = self.pre_module(roi_feats_t)
+ if self.aggregation == 'sum':
+ # and sum them all
+ roi_feats += roi_feats_t
+ else:
+ # and concat them along channel dimension
+ roi_feats[:, start_channels:end_channels] = roi_feats_t
+ # update channels starting position
+ start_channels = end_channels
+ # check if concat channels match at the end
+ if self.aggregation == 'concat':
+ assert start_channels == self.out_channels
+
+ if self.with_post:
+            # apply post-processing before returning the result
+ roi_feats = self.post_module(roi_feats)
+ return roi_feats
diff --git a/annotator/uniformer/mmdet_null/models/roi_heads/roi_extractors/single_level_roi_extractor.py b/annotator/uniformer/mmdet_null/models/roi_heads/roi_extractors/single_level_roi_extractor.py
new file mode 100644
index 0000000000000000000000000000000000000000..cfc838f23270a1ae4d70f90059b67a890850e981
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/roi_heads/roi_extractors/single_level_roi_extractor.py
@@ -0,0 +1,108 @@
+import torch
+from mmcv.runner import force_fp32
+
+from mmdet.models.builder import ROI_EXTRACTORS
+from .base_roi_extractor import BaseRoIExtractor
+
+
+@ROI_EXTRACTORS.register_module()
+class SingleRoIExtractor(BaseRoIExtractor):
+ """Extract RoI features from a single level feature map.
+
+ If there are multiple input feature levels, each RoI is mapped to a level
+ according to its scale. The mapping rule is proposed in
+    `FPN <https://arxiv.org/abs/1612.03144>`_.
+
+ Args:
+ roi_layer (dict): Specify RoI layer type and arguments.
+ out_channels (int): Output channels of RoI layers.
+ featmap_strides (List[int]): Strides of input feature maps.
+ finest_scale (int): Scale threshold of mapping to level 0. Default: 56.
+ """
+
+ def __init__(self,
+ roi_layer,
+ out_channels,
+ featmap_strides,
+ finest_scale=56):
+ super(SingleRoIExtractor, self).__init__(roi_layer, out_channels,
+ featmap_strides)
+ self.finest_scale = finest_scale
+
+ def map_roi_levels(self, rois, num_levels):
+ """Map rois to corresponding feature levels by scales.
+
+ - scale < finest_scale * 2: level 0
+ - finest_scale * 2 <= scale < finest_scale * 4: level 1
+ - finest_scale * 4 <= scale < finest_scale * 8: level 2
+ - scale >= finest_scale * 8: level 3
+
+ Args:
+ rois (Tensor): Input RoIs, shape (k, 5).
+ num_levels (int): Total level number.
+
+ Returns:
+ Tensor: Level index (0-based) of each RoI, shape (k, )
+ """
+ scale = torch.sqrt(
+ (rois[:, 3] - rois[:, 1]) * (rois[:, 4] - rois[:, 2]))
+ target_lvls = torch.floor(torch.log2(scale / self.finest_scale + 1e-6))
+ target_lvls = target_lvls.clamp(min=0, max=num_levels - 1).long()
+ return target_lvls
+
+ @force_fp32(apply_to=('feats', ), out_fp16=True)
+ def forward(self, feats, rois, roi_scale_factor=None):
+ """Forward function."""
+ out_size = self.roi_layers[0].output_size
+ num_levels = len(feats)
+ expand_dims = (-1, self.out_channels * out_size[0] * out_size[1])
+ if torch.onnx.is_in_onnx_export():
+ # Work around to export mask-rcnn to onnx
+ roi_feats = rois[:, :1].clone().detach()
+ roi_feats = roi_feats.expand(*expand_dims)
+ roi_feats = roi_feats.reshape(-1, self.out_channels, *out_size)
+ roi_feats = roi_feats * 0
+ else:
+ roi_feats = feats[0].new_zeros(
+ rois.size(0), self.out_channels, *out_size)
+ # TODO: remove this when parrots supports
+ if torch.__version__ == 'parrots':
+ roi_feats.requires_grad = True
+
+ if num_levels == 1:
+ if len(rois) == 0:
+ return roi_feats
+ return self.roi_layers[0](feats[0], rois)
+
+ target_lvls = self.map_roi_levels(rois, num_levels)
+
+ if roi_scale_factor is not None:
+ rois = self.roi_rescale(rois, roi_scale_factor)
+
+ for i in range(num_levels):
+ mask = target_lvls == i
+ if torch.onnx.is_in_onnx_export():
+ # To keep all roi_align nodes exported to onnx
+ # and skip nonzero op
+ mask = mask.float().unsqueeze(-1).expand(*expand_dims).reshape(
+ roi_feats.shape)
+ roi_feats_t = self.roi_layers[i](feats[i], rois)
+ roi_feats_t *= mask
+ roi_feats += roi_feats_t
+ continue
+ inds = mask.nonzero(as_tuple=False).squeeze(1)
+ if inds.numel() > 0:
+ rois_ = rois[inds]
+ roi_feats_t = self.roi_layers[i](feats[i], rois_)
+ roi_feats[inds] = roi_feats_t
+ else:
+ # Sometimes some pyramid levels will not be used for RoI
+ # feature extraction and this will cause an incomplete
+ # computation graph in one GPU, which is different from those
+ # in other GPUs and will cause a hanging error.
+                # Therefore, we add a zero-valued dummy term so that every
+                # feature pyramid level stays in the computation graph and
+                # such runtime hangs are avoided.
+ roi_feats += sum(
+ x.view(-1)[0]
+ for x in self.parameters()) * 0. + feats[i].sum() * 0.
+ return roi_feats
diff --git a/annotator/uniformer/mmdet_null/models/roi_heads/scnet_roi_head.py b/annotator/uniformer/mmdet_null/models/roi_heads/scnet_roi_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..85aaa2f0600afbdfc8b0917cb5f341740776a603
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/roi_heads/scnet_roi_head.py
@@ -0,0 +1,582 @@
+import torch
+import torch.nn.functional as F
+
+from mmdet.core import (bbox2result, bbox2roi, bbox_mapping, merge_aug_bboxes,
+ merge_aug_masks, multiclass_nms)
+from ..builder import HEADS, build_head, build_roi_extractor
+from .cascade_roi_head import CascadeRoIHead
+
+
+@HEADS.register_module()
+class SCNetRoIHead(CascadeRoIHead):
+ """RoIHead for `SCNet `_.
+
+ Args:
+        num_stages (int): Number of cascade stages.
+        stage_loss_weights (list): Loss weight of each cascade stage.
+        semantic_roi_extractor (dict): Config to build the semantic RoI
+            extractor.
+        semantic_head (dict): Config to build the semantic head.
+        feat_relay_head (dict): Config to build the feature relay head.
+        glbctx_head (dict): Config to build the global context head.
+ """
+
+ def __init__(self,
+ num_stages,
+ stage_loss_weights,
+ semantic_roi_extractor=None,
+ semantic_head=None,
+ feat_relay_head=None,
+ glbctx_head=None,
+ **kwargs):
+ super(SCNetRoIHead, self).__init__(num_stages, stage_loss_weights,
+ **kwargs)
+ assert self.with_bbox and self.with_mask
+ assert not self.with_shared_head # shared head is not supported
+
+ if semantic_head is not None:
+ self.semantic_roi_extractor = build_roi_extractor(
+ semantic_roi_extractor)
+ self.semantic_head = build_head(semantic_head)
+
+ if feat_relay_head is not None:
+ self.feat_relay_head = build_head(feat_relay_head)
+
+ if glbctx_head is not None:
+ self.glbctx_head = build_head(glbctx_head)
+
+ def init_mask_head(self, mask_roi_extractor, mask_head):
+ """Initialize ``mask_head``"""
+ if mask_roi_extractor is not None:
+ self.mask_roi_extractor = build_roi_extractor(mask_roi_extractor)
+ self.mask_head = build_head(mask_head)
+
+ def init_weights(self, pretrained):
+ """Initialize the weights in head.
+
+ Args:
+ pretrained (str, optional): Path to pre-trained weights.
+ Defaults to None.
+ """
+ for i in range(self.num_stages):
+ if self.with_bbox:
+ self.bbox_roi_extractor[i].init_weights()
+ self.bbox_head[i].init_weights()
+ if self.with_mask:
+ self.mask_roi_extractor.init_weights()
+ self.mask_head.init_weights()
+ if self.with_semantic:
+ self.semantic_head.init_weights()
+ if self.with_glbctx:
+ self.glbctx_head.init_weights()
+ if self.with_feat_relay:
+ self.feat_relay_head.init_weights()
+
+ @property
+ def with_semantic(self):
+ """bool: whether the head has semantic head"""
+ return hasattr(self,
+ 'semantic_head') and self.semantic_head is not None
+
+ @property
+ def with_feat_relay(self):
+ """bool: whether the head has feature relay head"""
+ return (hasattr(self, 'feat_relay_head')
+ and self.feat_relay_head is not None)
+
+ @property
+ def with_glbctx(self):
+ """bool: whether the head has global context head"""
+ return hasattr(self, 'glbctx_head') and self.glbctx_head is not None
+
+ def _fuse_glbctx(self, roi_feats, glbctx_feat, rois):
+ """Fuse global context feats with roi feats."""
+ assert roi_feats.size(0) == rois.size(0)
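+        # rois[:, 0] stores the batch index, so add each image's global
+        # context vector to every RoI feature coming from that image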
+ img_inds = torch.unique(rois[:, 0].cpu(), sorted=True).long()
+ fused_feats = torch.zeros_like(roi_feats)
+ for img_id in img_inds:
+ inds = (rois[:, 0] == img_id.item())
+ fused_feats[inds] = roi_feats[inds] + glbctx_feat[img_id]
+ return fused_feats
+
+ def _slice_pos_feats(self, feats, sampling_results):
+ """Get features from pos rois."""
+ num_rois = [res.bboxes.size(0) for res in sampling_results]
+ num_pos_rois = [res.pos_bboxes.size(0) for res in sampling_results]
+ inds = torch.zeros(sum(num_rois), dtype=torch.bool)
+ start = 0
+ for i in range(len(num_rois)):
+ start = 0 if i == 0 else start + num_rois[i - 1]
+ stop = start + num_pos_rois[i]
+ inds[start:stop] = 1
+ sliced_feats = feats[inds]
+ return sliced_feats
+
+ def _bbox_forward(self,
+ stage,
+ x,
+ rois,
+ semantic_feat=None,
+ glbctx_feat=None):
+ """Box head forward function used in both training and testing."""
+ bbox_roi_extractor = self.bbox_roi_extractor[stage]
+ bbox_head = self.bbox_head[stage]
+ bbox_feats = bbox_roi_extractor(
+ x[:len(bbox_roi_extractor.featmap_strides)], rois)
+ if self.with_semantic and semantic_feat is not None:
+ bbox_semantic_feat = self.semantic_roi_extractor([semantic_feat],
+ rois)
+ if bbox_semantic_feat.shape[-2:] != bbox_feats.shape[-2:]:
+ bbox_semantic_feat = F.adaptive_avg_pool2d(
+ bbox_semantic_feat, bbox_feats.shape[-2:])
+ bbox_feats += bbox_semantic_feat
+ if self.with_glbctx and glbctx_feat is not None:
+ bbox_feats = self._fuse_glbctx(bbox_feats, glbctx_feat, rois)
+ cls_score, bbox_pred, relayed_feat = bbox_head(
+ bbox_feats, return_shared_feat=True)
+
+ bbox_results = dict(
+ cls_score=cls_score,
+ bbox_pred=bbox_pred,
+ relayed_feat=relayed_feat)
+ return bbox_results
+
+ def _mask_forward(self,
+ x,
+ rois,
+ semantic_feat=None,
+ glbctx_feat=None,
+ relayed_feat=None):
+ """Mask head forward function used in both training and testing."""
+ mask_feats = self.mask_roi_extractor(
+ x[:self.mask_roi_extractor.num_inputs], rois)
+ if self.with_semantic and semantic_feat is not None:
+ mask_semantic_feat = self.semantic_roi_extractor([semantic_feat],
+ rois)
+ if mask_semantic_feat.shape[-2:] != mask_feats.shape[-2:]:
+ mask_semantic_feat = F.adaptive_avg_pool2d(
+ mask_semantic_feat, mask_feats.shape[-2:])
+ mask_feats += mask_semantic_feat
+ if self.with_glbctx and glbctx_feat is not None:
+ mask_feats = self._fuse_glbctx(mask_feats, glbctx_feat, rois)
+ if self.with_feat_relay and relayed_feat is not None:
+ mask_feats = mask_feats + relayed_feat
+ mask_pred = self.mask_head(mask_feats)
+ mask_results = dict(mask_pred=mask_pred)
+
+ return mask_results
+
+ def _bbox_forward_train(self,
+ stage,
+ x,
+ sampling_results,
+ gt_bboxes,
+ gt_labels,
+ rcnn_train_cfg,
+ semantic_feat=None,
+ glbctx_feat=None):
+ """Run forward function and calculate loss for box head in training."""
+ bbox_head = self.bbox_head[stage]
+ rois = bbox2roi([res.bboxes for res in sampling_results])
+ bbox_results = self._bbox_forward(
+ stage,
+ x,
+ rois,
+ semantic_feat=semantic_feat,
+ glbctx_feat=glbctx_feat)
+
+ bbox_targets = bbox_head.get_targets(sampling_results, gt_bboxes,
+ gt_labels, rcnn_train_cfg)
+ loss_bbox = bbox_head.loss(bbox_results['cls_score'],
+ bbox_results['bbox_pred'], rois,
+ *bbox_targets)
+
+ bbox_results.update(
+ loss_bbox=loss_bbox, rois=rois, bbox_targets=bbox_targets)
+ return bbox_results
+
+ def _mask_forward_train(self,
+ x,
+ sampling_results,
+ gt_masks,
+ rcnn_train_cfg,
+ semantic_feat=None,
+ glbctx_feat=None,
+ relayed_feat=None):
+ """Run forward function and calculate loss for mask head in
+ training."""
+ pos_rois = bbox2roi([res.pos_bboxes for res in sampling_results])
+ mask_results = self._mask_forward(
+ x,
+ pos_rois,
+ semantic_feat=semantic_feat,
+ glbctx_feat=glbctx_feat,
+ relayed_feat=relayed_feat)
+
+ mask_targets = self.mask_head.get_targets(sampling_results, gt_masks,
+ rcnn_train_cfg)
+ pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results])
+ loss_mask = self.mask_head.loss(mask_results['mask_pred'],
+ mask_targets, pos_labels)
+
+        # mask_head.loss returns dict(loss_mask=...), which is what the
+        # caller expects as mask_results
+        mask_results = loss_mask
+ return mask_results
+
+ def forward_train(self,
+ x,
+ img_metas,
+ proposal_list,
+ gt_bboxes,
+ gt_labels,
+ gt_bboxes_ignore=None,
+ gt_masks=None,
+ gt_semantic_seg=None):
+ """
+ Args:
+ x (list[Tensor]): list of multi-level img features.
+
+ img_metas (list[dict]): list of image info dict where each dict
+ has: 'img_shape', 'scale_factor', 'flip', and may also contain
+ 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
+ For details on the values of these keys see
+ `mmdet/datasets/pipelines/formatting.py:Collect`.
+
+            proposal_list (list[Tensor]): list of region proposals.
+
+ gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
+ shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
+
+ gt_labels (list[Tensor]): class indices corresponding to each box
+
+ gt_bboxes_ignore (None, list[Tensor]): specify which bounding
+ boxes can be ignored when computing the loss.
+
+ gt_masks (None, Tensor) : true segmentation masks for each box
+ used if the architecture supports a segmentation task.
+
+ gt_semantic_seg (None, list[Tensor]): semantic segmentation masks
+ used if the architecture supports semantic segmentation task.
+
+ Returns:
+ dict[str, Tensor]: a dictionary of loss components
+ """
+ losses = dict()
+
+ # semantic segmentation branch
+ if self.with_semantic:
+ semantic_pred, semantic_feat = self.semantic_head(x)
+ loss_seg = self.semantic_head.loss(semantic_pred, gt_semantic_seg)
+ losses['loss_semantic_seg'] = loss_seg
+ else:
+ semantic_feat = None
+
+ # global context branch
+ if self.with_glbctx:
+ mc_pred, glbctx_feat = self.glbctx_head(x)
+ loss_glbctx = self.glbctx_head.loss(mc_pred, gt_labels)
+ losses['loss_glbctx'] = loss_glbctx
+ else:
+ glbctx_feat = None
+
+ for i in range(self.num_stages):
+ self.current_stage = i
+ rcnn_train_cfg = self.train_cfg[i]
+ lw = self.stage_loss_weights[i]
+
+ # assign gts and sample proposals
+ sampling_results = []
+ bbox_assigner = self.bbox_assigner[i]
+ bbox_sampler = self.bbox_sampler[i]
+ num_imgs = len(img_metas)
+ if gt_bboxes_ignore is None:
+ gt_bboxes_ignore = [None for _ in range(num_imgs)]
+
+ for j in range(num_imgs):
+ assign_result = bbox_assigner.assign(proposal_list[j],
+ gt_bboxes[j],
+ gt_bboxes_ignore[j],
+ gt_labels[j])
+ sampling_result = bbox_sampler.sample(
+ assign_result,
+ proposal_list[j],
+ gt_bboxes[j],
+ gt_labels[j],
+ feats=[lvl_feat[j][None] for lvl_feat in x])
+ sampling_results.append(sampling_result)
+
+ bbox_results = \
+ self._bbox_forward_train(
+ i, x, sampling_results, gt_bboxes, gt_labels,
+ rcnn_train_cfg, semantic_feat, glbctx_feat)
+ roi_labels = bbox_results['bbox_targets'][0]
+
+ for name, value in bbox_results['loss_bbox'].items():
+ losses[f's{i}.{name}'] = (
+ value * lw if 'loss' in name else value)
+
+ # refine boxes
+ if i < self.num_stages - 1:
+ pos_is_gts = [res.pos_is_gt for res in sampling_results]
+ with torch.no_grad():
+ proposal_list = self.bbox_head[i].refine_bboxes(
+ bbox_results['rois'], roi_labels,
+ bbox_results['bbox_pred'], pos_is_gts, img_metas)
+
+ if self.with_feat_relay:
+ relayed_feat = self._slice_pos_feats(bbox_results['relayed_feat'],
+ sampling_results)
+ relayed_feat = self.feat_relay_head(relayed_feat)
+ else:
+ relayed_feat = None
+
+ mask_results = self._mask_forward_train(x, sampling_results, gt_masks,
+ rcnn_train_cfg, semantic_feat,
+ glbctx_feat, relayed_feat)
+ mask_lw = sum(self.stage_loss_weights)
+ losses['loss_mask'] = mask_lw * mask_results['loss_mask']
+
+ return losses
+
+ def simple_test(self, x, proposal_list, img_metas, rescale=False):
+ """Test without augmentation."""
+ if self.with_semantic:
+ _, semantic_feat = self.semantic_head(x)
+ else:
+ semantic_feat = None
+
+ if self.with_glbctx:
+ mc_pred, glbctx_feat = self.glbctx_head(x)
+ else:
+ glbctx_feat = None
+
+ num_imgs = len(proposal_list)
+ img_shapes = tuple(meta['img_shape'] for meta in img_metas)
+ ori_shapes = tuple(meta['ori_shape'] for meta in img_metas)
+ scale_factors = tuple(meta['scale_factor'] for meta in img_metas)
+
+ # "ms" in variable names means multi-stage
+ ms_scores = []
+ rcnn_test_cfg = self.test_cfg
+
+ rois = bbox2roi(proposal_list)
+ for i in range(self.num_stages):
+ bbox_head = self.bbox_head[i]
+ bbox_results = self._bbox_forward(
+ i,
+ x,
+ rois,
+ semantic_feat=semantic_feat,
+ glbctx_feat=glbctx_feat)
+ # split batch bbox prediction back to each image
+ cls_score = bbox_results['cls_score']
+ bbox_pred = bbox_results['bbox_pred']
+ num_proposals_per_img = tuple(len(p) for p in proposal_list)
+ rois = rois.split(num_proposals_per_img, 0)
+ cls_score = cls_score.split(num_proposals_per_img, 0)
+ bbox_pred = bbox_pred.split(num_proposals_per_img, 0)
+ ms_scores.append(cls_score)
+
+ if i < self.num_stages - 1:
+ bbox_label = [s[:, :-1].argmax(dim=1) for s in cls_score]
+ rois = torch.cat([
+ bbox_head.regress_by_class(rois[i], bbox_label[i],
+ bbox_pred[i], img_metas[i])
+ for i in range(num_imgs)
+ ])
+
+ # average scores of each image by stages
+ cls_score = [
+ sum([score[i] for score in ms_scores]) / float(len(ms_scores))
+ for i in range(num_imgs)
+ ]
+
+ # apply bbox post-processing to each image individually
+ det_bboxes = []
+ det_labels = []
+ for i in range(num_imgs):
+ det_bbox, det_label = self.bbox_head[-1].get_bboxes(
+ rois[i],
+ cls_score[i],
+ bbox_pred[i],
+ img_shapes[i],
+ scale_factors[i],
+ rescale=rescale,
+ cfg=rcnn_test_cfg)
+ det_bboxes.append(det_bbox)
+ det_labels.append(det_label)
+ det_bbox_results = [
+ bbox2result(det_bboxes[i], det_labels[i],
+ self.bbox_head[-1].num_classes)
+ for i in range(num_imgs)
+ ]
+
+ if self.with_mask:
+ if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes):
+ mask_classes = self.mask_head.num_classes
+ det_segm_results = [[[] for _ in range(mask_classes)]
+ for _ in range(num_imgs)]
+ else:
+ if rescale and not isinstance(scale_factors[0], float):
+ scale_factors = [
+ torch.from_numpy(scale_factor).to(det_bboxes[0].device)
+ for scale_factor in scale_factors
+ ]
+ _bboxes = [
+ det_bboxes[i][:, :4] *
+ scale_factors[i] if rescale else det_bboxes[i]
+ for i in range(num_imgs)
+ ]
+ mask_rois = bbox2roi(_bboxes)
+
+ # get relay feature on mask_rois
+ bbox_results = self._bbox_forward(
+ -1,
+ x,
+ mask_rois,
+ semantic_feat=semantic_feat,
+ glbctx_feat=glbctx_feat)
+ relayed_feat = bbox_results['relayed_feat']
+ relayed_feat = self.feat_relay_head(relayed_feat)
+
+ mask_results = self._mask_forward(
+ x,
+ mask_rois,
+ semantic_feat=semantic_feat,
+ glbctx_feat=glbctx_feat,
+ relayed_feat=relayed_feat)
+ mask_pred = mask_results['mask_pred']
+
+ # split batch mask prediction back to each image
+ num_bbox_per_img = tuple(len(_bbox) for _bbox in _bboxes)
+ mask_preds = mask_pred.split(num_bbox_per_img, 0)
+
+ # apply mask post-processing to each image individually
+ det_segm_results = []
+ for i in range(num_imgs):
+ if det_bboxes[i].shape[0] == 0:
+ det_segm_results.append(
+ [[] for _ in range(self.mask_head.num_classes)])
+ else:
+ segm_result = self.mask_head.get_seg_masks(
+ mask_preds[i], _bboxes[i], det_labels[i],
+ self.test_cfg, ori_shapes[i], scale_factors[i],
+ rescale)
+ det_segm_results.append(segm_result)
+
+ # return results
+ if self.with_mask:
+ return list(zip(det_bbox_results, det_segm_results))
+ else:
+ return det_bbox_results
+
+ def aug_test(self, img_feats, proposal_list, img_metas, rescale=False):
+ if self.with_semantic:
+ semantic_feats = [
+ self.semantic_head(feat)[1] for feat in img_feats
+ ]
+ else:
+ semantic_feats = [None] * len(img_metas)
+
+ if self.with_glbctx:
+ glbctx_feats = [self.glbctx_head(feat)[1] for feat in img_feats]
+ else:
+ glbctx_feats = [None] * len(img_metas)
+
+ rcnn_test_cfg = self.test_cfg
+ aug_bboxes = []
+ aug_scores = []
+ for x, img_meta, semantic_feat, glbctx_feat in zip(
+ img_feats, img_metas, semantic_feats, glbctx_feats):
+ # only one image in the batch
+ img_shape = img_meta[0]['img_shape']
+ scale_factor = img_meta[0]['scale_factor']
+ flip = img_meta[0]['flip']
+
+ proposals = bbox_mapping(proposal_list[0][:, :4], img_shape,
+ scale_factor, flip)
+ # "ms" in variable names means multi-stage
+ ms_scores = []
+
+ rois = bbox2roi([proposals])
+ for i in range(self.num_stages):
+ bbox_head = self.bbox_head[i]
+ bbox_results = self._bbox_forward(
+ i,
+ x,
+ rois,
+ semantic_feat=semantic_feat,
+ glbctx_feat=glbctx_feat)
+ ms_scores.append(bbox_results['cls_score'])
+ if i < self.num_stages - 1:
+ bbox_label = bbox_results['cls_score'].argmax(dim=1)
+ rois = bbox_head.regress_by_class(
+ rois, bbox_label, bbox_results['bbox_pred'],
+ img_meta[0])
+
+ cls_score = sum(ms_scores) / float(len(ms_scores))
+ bboxes, scores = self.bbox_head[-1].get_bboxes(
+ rois,
+ cls_score,
+ bbox_results['bbox_pred'],
+ img_shape,
+ scale_factor,
+ rescale=False,
+ cfg=None)
+ aug_bboxes.append(bboxes)
+ aug_scores.append(scores)
+
+ # after merging, bboxes will be rescaled to the original image size
+ merged_bboxes, merged_scores = merge_aug_bboxes(
+ aug_bboxes, aug_scores, img_metas, rcnn_test_cfg)
+ det_bboxes, det_labels = multiclass_nms(merged_bboxes, merged_scores,
+ rcnn_test_cfg.score_thr,
+ rcnn_test_cfg.nms,
+ rcnn_test_cfg.max_per_img)
+
+ det_bbox_results = bbox2result(det_bboxes, det_labels,
+ self.bbox_head[-1].num_classes)
+
+ if self.with_mask:
+ if det_bboxes.shape[0] == 0:
+ det_segm_results = [[]
+ for _ in range(self.mask_head.num_classes)]
+ else:
+ aug_masks = []
+ for x, img_meta, semantic_feat, glbctx_feat in zip(
+ img_feats, img_metas, semantic_feats, glbctx_feats):
+ img_shape = img_meta[0]['img_shape']
+ scale_factor = img_meta[0]['scale_factor']
+ flip = img_meta[0]['flip']
+ _bboxes = bbox_mapping(det_bboxes[:, :4], img_shape,
+ scale_factor, flip)
+ mask_rois = bbox2roi([_bboxes])
+ # get relay feature on mask_rois
+ bbox_results = self._bbox_forward(
+ -1,
+ x,
+ mask_rois,
+ semantic_feat=semantic_feat,
+ glbctx_feat=glbctx_feat)
+ relayed_feat = bbox_results['relayed_feat']
+ relayed_feat = self.feat_relay_head(relayed_feat)
+ mask_results = self._mask_forward(
+ x,
+ mask_rois,
+ semantic_feat=semantic_feat,
+ glbctx_feat=glbctx_feat,
+ relayed_feat=relayed_feat)
+ mask_pred = mask_results['mask_pred']
+ aug_masks.append(mask_pred.sigmoid().cpu().numpy())
+ merged_masks = merge_aug_masks(aug_masks, img_metas,
+ self.test_cfg)
+ ori_shape = img_metas[0][0]['ori_shape']
+ det_segm_results = self.mask_head.get_seg_masks(
+ merged_masks,
+ det_bboxes,
+ det_labels,
+ rcnn_test_cfg,
+ ori_shape,
+ scale_factor=1.0,
+ rescale=False)
+ return [(det_bbox_results, det_segm_results)]
+ else:
+ return [det_bbox_results]
diff --git a/annotator/uniformer/mmdet_null/models/roi_heads/shared_heads/__init__.py b/annotator/uniformer/mmdet_null/models/roi_heads/shared_heads/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..bbe70145b8bf7c304370f725f5afa8db98666679
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/roi_heads/shared_heads/__init__.py
@@ -0,0 +1,3 @@
+from .res_layer import ResLayer
+
+__all__ = ['ResLayer']
diff --git a/annotator/uniformer/mmdet_null/models/roi_heads/shared_heads/res_layer.py b/annotator/uniformer/mmdet_null/models/roi_heads/shared_heads/res_layer.py
new file mode 100644
index 0000000000000000000000000000000000000000..b5c343258b079a0dd832d4f999c18d002b06efac
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/roi_heads/shared_heads/res_layer.py
@@ -0,0 +1,77 @@
+import torch.nn as nn
+from mmcv.cnn import constant_init, kaiming_init
+from mmcv.runner import auto_fp16, load_checkpoint
+
+from mmdet.models.backbones import ResNet
+from mmdet.models.builder import SHARED_HEADS
+from mmdet.models.utils import ResLayer as _ResLayer
+from mmdet.utils import get_root_logger
+
+
+@SHARED_HEADS.register_module()
+class ResLayer(nn.Module):
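+    """Shared head that reuses a single ResNet stage (``layer{stage + 1}``,
+    ``layer4`` by default) as the per-RoI feature extractor."""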
+
+ def __init__(self,
+ depth,
+ stage=3,
+ stride=2,
+ dilation=1,
+ style='pytorch',
+ norm_cfg=dict(type='BN', requires_grad=True),
+ norm_eval=True,
+ with_cp=False,
+ dcn=None):
+ super(ResLayer, self).__init__()
+ self.norm_eval = norm_eval
+ self.norm_cfg = norm_cfg
+ self.stage = stage
+ self.fp16_enabled = False
+ block, stage_blocks = ResNet.arch_settings[depth]
+ stage_block = stage_blocks[stage]
+ planes = 64 * 2**stage
+ inplanes = 64 * 2**(stage - 1) * block.expansion
+
+ res_layer = _ResLayer(
+ block,
+ inplanes,
+ planes,
+ stage_block,
+ stride=stride,
+ dilation=dilation,
+ style=style,
+ with_cp=with_cp,
+ norm_cfg=self.norm_cfg,
+ dcn=dcn)
+ self.add_module(f'layer{stage + 1}', res_layer)
+
+ def init_weights(self, pretrained=None):
+ """Initialize the weights in the module.
+
+ Args:
+ pretrained (str, optional): Path to pre-trained weights.
+ Defaults to None.
+ """
+ if isinstance(pretrained, str):
+ logger = get_root_logger()
+ load_checkpoint(self, pretrained, strict=False, logger=logger)
+ elif pretrained is None:
+ for m in self.modules():
+ if isinstance(m, nn.Conv2d):
+ kaiming_init(m)
+ elif isinstance(m, nn.BatchNorm2d):
+ constant_init(m, 1)
+ else:
+ raise TypeError('pretrained must be a str or None')
+
+ @auto_fp16()
+ def forward(self, x):
+ res_layer = getattr(self, f'layer{self.stage + 1}')
+ out = res_layer(x)
+ return out
+
+ def train(self, mode=True):
+ super(ResLayer, self).train(mode)
+ if self.norm_eval:
+ for m in self.modules():
+ if isinstance(m, nn.BatchNorm2d):
+ m.eval()
diff --git a/annotator/uniformer/mmdet_null/models/roi_heads/sparse_roi_head.py b/annotator/uniformer/mmdet_null/models/roi_heads/sparse_roi_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..8d85ebc4698f3fc0b974e680c343f91deff4bb50
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/roi_heads/sparse_roi_head.py
@@ -0,0 +1,311 @@
+import torch
+
+from mmdet.core import bbox2result, bbox2roi, bbox_xyxy_to_cxcywh
+from mmdet.core.bbox.samplers import PseudoSampler
+from ..builder import HEADS
+from .cascade_roi_head import CascadeRoIHead
+
+
+@HEADS.register_module()
+class SparseRoIHead(CascadeRoIHead):
+ r"""The RoIHead for `Sparse R-CNN: End-to-End Object Detection with
+    Learnable Proposals <https://arxiv.org/abs/2011.12450>`_
+
+ Args:
+        num_stages (int): Number of stages of the whole iterative process.
+            Defaults to 6.
+ stage_loss_weights (Tuple[float]): The loss
+ weight of each stage. By default all stages have
+ the same weight 1.
+ bbox_roi_extractor (dict): Config of box roi extractor.
+ bbox_head (dict): Config of box head.
+ train_cfg (dict, optional): Configuration information in train stage.
+ Defaults to None.
+ test_cfg (dict, optional): Configuration information in test stage.
+ Defaults to None.
+
+ """
+
+ def __init__(self,
+ num_stages=6,
+ stage_loss_weights=(1, 1, 1, 1, 1, 1),
+ proposal_feature_channel=256,
+ bbox_roi_extractor=dict(
+ type='SingleRoIExtractor',
+ roi_layer=dict(
+ type='RoIAlign', output_size=7, sampling_ratio=2),
+ out_channels=256,
+ featmap_strides=[4, 8, 16, 32]),
+ bbox_head=dict(
+ type='DIIHead',
+ num_classes=80,
+ num_fcs=2,
+ num_heads=8,
+ num_cls_fcs=1,
+ num_reg_fcs=3,
+ feedforward_channels=2048,
+ hidden_channels=256,
+ dropout=0.0,
+ roi_feat_size=7,
+ ffn_act_cfg=dict(type='ReLU', inplace=True)),
+ train_cfg=None,
+ test_cfg=None):
+ assert bbox_roi_extractor is not None
+ assert bbox_head is not None
+ assert len(stage_loss_weights) == num_stages
+ self.num_stages = num_stages
+ self.stage_loss_weights = stage_loss_weights
+ self.proposal_feature_channel = proposal_feature_channel
+ super(SparseRoIHead, self).__init__(
+ num_stages,
+ stage_loss_weights,
+ bbox_roi_extractor=bbox_roi_extractor,
+ bbox_head=bbox_head,
+ train_cfg=train_cfg,
+ test_cfg=test_cfg)
+        # train_cfg is None when running test.py
+ if train_cfg is not None:
+ for stage in range(num_stages):
+ assert isinstance(self.bbox_sampler[stage], PseudoSampler), \
+                    'Sparse R-CNN only supports `PseudoSampler`'
+
+ def _bbox_forward(self, stage, x, rois, object_feats, img_metas):
+ """Box head forward function used in both training and testing. Returns
+ all regression, classification results and a intermediate feature.
+
+ Args:
+ stage (int): The index of current stage in
+ iterative process.
+ x (List[Tensor]): List of FPN features
+            rois (Tensor): RoIs of the whole batch, with shape
+                (num_proposals, 5). The last dimension represents
+                (img_index, x1, y1, x2, y2).
+ object_feats (Tensor): The object feature extracted from
+ the previous stage.
+ img_metas (dict): meta information of images.
+
+ Returns:
+ dict[str, Tensor]: a dictionary of bbox head outputs,
+                containing the following results:
+
+ - cls_score (Tensor): The score of each class, has
+ shape (batch_size, num_proposals, num_classes)
+ when use focal loss or
+ (batch_size, num_proposals, num_classes+1)
+ otherwise.
+ - decode_bbox_pred (Tensor): The regression results
+ with shape (batch_size, num_proposal, 4).
+ The last dimension 4 represents
+ [tl_x, tl_y, br_x, br_y].
+ - object_feats (Tensor): The object feature extracted
+ from current stage
+ - detach_cls_score_list (list[Tensor]): The detached
+ classification results, length is batch_size, and
+ each tensor has shape (num_proposal, num_classes).
+ - detach_proposal_list (list[tensor]): The detached
+ regression results, length is batch_size, and each
+ tensor has shape (num_proposal, 4). The last
+ dimension 4 represents [tl_x, tl_y, br_x, br_y].
+ """
+ num_imgs = len(img_metas)
+ bbox_roi_extractor = self.bbox_roi_extractor[stage]
+ bbox_head = self.bbox_head[stage]
+ bbox_feats = bbox_roi_extractor(x[:bbox_roi_extractor.num_inputs],
+ rois)
+ cls_score, bbox_pred, object_feats = bbox_head(bbox_feats,
+ object_feats)
+ proposal_list = self.bbox_head[stage].refine_bboxes(
+ rois,
+ rois.new_zeros(len(rois)), # dummy arg
+ bbox_pred.view(-1, bbox_pred.size(-1)),
+ [rois.new_zeros(object_feats.size(1)) for _ in range(num_imgs)],
+ img_metas)
+ bbox_results = dict(
+ cls_score=cls_score,
+ decode_bbox_pred=torch.cat(proposal_list),
+ object_feats=object_feats,
+ # detach then use it in label assign
+ detach_cls_score_list=[
+ cls_score[i].detach() for i in range(num_imgs)
+ ],
+ detach_proposal_list=[item.detach() for item in proposal_list])
+
+ return bbox_results
+
+ def forward_train(self,
+ x,
+ proposal_boxes,
+ proposal_features,
+ img_metas,
+ gt_bboxes,
+ gt_labels,
+ gt_bboxes_ignore=None,
+ imgs_whwh=None,
+ gt_masks=None):
+ """Forward function in training stage.
+
+ Args:
+ x (list[Tensor]): list of multi-level img features.
+            proposal_boxes (Tensor): Decoded proposal bboxes, has shape
+ (batch_size, num_proposals, 4)
+ proposal_features (Tensor): Expanded proposal
+ features, has shape
+ (batch_size, num_proposals, proposal_feature_channel)
+ img_metas (list[dict]): list of image info dict where
+ each dict has: 'img_shape', 'scale_factor', 'flip',
+ and may also contain 'filename', 'ori_shape',
+ 'pad_shape', and 'img_norm_cfg'. For details on the
+ values of these keys see
+ `mmdet/datasets/pipelines/formatting.py:Collect`.
+ gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
+ shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
+ gt_labels (list[Tensor]): class indices corresponding to each box
+ gt_bboxes_ignore (None | list[Tensor]): specify which bounding
+ boxes can be ignored when computing the loss.
+ imgs_whwh (Tensor): Tensor with shape (batch_size, 4),
+ the dimension means
+                [img_width, img_height, img_width, img_height].
+ gt_masks (None | Tensor) : true segmentation masks for each box
+ used if the architecture supports a segmentation task.
+
+ Returns:
+ dict[str, Tensor]: a dictionary of loss components of all stage.
+ """
+
+ num_imgs = len(img_metas)
+ num_proposals = proposal_boxes.size(1)
+ imgs_whwh = imgs_whwh.repeat(1, num_proposals, 1)
+ all_stage_bbox_results = []
+ proposal_list = [proposal_boxes[i] for i in range(len(proposal_boxes))]
+ object_feats = proposal_features
+ all_stage_loss = {}
+ for stage in range(self.num_stages):
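+            # each stage re-extracts RoI features for the current proposals,
+            # refines the boxes and updates the per-proposal object features
+            # that are fed into the next stage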
+ rois = bbox2roi(proposal_list)
+ bbox_results = self._bbox_forward(stage, x, rois, object_feats,
+ img_metas)
+ all_stage_bbox_results.append(bbox_results)
+ if gt_bboxes_ignore is None:
+ # TODO support ignore
+ gt_bboxes_ignore = [None for _ in range(num_imgs)]
+ sampling_results = []
+ cls_pred_list = bbox_results['detach_cls_score_list']
+ proposal_list = bbox_results['detach_proposal_list']
+ for i in range(num_imgs):
+ normalize_bbox_ccwh = bbox_xyxy_to_cxcywh(proposal_list[i] /
+ imgs_whwh[i])
+ assign_result = self.bbox_assigner[stage].assign(
+ normalize_bbox_ccwh, cls_pred_list[i], gt_bboxes[i],
+ gt_labels[i], img_metas[i])
+ sampling_result = self.bbox_sampler[stage].sample(
+ assign_result, proposal_list[i], gt_bboxes[i])
+ sampling_results.append(sampling_result)
+ bbox_targets = self.bbox_head[stage].get_targets(
+ sampling_results, gt_bboxes, gt_labels, self.train_cfg[stage],
+ True)
+ cls_score = bbox_results['cls_score']
+ decode_bbox_pred = bbox_results['decode_bbox_pred']
+
+ single_stage_loss = self.bbox_head[stage].loss(
+ cls_score.view(-1, cls_score.size(-1)),
+ decode_bbox_pred.view(-1, 4),
+ *bbox_targets,
+ imgs_whwh=imgs_whwh)
+ for key, value in single_stage_loss.items():
+ all_stage_loss[f'stage{stage}_{key}'] = value * \
+ self.stage_loss_weights[stage]
+ object_feats = bbox_results['object_feats']
+
+ return all_stage_loss
+
+ def simple_test(self,
+ x,
+ proposal_boxes,
+ proposal_features,
+ img_metas,
+ imgs_whwh,
+ rescale=False):
+ """Test without augmentation.
+
+ Args:
+ x (list[Tensor]): list of multi-level img features.
+ proposal_boxes (Tensor): Decoded proposal bboxes, has shape
+ (batch_size, num_proposals, 4)
+ proposal_features (Tensor): Expanded proposal
+ features, has shape
+ (batch_size, num_proposals, proposal_feature_channel)
+ img_metas (dict): meta information of images.
+ imgs_whwh (Tensor): Tensor with shape (batch_size, 4),
+ the dimension means
+                [img_width, img_height, img_width, img_height].
+ rescale (bool): If True, return boxes in original image
+ space. Defaults to False.
+
+ Returns:
+ bbox_results (list[tuple[np.ndarray]]): \
+ [[cls1_det, cls2_det, ...], ...]. \
+ The outer list indicates images, and the inner \
+ list indicates per-class detected bboxes. The \
+ np.ndarray has shape (num_det, 5) and the last \
+ dimension 5 represents (x1, y1, x2, y2, score).
+ """
+ assert self.with_bbox, 'Bbox head must be implemented.'
+ # Decode initial proposals
+ num_imgs = len(img_metas)
+ proposal_list = [proposal_boxes[i] for i in range(num_imgs)]
+ object_feats = proposal_features
+ for stage in range(self.num_stages):
+ rois = bbox2roi(proposal_list)
+ bbox_results = self._bbox_forward(stage, x, rois, object_feats,
+ img_metas)
+ object_feats = bbox_results['object_feats']
+ cls_score = bbox_results['cls_score']
+ proposal_list = bbox_results['detach_proposal_list']
+
+ num_classes = self.bbox_head[-1].num_classes
+ det_bboxes = []
+ det_labels = []
+
+ if self.bbox_head[-1].loss_cls.use_sigmoid:
+ cls_score = cls_score.sigmoid()
+ else:
+ cls_score = cls_score.softmax(-1)[..., :-1]
+
+ for img_id in range(num_imgs):
+ cls_score_per_img = cls_score[img_id]
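+            # flatten the (num_proposals, num_classes) scores, take the global
+            # top-k, then recover the proposal index and class label from the
+            # flat indices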
+ scores_per_img, topk_indices = cls_score_per_img.flatten(
+ 0, 1).topk(
+ self.test_cfg.max_per_img, sorted=False)
+ labels_per_img = topk_indices % num_classes
+ bbox_pred_per_img = proposal_list[img_id][topk_indices //
+ num_classes]
+ if rescale:
+ scale_factor = img_metas[img_id]['scale_factor']
+ bbox_pred_per_img /= bbox_pred_per_img.new_tensor(scale_factor)
+ det_bboxes.append(
+ torch.cat([bbox_pred_per_img, scores_per_img[:, None]], dim=1))
+ det_labels.append(labels_per_img)
+
+ bbox_results = [
+ bbox2result(det_bboxes[i], det_labels[i], num_classes)
+ for i in range(num_imgs)
+ ]
+
+ return bbox_results
+
+ def aug_test(self, features, proposal_list, img_metas, rescale=False):
+ raise NotImplementedError('Sparse R-CNN does not support `aug_test`')
+
+ def forward_dummy(self, x, proposal_boxes, proposal_features, img_metas):
+ """Dummy forward function when do the flops computing."""
+ all_stage_bbox_results = []
+ proposal_list = [proposal_boxes[i] for i in range(len(proposal_boxes))]
+ object_feats = proposal_features
+ if self.with_bbox:
+ for stage in range(self.num_stages):
+ rois = bbox2roi(proposal_list)
+ bbox_results = self._bbox_forward(stage, x, rois, object_feats,
+ img_metas)
+
+ all_stage_bbox_results.append(bbox_results)
+ proposal_list = bbox_results['detach_proposal_list']
+ object_feats = bbox_results['object_feats']
+ return all_stage_bbox_results
diff --git a/annotator/uniformer/mmdet_null/models/roi_heads/standard_roi_head.py b/annotator/uniformer/mmdet_null/models/roi_heads/standard_roi_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..c530f2a5ce904439492de12ff7d267cc1e757d3a
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/roi_heads/standard_roi_head.py
@@ -0,0 +1,295 @@
+import torch
+
+from mmdet.core import bbox2result, bbox2roi, build_assigner, build_sampler
+from ..builder import HEADS, build_head, build_roi_extractor
+from .base_roi_head import BaseRoIHead
+from .test_mixins import BBoxTestMixin, MaskTestMixin
+
+
+@HEADS.register_module()
+class StandardRoIHead(BaseRoIHead, BBoxTestMixin, MaskTestMixin):
+ """Simplest base roi head including one bbox head and one mask head."""
+
+ def init_assigner_sampler(self):
+ """Initialize assigner and sampler."""
+ self.bbox_assigner = None
+ self.bbox_sampler = None
+ if self.train_cfg:
+ self.bbox_assigner = build_assigner(self.train_cfg.assigner)
+ self.bbox_sampler = build_sampler(
+ self.train_cfg.sampler, context=self)
+
+ def init_bbox_head(self, bbox_roi_extractor, bbox_head):
+ """Initialize ``bbox_head``"""
+ self.bbox_roi_extractor = build_roi_extractor(bbox_roi_extractor)
+ self.bbox_head = build_head(bbox_head)
+
+ def init_mask_head(self, mask_roi_extractor, mask_head):
+ """Initialize ``mask_head``"""
+ if mask_roi_extractor is not None:
+ self.mask_roi_extractor = build_roi_extractor(mask_roi_extractor)
+ self.share_roi_extractor = False
+ else:
+ self.share_roi_extractor = True
+ self.mask_roi_extractor = self.bbox_roi_extractor
+ self.mask_head = build_head(mask_head)
+
+ def init_weights(self, pretrained):
+ """Initialize the weights in head.
+
+ Args:
+ pretrained (str, optional): Path to pre-trained weights.
+ Defaults to None.
+ """
+ if self.with_shared_head:
+ self.shared_head.init_weights(pretrained=pretrained)
+ if self.with_bbox:
+ self.bbox_roi_extractor.init_weights()
+ self.bbox_head.init_weights()
+ if self.with_mask:
+ self.mask_head.init_weights()
+ if not self.share_roi_extractor:
+ self.mask_roi_extractor.init_weights()
+
+ def forward_dummy(self, x, proposals):
+ """Dummy forward function."""
+ # bbox head
+ outs = ()
+ rois = bbox2roi([proposals])
+ if self.with_bbox:
+ bbox_results = self._bbox_forward(x, rois)
+ outs = outs + (bbox_results['cls_score'],
+ bbox_results['bbox_pred'])
+ # mask head
+ if self.with_mask:
+ mask_rois = rois[:100]
+ mask_results = self._mask_forward(x, mask_rois)
+ outs = outs + (mask_results['mask_pred'], )
+ return outs
+
+ def forward_train(self,
+ x,
+ img_metas,
+ proposal_list,
+ gt_bboxes,
+ gt_labels,
+ gt_bboxes_ignore=None,
+ gt_masks=None):
+ """
+ Args:
+ x (list[Tensor]): list of multi-level img features.
+ img_metas (list[dict]): list of image info dict where each dict
+ has: 'img_shape', 'scale_factor', 'flip', and may also contain
+ 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
+ For details on the values of these keys see
+ `mmdet/datasets/pipelines/formatting.py:Collect`.
+            proposal_list (list[Tensor]): list of region proposals.
+            gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
+                shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
+            gt_labels (list[Tensor]): class indices corresponding to each box.
+            gt_bboxes_ignore (None | list[Tensor]): specify which bounding
+                boxes can be ignored when computing the loss.
+            gt_masks (None | Tensor): true segmentation masks for each box,
+                used if the architecture supports a segmentation task.
+
+ Returns:
+ dict[str, Tensor]: a dictionary of loss components
+ """
+ # assign gts and sample proposals
+ if self.with_bbox or self.with_mask:
+ num_imgs = len(img_metas)
+ if gt_bboxes_ignore is None:
+ gt_bboxes_ignore = [None for _ in range(num_imgs)]
+ sampling_results = []
+ for i in range(num_imgs):
+ assign_result = self.bbox_assigner.assign(
+ proposal_list[i], gt_bboxes[i], gt_bboxes_ignore[i],
+ gt_labels[i])
+ sampling_result = self.bbox_sampler.sample(
+ assign_result,
+ proposal_list[i],
+ gt_bboxes[i],
+ gt_labels[i],
+ feats=[lvl_feat[i][None] for lvl_feat in x])
+ sampling_results.append(sampling_result)
+
+ losses = dict()
+ # bbox head forward and loss
+ if self.with_bbox:
+ bbox_results = self._bbox_forward_train(x, sampling_results,
+ gt_bboxes, gt_labels,
+ img_metas)
+ losses.update(bbox_results['loss_bbox'])
+
+ # mask head forward and loss
+ if self.with_mask:
+ mask_results = self._mask_forward_train(x, sampling_results,
+ bbox_results['bbox_feats'],
+ gt_masks, img_metas)
+ losses.update(mask_results['loss_mask'])
+
+ return losses
+
+ def _bbox_forward(self, x, rois):
+ """Box head forward function used in both training and testing."""
+ # TODO: a more flexible way to decide which feature maps to use
+ bbox_feats = self.bbox_roi_extractor(
+ x[:self.bbox_roi_extractor.num_inputs], rois)
+ if self.with_shared_head:
+ bbox_feats = self.shared_head(bbox_feats)
+ cls_score, bbox_pred = self.bbox_head(bbox_feats)
+
+ bbox_results = dict(
+ cls_score=cls_score, bbox_pred=bbox_pred, bbox_feats=bbox_feats)
+ return bbox_results
+
+ def _bbox_forward_train(self, x, sampling_results, gt_bboxes, gt_labels,
+ img_metas):
+ """Run forward function and calculate loss for box head in training."""
+ rois = bbox2roi([res.bboxes for res in sampling_results])
+ bbox_results = self._bbox_forward(x, rois)
+
+ bbox_targets = self.bbox_head.get_targets(sampling_results, gt_bboxes,
+ gt_labels, self.train_cfg)
+ loss_bbox = self.bbox_head.loss(bbox_results['cls_score'],
+ bbox_results['bbox_pred'], rois,
+ *bbox_targets)
+
+ bbox_results.update(loss_bbox=loss_bbox)
+ return bbox_results
+
+ def _mask_forward_train(self, x, sampling_results, bbox_feats, gt_masks,
+ img_metas):
+ """Run forward function and calculate loss for mask head in
+ training."""
+ if not self.share_roi_extractor:
+ pos_rois = bbox2roi([res.pos_bboxes for res in sampling_results])
+ mask_results = self._mask_forward(x, pos_rois)
+ else:
+ pos_inds = []
+ device = bbox_feats.device
+ for res in sampling_results:
+ pos_inds.append(
+ torch.ones(
+ res.pos_bboxes.shape[0],
+ device=device,
+ dtype=torch.uint8))
+ pos_inds.append(
+ torch.zeros(
+ res.neg_bboxes.shape[0],
+ device=device,
+ dtype=torch.uint8))
+ pos_inds = torch.cat(pos_inds)
+
+ mask_results = self._mask_forward(
+ x, pos_inds=pos_inds, bbox_feats=bbox_feats)
+
+ mask_targets = self.mask_head.get_targets(sampling_results, gt_masks,
+ self.train_cfg)
+ pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results])
+ loss_mask = self.mask_head.loss(mask_results['mask_pred'],
+ mask_targets, pos_labels)
+
+ mask_results.update(loss_mask=loss_mask, mask_targets=mask_targets)
+ return mask_results
+
+ def _mask_forward(self, x, rois=None, pos_inds=None, bbox_feats=None):
+ """Mask head forward function used in both training and testing."""
+ assert ((rois is not None) ^
+ (pos_inds is not None and bbox_feats is not None))
+ if rois is not None:
+ mask_feats = self.mask_roi_extractor(
+ x[:self.mask_roi_extractor.num_inputs], rois)
+ if self.with_shared_head:
+ mask_feats = self.shared_head(mask_feats)
+ else:
+ assert bbox_feats is not None
+ mask_feats = bbox_feats[pos_inds]
+
+ mask_pred = self.mask_head(mask_feats)
+ mask_results = dict(mask_pred=mask_pred, mask_feats=mask_feats)
+ return mask_results
+
+ async def async_simple_test(self,
+ x,
+ proposal_list,
+ img_metas,
+ proposals=None,
+ rescale=False):
+ """Async test without augmentation."""
+ assert self.with_bbox, 'Bbox head must be implemented.'
+
+ det_bboxes, det_labels = await self.async_test_bboxes(
+ x, img_metas, proposal_list, self.test_cfg, rescale=rescale)
+ bbox_results = bbox2result(det_bboxes, det_labels,
+ self.bbox_head.num_classes)
+ if not self.with_mask:
+ return bbox_results
+ else:
+ segm_results = await self.async_test_mask(
+ x,
+ img_metas,
+ det_bboxes,
+ det_labels,
+ rescale=rescale,
+ mask_test_cfg=self.test_cfg.get('mask'))
+ return bbox_results, segm_results
+
+ def simple_test(self,
+ x,
+ proposal_list,
+ img_metas,
+ proposals=None,
+ rescale=False):
+ """Test without augmentation."""
+ assert self.with_bbox, 'Bbox head must be implemented.'
+
+ det_bboxes, det_labels = self.simple_test_bboxes(
+ x, img_metas, proposal_list, self.test_cfg, rescale=rescale)
+ if torch.onnx.is_in_onnx_export():
+ if self.with_mask:
+ segm_results = self.simple_test_mask(
+ x, img_metas, det_bboxes, det_labels, rescale=rescale)
+ return det_bboxes, det_labels, segm_results
+ else:
+ return det_bboxes, det_labels
+
+ bbox_results = [
+ bbox2result(det_bboxes[i], det_labels[i],
+ self.bbox_head.num_classes)
+ for i in range(len(det_bboxes))
+ ]
+
+ if not self.with_mask:
+ return bbox_results
+ else:
+ segm_results = self.simple_test_mask(
+ x, img_metas, det_bboxes, det_labels, rescale=rescale)
+ return list(zip(bbox_results, segm_results))
+
+ def aug_test(self, x, proposal_list, img_metas, rescale=False):
+ """Test with augmentations.
+
+ If rescale is False, then returned bboxes and masks will fit the scale
+ of imgs[0].
+ """
+ det_bboxes, det_labels = self.aug_test_bboxes(x, img_metas,
+ proposal_list,
+ self.test_cfg)
+
+ if rescale:
+ _det_bboxes = det_bboxes
+ else:
+ _det_bboxes = det_bboxes.clone()
+ _det_bboxes[:, :4] *= det_bboxes.new_tensor(
+ img_metas[0][0]['scale_factor'])
+ bbox_results = bbox2result(_det_bboxes, det_labels,
+ self.bbox_head.num_classes)
+
+ # det_bboxes always keep the original scale
+ if self.with_mask:
+ segm_results = self.aug_test_mask(x, img_metas, det_bboxes,
+ det_labels)
+ return [(bbox_results, segm_results)]
+ else:
+ return [bbox_results]
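+
+
+# A minimal sketch of how ``forward_dummy`` above can be exercised, e.g. for
+# FLOPs counting. ``_example_forward_dummy`` is an illustrative helper, not
+# part of mmdet; the FPN-like feature shapes and the 300 random proposals are
+# placeholder values, not taken from any config.
+def _example_forward_dummy(roi_head):
+    import torch
+    # five feature levels with halving resolution, batch size 1, 256 channels
+    feats = tuple(
+        torch.rand(1, 256, 64 // 2**lvl, 64 // 2**lvl) for lvl in range(5))
+    proposals = torch.rand(300, 4)  # (x1, y1, x2, y2) boxes for one image
+    return roi_head.forward_dummy(feats, proposals)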
diff --git a/annotator/uniformer/mmdet_null/models/roi_heads/test_mixins.py b/annotator/uniformer/mmdet_null/models/roi_heads/test_mixins.py
new file mode 100644
index 0000000000000000000000000000000000000000..78a092a431aa884ab7dfd08346f79a4ccf8303bf
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/roi_heads/test_mixins.py
@@ -0,0 +1,348 @@
+import logging
+import sys
+
+import torch
+
+from mmdet.core import (bbox2roi, bbox_mapping, merge_aug_bboxes,
+ merge_aug_masks, multiclass_nms)
+
+logger = logging.getLogger(__name__)
+
+if sys.version_info >= (3, 7):
+ from mmdet.utils.contextmanagers import completed
+
+
+class BBoxTestMixin(object):
+
+ if sys.version_info >= (3, 7):
+
+ async def async_test_bboxes(self,
+ x,
+ img_metas,
+ proposals,
+ rcnn_test_cfg,
+ rescale=False,
+ bbox_semaphore=None,
+ global_lock=None):
+ """Asynchronized test for box head without augmentation."""
+ rois = bbox2roi(proposals)
+ roi_feats = self.bbox_roi_extractor(
+ x[:len(self.bbox_roi_extractor.featmap_strides)], rois)
+ if self.with_shared_head:
+ roi_feats = self.shared_head(roi_feats)
+ sleep_interval = rcnn_test_cfg.get('async_sleep_interval', 0.017)
+
+ async with completed(
+ __name__, 'bbox_head_forward',
+ sleep_interval=sleep_interval):
+ cls_score, bbox_pred = self.bbox_head(roi_feats)
+
+ img_shape = img_metas[0]['img_shape']
+ scale_factor = img_metas[0]['scale_factor']
+ det_bboxes, det_labels = self.bbox_head.get_bboxes(
+ rois,
+ cls_score,
+ bbox_pred,
+ img_shape,
+ scale_factor,
+ rescale=rescale,
+ cfg=rcnn_test_cfg)
+ return det_bboxes, det_labels
+
+ def simple_test_bboxes(self,
+ x,
+ img_metas,
+ proposals,
+ rcnn_test_cfg,
+ rescale=False):
+ """Test only det bboxes without augmentation.
+
+ Args:
+ x (tuple[Tensor]): Feature maps of all scale level.
+ img_metas (list[dict]): Image meta info.
+ proposals (Tensor or List[Tensor]): Region proposals.
+ rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of R-CNN.
+ rescale (bool): If True, return boxes in original image space.
+ Default: False.
+
+ Returns:
+            tuple[list[Tensor], list[Tensor]]: The first list contains
+                the boxes of the corresponding image in a batch. Each
+                tensor has shape (num_boxes, 5), where the last dimension
+                represents (tl_x, tl_y, br_x, br_y, score). Each tensor in
+                the second list contains the labels with shape (num_boxes, ).
+                The length of both lists should be equal to batch_size.
+ """
+ # get origin input shape to support onnx dynamic input shape
+ if torch.onnx.is_in_onnx_export():
+ assert len(
+ img_metas
+            ) == 1, 'Only support one input image when exporting to ONNX'
+ img_shapes = img_metas[0]['img_shape_for_onnx']
+ else:
+ img_shapes = tuple(meta['img_shape'] for meta in img_metas)
+ scale_factors = tuple(meta['scale_factor'] for meta in img_metas)
+
+        # The number of proposals may differ between the images in a batch.
+        # In order to form a batch, a padding operation is required.
+ if isinstance(proposals, list):
+ # padding to form a batch
+ max_size = max([proposal.size(0) for proposal in proposals])
+ for i, proposal in enumerate(proposals):
+ supplement = proposal.new_full(
+ (max_size - proposal.size(0), proposal.size(1)), 0)
+ proposals[i] = torch.cat((supplement, proposal), dim=0)
+ rois = torch.stack(proposals, dim=0)
+ else:
+ rois = proposals
+
+ batch_index = torch.arange(
+ rois.size(0), device=rois.device).float().view(-1, 1, 1).expand(
+ rois.size(0), rois.size(1), 1)
+ rois = torch.cat([batch_index, rois[..., :4]], dim=-1)
+ batch_size = rois.shape[0]
+ num_proposals_per_img = rois.shape[1]
+
+ # Eliminate the batch dimension
+ rois = rois.view(-1, 5)
+ bbox_results = self._bbox_forward(x, rois)
+ cls_score = bbox_results['cls_score']
+ bbox_pred = bbox_results['bbox_pred']
+
+ # Recover the batch dimension
+ rois = rois.reshape(batch_size, num_proposals_per_img, -1)
+ cls_score = cls_score.reshape(batch_size, num_proposals_per_img, -1)
+
+ if not torch.onnx.is_in_onnx_export():
+ # remove padding
+ supplement_mask = rois[..., -1] == 0
+ cls_score[supplement_mask, :] = 0
+
+            # bbox_pred would be None in some detectors when with_reg is False,
+ # e.g. Grid R-CNN.
+ if bbox_pred is not None:
+ # the bbox prediction of some detectors like SABL is not Tensor
+ if isinstance(bbox_pred, torch.Tensor):
+ bbox_pred = bbox_pred.reshape(batch_size,
+ num_proposals_per_img, -1)
+ if not torch.onnx.is_in_onnx_export():
+ bbox_pred[supplement_mask, :] = 0
+ else:
+ # TODO: Looking forward to a better way
+ # For SABL
+ bbox_preds = self.bbox_head.bbox_pred_split(
+ bbox_pred, num_proposals_per_img)
+ # apply bbox post-processing to each image individually
+ det_bboxes = []
+ det_labels = []
+ for i in range(len(proposals)):
+ # remove padding
+ supplement_mask = proposals[i][..., -1] == 0
+ for bbox in bbox_preds[i]:
+ bbox[supplement_mask] = 0
+ det_bbox, det_label = self.bbox_head.get_bboxes(
+ rois[i],
+ cls_score[i],
+ bbox_preds[i],
+ img_shapes[i],
+ scale_factors[i],
+ rescale=rescale,
+ cfg=rcnn_test_cfg)
+ det_bboxes.append(det_bbox)
+ det_labels.append(det_label)
+ return det_bboxes, det_labels
+ else:
+ bbox_pred = None
+
+ return self.bbox_head.get_bboxes(
+ rois,
+ cls_score,
+ bbox_pred,
+ img_shapes,
+ scale_factors,
+ rescale=rescale,
+ cfg=rcnn_test_cfg)
+
+ def aug_test_bboxes(self, feats, img_metas, proposal_list, rcnn_test_cfg):
+ """Test det bboxes with test time augmentation."""
+ aug_bboxes = []
+ aug_scores = []
+ for x, img_meta in zip(feats, img_metas):
+ # only one image in the batch
+ img_shape = img_meta[0]['img_shape']
+ scale_factor = img_meta[0]['scale_factor']
+ flip = img_meta[0]['flip']
+ flip_direction = img_meta[0]['flip_direction']
+ # TODO more flexible
+ proposals = bbox_mapping(proposal_list[0][:, :4], img_shape,
+ scale_factor, flip, flip_direction)
+ rois = bbox2roi([proposals])
+ bbox_results = self._bbox_forward(x, rois)
+ bboxes, scores = self.bbox_head.get_bboxes(
+ rois,
+ bbox_results['cls_score'],
+ bbox_results['bbox_pred'],
+ img_shape,
+ scale_factor,
+ rescale=False,
+ cfg=None)
+ aug_bboxes.append(bboxes)
+ aug_scores.append(scores)
+ # after merging, bboxes will be rescaled to the original image size
+ merged_bboxes, merged_scores = merge_aug_bboxes(
+ aug_bboxes, aug_scores, img_metas, rcnn_test_cfg)
+ det_bboxes, det_labels = multiclass_nms(merged_bboxes, merged_scores,
+ rcnn_test_cfg.score_thr,
+ rcnn_test_cfg.nms,
+ rcnn_test_cfg.max_per_img)
+ return det_bboxes, det_labels
+
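+
+# A standalone sketch of the padding trick used in ``simple_test_bboxes``
+# above: per-image proposals are front-padded with all-zero rows so they can
+# be stacked into one batch tensor, and the zero rows are later recognised as
+# padding. ``_pad_and_stack_proposals`` is an illustrative helper, not part
+# of mmdet.
+def _pad_and_stack_proposals(proposals):
+    """proposals: list of per-image tensors with shape (num_boxes_i, 5)."""
+    max_size = max(proposal.size(0) for proposal in proposals)
+    padded = []
+    for proposal in proposals:
+        supplement = proposal.new_full(
+            (max_size - proposal.size(0), proposal.size(1)), 0)
+        padded.append(torch.cat((supplement, proposal), dim=0))
+    return torch.stack(padded, dim=0)  # shape (batch_size, max_size, 5)
+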
+
+class MaskTestMixin(object):
+
+ if sys.version_info >= (3, 7):
+
+ async def async_test_mask(self,
+ x,
+ img_metas,
+ det_bboxes,
+ det_labels,
+ rescale=False,
+ mask_test_cfg=None):
+ """Asynchronized test for mask head without augmentation."""
+ # image shape of the first image in the batch (only one)
+ ori_shape = img_metas[0]['ori_shape']
+ scale_factor = img_metas[0]['scale_factor']
+ if det_bboxes.shape[0] == 0:
+ segm_result = [[] for _ in range(self.mask_head.num_classes)]
+ else:
+ if rescale and not isinstance(scale_factor,
+ (float, torch.Tensor)):
+ scale_factor = det_bboxes.new_tensor(scale_factor)
+ _bboxes = (
+ det_bboxes[:, :4] *
+ scale_factor if rescale else det_bboxes)
+ mask_rois = bbox2roi([_bboxes])
+ mask_feats = self.mask_roi_extractor(
+ x[:len(self.mask_roi_extractor.featmap_strides)],
+ mask_rois)
+
+ if self.with_shared_head:
+ mask_feats = self.shared_head(mask_feats)
+ if mask_test_cfg and mask_test_cfg.get('async_sleep_interval'):
+ sleep_interval = mask_test_cfg['async_sleep_interval']
+ else:
+ sleep_interval = 0.035
+ async with completed(
+ __name__,
+ 'mask_head_forward',
+ sleep_interval=sleep_interval):
+ mask_pred = self.mask_head(mask_feats)
+ segm_result = self.mask_head.get_seg_masks(
+ mask_pred, _bboxes, det_labels, self.test_cfg, ori_shape,
+ scale_factor, rescale)
+ return segm_result
+
+ def simple_test_mask(self,
+ x,
+ img_metas,
+ det_bboxes,
+ det_labels,
+ rescale=False):
+ """Simple test for mask head without augmentation."""
+ # image shapes of images in the batch
+ ori_shapes = tuple(meta['ori_shape'] for meta in img_metas)
+ scale_factors = tuple(meta['scale_factor'] for meta in img_metas)
+
+        # The number of detected bboxes may differ between the images in a
+        # batch. In order to form a batch, a padding operation is required.
+ if isinstance(det_bboxes, list):
+ # padding to form a batch
+ max_size = max([bboxes.size(0) for bboxes in det_bboxes])
+ for i, (bbox, label) in enumerate(zip(det_bboxes, det_labels)):
+ supplement_bbox = bbox.new_full(
+ (max_size - bbox.size(0), bbox.size(1)), 0)
+ supplement_label = label.new_full((max_size - label.size(0), ),
+ 0)
+ det_bboxes[i] = torch.cat((supplement_bbox, bbox), dim=0)
+ det_labels[i] = torch.cat((supplement_label, label), dim=0)
+ det_bboxes = torch.stack(det_bboxes, dim=0)
+ det_labels = torch.stack(det_labels, dim=0)
+
+ batch_size = det_bboxes.size(0)
+ num_proposals_per_img = det_bboxes.shape[1]
+
+ # if det_bboxes is rescaled to the original image size, we need to
+ # rescale it back to the testing scale to obtain RoIs.
+ det_bboxes = det_bboxes[..., :4]
+ if rescale:
+ if not isinstance(scale_factors[0], float):
+ scale_factors = det_bboxes.new_tensor(scale_factors)
+ det_bboxes = det_bboxes * scale_factors.unsqueeze(1)
+
+ batch_index = torch.arange(
+ det_bboxes.size(0), device=det_bboxes.device).float().view(
+ -1, 1, 1).expand(det_bboxes.size(0), det_bboxes.size(1), 1)
+ mask_rois = torch.cat([batch_index, det_bboxes], dim=-1)
+ mask_rois = mask_rois.view(-1, 5)
+ mask_results = self._mask_forward(x, mask_rois)
+ mask_pred = mask_results['mask_pred']
+
+ # Recover the batch dimension
+ mask_preds = mask_pred.reshape(batch_size, num_proposals_per_img,
+ *mask_pred.shape[1:])
+
+ # apply mask post-processing to each image individually
+ segm_results = []
+ for i in range(batch_size):
+ mask_pred = mask_preds[i]
+ det_bbox = det_bboxes[i]
+ det_label = det_labels[i]
+
+ # remove padding
+ supplement_mask = det_bbox[..., -1] != 0
+ mask_pred = mask_pred[supplement_mask]
+ det_bbox = det_bbox[supplement_mask]
+ det_label = det_label[supplement_mask]
+
+ if det_label.shape[0] == 0:
+ segm_results.append([[]
+ for _ in range(self.mask_head.num_classes)
+ ])
+ else:
+ segm_result = self.mask_head.get_seg_masks(
+ mask_pred, det_bbox, det_label, self.test_cfg,
+ ori_shapes[i], scale_factors[i], rescale)
+ segm_results.append(segm_result)
+ return segm_results
+
+ def aug_test_mask(self, feats, img_metas, det_bboxes, det_labels):
+ """Test for mask head with test time augmentation."""
+ if det_bboxes.shape[0] == 0:
+ segm_result = [[] for _ in range(self.mask_head.num_classes)]
+ else:
+ aug_masks = []
+ for x, img_meta in zip(feats, img_metas):
+ img_shape = img_meta[0]['img_shape']
+ scale_factor = img_meta[0]['scale_factor']
+ flip = img_meta[0]['flip']
+ flip_direction = img_meta[0]['flip_direction']
+ _bboxes = bbox_mapping(det_bboxes[:, :4], img_shape,
+ scale_factor, flip, flip_direction)
+ mask_rois = bbox2roi([_bboxes])
+ mask_results = self._mask_forward(x, mask_rois)
+ # convert to numpy array to save memory
+ aug_masks.append(
+ mask_results['mask_pred'].sigmoid().cpu().numpy())
+ merged_masks = merge_aug_masks(aug_masks, img_metas, self.test_cfg)
+
+ ori_shape = img_metas[0][0]['ori_shape']
+ segm_result = self.mask_head.get_seg_masks(
+ merged_masks,
+ det_bboxes,
+ det_labels,
+ self.test_cfg,
+ ori_shape,
+ scale_factor=1.0,
+ rescale=False)
+ return segm_result
diff --git a/annotator/uniformer/mmdet_null/models/roi_heads/trident_roi_head.py b/annotator/uniformer/mmdet_null/models/roi_heads/trident_roi_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..245569e50b45cc8e21ba8e7210edf4bd0c7f27c5
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/roi_heads/trident_roi_head.py
@@ -0,0 +1,119 @@
+import torch
+from mmcv.ops import batched_nms
+
+from mmdet.core import (bbox2result, bbox2roi, bbox_mapping, merge_aug_bboxes,
+ multiclass_nms)
+from mmdet.models.roi_heads.standard_roi_head import StandardRoIHead
+from ..builder import HEADS
+
+
+@HEADS.register_module()
+class TridentRoIHead(StandardRoIHead):
+ """Trident roi head.
+
+ Args:
+ num_branch (int): Number of branches in TridentNet.
+ test_branch_idx (int): In inference, all 3 branches will be used
+ if `test_branch_idx==-1`, otherwise only branch with index
+ `test_branch_idx` will be used.
+ """
+
+ def __init__(self, num_branch, test_branch_idx, **kwargs):
+ self.num_branch = num_branch
+ self.test_branch_idx = test_branch_idx
+ super(TridentRoIHead, self).__init__(**kwargs)
+
+ def merge_trident_bboxes(self, trident_det_bboxes, trident_det_labels):
+ """Merge bbox predictions of each branch."""
+ if trident_det_bboxes.numel() == 0:
+ det_bboxes = trident_det_bboxes.new_zeros((0, 5))
+ det_labels = trident_det_bboxes.new_zeros((0, ), dtype=torch.long)
+ else:
+ nms_bboxes = trident_det_bboxes[:, :4]
+ nms_scores = trident_det_bboxes[:, 4].contiguous()
+ nms_inds = trident_det_labels
+ nms_cfg = self.test_cfg['nms']
+ det_bboxes, keep = batched_nms(nms_bboxes, nms_scores, nms_inds,
+ nms_cfg)
+ det_labels = trident_det_labels[keep]
+ if self.test_cfg['max_per_img'] > 0:
+ det_labels = det_labels[:self.test_cfg['max_per_img']]
+ det_bboxes = det_bboxes[:self.test_cfg['max_per_img']]
+
+ return det_bboxes, det_labels
+
+ def simple_test(self,
+ x,
+ proposal_list,
+ img_metas,
+ proposals=None,
+ rescale=False):
+ """Test without augmentation as follows:
+
+ 1. Compute prediction bbox and label per branch.
+ 2. Merge predictions of each branch according to scores of
+           bboxes, i.e., bboxes with higher scores are kept to give the
+           top-k predictions.
+ """
+ assert self.with_bbox, 'Bbox head must be implemented.'
+ det_bboxes_list, det_labels_list = self.simple_test_bboxes(
+ x, img_metas, proposal_list, self.test_cfg, rescale=rescale)
+ num_branch = self.num_branch if self.test_branch_idx == -1 else 1
+ for _ in range(len(det_bboxes_list)):
+ if det_bboxes_list[_].shape[0] == 0:
+ det_bboxes_list[_] = det_bboxes_list[_].new_empty((0, 5))
+ det_bboxes, det_labels = [], []
+ for i in range(len(img_metas) // num_branch):
+ det_result = self.merge_trident_bboxes(
+ torch.cat(det_bboxes_list[i * num_branch:(i + 1) *
+ num_branch]),
+ torch.cat(det_labels_list[i * num_branch:(i + 1) *
+ num_branch]))
+ det_bboxes.append(det_result[0])
+ det_labels.append(det_result[1])
+
+ bbox_results = [
+ bbox2result(det_bboxes[i], det_labels[i],
+ self.bbox_head.num_classes)
+ for i in range(len(det_bboxes))
+ ]
+ return bbox_results
+
+ def aug_test_bboxes(self, feats, img_metas, proposal_list, rcnn_test_cfg):
+ """Test det bboxes with test time augmentation."""
+ aug_bboxes = []
+ aug_scores = []
+ for x, img_meta in zip(feats, img_metas):
+ # only one image in the batch
+ img_shape = img_meta[0]['img_shape']
+ scale_factor = img_meta[0]['scale_factor']
+ flip = img_meta[0]['flip']
+ flip_direction = img_meta[0]['flip_direction']
+
+ trident_bboxes, trident_scores = [], []
+ for branch_idx in range(len(proposal_list)):
+ proposals = bbox_mapping(proposal_list[0][:, :4], img_shape,
+ scale_factor, flip, flip_direction)
+ rois = bbox2roi([proposals])
+ bbox_results = self._bbox_forward(x, rois)
+ bboxes, scores = self.bbox_head.get_bboxes(
+ rois,
+ bbox_results['cls_score'],
+ bbox_results['bbox_pred'],
+ img_shape,
+ scale_factor,
+ rescale=False,
+ cfg=None)
+ trident_bboxes.append(bboxes)
+ trident_scores.append(scores)
+
+ aug_bboxes.append(torch.cat(trident_bboxes, 0))
+ aug_scores.append(torch.cat(trident_scores, 0))
+ # after merging, bboxes will be rescaled to the original image size
+ merged_bboxes, merged_scores = merge_aug_bboxes(
+ aug_bboxes, aug_scores, img_metas, rcnn_test_cfg)
+ det_bboxes, det_labels = multiclass_nms(merged_bboxes, merged_scores,
+ rcnn_test_cfg.score_thr,
+ rcnn_test_cfg.nms,
+ rcnn_test_cfg.max_per_img)
+ return det_bboxes, det_labels
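+
+
+# A standalone sketch of the branch-merging step performed by
+# ``merge_trident_bboxes``: class-aware NMS over the concatenated branch
+# predictions, then a top-k cap. ``_example_merge_branch_bboxes`` is an
+# illustrative helper; the 0.5 IoU threshold and the 100-box cap stand in for
+# ``self.test_cfg['nms']`` / ``self.test_cfg['max_per_img']``.
+def _example_merge_branch_bboxes(det_bboxes, det_labels):
+    # det_bboxes: (N, 5) boxes with scores gathered from all branches
+    # det_labels: (N, ) class indices, used so NMS is applied per class
+    if det_bboxes.numel() == 0:
+        return det_bboxes.new_zeros((0, 5)), det_labels.new_zeros((0, ))
+    nms_cfg = dict(type='nms', iou_threshold=0.5)
+    bboxes, keep = batched_nms(det_bboxes[:, :4],
+                               det_bboxes[:, 4].contiguous(), det_labels,
+                               nms_cfg)
+    return bboxes[:100], det_labels[keep][:100]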
diff --git a/annotator/uniformer/mmdet_null/models/utils/__init__.py b/annotator/uniformer/mmdet_null/models/utils/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..5165b22ce57d17f28392213e0f1b055c2b9360c1
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/utils/__init__.py
@@ -0,0 +1,16 @@
+from .builder import build_positional_encoding, build_transformer
+from .gaussian_target import gaussian_radius, gen_gaussian_target
+from .positional_encoding import (LearnedPositionalEncoding,
+ SinePositionalEncoding)
+from .res_layer import ResLayer, SimplifiedBasicBlock
+from .transformer import (FFN, DynamicConv, MultiheadAttention, Transformer,
+ TransformerDecoder, TransformerDecoderLayer,
+ TransformerEncoder, TransformerEncoderLayer)
+
+__all__ = [
+ 'ResLayer', 'gaussian_radius', 'gen_gaussian_target', 'MultiheadAttention',
+ 'FFN', 'TransformerEncoderLayer', 'TransformerEncoder',
+ 'TransformerDecoderLayer', 'TransformerDecoder', 'Transformer',
+ 'build_transformer', 'build_positional_encoding', 'SinePositionalEncoding',
+ 'LearnedPositionalEncoding', 'DynamicConv', 'SimplifiedBasicBlock'
+]
diff --git a/annotator/uniformer/mmdet_null/models/utils/builder.py b/annotator/uniformer/mmdet_null/models/utils/builder.py
new file mode 100644
index 0000000000000000000000000000000000000000..f362d1c92ca9d4ed95a2b3d28d3e6baedd14e462
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/utils/builder.py
@@ -0,0 +1,14 @@
+from mmcv.utils import Registry, build_from_cfg
+
+TRANSFORMER = Registry('Transformer')
+POSITIONAL_ENCODING = Registry('Position encoding')
+
+
+def build_transformer(cfg, default_args=None):
+ """Builder for Transformer."""
+ return build_from_cfg(cfg, TRANSFORMER, default_args)
+
+
+def build_positional_encoding(cfg, default_args=None):
+ """Builder for Position Encoding."""
+ return build_from_cfg(cfg, POSITIONAL_ENCODING, default_args)
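+
+
+# A minimal sketch of how these registries are consumed: a module registers
+# itself (e.g. ``SinePositionalEncoding`` in ``positional_encoding.py``) and
+# is then instantiated from a config dict whose ``type`` key names the class.
+# ``_example_build_positional_encoding`` is an illustrative helper and the
+# ``num_feats=128, normalize=True`` values are placeholders.
+def _example_build_positional_encoding():
+    # imported lazily to avoid a circular import at module load time
+    from .positional_encoding import SinePositionalEncoding  # noqa: F401
+    cfg = dict(type='SinePositionalEncoding', num_feats=128, normalize=True)
+    return build_positional_encoding(cfg)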
diff --git a/annotator/uniformer/mmdet_null/models/utils/gaussian_target.py b/annotator/uniformer/mmdet_null/models/utils/gaussian_target.py
new file mode 100644
index 0000000000000000000000000000000000000000..7bb7160cb4bf2f47876f6e8373142aa5846920a9
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/utils/gaussian_target.py
@@ -0,0 +1,185 @@
+from math import sqrt
+
+import torch
+
+
+def gaussian2D(radius, sigma=1, dtype=torch.float32, device='cpu'):
+ """Generate 2D gaussian kernel.
+
+ Args:
+ radius (int): Radius of gaussian kernel.
+ sigma (int): Sigma of gaussian function. Default: 1.
+ dtype (torch.dtype): Dtype of gaussian tensor. Default: torch.float32.
+ device (str): Device of gaussian tensor. Default: 'cpu'.
+
+ Returns:
+ h (Tensor): Gaussian kernel with a
+ ``(2 * radius + 1) * (2 * radius + 1)`` shape.
+ """
+ x = torch.arange(
+ -radius, radius + 1, dtype=dtype, device=device).view(1, -1)
+ y = torch.arange(
+ -radius, radius + 1, dtype=dtype, device=device).view(-1, 1)
+
+ h = (-(x * x + y * y) / (2 * sigma * sigma)).exp()
+
+ h[h < torch.finfo(h.dtype).eps * h.max()] = 0
+ return h
+
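+
+# A minimal usage sketch for ``gaussian2D`` above; ``_example_gaussian2d`` is
+# an illustrative helper and the radius/sigma values are placeholders. The
+# kernel peaks at 1.0 in the centre and decays symmetrically.
+def _example_gaussian2d(radius=2, sigma=1.0):
+    kernel = gaussian2D(radius, sigma=sigma)
+    assert kernel.shape == (2 * radius + 1, 2 * radius + 1)
+    assert torch.isclose(kernel[radius, radius], torch.tensor(1.0))
+    return kernel
+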
+
+def gen_gaussian_target(heatmap, center, radius, k=1):
+ """Generate 2D gaussian heatmap.
+
+ Args:
+ heatmap (Tensor): Input heatmap, the gaussian kernel will cover on
+ it and maintain the max value.
+ center (list[int]): Coord of gaussian kernel's center.
+ radius (int): Radius of gaussian kernel.
+ k (int): Coefficient of gaussian kernel. Default: 1.
+
+ Returns:
+ out_heatmap (Tensor): Updated heatmap covered by gaussian kernel.
+ """
+ diameter = 2 * radius + 1
+ gaussian_kernel = gaussian2D(
+ radius, sigma=diameter / 6, dtype=heatmap.dtype, device=heatmap.device)
+
+ x, y = center
+
+ height, width = heatmap.shape[:2]
+
+ left, right = min(x, radius), min(width - x, radius + 1)
+ top, bottom = min(y, radius), min(height - y, radius + 1)
+
+ masked_heatmap = heatmap[y - top:y + bottom, x - left:x + right]
+ masked_gaussian = gaussian_kernel[radius - top:radius + bottom,
+ radius - left:radius + right]
+ out_heatmap = heatmap
+ torch.max(
+ masked_heatmap,
+ masked_gaussian * k,
+ out=out_heatmap[y - top:y + bottom, x - left:x + right])
+
+ return out_heatmap
+
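+
+# A minimal usage sketch for ``gen_gaussian_target`` above, in the spirit of
+# rendering a CenterNet/CornerNet-style heatmap target; the heatmap size,
+# centre and radius are placeholder values and ``_example_gen_gaussian_target``
+# is an illustrative helper.
+def _example_gen_gaussian_target():
+    heatmap = torch.zeros((16, 16))
+    # splat a gaussian of radius 3 centred at (x=8, y=5); the peak value is 1
+    heatmap = gen_gaussian_target(heatmap, center=[8, 5], radius=3)
+    assert torch.isclose(heatmap[5, 8], torch.tensor(1.0))  # indexed as (y, x)
+    return heatmap
+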
+
+def gaussian_radius(det_size, min_overlap):
+ r"""Generate 2D gaussian radius.
+
+ This function is modified from the `official github repo
+ `_.
+
+    Given ``min_overlap``, the radius can be computed by a quadratic equation
+    according to Vieta's formulas.
+
+    There are 3 cases for computing gaussian radius, details are as follows:
+
+ - Explanation of figure: ``lt`` and ``br`` indicates the left-top and
+ bottom-right corner of ground truth box. ``x`` indicates the
+ generated corner at the limited position when ``radius=r``.
+
+ - Case1: one corner is inside the gt box and the other is outside.
+
+ .. code:: text
+
+ |< width >|
+
+ lt-+----------+ -
+ | | | ^
+ +--x----------+--+
+ | | | |
+ | | | | height
+ | | overlap | |
+ | | | |
+ | | | | v
+ +--+---------br--+ -
+ | | |
+ +----------+--x
+
+ To ensure IoU of generated box and gt box is larger than ``min_overlap``:
+
+ .. math::
+ \cfrac{(w-r)*(h-r)}{w*h+(w+h)r-r^2} \ge {iou} \quad\Rightarrow\quad
+ {r^2-(w+h)r+\cfrac{1-iou}{1+iou}*w*h} \ge 0 \\
+        {a} = 1,\quad{b} = {-(w+h)},\quad{c} = {\cfrac{1-iou}{1+iou}*w*h} \\
+ {r} \le \cfrac{-b-\sqrt{b^2-4*a*c}}{2*a}
+
+ - Case2: both two corners are inside the gt box.
+
+ .. code:: text
+
+ |< width >|
+
+ lt-+----------+ -
+ | | | ^
+ +--x-------+ |
+ | | | |
+ | |overlap| | height
+ | | | |
+ | +-------x--+
+ | | | v
+ +----------+-br -
+
+ To ensure IoU of generated box and gt box is larger than ``min_overlap``:
+
+ .. math::
+ \cfrac{(w-2*r)*(h-2*r)}{w*h} \ge {iou} \quad\Rightarrow\quad
+ {4r^2-2(w+h)r+(1-iou)*w*h} \ge 0 \\
+        {a} = 4,\quad {b} = {-2(w+h)},\quad {c} = {(1-iou)*w*h} \\
+ {r} \le \cfrac{-b-\sqrt{b^2-4*a*c}}{2*a}
+
+ - Case3: both two corners are outside the gt box.
+
+ .. code:: text
+
+ |< width >|
+
+ x--+----------------+
+ | | |
+ +-lt-------------+ | -
+ | | | | ^
+ | | | |
+ | | overlap | | height
+ | | | |
+ | | | | v
+ | +------------br--+ -
+ | | |
+ +----------------+--x
+
+ To ensure IoU of generated box and gt box is larger than ``min_overlap``:
+
+ .. math::
+ \cfrac{w*h}{(w+2*r)*(h+2*r)} \ge {iou} \quad\Rightarrow\quad
+ {4*iou*r^2+2*iou*(w+h)r+(iou-1)*w*h} \le 0 \\
+ {a} = {4*iou},\quad {b} = {2*iou*(w+h)},\quad {c} = {(iou-1)*w*h} \\
+ {r} \le \cfrac{-b+\sqrt{b^2-4*a*c}}{2*a}
+
+ Args:
+ det_size (list[int]): Shape of object.
+ min_overlap (float): Min IoU with ground truth for boxes generated by
+ keypoints inside the gaussian kernel.
+
+ Returns:
+ radius (int): Radius of gaussian kernel.
+ """
+ height, width = det_size
+
+ a1 = 1
+ b1 = (height + width)
+ c1 = width * height * (1 - min_overlap) / (1 + min_overlap)
+ sq1 = sqrt(b1**2 - 4 * a1 * c1)
+ r1 = (b1 - sq1) / (2 * a1)
+
+ a2 = 4
+ b2 = 2 * (height + width)
+ c2 = (1 - min_overlap) * width * height
+ sq2 = sqrt(b2**2 - 4 * a2 * c2)
+ r2 = (b2 - sq2) / (2 * a2)
+
+ a3 = 4 * min_overlap
+ b3 = -2 * min_overlap * (height + width)
+ c3 = (min_overlap - 1) * width * height
+ sq3 = sqrt(b3**2 - 4 * a3 * c3)
+ r3 = (b3 + sq3) / (2 * a3)
+ return min(r1, r2, r3)
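+
+
+# A minimal sketch of the usual call pattern: the radius returned by
+# ``gaussian_radius`` is floored to an int before being passed to
+# ``gen_gaussian_target``. The box size, the 0.7 overlap threshold, the
+# heatmap size and the centre are placeholder values, and
+# ``_example_gaussian_radius`` is an illustrative helper.
+def _example_gaussian_radius():
+    box_h, box_w = 24, 48
+    radius = max(0, int(gaussian_radius((box_h, box_w), min_overlap=0.7)))
+    heatmap = torch.zeros((128, 128))
+    return gen_gaussian_target(heatmap, center=[64, 32], radius=radius)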
diff --git a/annotator/uniformer/mmdet_null/models/utils/positional_encoding.py b/annotator/uniformer/mmdet_null/models/utils/positional_encoding.py
new file mode 100644
index 0000000000000000000000000000000000000000..9bda2bbdbfcc28ba6304b6325ae556fa02554ac1
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/utils/positional_encoding.py
@@ -0,0 +1,150 @@
+import math
+
+import torch
+import torch.nn as nn
+from mmcv.cnn import uniform_init
+
+from .builder import POSITIONAL_ENCODING
+
+
+@POSITIONAL_ENCODING.register_module()
+class SinePositionalEncoding(nn.Module):
+ """Position encoding with sine and cosine functions.
+
+ See `End-to-End Object Detection with Transformers
+ `_ for details.
+
+ Args:
+ num_feats (int): The feature dimension for each position
+ along x-axis or y-axis. Note the final returned dimension
+ for each position is 2 times of this value.
+ temperature (int, optional): The temperature used for scaling
+ the position embedding. Default 10000.
+ normalize (bool, optional): Whether to normalize the position
+ embedding. Default False.
+ scale (float, optional): A scale factor that scales the position
+ embedding. The scale will be used only when `normalize` is True.
+ Default 2*pi.
+ eps (float, optional): A value added to the denominator for
+ numerical stability. Default 1e-6.
+ """
+
+ def __init__(self,
+ num_feats,
+ temperature=10000,
+ normalize=False,
+ scale=2 * math.pi,
+ eps=1e-6):
+ super(SinePositionalEncoding, self).__init__()
+ if normalize:
+            assert isinstance(scale, (float, int)), 'when normalize is set, ' \
+ 'scale should be provided and in float or int type, ' \
+ f'found {type(scale)}'
+ self.num_feats = num_feats
+ self.temperature = temperature
+ self.normalize = normalize
+ self.scale = scale
+ self.eps = eps
+
+ def forward(self, mask):
+ """Forward function for `SinePositionalEncoding`.
+
+ Args:
+            mask (Tensor): ByteTensor mask. Non-zero values represent
+                ignored positions, while zero values mean valid positions
+ for this image. Shape [bs, h, w].
+
+ Returns:
+ pos (Tensor): Returned position embedding with shape
+ [bs, num_feats*2, h, w].
+ """
+ not_mask = ~mask
+ y_embed = not_mask.cumsum(1, dtype=torch.float32)
+ x_embed = not_mask.cumsum(2, dtype=torch.float32)
+ if self.normalize:
+ y_embed = y_embed / (y_embed[:, -1:, :] + self.eps) * self.scale
+ x_embed = x_embed / (x_embed[:, :, -1:] + self.eps) * self.scale
+ dim_t = torch.arange(
+ self.num_feats, dtype=torch.float32, device=mask.device)
+ dim_t = self.temperature**(2 * (dim_t // 2) / self.num_feats)
+ pos_x = x_embed[:, :, :, None] / dim_t
+ pos_y = y_embed[:, :, :, None] / dim_t
+ pos_x = torch.stack(
+ (pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()),
+ dim=4).flatten(3)
+ pos_y = torch.stack(
+ (pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()),
+ dim=4).flatten(3)
+ pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
+ return pos
+
+ def __repr__(self):
+ """str: a string that describes the module"""
+ repr_str = self.__class__.__name__
+ repr_str += f'(num_feats={self.num_feats}, '
+ repr_str += f'temperature={self.temperature}, '
+ repr_str += f'normalize={self.normalize}, '
+ repr_str += f'scale={self.scale}, '
+ repr_str += f'eps={self.eps})'
+ return repr_str
+
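+
+# A minimal forward-pass sketch for ``SinePositionalEncoding`` above; the
+# batch size, spatial size and ``num_feats=128`` are placeholder values and
+# ``_example_sine_positional_encoding`` is an illustrative helper. A mask of
+# zeros marks every position as valid.
+def _example_sine_positional_encoding():
+    pos_enc = SinePositionalEncoding(num_feats=128, normalize=True)
+    mask = torch.zeros((2, 32, 32), dtype=torch.bool)  # [bs, h, w]
+    pos = pos_enc(mask)
+    assert pos.shape == (2, 256, 32, 32)  # [bs, 2 * num_feats, h, w]
+    return pos
+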
+
+@POSITIONAL_ENCODING.register_module()
+class LearnedPositionalEncoding(nn.Module):
+ """Position embedding with learnable embedding weights.
+
+ Args:
+ num_feats (int): The feature dimension for each position
+ along x-axis or y-axis. The final returned dimension for
+ each position is 2 times of this value.
+ row_num_embed (int, optional): The dictionary size of row embeddings.
+ Default 50.
+ col_num_embed (int, optional): The dictionary size of col embeddings.
+ Default 50.
+ """
+
+ def __init__(self, num_feats, row_num_embed=50, col_num_embed=50):
+ super(LearnedPositionalEncoding, self).__init__()
+ self.row_embed = nn.Embedding(row_num_embed, num_feats)
+ self.col_embed = nn.Embedding(col_num_embed, num_feats)
+ self.num_feats = num_feats
+ self.row_num_embed = row_num_embed
+ self.col_num_embed = col_num_embed
+ self.init_weights()
+
+ def init_weights(self):
+ """Initialize the learnable weights."""
+ uniform_init(self.row_embed)
+ uniform_init(self.col_embed)
+
+ def forward(self, mask):
+ """Forward function for `LearnedPositionalEncoding`.
+
+ Args:
+            mask (Tensor): ByteTensor mask. Non-zero values represent
+                ignored positions, while zero values mean valid positions
+ for this image. Shape [bs, h, w].
+
+ Returns:
+ pos (Tensor): Returned position embedding with shape
+ [bs, num_feats*2, h, w].
+ """
+ h, w = mask.shape[-2:]
+ x = torch.arange(w, device=mask.device)
+ y = torch.arange(h, device=mask.device)
+ x_embed = self.col_embed(x)
+ y_embed = self.row_embed(y)
+ pos = torch.cat(
+ (x_embed.unsqueeze(0).repeat(h, 1, 1), y_embed.unsqueeze(1).repeat(
+ 1, w, 1)),
+ dim=-1).permute(2, 0,
+ 1).unsqueeze(0).repeat(mask.shape[0], 1, 1, 1)
+ return pos
+
+ def __repr__(self):
+ """str: a string that describes the module"""
+ repr_str = self.__class__.__name__
+ repr_str += f'(num_feats={self.num_feats}, '
+ repr_str += f'row_num_embed={self.row_num_embed}, '
+ repr_str += f'col_num_embed={self.col_num_embed})'
+ return repr_str
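+
+
+# A minimal forward-pass sketch for ``LearnedPositionalEncoding`` above; the
+# spatial size only has to stay within ``row_num_embed``/``col_num_embed``.
+# All values are placeholders and ``_example_learned_positional_encoding`` is
+# an illustrative helper.
+def _example_learned_positional_encoding():
+    pos_enc = LearnedPositionalEncoding(num_feats=128)
+    mask = torch.zeros((2, 25, 38), dtype=torch.bool)  # [bs, h, w]
+    pos = pos_enc(mask)
+    assert pos.shape == (2, 256, 25, 38)  # [bs, 2 * num_feats, h, w]
+    return pos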
diff --git a/annotator/uniformer/mmdet_null/models/utils/res_layer.py b/annotator/uniformer/mmdet_null/models/utils/res_layer.py
new file mode 100644
index 0000000000000000000000000000000000000000..4a4efd3dd30b30123ed5135eac080ad9f7f7b448
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/utils/res_layer.py
@@ -0,0 +1,187 @@
+from mmcv.cnn import build_conv_layer, build_norm_layer
+from torch import nn as nn
+
+
+class ResLayer(nn.Sequential):
+ """ResLayer to build ResNet style backbone.
+
+ Args:
+ block (nn.Module): block used to build ResLayer.
+ inplanes (int): inplanes of block.
+ planes (int): planes of block.
+ num_blocks (int): number of blocks.
+ stride (int): stride of the first block. Default: 1
+ avg_down (bool): Use AvgPool instead of stride conv when
+ downsampling in the bottleneck. Default: False
+ conv_cfg (dict): dictionary to construct and config conv layer.
+ Default: None
+ norm_cfg (dict): dictionary to construct and config norm layer.
+ Default: dict(type='BN')
+ downsample_first (bool): Downsample at the first block or last block.
+ False for Hourglass, True for ResNet. Default: True
+ """
+
+ def __init__(self,
+ block,
+ inplanes,
+ planes,
+ num_blocks,
+ stride=1,
+ avg_down=False,
+ conv_cfg=None,
+ norm_cfg=dict(type='BN'),
+ downsample_first=True,
+ **kwargs):
+ self.block = block
+
+ downsample = None
+ if stride != 1 or inplanes != planes * block.expansion:
+ downsample = []
+ conv_stride = stride
+ if avg_down:
+ conv_stride = 1
+ downsample.append(
+ nn.AvgPool2d(
+ kernel_size=stride,
+ stride=stride,
+ ceil_mode=True,
+ count_include_pad=False))
+ downsample.extend([
+ build_conv_layer(
+ conv_cfg,
+ inplanes,
+ planes * block.expansion,
+ kernel_size=1,
+ stride=conv_stride,
+ bias=False),
+ build_norm_layer(norm_cfg, planes * block.expansion)[1]
+ ])
+ downsample = nn.Sequential(*downsample)
+
+ layers = []
+ if downsample_first:
+ layers.append(
+ block(
+ inplanes=inplanes,
+ planes=planes,
+ stride=stride,
+ downsample=downsample,
+ conv_cfg=conv_cfg,
+ norm_cfg=norm_cfg,
+ **kwargs))
+ inplanes = planes * block.expansion
+ for _ in range(1, num_blocks):
+ layers.append(
+ block(
+ inplanes=inplanes,
+ planes=planes,
+ stride=1,
+ conv_cfg=conv_cfg,
+ norm_cfg=norm_cfg,
+ **kwargs))
+
+ else: # downsample_first=False is for HourglassModule
+ for _ in range(num_blocks - 1):
+ layers.append(
+ block(
+ inplanes=inplanes,
+ planes=inplanes,
+ stride=1,
+ conv_cfg=conv_cfg,
+ norm_cfg=norm_cfg,
+ **kwargs))
+ layers.append(
+ block(
+ inplanes=inplanes,
+ planes=planes,
+ stride=stride,
+ downsample=downsample,
+ conv_cfg=conv_cfg,
+ norm_cfg=norm_cfg,
+ **kwargs))
+ super(ResLayer, self).__init__(*layers)
+
+
+class SimplifiedBasicBlock(nn.Module):
+ """Simplified version of original basic residual block. This is used in
+ `SCNet `_.
+
+ - Norm layer is now optional
+ - Last ReLU in forward function is removed
+ """
+ expansion = 1
+
+ def __init__(self,
+ inplanes,
+ planes,
+ stride=1,
+ dilation=1,
+ downsample=None,
+ style='pytorch',
+ with_cp=False,
+ conv_cfg=None,
+ norm_cfg=dict(type='BN'),
+ dcn=None,
+ plugins=None):
+ super(SimplifiedBasicBlock, self).__init__()
+ assert dcn is None, 'Not implemented yet.'
+ assert plugins is None, 'Not implemented yet.'
+ assert not with_cp, 'Not implemented yet.'
+ self.with_norm = norm_cfg is not None
+ with_bias = True if norm_cfg is None else False
+ self.conv1 = build_conv_layer(
+ conv_cfg,
+ inplanes,
+ planes,
+ 3,
+ stride=stride,
+ padding=dilation,
+ dilation=dilation,
+ bias=with_bias)
+ if self.with_norm:
+ self.norm1_name, norm1 = build_norm_layer(
+ norm_cfg, planes, postfix=1)
+ self.add_module(self.norm1_name, norm1)
+ self.conv2 = build_conv_layer(
+ conv_cfg, planes, planes, 3, padding=1, bias=with_bias)
+ if self.with_norm:
+ self.norm2_name, norm2 = build_norm_layer(
+ norm_cfg, planes, postfix=2)
+ self.add_module(self.norm2_name, norm2)
+
+ self.relu = nn.ReLU(inplace=True)
+ self.downsample = downsample
+ self.stride = stride
+ self.dilation = dilation
+ self.with_cp = with_cp
+
+ @property
+ def norm1(self):
+ """nn.Module: normalization layer after the first convolution layer"""
+ return getattr(self, self.norm1_name) if self.with_norm else None
+
+ @property
+ def norm2(self):
+ """nn.Module: normalization layer after the second convolution layer"""
+ return getattr(self, self.norm2_name) if self.with_norm else None
+
+ def forward(self, x):
+ """Forward function."""
+
+ identity = x
+
+ out = self.conv1(x)
+ if self.with_norm:
+ out = self.norm1(out)
+ out = self.relu(out)
+
+ out = self.conv2(out)
+ if self.with_norm:
+ out = self.norm2(out)
+
+ if self.downsample is not None:
+ identity = self.downsample(x)
+
+ out += identity
+
+ return out
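+
+
+# A minimal sketch of stacking ``SimplifiedBasicBlock`` with ``ResLayer``.
+# ``ResLayer`` is normally fed the BasicBlock/Bottleneck classes from the
+# backbones; ``SimplifiedBasicBlock`` is used here only because it is defined
+# in this file, and the channel/stride values are placeholders.
+# ``_example_res_layer`` is an illustrative helper.
+def _example_res_layer():
+    import torch
+    layer = ResLayer(
+        SimplifiedBasicBlock, inplanes=64, planes=64, num_blocks=2)
+    x = torch.rand(2, 64, 32, 32)
+    out = layer(x)
+    assert out.shape == (2, 64, 32, 32)
+    return out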
diff --git a/annotator/uniformer/mmdet_null/models/utils/transformer.py b/annotator/uniformer/mmdet_null/models/utils/transformer.py
new file mode 100644
index 0000000000000000000000000000000000000000..83870eead42f4b0bf73c9e19248d5512d3d044c5
--- /dev/null
+++ b/annotator/uniformer/mmdet_null/models/utils/transformer.py
@@ -0,0 +1,860 @@
+import torch
+import torch.nn as nn
+from mmcv.cnn import (Linear, build_activation_layer, build_norm_layer,
+ xavier_init)
+
+from .builder import TRANSFORMER
+
+
+class MultiheadAttention(nn.Module):
+ """A warpper for torch.nn.MultiheadAttention.
+
+ This module implements MultiheadAttention with residual connection,
+ and positional encoding used in DETR is also passed as input.
+
+ Args:
+ embed_dims (int): The embedding dimension.
+ num_heads (int): Parallel attention heads. Same as
+ `nn.MultiheadAttention`.
+ dropout (float): A Dropout layer on attn_output_weights. Default 0.0.
+ """
+
+ def __init__(self, embed_dims, num_heads, dropout=0.0):
+ super(MultiheadAttention, self).__init__()
+ assert embed_dims % num_heads == 0, 'embed_dims must be ' \
+ f'divisible by num_heads. got {embed_dims} and {num_heads}.'
+ self.embed_dims = embed_dims
+ self.num_heads = num_heads
+ self.dropout = dropout
+ self.attn = nn.MultiheadAttention(embed_dims, num_heads, dropout)
+ self.dropout = nn.Dropout(dropout)
+
+ def forward(self,
+ x,
+ key=None,
+ value=None,
+ residual=None,
+ query_pos=None,
+ key_pos=None,
+ attn_mask=None,
+ key_padding_mask=None):
+ """Forward function for `MultiheadAttention`.
+
+ Args:
+ x (Tensor): The input query with shape [num_query, bs,
+ embed_dims]. Same in `nn.MultiheadAttention.forward`.
+ key (Tensor): The key tensor with shape [num_key, bs,
+ embed_dims]. Same in `nn.MultiheadAttention.forward`.
+ Default None. If None, the `query` will be used.
+ value (Tensor): The value tensor with same shape as `key`.
+ Same in `nn.MultiheadAttention.forward`. Default None.
+ If None, the `key` will be used.
+ residual (Tensor): The tensor used for addition, with the
+ same shape as `x`. Default None. If None, `x` will be used.
+ query_pos (Tensor): The positional encoding for query, with
+ the same shape as `x`. Default None. If not None, it will
+ be added to `x` before forward function.
+ key_pos (Tensor): The positional encoding for `key`, with the
+ same shape as `key`. Default None. If not None, it will
+ be added to `key` before forward function. If None, and
+ `query_pos` has the same shape as `key`, then `query_pos`
+ will be used for `key_pos`.
+ attn_mask (Tensor): ByteTensor mask with shape [num_query,
+ num_key]. Same in `nn.MultiheadAttention.forward`.
+ Default None.
+ key_padding_mask (Tensor): ByteTensor with shape [bs, num_key].
+ Same in `nn.MultiheadAttention.forward`. Default None.
+
+ Returns:
+ Tensor: forwarded results with shape [num_query, bs, embed_dims].
+ """
+ query = x
+ if key is None:
+ key = query
+ if value is None:
+ value = key
+ if residual is None:
+ residual = x
+ if key_pos is None:
+ if query_pos is not None and key is not None:
+ if query_pos.shape == key.shape:
+ key_pos = query_pos
+ if query_pos is not None:
+ query = query + query_pos
+ if key_pos is not None:
+ key = key + key_pos
+ out = self.attn(
+ query,
+ key,
+ value=value,
+ attn_mask=attn_mask,
+ key_padding_mask=key_padding_mask)[0]
+
+ return residual + self.dropout(out)
+
+ def __repr__(self):
+ """str: a string that describes the module"""
+ repr_str = self.__class__.__name__
+ repr_str += f'(embed_dims={self.embed_dims}, '
+ repr_str += f'num_heads={self.num_heads}, '
+ repr_str += f'dropout={self.dropout})'
+ return repr_str
+
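+
+# A minimal forward-pass sketch for ``MultiheadAttention`` above; shapes and
+# hyper-parameters are placeholders and ``_example_multihead_attention`` is an
+# illustrative helper. Queries are laid out as [num_query, bs, embed_dims],
+# exactly like ``nn.MultiheadAttention``.
+def _example_multihead_attention():
+    attn = MultiheadAttention(embed_dims=256, num_heads=8, dropout=0.1)
+    x = torch.rand(100, 2, 256)          # [num_query, bs, embed_dims]
+    query_pos = torch.rand(100, 2, 256)  # added to the query before attention
+    out = attn(x, query_pos=query_pos)   # self-attention plus residual
+    assert out.shape == (100, 2, 256)
+    return out
+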
+
+class FFN(nn.Module):
+ """Implements feed-forward networks (FFNs) with residual connection.
+
+ Args:
+ embed_dims (int): The feature dimension. Same as
+ `MultiheadAttention`.
+ feedforward_channels (int): The hidden dimension of FFNs.
+ num_fcs (int, optional): The number of fully-connected layers in
+ FFNs. Defaults to 2.
+ act_cfg (dict, optional): The activation config for FFNs.
+ dropout (float, optional): Probability of an element to be
+ zeroed. Default 0.0.
+        add_residual (bool, optional): Add residual connection.
+ Defaults to True.
+ """
+
+ def __init__(self,
+ embed_dims,
+ feedforward_channels,
+ num_fcs=2,
+ act_cfg=dict(type='ReLU', inplace=True),
+ dropout=0.0,
+ add_residual=True):
+ super(FFN, self).__init__()
+ assert num_fcs >= 2, 'num_fcs should be no less ' \
+ f'than 2. got {num_fcs}.'
+ self.embed_dims = embed_dims
+ self.feedforward_channels = feedforward_channels
+ self.num_fcs = num_fcs
+ self.act_cfg = act_cfg
+ self.dropout = dropout
+ self.activate = build_activation_layer(act_cfg)
+
+ layers = nn.ModuleList()
+ in_channels = embed_dims
+ for _ in range(num_fcs - 1):
+ layers.append(
+ nn.Sequential(
+ Linear(in_channels, feedforward_channels), self.activate,
+ nn.Dropout(dropout)))
+ in_channels = feedforward_channels
+ layers.append(Linear(feedforward_channels, embed_dims))
+ self.layers = nn.Sequential(*layers)
+ self.dropout = nn.Dropout(dropout)
+ self.add_residual = add_residual
+
+ def forward(self, x, residual=None):
+ """Forward function for `FFN`."""
+ out = self.layers(x)
+ if not self.add_residual:
+ return out
+ if residual is None:
+ residual = x
+ return residual + self.dropout(out)
+
+ def __repr__(self):
+ """str: a string that describes the module"""
+ repr_str = self.__class__.__name__
+ repr_str += f'(embed_dims={self.embed_dims}, '
+ repr_str += f'feedforward_channels={self.feedforward_channels}, '
+ repr_str += f'num_fcs={self.num_fcs}, '
+ repr_str += f'act_cfg={self.act_cfg}, '
+ repr_str += f'dropout={self.dropout}, '
+ repr_str += f'add_residual={self.add_residual})'
+ return repr_str
+
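+
+# A minimal forward-pass sketch for ``FFN`` above; the dimensions are
+# placeholders and ``_example_ffn`` is an illustrative helper. The residual
+# defaults to the input itself when not given explicitly.
+def _example_ffn():
+    ffn = FFN(embed_dims=256, feedforward_channels=1024)
+    x = torch.rand(100, 2, 256)
+    out = ffn(x)
+    assert out.shape == x.shape
+    return out
+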
+
+class TransformerEncoderLayer(nn.Module):
+ """Implements one encoder layer in DETR transformer.
+
+ Args:
+ embed_dims (int): The feature dimension. Same as `FFN`.
+ num_heads (int): Parallel attention heads.
+ feedforward_channels (int): The hidden dimension for FFNs.
+ dropout (float): Probability of an element to be zeroed. Default 0.0.
+ order (tuple[str]): The order for encoder layer. Valid examples are
+ ('selfattn', 'norm', 'ffn', 'norm') and ('norm', 'selfattn',
+ 'norm', 'ffn'). Default ('selfattn', 'norm', 'ffn', 'norm').
+ act_cfg (dict): The activation config for FFNs. Default ReLU.
+ norm_cfg (dict): Config dict for normalization layer. Default
+ layer normalization.
+ num_fcs (int): The number of fully-connected layers for FFNs.
+ Default 2.
+ """
+
+ def __init__(self,
+ embed_dims,
+ num_heads,
+ feedforward_channels,
+ dropout=0.0,
+ order=('selfattn', 'norm', 'ffn', 'norm'),
+ act_cfg=dict(type='ReLU', inplace=True),
+ norm_cfg=dict(type='LN'),
+ num_fcs=2):
+ super(TransformerEncoderLayer, self).__init__()
+ assert isinstance(order, tuple) and len(order) == 4
+ assert set(order) == set(['selfattn', 'norm', 'ffn'])
+ self.embed_dims = embed_dims
+ self.num_heads = num_heads
+ self.feedforward_channels = feedforward_channels
+ self.dropout = dropout
+ self.order = order
+ self.act_cfg = act_cfg
+ self.norm_cfg = norm_cfg
+ self.num_fcs = num_fcs
+ self.pre_norm = order[0] == 'norm'
+ self.self_attn = MultiheadAttention(embed_dims, num_heads, dropout)
+ self.ffn = FFN(embed_dims, feedforward_channels, num_fcs, act_cfg,
+ dropout)
+ self.norms = nn.ModuleList()
+ self.norms.append(build_norm_layer(norm_cfg, embed_dims)[1])
+ self.norms.append(build_norm_layer(norm_cfg, embed_dims)[1])
+
+ def forward(self, x, pos=None, attn_mask=None, key_padding_mask=None):
+ """Forward function for `TransformerEncoderLayer`.
+
+ Args:
+ x (Tensor): The input query with shape [num_key, bs,
+ embed_dims]. Same in `MultiheadAttention.forward`.
+ pos (Tensor): The positional encoding for query. Default None.
+ Same as `query_pos` in `MultiheadAttention.forward`.
+ attn_mask (Tensor): ByteTensor mask with shape [num_key,
+ num_key]. Same in `MultiheadAttention.forward`. Default None.
+ key_padding_mask (Tensor): ByteTensor with shape [bs, num_key].
+ Same in `MultiheadAttention.forward`. Default None.
+
+ Returns:
+ Tensor: forwarded results with shape [num_key, bs, embed_dims].
+ """
+ norm_cnt = 0
+ inp_residual = x
+ for layer in self.order:
+ if layer == 'selfattn':
+ # self attention
+ query = key = value = x
+ x = self.self_attn(
+ query,
+ key,
+ value,
+ inp_residual if self.pre_norm else None,
+ query_pos=pos,
+ key_pos=pos,
+ attn_mask=attn_mask,
+ key_padding_mask=key_padding_mask)
+ inp_residual = x
+ elif layer == 'norm':
+ x = self.norms[norm_cnt](x)
+ norm_cnt += 1
+ elif layer == 'ffn':
+ x = self.ffn(x, inp_residual if self.pre_norm else None)
+ return x
+
+ def __repr__(self):
+ """str: a string that describes the module"""
+ repr_str = self.__class__.__name__
+ repr_str += f'(embed_dims={self.embed_dims}, '
+ repr_str += f'num_heads={self.num_heads}, '
+ repr_str += f'feedforward_channels={self.feedforward_channels}, '
+ repr_str += f'dropout={self.dropout}, '
+ repr_str += f'order={self.order}, '
+ repr_str += f'act_cfg={self.act_cfg}, '
+ repr_str += f'norm_cfg={self.norm_cfg}, '
+ repr_str += f'num_fcs={self.num_fcs})'
+ return repr_str
+
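+
+# A minimal sketch of the two supported layer orders for
+# ``TransformerEncoderLayer`` above: the default post-norm order and the
+# pre-norm order. Shapes and hyper-parameters are placeholders and
+# ``_example_transformer_encoder_layer`` is an illustrative helper.
+def _example_transformer_encoder_layer():
+    post_norm_layer = TransformerEncoderLayer(256, 8, 1024)
+    pre_norm_layer = TransformerEncoderLayer(
+        256, 8, 1024, order=('norm', 'selfattn', 'norm', 'ffn'))
+    x = torch.rand(600, 2, 256)    # [num_key, bs, embed_dims]
+    pos = torch.rand(600, 2, 256)  # positional encoding for the query/key
+    return post_norm_layer(x, pos=pos), pre_norm_layer(x, pos=pos)
+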
+
+class TransformerDecoderLayer(nn.Module):
+ """Implements one decoder layer in DETR transformer.
+
+ Args:
+ embed_dims (int): The feature dimension. Same as
+ `TransformerEncoderLayer`.
+ num_heads (int): Parallel attention heads.
+ feedforward_channels (int): Same as `TransformerEncoderLayer`.
+ dropout (float): Same as `TransformerEncoderLayer`. Default 0.0.
+ order (tuple[str]): The order for decoder layer. Valid examples are
+ ('selfattn', 'norm', 'multiheadattn', 'norm', 'ffn', 'norm') and
+ ('norm', 'selfattn', 'norm', 'multiheadattn', 'norm', 'ffn').
+ Default the former.
+ act_cfg (dict): Same as `TransformerEncoderLayer`. Default ReLU.
+ norm_cfg (dict): Config dict for normalization layer. Default
+ layer normalization.
+ num_fcs (int): The number of fully-connected layers in FFNs.
+ """
+
+ def __init__(self,
+ embed_dims,
+ num_heads,
+ feedforward_channels,
+ dropout=0.0,
+ order=('selfattn', 'norm', 'multiheadattn', 'norm', 'ffn',
+ 'norm'),
+ act_cfg=dict(type='ReLU', inplace=True),
+ norm_cfg=dict(type='LN'),
+ num_fcs=2):
+ super(TransformerDecoderLayer, self).__init__()
+ assert isinstance(order, tuple) and len(order) == 6
+ assert set(order) == set(['selfattn', 'norm', 'multiheadattn', 'ffn'])
+ self.embed_dims = embed_dims
+ self.num_heads = num_heads
+ self.feedforward_channels = feedforward_channels
+ self.dropout = dropout
+ self.order = order
+ self.act_cfg = act_cfg
+ self.norm_cfg = norm_cfg
+ self.num_fcs = num_fcs
+ self.pre_norm = order[0] == 'norm'
+ self.self_attn = MultiheadAttention(embed_dims, num_heads, dropout)
+ self.multihead_attn = MultiheadAttention(embed_dims, num_heads,
+ dropout)
+ self.ffn = FFN(embed_dims, feedforward_channels, num_fcs, act_cfg,
+ dropout)
+ self.norms = nn.ModuleList()
+ # 3 norm layers in official DETR's TransformerDecoderLayer
+ for _ in range(3):
+ self.norms.append(build_norm_layer(norm_cfg, embed_dims)[1])
+
+ def forward(self,
+ x,
+ memory,
+ memory_pos=None,
+ query_pos=None,
+ memory_attn_mask=None,
+ target_attn_mask=None,
+ memory_key_padding_mask=None,
+ target_key_padding_mask=None):
+ """Forward function for `TransformerDecoderLayer`.
+
+ Args:
+ x (Tensor): Input query with shape [num_query, bs, embed_dims].
+ memory (Tensor): Tensor got from `TransformerEncoder`, with shape
+ [num_key, bs, embed_dims].
+ memory_pos (Tensor): The positional encoding for `memory`. Default
+ None. Same as `key_pos` in `MultiheadAttention.forward`.
+ query_pos (Tensor): The positional encoding for `query`. Default
+ None. Same as `query_pos` in `MultiheadAttention.forward`.
+ memory_attn_mask (Tensor): ByteTensor mask for `memory`, with
+ shape [num_key, num_key]. Same as `attn_mask` in
+ `MultiheadAttention.forward`. Default None.
+ target_attn_mask (Tensor): ByteTensor mask for `x`, with shape
+ [num_query, num_query]. Same as `attn_mask` in
+ `MultiheadAttention.forward`. Default None.
+ memory_key_padding_mask (Tensor): ByteTensor for `memory`, with
+ shape [bs, num_key]. Same as `key_padding_mask` in
+ `MultiheadAttention.forward`. Default None.
+ target_key_padding_mask (Tensor): ByteTensor for `x`, with shape
+ [bs, num_query]. Same as `key_padding_mask` in
+ `MultiheadAttention.forward`. Default None.
+
+ Returns:
+ Tensor: forwarded results with shape [num_query, bs, embed_dims].
+ """
+ norm_cnt = 0
+ inp_residual = x
+ for layer in self.order:
+ if layer == 'selfattn':
+ query = key = value = x
+ x = self.self_attn(
+ query,
+ key,
+ value,
+ inp_residual if self.pre_norm else None,
+ query_pos,
+ key_pos=query_pos,
+ attn_mask=target_attn_mask,
+ key_padding_mask=target_key_padding_mask)
+ inp_residual = x
+ elif layer == 'norm':
+ x = self.norms[norm_cnt](x)
+ norm_cnt += 1
+ elif layer == 'multiheadattn':
+ query = x
+ key = value = memory
+ x = self.multihead_attn(
+ query,
+ key,
+ value,
+ inp_residual if self.pre_norm else None,
+ query_pos,
+ key_pos=memory_pos,
+ attn_mask=memory_attn_mask,
+ key_padding_mask=memory_key_padding_mask)
+ inp_residual = x
+ elif layer == 'ffn':
+ x = self.ffn(x, inp_residual if self.pre_norm else None)
+ return x
+
+ def __repr__(self):
+ """str: a string that describes the module"""
+ repr_str = self.__class__.__name__
+ repr_str += f'(embed_dims={self.embed_dims}, '
+ repr_str += f'num_heads={self.num_heads}, '
+ repr_str += f'feedforward_channels={self.feedforward_channels}, '
+ repr_str += f'dropout={self.dropout}, '
+ repr_str += f'order={self.order}, '
+ repr_str += f'act_cfg={self.act_cfg}, '
+ repr_str += f'norm_cfg={self.norm_cfg}, '
+ repr_str += f'num_fcs={self.num_fcs})'
+ return repr_str
+
+
+class TransformerEncoder(nn.Module):
+ """Implements the encoder in DETR transformer.
+
+ Args:
+ num_layers (int): The number of `TransformerEncoderLayer`.
+ embed_dims (int): Same as `TransformerEncoderLayer`.
+ num_heads (int): Same as `TransformerEncoderLayer`.
+ feedforward_channels (int): Same as `TransformerEncoderLayer`.
+ dropout (float): Same as `TransformerEncoderLayer`. Default 0.0.
+ order (tuple[str]): Same as `TransformerEncoderLayer`.
+ act_cfg (dict): Same as `TransformerEncoderLayer`. Default ReLU.
+ norm_cfg (dict): Same as `TransformerEncoderLayer`. Default
+ layer normalization.
+ num_fcs (int): Same as `TransformerEncoderLayer`. Default 2.
+ """
+
+ def __init__(self,
+ num_layers,
+ embed_dims,
+ num_heads,
+ feedforward_channels,
+ dropout=0.0,
+ order=('selfattn', 'norm', 'ffn', 'norm'),
+ act_cfg=dict(type='ReLU', inplace=True),
+ norm_cfg=dict(type='LN'),
+ num_fcs=2):
+ super(TransformerEncoder, self).__init__()
+ assert isinstance(order, tuple) and len(order) == 4
+ assert set(order) == set(['selfattn', 'norm', 'ffn'])
+ self.num_layers = num_layers
+ self.embed_dims = embed_dims
+ self.num_heads = num_heads
+ self.feedforward_channels = feedforward_channels
+ self.dropout = dropout
+ self.order = order
+ self.act_cfg = act_cfg
+ self.norm_cfg = norm_cfg
+ self.num_fcs = num_fcs
+ self.pre_norm = order[0] == 'norm'
+ self.layers = nn.ModuleList()
+ for _ in range(num_layers):
+ self.layers.append(
+ TransformerEncoderLayer(embed_dims, num_heads,
+ feedforward_channels, dropout, order,
+ act_cfg, norm_cfg, num_fcs))
+ self.norm = build_norm_layer(norm_cfg,
+ embed_dims)[1] if self.pre_norm else None
+
+ def forward(self, x, pos=None, attn_mask=None, key_padding_mask=None):
+ """Forward function for `TransformerEncoder`.
+
+ Args:
+ x (Tensor): Input query. Same in `TransformerEncoderLayer.forward`.
+ pos (Tensor): Positional encoding for query. Default None.
+ Same in `TransformerEncoderLayer.forward`.
+ attn_mask (Tensor): ByteTensor attention mask. Default None.
+ Same in `TransformerEncoderLayer.forward`.
+ key_padding_mask (Tensor): Same in
+ `TransformerEncoderLayer.forward`. Default None.
+
+ Returns:
+ Tensor: Results with shape [num_key, bs, embed_dims].
+ """
+ for layer in self.layers:
+ x = layer(x, pos, attn_mask, key_padding_mask)
+ if self.norm is not None:
+ x = self.norm(x)
+ return x
+
+ def __repr__(self):
+ """str: a string that describes the module"""
+ repr_str = self.__class__.__name__
+ repr_str += f'(num_layers={self.num_layers}, '
+ repr_str += f'embed_dims={self.embed_dims}, '
+ repr_str += f'num_heads={self.num_heads}, '
+ repr_str += f'feedforward_channels={self.feedforward_channels}, '
+ repr_str += f'dropout={self.dropout}, '
+ repr_str += f'order={self.order}, '
+ repr_str += f'act_cfg={self.act_cfg}, '
+ repr_str += f'norm_cfg={self.norm_cfg}, '
+ repr_str += f'num_fcs={self.num_fcs})'
+ return repr_str
+
+
+class TransformerDecoder(nn.Module):
+ """Implements the decoder in DETR transformer.
+
+ Args:
+ num_layers (int): The number of `TransformerDecoderLayer`.
+ embed_dims (int): Same as `TransformerDecoderLayer`.
+ num_heads (int): Same as `TransformerDecoderLayer`.
+ feedforward_channels (int): Same as `TransformerDecoderLayer`.
+ dropout (float): Same as `TransformerDecoderLayer`. Default 0.0.
+ order (tuple[str]): Same as `TransformerDecoderLayer`.
+ act_cfg (dict): Same as `TransformerDecoderLayer`. Default ReLU.
+ norm_cfg (dict): Same as `TransformerDecoderLayer`. Default
+ layer normalization.
+ num_fcs (int): Same as `TransformerDecoderLayer`. Default 2.
+ """
+
+ def __init__(self,
+ num_layers,
+ embed_dims,
+ num_heads,
+ feedforward_channels,
+ dropout=0.0,
+ order=('selfattn', 'norm', 'multiheadattn', 'norm', 'ffn',
+ 'norm'),
+ act_cfg=dict(type='ReLU', inplace=True),
+ norm_cfg=dict(type='LN'),
+ num_fcs=2,
+ return_intermediate=False):
+ super(TransformerDecoder, self).__init__()
+ assert isinstance(order, tuple) and len(order) == 6
+ assert set(order) == set(['selfattn', 'norm', 'multiheadattn', 'ffn'])
+ self.num_layers = num_layers
+ self.embed_dims = embed_dims
+ self.num_heads = num_heads
+ self.feedforward_channels = feedforward_channels
+ self.dropout = dropout
+ self.order = order
+ self.act_cfg = act_cfg
+ self.norm_cfg = norm_cfg
+ self.num_fcs = num_fcs
+ self.return_intermediate = return_intermediate
+ self.layers = nn.ModuleList()
+ for _ in range(num_layers):
+ self.layers.append(
+ TransformerDecoderLayer(embed_dims, num_heads,
+ feedforward_channels, dropout, order,
+ act_cfg, norm_cfg, num_fcs))
+ self.norm = build_norm_layer(norm_cfg, embed_dims)[1]
+
+ def forward(self,
+ x,
+ memory,
+ memory_pos=None,
+ query_pos=None,
+ memory_attn_mask=None,
+ target_attn_mask=None,
+ memory_key_padding_mask=None,
+ target_key_padding_mask=None):
+ """Forward function for `TransformerDecoder`.
+
+ Args:
+ x (Tensor): Input query. Same in `TransformerDecoderLayer.forward`.
+ memory (Tensor): Same in `TransformerDecoderLayer.forward`.
+ memory_pos (Tensor): Same in `TransformerDecoderLayer.forward`.
+ Default None.
+ query_pos (Tensor): Same in `TransformerDecoderLayer.forward`.
+ Default None.
+ memory_attn_mask (Tensor): Same in
+ `TransformerDecoderLayer.forward`. Default None.
+ target_attn_mask (Tensor): Same in
+ `TransformerDecoderLayer.forward`. Default None.
+ memory_key_padding_mask (Tensor): Same in
+ `TransformerDecoderLayer.forward`. Default None.
+ target_key_padding_mask (Tensor): Same in
+ `TransformerDecoderLayer.forward`. Default None.
+
+ Returns:
+ Tensor: Results with shape [num_query, bs, embed_dims].
+ """
+ intermediate = []
+ for layer in self.layers:
+ x = layer(x, memory, memory_pos, query_pos, memory_attn_mask,
+ target_attn_mask, memory_key_padding_mask,
+ target_key_padding_mask)
+ if self.return_intermediate:
+ intermediate.append(self.norm(x))
+ if self.norm is not None:
+ x = self.norm(x)
+ if self.return_intermediate:
+ intermediate.pop()
+ intermediate.append(x)
+ if self.return_intermediate:
+ return torch.stack(intermediate)
+ return x.unsqueeze(0)
+
+ def __repr__(self):
+ """str: a string that describes the module"""
+ repr_str = self.__class__.__name__
+ repr_str += f'(num_layers={self.num_layers}, '
+ repr_str += f'embed_dims={self.embed_dims}, '
+ repr_str += f'num_heads={self.num_heads}, '
+ repr_str += f'feedforward_channels={self.feedforward_channels}, '
+ repr_str += f'dropout={self.dropout}, '
+ repr_str += f'order={self.order}, '
+ repr_str += f'act_cfg={self.act_cfg}, '
+ repr_str += f'norm_cfg={self.norm_cfg}, '
+ repr_str += f'num_fcs={self.num_fcs}, '
+ repr_str += f'return_intermediate={self.return_intermediate})'
+ return repr_str
+
+
+@TRANSFORMER.register_module()
+class Transformer(nn.Module):
+ """Implements the DETR transformer.
+
+    Following the official DETR implementation, this module is copy-pasted
+    from torch.nn.Transformer with the following modifications:
+
+ * positional encodings are passed in MultiheadAttention
+ * extra LN at the end of encoder is removed
+ * decoder returns a stack of activations from all decoding layers
+
+    See `paper: End-to-End Object Detection with Transformers
+    <https://arxiv.org/abs/2005.12872>`_ for details.
+
+ Args:
+ embed_dims (int): The feature dimension.
+ num_heads (int): Parallel attention heads. Same as
+ `nn.MultiheadAttention`.
+ num_encoder_layers (int): Number of `TransformerEncoderLayer`.
+ num_decoder_layers (int): Number of `TransformerDecoderLayer`.
+ feedforward_channels (int): The hidden dimension for FFNs used in both
+ encoder and decoder.
+ dropout (float): Probability of an element to be zeroed. Default 0.0.
+ act_cfg (dict): Activation config for FFNs used in both encoder
+ and decoder. Default ReLU.
+ norm_cfg (dict): Config dict for normalization used in both encoder
+ and decoder. Default layer normalization.
+ num_fcs (int): The number of fully-connected layers in FFNs, which is
+ used for both encoder and decoder.
+ pre_norm (bool): Whether the normalization layer is ordered
+ first in the encoder and decoder. Default False.
+        return_intermediate_dec (bool): Whether to return the intermediate
+            output from each TransformerDecoderLayer or only the last
+            TransformerDecoderLayer. Default False. If False, the returned
+            `hs` has shape [1, bs, num_query, embed_dims]; if True, it has
+            shape [num_decoder_layers, bs, num_query, embed_dims].
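+
+    Example:
+        A minimal sketch; the shapes and the 100-query setup below are
+        illustrative assumptions rather than values taken from this
+        repository.
+
+        >>> transformer = Transformer(embed_dims=256, num_heads=8)
+        >>> transformer.init_weights()
+        >>> x = torch.rand(2, 256, 25, 38)          # backbone feature map
+        >>> mask = torch.zeros(2, 25, 38).bool()    # no padded positions
+        >>> query_embed = torch.rand(100, 256)      # learned object queries
+        >>> pos_embed = torch.rand(2, 256, 25, 38)  # positional encoding
+        >>> out_dec, memory = transformer(x, mask, query_embed, pos_embed)
+        >>> # out_dec: [1, 2, 100, 256] since return_intermediate_dec=False
+        >>> # memory:  [2, 256, 25, 38]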
+ """
+
+ def __init__(self,
+ embed_dims=512,
+ num_heads=8,
+ num_encoder_layers=6,
+ num_decoder_layers=6,
+ feedforward_channels=2048,
+ dropout=0.0,
+ act_cfg=dict(type='ReLU', inplace=True),
+ norm_cfg=dict(type='LN'),
+ num_fcs=2,
+ pre_norm=False,
+ return_intermediate_dec=False):
+ super(Transformer, self).__init__()
+ self.embed_dims = embed_dims
+ self.num_heads = num_heads
+ self.num_encoder_layers = num_encoder_layers
+ self.num_decoder_layers = num_decoder_layers
+ self.feedforward_channels = feedforward_channels
+ self.dropout = dropout
+ self.act_cfg = act_cfg
+ self.norm_cfg = norm_cfg
+ self.num_fcs = num_fcs
+ self.pre_norm = pre_norm
+ self.return_intermediate_dec = return_intermediate_dec
+ if self.pre_norm:
+ encoder_order = ('norm', 'selfattn', 'norm', 'ffn')
+ decoder_order = ('norm', 'selfattn', 'norm', 'multiheadattn',
+ 'norm', 'ffn')
+ else:
+ encoder_order = ('selfattn', 'norm', 'ffn', 'norm')
+ decoder_order = ('selfattn', 'norm', 'multiheadattn', 'norm',
+ 'ffn', 'norm')
+ self.encoder = TransformerEncoder(num_encoder_layers, embed_dims,
+ num_heads, feedforward_channels,
+ dropout, encoder_order, act_cfg,
+ norm_cfg, num_fcs)
+ self.decoder = TransformerDecoder(num_decoder_layers, embed_dims,
+ num_heads, feedforward_channels,
+ dropout, decoder_order, act_cfg,
+ norm_cfg, num_fcs,
+ return_intermediate_dec)
+
+ def init_weights(self, distribution='uniform'):
+ """Initialize the transformer weights."""
+ # follow the official DETR to init parameters
+ for m in self.modules():
+ if hasattr(m, 'weight') and m.weight.dim() > 1:
+ xavier_init(m, distribution=distribution)
+
+ def forward(self, x, mask, query_embed, pos_embed):
+ """Forward function for `Transformer`.
+
+ Args:
+ x (Tensor): Input query with shape [bs, c, h, w] where
+ c = embed_dims.
+ mask (Tensor): The key_padding_mask used for encoder and decoder,
+ with shape [bs, h, w].
+ query_embed (Tensor): The query embedding for decoder, with shape
+ [num_query, c].
+ pos_embed (Tensor): The positional encoding for encoder and
+ decoder, with the same shape as `x`.
+
+ Returns:
+ tuple[Tensor]: results of decoder containing the following tensor.
+
+                - out_dec: Output from decoder. If return_intermediate_dec \
+                      is True, the output has shape [num_dec_layers, bs,
+                      num_query, embed_dims]; otherwise it has shape \
+                      [1, bs, num_query, embed_dims].
+ - memory: Output results from encoder, with shape \
+ [bs, embed_dims, h, w].
+ """
+ bs, c, h, w = x.shape
+ x = x.flatten(2).permute(2, 0, 1) # [bs, c, h, w] -> [h*w, bs, c]
+ pos_embed = pos_embed.flatten(2).permute(2, 0, 1)
+ query_embed = query_embed.unsqueeze(1).repeat(
+ 1, bs, 1) # [num_query, dim] -> [num_query, bs, dim]
+ mask = mask.flatten(1) # [bs, h, w] -> [bs, h*w]
+ memory = self.encoder(
+ x, pos=pos_embed, attn_mask=None, key_padding_mask=mask)
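+        # The decoder input (object queries) starts as zeros; the learned
+        # query embedding is injected through `query_pos` in every decoder
+        # layer.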
+ target = torch.zeros_like(query_embed)
+ # out_dec: [num_layers, num_query, bs, dim]
+ out_dec = self.decoder(
+ target,
+ memory,
+ memory_pos=pos_embed,
+ query_pos=query_embed,
+ memory_attn_mask=None,
+ target_attn_mask=None,
+ memory_key_padding_mask=mask,
+ target_key_padding_mask=None)
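+        # [num_layers, num_query, bs, dim] -> [num_layers, bs, num_query, dim]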
+ out_dec = out_dec.transpose(1, 2)
+ memory = memory.permute(1, 2, 0).reshape(bs, c, h, w)
+ return out_dec, memory
+
+ def __repr__(self):
+ """str: a string that describes the module"""
+ repr_str = self.__class__.__name__
+ repr_str += f'(embed_dims={self.embed_dims}, '
+ repr_str += f'num_heads={self.num_heads}, '
+ repr_str += f'num_encoder_layers={self.num_encoder_layers}, '
+ repr_str += f'num_decoder_layers={self.num_decoder_layers}, '
+ repr_str += f'feedforward_channels={self.feedforward_channels}, '
+ repr_str += f'dropout={self.dropout}, '
+ repr_str += f'act_cfg={self.act_cfg}, '
+ repr_str += f'norm_cfg={self.norm_cfg}, '
+ repr_str += f'num_fcs={self.num_fcs}, '
+ repr_str += f'pre_norm={self.pre_norm}, '
+ repr_str += f'return_intermediate_dec={self.return_intermediate_dec})'
+ return repr_str
+
+
+@TRANSFORMER.register_module()
+class DynamicConv(nn.Module):
+ """Implements Dynamic Convolution.
+
+    This module generates parameters for each sample and
+    uses bmm to implement 1x1 convolution. Code is modified
+ from the `official github repo