"""
# Model: YOLOv7

@inproceedings{wang2023yolov7,
    title={{YOLOv7}: Trainable bag-of-freebies sets new state-of-the-art for real-time object detectors},
    author={Wang, Chien-Yao and Bochkovskiy, Alexey and Liao, Hong-Yuan Mark},
    booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
    year={2023}
}
"""
import os
import random
import time

import cv2
import numpy as np
import onnxruntime as ort
import torch
import torchvision

from Lib.Const import LABELS, COLOR_MAP_RGB
from Tool.Core import DownloadHFModel


# Fetch the ONNX weights from the Hugging Face Hub. DownloadHFModel is assumed
# to place the file under ./Weight, matching the InferenceSession path below.
REPO_ID = "blitzkrieg0000/yolov7_fault-detection"
MODEL_FILE = "yolov7_ariza.onnx"
DownloadHFModel(REPO_ID, MODEL_FILE)


def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):
    # Rescale xyxy coords from the letterboxed shape img1_shape (h, w) back to
    # the original image shape img0_shape (h, w).
    if ratio_pad is None:  # compute gain and padding from the two shapes
        gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1])  # scale factor, original -> letterboxed
        pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2
    else:
        gain = ratio_pad[0][0]
        pad = ratio_pad[1]

    coords[:, [0, 2]] -= pad[0]  # x padding
    coords[:, [1, 3]] -= pad[1]  # y padding
    coords[:, :4] /= gain
    clip_coords(coords, img0_shape)
    return coords


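# Worked example (shapes are illustrative): mapping from a 384x640 letterboxed
# frame back to a 1080x1920 original gives gain = 1/3 and pad = (0, 12), so a
# letterboxed point (320, 204) maps to ((320 - 0) * 3, (204 - 12) * 3) = (960, 576).

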
def clip_coords(boxes, img_shape):
    # Clip xyxy boxes in place to the image bounds (img_shape is (h, w, ...)).
    boxes[:, 0].clamp_(0, img_shape[1])  # x1
    boxes[:, 1].clamp_(0, img_shape[0])  # y1
    boxes[:, 2].clamp_(0, img_shape[1])  # x2
    boxes[:, 3].clamp_(0, img_shape[0])  # y2


def box_iou(box1, box2):
    """
    Return intersection-over-union (Jaccard index) of boxes.
    Both sets of boxes are expected to be in (x1, y1, x2, y2) format.
    Arguments:
        box1 (Tensor[N, 4])
        box2 (Tensor[M, 4])
    Returns:
        iou (Tensor[N, M]): the NxM matrix containing the pairwise
            IoU values for every element in boxes1 and boxes2
    """
    def box_area(box):
        # box is a 4xN tensor
        return (box[2] - box[0]) * (box[3] - box[1])

    area1 = box_area(box1.T)
    area2 = box_area(box2.T)

    # Broadcast to an NxM intersection matrix, then IoU = inter / (area1 + area2 - inter).
    inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2)
    return inter / (area1[:, None] + area2 - inter)


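# Worked example: box1 = [[0, 0, 2, 2]] and box2 = [[1, 0, 3, 2]] overlap on a
# 1x2 strip, so inter = 2 and IoU = 2 / (4 + 4 - 2) = 1/3.

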
def xywh2xyxy(x):
    # Convert nx4 boxes from [cx, cy, w, h] (center-based) to [x1, y1, x2, y2]
    # where (x1, y1) is top-left and (x2, y2) is bottom-right.
    y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
    y[:, 0] = x[:, 0] - x[:, 2] / 2  # top-left x
    y[:, 1] = x[:, 1] - x[:, 3] / 2  # top-left y
    y[:, 2] = x[:, 0] + x[:, 2] / 2  # bottom-right x
    y[:, 3] = x[:, 1] + x[:, 3] / 2  # bottom-right y
    return y


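# Worked example: a 100x50 box centered at (200, 150),
# [[200, 150, 100, 50]] -> [[150, 125, 250, 175]].

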
def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, multi_label=False, labels=()):
    """Runs Non-Maximum Suppression (NMS) on inference results

    Returns:
        list of detections, one (n, 6) tensor per image [xyxy, conf, cls]
    """
    nc = prediction.shape[2] - 5  # number of classes
    xc = prediction[..., 4] > conf_thres  # candidates by objectness

    # Settings
    min_wh, max_wh = 2, 4096  # (pixels) minimum and maximum box width and height
    max_det = 300  # maximum number of detections per image
    max_nms = 30000  # maximum number of boxes into torchvision.ops.nms()
    time_limit = 10.0  # seconds to quit after
    redundant = True  # require redundant detections when merging
    multi_label &= nc > 1  # multiple labels per box
    merge = False  # use merge-NMS

    t = time.time()
    output = [torch.zeros((0, 6), device=prediction.device)] * prediction.shape[0]
    for xi, x in enumerate(prediction):  # image index, image inference
        x = x[xc[xi]]  # keep confident candidates

        # Cat apriori labels if autolabelling
        if labels and len(labels[xi]):
            l = labels[xi]
            v = torch.zeros((len(l), nc + 5), device=x.device)
            v[:, :4] = l[:, 1:5]  # box
            v[:, 4] = 1.0  # conf
            v[range(len(l)), l[:, 0].long() + 5] = 1.0  # cls
            x = torch.cat((x, v), 0)

        # If none remain, process next image
        if not x.shape[0]:
            continue

        # Compute conf = obj_conf * cls_conf (with a single class, cls_conf carries no signal)
        if nc == 1:
            x[:, 5:] = x[:, 4:5]
        else:
            x[:, 5:] *= x[:, 4:5]

        # Box (center x, center y, width, height) to (x1, y1, x2, y2)
        box = xywh2xyxy(x[:, :4])

        # Detections matrix nx6 (xyxy, conf, cls)
        if multi_label:
            i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T
            x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1)
        else:  # best class only
            conf, j = x[:, 5:].max(1, keepdim=True)
            x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres]

        # Filter by class
        if classes is not None:
            x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]

        # Check number of boxes: if none remain, skip; if too many, keep the top max_nms by confidence
        n = x.shape[0]
        if not n:
            continue
        elif n > max_nms:
            x = x[x[:, 4].argsort(descending=True)[:max_nms]]

        # Batched NMS: offset boxes by class so classes never suppress each other
        c = x[:, 5:6] * (0 if agnostic else max_wh)
        boxes, scores = x[:, :4] + c, x[:, 4]
        i = torchvision.ops.nms(boxes, scores, iou_thres)
        if i.shape[0] > max_det:  # limit detections
            i = i[:max_det]
        if merge and (1 < n < 3E3):  # merge-NMS: merge boxes using a weighted mean
            iou = box_iou(boxes[i], boxes) > iou_thres  # IoU matrix
            weights = iou * scores[None]  # box weights
            x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True)
            if redundant:
                i = i[iou.sum(1) > 1]  # require redundancy

        output[xi] = x[i]
        if (time.time() - t) > time_limit:
            print(f'WARNING: NMS time limit {time_limit}s exceeded')
            break

    return output


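# Example call (shapes are illustrative): for a raw head output of shape
# (batch, n_boxes, 5 + nc) holding [cx, cy, w, h, obj, cls_0..cls_{nc-1}],
#   dets = non_max_suppression(torch.rand(1, 25200, 85), conf_thres=0.25)
# returns a list with one (k, 6) tensor of [x1, y1, x2, y2, conf, cls] per image.

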
def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, stride=32):
    # Resize image to new_shape while keeping the aspect ratio, padding the rest
    # with `color`. With auto=True the padding is reduced so each side is only a
    # multiple of `stride` (a minimum rectangle), not necessarily the full new_shape.
    shape = img.shape[:2]  # current shape [height, width]
    if isinstance(new_shape, int):
        new_shape = (new_shape, new_shape)

    # Scale ratio (new / old)
    r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
    if not scaleup:  # only scale down, do not scale up
        r = min(r, 1.0)

    # Compute padding
    ratio = r, r  # width, height ratios
    new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
    dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]  # wh padding
    if auto:  # minimum rectangle
        dw, dh = np.mod(dw, stride), np.mod(dh, stride)
    elif scaleFill:  # stretch to new_shape exactly, no padding
        dw, dh = 0.0, 0.0
        new_unpad = (new_shape[1], new_shape[0])
        ratio = new_shape[1] / shape[1], new_shape[0] / shape[0]  # width, height ratios

    dw /= 2  # divide padding onto both sides
    dh /= 2

    if shape[::-1] != new_unpad:  # resize
        img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
    top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
    left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
    img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)  # add border
    return img, ratio, (dw, dh)


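# Worked example: a 1080x1920 frame with new_shape=640, stride=64, auto=True
# gives r = 1/3, new_unpad = (640, 360), dh = 280 -> mod 64 = 24, so the output
# is a 384x640 image with 12 rows of gray padding on top and bottom.

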
def plot_one_box(x, img, color=None, label=None, line_thickness=3):
    # Draw one xyxy bounding box (and optional label) on an HxWxC image in place.
    tl = line_thickness or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1  # line/font thickness; shape[0]+shape[1] = h+w
    color = color or [random.randint(0, 255) for _ in range(3)]
    c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3]))
    cv2.rectangle(img, c1, c2, color, tl, cv2.LINE_AA)

    if label:
        tf = max(tl - 1, 1)  # font thickness
        t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
        c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3
        cv2.rectangle(img, c1, c2, color, -1, cv2.LINE_AA)  # filled label background
        cv2.putText(img, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA)


print(ort.get_available_providers())
# Build the ONNX Runtime session with every available provider, in ORT's
# default priority order (e.g. CUDA before CPU on a GPU-enabled build).
session = ort.InferenceSession(os.path.join("./Weight", MODEL_FILE), providers=ort.get_available_providers())

input_name = session.get_inputs()[0].name
print("input name", input_name)
input_shape = session.get_inputs()[0].shape
print("input shape", input_shape)
input_type = session.get_inputs()[0].type
print("input type", input_type)
output_name = session.get_outputs()[0].name


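# A minimal sketch, if you prefer to pin the provider order explicitly rather
# than pass everything available (assumes a GPU-enabled onnxruntime build):
#   session = ort.InferenceSession(os.path.join("./Weight", MODEL_FILE),
#                                  providers=["CUDAExecutionProvider", "CPUExecutionProvider"])

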
def DetectFaults(im0, model_threshold=0.25, iou_thres=0.45):
    # Preprocess: letterbox to a stride-64 rectangle, BGR -> RGB, HWC -> CHW.
    # auto=True yields a stride-multiple rectangle rather than a fixed 640x640,
    # which assumes the exported model has dynamic spatial axes.
    img = letterbox(im0, 640, stride=64, auto=True)[0]
    img = img[:, :, ::-1].transpose(2, 0, 1)
    img = np.ascontiguousarray(img)
    image = img.astype(np.float16) / 255.0  # the exported model is assumed to take fp16 input (check the printed `input type`)
    image = image[np.newaxis, ...]  # add batch dimension

    # Inference + NMS
    results = session.run([output_name], {input_name: image})
    res = torch.from_numpy(results[0])
    pred = non_max_suppression(res, conf_thres=model_threshold, iou_thres=iou_thres, classes=None, agnostic=False, multi_label=False, labels=())

    print(pred[0].shape)
    print(pred[0])

    boxes = []
    classes = []
    for i, det in enumerate(pred):
        if len(det):
            # Rescale boxes from the letterboxed size back to the original image
            det[:, :4] = scale_coords(image.shape[2:], det[:, :4], im0.shape).round()
            print(det)
            for *xyxy, conf, cls in reversed(det):
                _label = LABELS[int(cls)]
                plot_one_box(xyxy, im0, label=_label, color=COLOR_MAP_RGB[_label], line_thickness=2)
                classes.append(int(cls))
                boxes.append([int(xyxy[0]), int(xyxy[1]), int(xyxy[2]), int(xyxy[3])])

    return im0, boxes, classes


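# Example usage (note the three-value return: annotated image, xyxy boxes, class ids):
#   annotated, boxes, classes = DetectFaults(frame, model_threshold=0.4)

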
if "__main__" == __name__: |
|
im0 = cv2.imread("data/DJI_20240905125342_0004_Z.JPG") |
|
img0, boxes = DetectFaults(im0) |
|
cv2.imwrite("result.png", im0) |
|
|
|
|