blitzkrieg0000 committed
Commit 1ddcf83 · verified · Parent: aa69e0d

Upload 9 files

Lib/Const.py ADDED
@@ -0,0 +1,3 @@
+ LABELS = {0: "Eksik Halka", 1: "KIRIK", 2: "Ark izi"}
+ COLOR_MAP = {"Eksik Halka": "#ffffff", "KIRIK": "#0000ff", "Ark izi": "#ff0000"}
+ COLOR_MAP_RGB = {key: [int(value[1:3], 16), int(value[3:5], 16), int(value[5:7], 16)] for key, value in COLOR_MAP.items()}
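`LABELS` maps the model's class ids to the Turkish fault names (Eksik Halka = missing insulator disc, KIRIK = broken, Ark izi = arc mark), and `COLOR_MAP_RGB` unpacks each `#rrggbb` hex string into an `[R, G, B]` integer list by parsing two hex digits per channel. The same conversion as a standalone sketch (the `hex_to_rgb` helper is illustrative, not part of the commit):

```python
def hex_to_rgb(hex_color: str) -> list[int]:
    # "#0000ff" -> [0, 0, 255]: drop the "#", parse 2-digit pairs as base-16 ints
    h = hex_color.lstrip("#")
    return [int(h[i:i + 2], 16) for i in (0, 2, 4)]

assert hex_to_rgb("#0000ff") == [0, 0, 255]
```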
Lib/DetectFaultOnnx.py ADDED
@@ -0,0 +1,272 @@
+ """
+ # Model: YOLOv7
+ @inproceedings{wang2023yolov7,
+     title={{YOLOv7}: Trainable bag-of-freebies sets new state-of-the-art for real-time object detectors},
+     author={Wang, Chien-Yao and Bochkovskiy, Alexey and Liao, Hong-Yuan Mark},
+     booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
+     year={2023}
+ }
+ """
+ import random
+ import time
+ import torch
+ import torchvision
+ import onnxruntime as ort
+ import cv2
+ import numpy as np
+ from Lib.Const import LABELS, COLOR_MAP, COLOR_MAP_RGB
+
+
+ def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):
+     # Rescale coords (xyxy) from img1_shape to img0_shape
+     if ratio_pad is None:  # calculate from img0_shape
+         gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1])  # gain = old / new
+         pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2  # wh padding
+     else:
+         gain = ratio_pad[0][0]
+         pad = ratio_pad[1]
+
+     coords[:, [0, 2]] -= pad[0]  # x padding
+     coords[:, [1, 3]] -= pad[1]  # y padding
+     coords[:, :4] /= gain
+     clip_coords(coords, img0_shape)
+     return coords
+
+
+ def clip_coords(boxes, img_shape):
+     # Clip xyxy bounding boxes to image shape (height, width)
+     boxes[:, 0].clamp_(0, img_shape[1])  # x1
+     boxes[:, 1].clamp_(0, img_shape[0])  # y1
+     boxes[:, 2].clamp_(0, img_shape[1])  # x2
+     boxes[:, 3].clamp_(0, img_shape[0])  # y2
+
+
+ def box_iou(box1, box2):
+     # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py
+     """
+     Return intersection-over-union (Jaccard index) of boxes.
+     Both sets of boxes are expected to be in (x1, y1, x2, y2) format.
+     Arguments:
+         box1 (Tensor[N, 4])
+         box2 (Tensor[M, 4])
+     Returns:
+         iou (Tensor[N, M]): the NxM matrix containing the pairwise
+             IoU values for every element in boxes1 and boxes2
+     """
+
+     def box_area(box):
+         # box = 4xn
+         return (box[2] - box[0]) * (box[3] - box[1])
+
+     area1 = box_area(box1.T)
+     area2 = box_area(box2.T)
+
+     # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2)
+     inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2)
+     return inter / (area1[:, None] + area2 - inter)  # iou = inter / (area1 + area2 - inter)
+
+
+ def xywh2xyxy(x):
+     # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
+     y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
+     y[:, 0] = x[:, 0] - x[:, 2] / 2  # top left x
+     y[:, 1] = x[:, 1] - x[:, 3] / 2  # top left y
+     y[:, 2] = x[:, 0] + x[:, 2] / 2  # bottom right x
+     y[:, 3] = x[:, 1] + x[:, 3] / 2  # bottom right y
+     return y
+
+
+ def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, multi_label=False, labels=()):
+     """Runs Non-Maximum Suppression (NMS) on inference results
+
+     Returns:
+         list of detections, one (n,6) tensor per image [xyxy, conf, cls]
+     """
+
+     nc = prediction.shape[2] - 5  # number of classes
+     xc = prediction[..., 4] > conf_thres  # candidates
+
+     # Settings
+     min_wh, max_wh = 2, 4096  # (pixels) minimum and maximum box width and height
+     max_det = 300  # maximum number of detections per image
+     max_nms = 30000  # maximum number of boxes into torchvision.ops.nms()
+     time_limit = 10.0  # seconds to quit after
+     redundant = True  # require redundant detections
+     multi_label &= nc > 1  # multiple labels per box (adds 0.5ms/img)
+     merge = False  # use merge-NMS
+
+     t = time.time()
+     output = [torch.zeros((0, 6), device=prediction.device)] * prediction.shape[0]
+     for xi, x in enumerate(prediction):  # image index, image inference
+         # Apply constraints
+         # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0  # width-height
+         x = x[xc[xi]]  # confidence
+
+         # Cat apriori labels if autolabelling
+         if labels and len(labels[xi]):
+             l = labels[xi]
+             v = torch.zeros((len(l), nc + 5), device=x.device)
+             v[:, :4] = l[:, 1:5]  # box
+             v[:, 4] = 1.0  # conf
+             v[range(len(l)), l[:, 0].long() + 5] = 1.0  # cls
+             x = torch.cat((x, v), 0)
+
+         # If none remain, process next image
+         if not x.shape[0]:
+             continue
+
+         # Compute conf
+         if nc == 1:
+             x[:, 5:] = x[:, 4:5]  # for models with one class, cls_loss is 0 and cls_conf is always 0.5,
+             # so there is no need to multiply.
+         else:
+             x[:, 5:] *= x[:, 4:5]  # conf = obj_conf * cls_conf
+
+         # Box (center x, center y, width, height) to (x1, y1, x2, y2)
+         box = xywh2xyxy(x[:, :4])
+
+         # Detections matrix nx6 (xyxy, conf, cls)
+         if multi_label:
+             i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T
+             x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1)
+         else:  # best class only
+             conf, j = x[:, 5:].max(1, keepdim=True)
+             x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres]
+
+         # Filter by class
+         if classes is not None:
+             x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]
+
+         # Apply finite constraint
+         # if not torch.isfinite(x).all():
+         #     x = x[torch.isfinite(x).all(1)]
+
+         # Check shape
+         n = x.shape[0]  # number of boxes
+         if not n:  # no boxes
+             continue
+         elif n > max_nms:  # excess boxes
+             x = x[x[:, 4].argsort(descending=True)[:max_nms]]  # sort by confidence
+
+         # Batched NMS
+         c = x[:, 5:6] * (0 if agnostic else max_wh)  # classes
+         boxes, scores = x[:, :4] + c, x[:, 4]  # boxes (offset by class), scores
+         i = torchvision.ops.nms(boxes, scores, iou_thres)  # NMS
+         if i.shape[0] > max_det:  # limit detections
+             i = i[:max_det]
+         if merge and (1 < n < 3E3):  # Merge NMS (boxes merged using weighted mean)
+             # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)
+             iou = box_iou(boxes[i], boxes) > iou_thres  # iou matrix
+             weights = iou * scores[None]  # box weights
+             x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True)  # merged boxes
+             if redundant:
+                 i = i[iou.sum(1) > 1]  # require redundancy
+
+         output[xi] = x[i]
+         if (time.time() - t) > time_limit:
+             print(f'WARNING: NMS time limit {time_limit}s exceeded')
+             break  # time limit exceeded
+
+     return output
+
+
+ def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, stride=32):
+     # Resize and pad image while meeting stride-multiple constraints
+     shape = img.shape[:2]  # current shape [height, width]
+     if isinstance(new_shape, int):
+         new_shape = (new_shape, new_shape)
+
+     # Scale ratio (new / old)
+     r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
+     if not scaleup:  # only scale down, do not scale up (for better test mAP)
+         r = min(r, 1.0)
+
+     # Compute padding
+     ratio = r, r  # width, height ratios
+     new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
+     dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]  # wh padding
+     if auto:  # minimum rectangle
+         dw, dh = np.mod(dw, stride), np.mod(dh, stride)  # wh padding
+     elif scaleFill:  # stretch
+         dw, dh = 0.0, 0.0
+         new_unpad = (new_shape[1], new_shape[0])
+         ratio = new_shape[1] / shape[1], new_shape[0] / shape[0]  # width, height ratios
+
+     dw /= 2  # divide padding into 2 sides
+     dh /= 2
+
+     if shape[::-1] != new_unpad:  # resize
+         img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
+     top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
+     left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
+     img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)  # add border
+     return img, ratio, (dw, dh)
+
+
+ def plot_one_box(x, img, color=None, label=None, line_thickness=3):
+     # Plots one bounding box on image img
+     tl = line_thickness or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1  # line/font thickness (img is HxWxC)
+     color = color or [random.randint(0, 255) for _ in range(3)]
+     c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3]))
+     cv2.rectangle(img, c1, c2, color, tl, cv2.LINE_AA)
+
+     if label:
+         tf = max(tl - 1, 1)  # font thickness
+         t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
+         c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3
+         cv2.rectangle(img, c1, c2, color, -1, cv2.LINE_AA)  # filled
+         cv2.putText(img, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA)
+
+
+ print(ort.get_available_providers())
+ session = ort.InferenceSession("Weight/yolov7_ariza.onnx", providers=ort.get_available_providers())
+
+ input_name = session.get_inputs()[0].name
+ print("input name", input_name)
+ input_shape = session.get_inputs()[0].shape
+ print("input shape", input_shape)
+ input_type = session.get_inputs()[0].type
+ print("input type", input_type)
+ output_name = session.get_outputs()[0].name
+
+
+ def DetectFaults(im0, model_threshold=0.25, iou_thres=0.45):
+     # Preprocess
+     img = letterbox(im0, 640, stride=64, auto=True)[0]
+     img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, HWC to CHW (3x640x640)
+     img = np.ascontiguousarray(img)
+     image = img.astype(np.float16) / 255.0  # the exported model declares a float16 input (see input_type above)
+     image = image[np.newaxis, ...]  # add batch dimension
+
+     # Inference
+     results = session.run([output_name], {input_name: image})
+     res = torch.from_numpy(results[0])
+     pred = non_max_suppression(res, conf_thres=model_threshold, iou_thres=iou_thres, classes=None, agnostic=False, multi_label=False, labels=())
+
+     # Postprocess: rescale boxes to the original image, draw and collect them
+     boxes = []
+     classes = []
+     for i, det in enumerate(pred):
+         if len(det):
+             det[:, :4] = scale_coords(image.shape[2:], det[:, :4], im0.shape).round()
+             for *xyxy, conf, cls in reversed(det):
+                 _label = LABELS[int(cls)]
+                 plot_one_box(xyxy, im0, label=_label, color=COLOR_MAP_RGB[_label], line_thickness=2)
+                 classes.append(int(cls))
+                 boxes.append([int(xyxy[0]), int(xyxy[1]), int(xyxy[2]), int(xyxy[3])])
+
+     return im0, boxes, classes
+
+
+ if __name__ == "__main__":
+     im0 = cv2.imread("data/DJI_20240905125342_0004_Z.JPG")
+     img0, boxes, classes = DetectFaults(im0)  # DetectFaults returns three values
+     cv2.imwrite("result.png", img0)
+     # cv2.imshow("image", img0)
+     # cv2.waitKey(0)
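One hedged note on the preprocessing above: the input is cast to `np.float16` because the exported `yolov7_ariza.onnx` declares a float16 input (the `input type` print makes this visible at startup). If the export ever switches to float32, a sketch like the following would derive the feed dtype from the session instead of hard-coding it (standard `onnxruntime` API; the mapping dict is an assumption covering the two common cases):

```python
import numpy as np

# session.get_inputs()[0].type is a string such as "tensor(float16)" or "tensor(float)"
_ORT_TO_NP = {"tensor(float16)": np.float16, "tensor(float)": np.float32}
feed_dtype = _ORT_TO_NP.get(session.get_inputs()[0].type, np.float32)

# inside DetectFaults, instead of img.astype(np.float16):
# image = img.astype(feed_dtype) / 255.0
```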
Lib/__init__.py ADDED
File without changes
Lib/__pycache__/Const.cpython-312.pyc ADDED
Binary file (670 Bytes).
 
Lib/__pycache__/DetectFaultOnnx.cpython-312.pyc ADDED
Binary file (14.4 kB).
 
Lib/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (211 Bytes).
 
UI/Main.py ADDED
@@ -0,0 +1,85 @@
+ import os
+ import sys
+ sys.path.append(os.getcwd())
+
+ from Lib.Const import COLOR_MAP, LABELS
+ from Lib.DetectFaultOnnx import DetectFaults
+
+ import cv2
+ import gradio as gr
+
+ demoImages = [
+     "data/DJI_20240905095004_0007_W.JPG",
+     "data/DJI_20240905091530_0003_W.JPG",
+     "data/DJI_20240905094647_0003_W.JPG",
+     "data/DJI_20240905094647_0003_Z.JPG",
+     "data/DJI_20240905101846_0005_W.JPG",
+     "data/16_3450.png",
+     "data/16_3735.png",
+     "data/16_3900.png",
+     "data/19_00350.png",
+     "data/25_00272.png",
+     "data/67_02661.png"
+ ]
+
+
+ def ShowInfo():  # renamed from Warning to avoid shadowing the builtin
+     gr.Info("DGH ARGE YAZILIM DANIŞMANLIK ENERJİ İNŞAAT SAN.TİC.LTD.ŞTİ", duration=0.5)
+
+
+ with gr.Blocks(css="footer{display:none !important}") as block:
+     gr.Markdown("## Yüksek Gerilim Hattı İzolatörlerinin Arıza Tespiti - Demo")
+     gr.Markdown("**Ark İzi, Kırık ve Eksik İzolatör Hatalarını Tespit Eder**")
+     with gr.Row():
+         with gr.Column():
+             inputImage = gr.Image(label="Fotoğraf")
+
+         with gr.Column():
+             thresholdSlider = gr.Slider(0, 1, value=0.25, label="Model Eşik Değeri", info="0 ve 1 arası seçiniz.")
+             iouThresholdSlider = gr.Slider(0, 1, value=0.45, label="IOU (Intersection Over Union) Eşik Değeri", info="0 ve 1 arası seçiniz.")
+             with gr.Accordion("Demo Görsellerden Seçebilirsiniz", open=False):
+                 imageGallery = gr.Examples(
+                     examples=[os.path.join("data", img_name) for img_name in sorted(os.listdir("data"))],
+                     inputs=[inputImage],
+                     label="Örnekler",
+                     cache_examples=False,
+                     examples_per_page=7
+                 )
+             processButton = gr.Button("Tespit Et")
+
+     results = gr.Textbox(label="Log")
+     gr.HTML("<hr>")
+     processedImageGallery = gr.Gallery(
+         label="Sonuçlar",
+         rows=1,
+         columns=2,
+         object_fit="contain",
+         height="auto"
+     )
+
+     annotatedImage = gr.AnnotatedImage(color_map=COLOR_MAP)
+
+     @processButton.click(outputs=[processedImageGallery, annotatedImage, results], inputs=[inputImage, thresholdSlider, iouThresholdSlider])
+     def Process(image, model_threshold, iou_threshold):
+         if image is None:
+             # gr.Warning is shown by calling it, not by raising it
+             gr.Warning("Lütfen görüntü yükleyiniz veya hazır seçiniz!", duration=3)
+             return None, None, "Görüntü yüklenmedi."
+
+         # gr.Image delivers an RGB array; the cv2-based pipeline assumes BGR
+         bgr = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
+         img0, boxes, labels = DetectFaults(bgr, model_threshold, iou_threshold)
+         img0 = cv2.cvtColor(img0, cv2.COLOR_BGR2RGB)  # back to RGB for the gallery
+
+         if len(boxes) == 0:
+             raise gr.Error("Bir Hata ile Karşılaşıldı: Görüntüde Tespit Yapılamadı 💥!", duration=5)
+
+         sections = [(b, LABELS[c]) for b, c in zip(boxes, labels)]
+
+         return [img0], (image, sections), "Görüntü İşlendi!"
+
+     block.load(ShowInfo)
+
+
+ block.queue(max_size=10)
+ block.launch(server_name="0.0.0.0", server_port=1071)
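`gr.AnnotatedImage` takes a `(base_image, sections)` tuple where each section is `(bounding_box, label)` with the box given as `(x1, y1, x2, y2)` pixel coordinates, which is exactly what `Process` assembles from the boxes and class ids returned by `DetectFaults`. A minimal self-contained sketch of that wiring (dummy data; names are illustrative):

```python
import numpy as np
import gradio as gr

base = np.zeros((200, 200, 3), dtype=np.uint8)   # dummy RGB base image
sections = [((10, 10, 80, 80), "KIRIK")]         # ((x1, y1, x2, y2), label)

with gr.Blocks() as demo:
    gr.AnnotatedImage(value=(base, sections), color_map={"KIRIK": "#0000ff"})

# demo.launch()
```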
UI/__init__.py ADDED
File without changes
dockerfile ADDED
@@ -0,0 +1,17 @@
+ FROM pytorch/pytorch:2.4.1-cuda12.4-cudnn9-runtime
+
+ ARG DEBIAN_FRONTEND=noninteractive
+ ENV PYTHONUNBUFFERED=1
+ RUN useradd -m -u 1000 user
+ RUN apt-get update && apt-get install ffmpeg libsm6 libxext6 --no-install-recommends -y \
+     && apt-get clean \
+     && rm -rf /var/lib/apt/lists/*
+
+ RUN pip install --no-cache-dir gradio opencv-python pandas ultralytics onnx onnxruntime
+
+ USER user
+ WORKDIR /app
+ COPY --chown=user ./ /app
+
+ EXPOSE 1071
+ CMD ["python", "/app/UI/Main.py"]
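Assuming the ONNX weights and the `data/` samples are present in the build context, the usual commands would be `docker build -t insulator-demo .` followed by `docker run -p 1071:1071 insulator-demo` (the image tag is illustrative); the Gradio UI is then served on port 1071, matching the `EXPOSE` directive and the `block.launch` call in UI/Main.py.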