StarAtNyte1 committed
Commit 148e06b · 1 Parent(s): 0ae63fa

Update app.py

Files changed (1)
  1. app.py +102 -16
app.py CHANGED
@@ -1,10 +1,21 @@
 import gradio as gr
-import torch
+#import torch
 import yolov7
+import subprocess
+import tempfile
+import time
+from pathlib import Path
+import uuid
+import cv2
+import gradio as gr
 
 
 
-def yolov7_inference(
+# Images
+#torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/raw/master/data/images/zidane.jpg', 'zidane.jpg')
+#torch.hub.download_url_to_file('https://raw.githubusercontent.com/obss/sahi/main/tests/data/small-vehicles1.jpeg', 'small-vehicles1.jpeg')
+
+def image_fn(
     image: gr.inputs.Image = None,
     model_path: gr.inputs.Dropdown = None,
     image_size: gr.inputs.Slider = 640,
@@ -28,30 +39,105 @@ def yolov7_inference(
     model.iou = iou_threshold
     results = model([image], size=image_size)
     return results.render()[0]
+
+
 
+def video_fn(model_path, video_file, conf_thres, iou_thres, start_sec, duration):
+    model = yolov7.load(model_path, device="cpu", hf_model=True, trace=False)
+    start_timestamp = time.strftime("%H:%M:%S", time.gmtime(start_sec))
+    end_timestamp = time.strftime("%H:%M:%S", time.gmtime(start_sec + duration))
+
+    suffix = Path(video_file).suffix
+
+    clip_temp_file = tempfile.NamedTemporaryFile(suffix=suffix)
+    subprocess.call(
+        f"ffmpeg -y -ss {start_timestamp} -i {video_file} -to {end_timestamp} -c copy {clip_temp_file.name}".split()
+    )
+
+    # Reader of clip file
+    cap = cv2.VideoCapture(clip_temp_file.name)
+
+    # This is an intermediary temp file where we'll write the video to
+    # Unfortunately, gradio doesn't play too nice with videos rn so we have to do some hackiness
+    # with ffmpeg at the end of the function here.
+    with tempfile.NamedTemporaryFile(suffix=".mp4") as temp_file:
+        out = cv2.VideoWriter(temp_file.name, cv2.VideoWriter_fourcc(*"MP4V"), 30, (1280, 720))
 
-inputs = [
+        num_frames = 0
+        max_frames = duration * 30
+        while cap.isOpened():
+            try:
+                ret, frame = cap.read()
+                if not ret:
+                    break
+            except Exception as e:
+                print(e)
+                continue
+            print("FRAME DTYPE", type(frame))
+            out.write(model([frame], conf_thres, iou_thres))
+            num_frames += 1
+            print("Processed {} frames".format(num_frames))
+            if num_frames == max_frames:
+                break
+
+        out.release()
+
+        # Aforementioned hackiness
+        out_file = tempfile.NamedTemporaryFile(suffix="out.mp4", delete=False)
+        subprocess.run(f"ffmpeg -y -loglevel quiet -stats -i {temp_file.name} -c:v libx264 {out_file.name}".split())
+
+    return out_file.name
+
+image_interface = gr.Interface(
+    fn=image_fn,
+    inputs=[
     gr.inputs.Image(type="pil", label="Input Image"),
     gr.inputs.Dropdown(
         choices=[
             "StarAtNyte1/yolov7_custom",
+            #"kadirnar/yolov7-v0.1",
+        ],
+        default="StarAtNyte1/yolov7_custom",
+        label="Model",
+    )
+    #gr.inputs.Slider(minimum=320, maximum=1280, default=640, step=32, label="Image Size")
+    #gr.inputs.Slider(minimum=0.0, maximum=1.0, default=0.25, step=0.05, label="Confidence Threshold"),
+    #gr.inputs.Slider(minimum=0.0, maximum=1.0, default=0.45, step=0.05, label="IOU Threshold")
+    ],
+    outputs=gr.outputs.Image(type="filepath", label="Output Image"),
+    title="Smart Environmental Eye (SEE)",
+    examples=[['image1.jpg', 'alshimaa/SEE_model_yolo7', 640, 0.25, 0.45], ['image2.jpg', 'alshimaa/SEE_model_yolo7', 640, 0.25, 0.45], ['image3.jpg', 'alshimaa/SEE_model_yolo7', 640, 0.25, 0.45]],
+    cache_examples=True,
+    theme='huggingface',
+)
+
+
+video_interface = gr.Interface(
+    fn=video_fn,
+    inputs=[
+        gr.inputs.Video(source = "upload", type = "mp4", label = "Input Video"),
+        gr.inputs.Dropdown(
+            choices=[
+                "StarAtNyte1/yolov7_custom",
+                #"kadirnar/yolov7-v0.1",
         ],
         default="StarAtNyte1/yolov7_custom",
         label="Model",
     ),
-    gr.inputs.Slider(minimum=320, maximum=1280, default=640, step=32, label="Image Size"),
-    gr.inputs.Slider(minimum=0.0, maximum=1.0, default=0.25, step=0.05, label="Confidence Threshold"),
-    gr.inputs.Slider(minimum=0.0, maximum=1.0, default=0.45, step=0.05, label="IOU Threshold"),
-]
-
-outputs = gr.outputs.Image(type="filepath", label="Output Image")
-#title = "Yolov7: Trainable bag-of-freebies sets new state-of-the-art for real-time object detectors"
-
-demo_app = gr.Interface(
-    fn=yolov7_inference,
-    inputs=inputs,
-    outputs=outputs,
+    ],
+    outputs=gr.outputs.Video(type = "mp4", label = "Output Video"),
+    # examples=[
+    #     ["video.mp4", 0.25, 0.45, 0, 2],
+
+    # ],
+    title="Smart Environmental Eye (SEE)",
     cache_examples=True,
     theme='huggingface',
+
 )
-demo_app.launch(debug=True, enable_queue=True)
+
+if __name__ == "__main__":
+    gr.TabbedInterface(
+        [image_interface, video_interface],
+        ["Run on Images", "Run on Videos"],
+    ).launch()
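
A note on the new video path: as committed, the frame loop calls out.write(model([frame], conf_thres, iou_thres)), which hands the yolov7 Detections object straight to cv2.VideoWriter.write; the writer expects a rendered numpy frame, and the model call elsewhere in this file sets the thresholds as attributes rather than positional arguments. A minimal sketch of the loop following the pattern image_fn already uses (model.conf / model.iou plus results.render()), with the same variable names as video_fn:

# Sketch only: assumes model, cap and out are set up as in video_fn above,
# and the same conf/iou/render() API that image_fn uses in this file.
model.conf = conf_thres  # confidence threshold, set as an attribute
model.iou = iou_thres    # IoU threshold, set as an attribute
while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break
    results = model([frame])        # run inference on a single frame
    out.write(results.render()[0])  # write the annotated frame, not the Detections object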
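The clip extraction may also cut longer than intended: when -ss appears before -i, ffmpeg seeks first and resets input timestamps to zero, so -to {end_timestamp} is measured from the seek point rather than from the start of the file. A sketch using an output duration instead, reusing the variables video_fn already defines:

# Sketch only: seek to start_timestamp, then keep exactly `duration` seconds.
# -t takes a length, so it is unaffected by the timestamp reset that
# input-side -ss performs.
subprocess.call(
    f"ffmpeg -y -ss {start_timestamp} -i {video_file} -t {duration} -c copy {clip_temp_file.name}".split()
)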
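The writer likewise hard-codes 30 fps and a 1280x720 frame size; if the uploaded clip differs, cv2.VideoWriter silently produces an unplayable file. One way to match the clip's actual properties with standard OpenCV calls, again sketched against the variables in video_fn:

# Sketch only: derive fps and frame size from the opened capture instead of
# hard-coding 30 and (1280, 720).
fps = cap.get(cv2.CAP_PROP_FPS) or 30  # fall back to 30 if the container reports 0
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
out = cv2.VideoWriter(temp_file.name, cv2.VideoWriter_fourcc(*"mp4v"), fps, (width, height))
max_frames = int(duration * fps)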
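Finally, the interfaces and their functions disagree about arguments. video_fn(model_path, video_file, conf_thres, iou_thres, start_sec, duration) takes six parameters, but video_interface supplies only two components, and in the opposite order (the video comes first); image_interface keeps five values per examples row while its slider inputs are commented out, which will break example caching under cache_examples=True. A sketch of a video_interface input list that lines up with the signature, using the same gr.inputs API as the rest of the file (the slider ranges below are assumptions, not values from the commit):

inputs=[
    # One component per video_fn parameter, in signature order.
    gr.inputs.Dropdown(
        choices=["StarAtNyte1/yolov7_custom"],
        default="StarAtNyte1/yolov7_custom",
        label="Model",
    ),
    gr.inputs.Video(source="upload", type="mp4", label="Input Video"),
    gr.inputs.Slider(minimum=0.0, maximum=1.0, default=0.25, step=0.05, label="Confidence Threshold"),
    gr.inputs.Slider(minimum=0.0, maximum=1.0, default=0.45, step=0.05, label="IOU Threshold"),
    gr.inputs.Slider(minimum=0, maximum=60, default=0, step=1, label="Start Second"),       # assumed range
    gr.inputs.Slider(minimum=1, maximum=10, default=2, step=1, label="Duration (seconds)"), # assumed range
],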