import cv2
import ultralytics
import gradio as gr
import torch

# Load YOLOv8 model
model = ultralytics.YOLO('yolov8n.pt')

# Set the stream URL (optional, can be commented out if using only camera)
stream_url = "https://edge01.london.nginx.hdontap.com/hosb5/ng_showcase-coke_bottle-street_fixed.stream/chunklist_w464099566.m3u8"

# Low resolution used for inference (width, height)
LOW_RES = (320, 180)


def detect_and_draw(frame):
    # Resize frame to low resolution for faster inference
    low_res_frame = cv2.resize(frame, LOW_RES)

    # Perform YOLOv8 inference on the downscaled frame
    results = model(low_res_frame)

    # Scale factors to map low-res box coordinates back to the full-res frame
    scale_x = frame.shape[1] / LOW_RES[0]
    scale_y = frame.shape[0] / LOW_RES[1]

    # Draw scaled bounding boxes and labels on the high-res frame
    for detection in results[0].boxes.data:
        x1, y1, x2, y2, conf, cls = detection
        x1, y1, x2, y2 = int(x1 * scale_x), int(y1 * scale_y), int(x2 * scale_x), int(y2 * scale_y)
        label = f"{results[0].names[int(cls)]} {conf:.2f}"
        cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
        cv2.putText(frame, label, (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 255, 0), 2)

    return frame


def process_stream(stream_source):
    if stream_source == "camera":
        cap = cv2.VideoCapture(0)  # Use 0 for the default camera
    else:
        cap = cv2.VideoCapture(stream_url)

    frame_count = 0
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        frame_count += 1
        if frame_count % 30 == 0:  # Process every 30th frame
            result = detect_and_draw(frame)
            # OpenCV frames are BGR; Gradio expects RGB
            result_rgb = cv2.cvtColor(result, cv2.COLOR_BGR2RGB)
            yield result_rgb
    cap.release()


# Gradio interface for the live video stream; process_stream is a generator,
# so frames are streamed to the output image as they are produced
iface = gr.Interface(
    fn=process_stream,
    inputs=gr.Dropdown(["camera", "stream"], label="Video Source"),
    outputs="image",
    live=True,
    title="YOLOv8 Real-Time Object Detection",
    description="Live stream processed with YOLOv8 for real-time object detection.",
)

if __name__ == "__main__":
    if torch.cuda.is_available():
        model.to('cuda')
    iface.queue()  # Queueing is required for generator (streaming) outputs
    iface.launch()
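
# --- Optional single-image sanity check (not part of the original script). ---
# A minimal sketch for verifying detect_and_draw before serving the stream;
# 'sample.jpg' is a hypothetical placeholder path, so point it at a real image.
# Run these lines in a separate session so they don't block the Gradio app:
#
#   img = cv2.imread("sample.jpg")  # hypothetical test image
#   if img is not None:
#       cv2.imwrite("annotated.jpg", detect_and_draw(img))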