subhanliaqat committed (verified)
Commit ba6bc21 · Parent(s): aa2921d

Create app.py

Files changed (1):
  app.py  +94 -0
app.py ADDED
@@ -0,0 +1,94 @@
+ import cv2
+ import dlib
+ import pyttsx3
+ from scipy.spatial import distance
+ import streamlit as st
+ 
+ # Initialize pyttsx3 so an audio alert can be spoken when drowsiness is detected
+ engine = pyttsx3.init()
+ 
+ # dlib frontal-face detector used to locate faces in each frame
+ face_detector = dlib.get_frontal_face_detector()
+ 
+ # Path to the 68-point facial-landmark model (.dat file) used to locate the eyes
+ dlib_facelandmark = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
+ 
+ 
+ # Eye aspect ratio (EAR): ratio of the two vertical eye distances to the
+ # horizontal eye distance; it drops toward zero as the eye closes
+ def Detect_Eye(eye):
+     poi_A = distance.euclidean(eye[1], eye[5])
+     poi_B = distance.euclidean(eye[2], eye[4])
+     poi_C = distance.euclidean(eye[0], eye[3])
+     aspect_ratio_Eye = (poi_A + poi_B) / (2 * poi_C)
+     return aspect_ratio_Eye
+ 
+ 
+ # Process a single BGR frame: detect faces, draw the eye contours,
+ # compute the average EAR, and overlay an alert if it is too low
+ def process_frame(frame):
+     gray_scale = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
+     faces = face_detector(gray_scale)
+ 
+     for face in faces:
+         face_landmarks = dlib_facelandmark(gray_scale, face)
+         leftEye = []
+         rightEye = []
+ 
+         # Left eye: landmarks 42-47 in the 68-point model
+         for n in range(42, 48):
+             x = face_landmarks.part(n).x
+             y = face_landmarks.part(n).y
+             leftEye.append((x, y))
+             next_point = n + 1 if n < 47 else 42
+             x2 = face_landmarks.part(next_point).x
+             y2 = face_landmarks.part(next_point).y
+             cv2.line(frame, (x, y), (x2, y2), (0, 255, 0), 1)
+ 
+         # Right eye: landmarks 36-41 in the 68-point model
+         for n in range(36, 42):
+             x = face_landmarks.part(n).x
+             y = face_landmarks.part(n).y
+             rightEye.append((x, y))
+             next_point = n + 1 if n < 41 else 36
+             x2 = face_landmarks.part(next_point).x
+             y2 = face_landmarks.part(next_point).y
+             cv2.line(frame, (x, y), (x2, y2), (255, 255, 0), 1)
+ 
+         # Average the aspect ratio of both eyes
+         right_Eye = Detect_Eye(rightEye)
+         left_Eye = Detect_Eye(leftEye)
+         Eye_Rat = (left_Eye + right_Eye) / 2
+ 
+         # Drowsiness alert when the average EAR falls below the threshold
+         if round(Eye_Rat, 2) < 0.25:
+             cv2.putText(frame, "DROWSINESS DETECTED", (50, 100),
+                         cv2.FONT_HERSHEY_PLAIN, 2, (21, 56, 210), 3)
+             cv2.putText(frame, "Alert!!!! WAKE UP DUDE", (50, 450),
+                         cv2.FONT_HERSHEY_PLAIN, 2, (21, 56, 212), 3)
+             # runAndWait() blocks frame processing until the spoken alert finishes
+             engine.say("Alert!!!! WAKE UP DUDE")
+             engine.runAndWait()
+ 
+     return frame
+ 
+ 
+ # Streamlit app
+ st.title("Drowsiness Detection App")
+ run = st.checkbox("Run Drowsiness Detection")
+ 
+ # Placeholder so each new frame replaces the previous one instead of stacking
+ frame_placeholder = st.empty()
+ 
+ # Open the webcam
+ cap = cv2.VideoCapture(0)
+ 
+ while run:
+     ret, frame = cap.read()
+     if not ret:
+         st.write("Failed to grab frame")
+         break
+ 
+     processed_frame = process_frame(frame)
+ 
+     # Convert BGR to RGB for display in Streamlit
+     processed_frame = cv2.cvtColor(processed_frame, cv2.COLOR_BGR2RGB)
+     frame_placeholder.image(processed_frame, channels="RGB", use_column_width=True)
+ 
+ cap.release()
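
For context on the 0.25 cutoff in process_frame: Detect_Eye computes the standard eye aspect ratio (EAR), (|p2-p6| + |p3-p5|) / (2 * |p1-p4|), which stays roughly constant while the eye is open and collapses toward zero as it closes. A minimal sketch with made-up landmark coordinates (the points below are illustrative, not output from the dlib model) shows the behaviour on either side of the threshold:

from scipy.spatial import distance

def detect_eye(eye):
    # Same EAR formula as Detect_Eye in app.py
    a = distance.euclidean(eye[1], eye[5])
    b = distance.euclidean(eye[2], eye[4])
    c = distance.euclidean(eye[0], eye[3])
    return (a + b) / (2 * c)

# Hypothetical (x, y) landmarks for one eye, ordered p1..p6 as in the 68-point model
open_eye   = [(0, 0), (10, -6), (20, -6), (30, 0), (20, 6), (10, 6)]
closed_eye = [(0, 0), (10, -1), (20, -1), (30, 0), (20, 1), (10, 1)]

print(round(detect_eye(open_eye), 2))    # 0.4  -> above 0.25, no alert
print(round(detect_eye(closed_eye), 2))  # 0.07 -> below 0.25, alert fires

Locally the script would typically be launched with "streamlit run app.py"; note that cv2.VideoCapture(0) reads from a webcam attached to the machine running the script, so it will not see a browser camera when deployed to a hosted Space.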