Commit

Add files via upload
Shayan925 authored Apr 16, 2022
1 parent 26a5916 commit 0ea6357
Showing 4 changed files with 156 additions and 0 deletions.
106 changes: 106 additions & 0 deletions eye_tracker.py
@@ -0,0 +1,106 @@
import cv2
import mediapipe as mp
import numpy as np
import time
from datetime import datetime
from util import calc_center, eye_direction, warn

# Indices for the left and right eye
LEFT_EYE = [362, 382, 381, 380, 374, 373, 390,
            249, 263, 466, 388, 387, 386, 385, 384, 398]
RIGHT_EYE = [33, 7, 163, 144, 145, 153, 154,
             155, 133, 173, 157, 158, 159, 160, 161, 246]

# Indices for the left and right iris
LEFT_IRIS = [474, 475, 476, 477]
RIGHT_IRIS = [469, 470, 471, 472]

mpFace = mp.solutions.face_mesh

# Capture video input (device index 1; use 0 for the default webcam)
cap = cv2.VideoCapture(1)

face_mesh = mpFace.FaceMesh(max_num_faces=1,
                            refine_landmarks=True,
                            min_detection_confidence=0.9,
                            min_tracking_confidence=0.7)

# Time of the last frame with centred gaze, and whether a warning is currently active
t1 = time.time()
warned = False

while True:
    success, img = cap.read()

    if not success:
        break

    # Process the frame
    img = cv2.flip(img, 1)
    imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    h, w, c = img.shape
    results = face_mesh.process(imgRGB)

    # Find landmarks
    if results.multi_face_landmarks:
        # Convert the normalised landmarks to pixel coordinates
        points = np.array([np.multiply([p.x, p.y], [w, h]).astype(int)
                           for p in results.multi_face_landmarks[0].landmark])

        # Calculate the center point of both irises and eyes
        left_iris_center = calc_center(LEFT_IRIS, points, img, True)
        right_iris_center = calc_center(RIGHT_IRIS, points, img, True)

        left_eye_center = calc_center(LEFT_EYE, points, img)
        right_eye_center = calc_center(RIGHT_EYE, points, img)

        # Determine the direction each eye is looking and display the text
        eye1 = eye_direction(left_iris_center, left_eye_center)
        eye2 = eye_direction(right_iris_center, right_eye_center)

        if eye1 == eye2:
            direction = eye1
        else:
            direction = eye1 if eye1 != "CENTER" else eye2

        cv2.putText(img, "DIRECTION: " + direction, (10, 450),
                    cv2.FONT_HERSHEY_PLAIN, 3, (255, 255, 255), 2)

        # Warn the user if their eyes stay away from the center for more than 2 seconds
        if direction == "CENTER":
            t1 = time.time()

            if warned:
                warned = False

                # Record when the user regained focus in log.txt,
                # closing the entry opened by warn()
                now = datetime.now()
                cur_time = now.strftime("%H:%M:%S")

                with open("log.txt", "a") as f:
                    f.write(cur_time + "\n")

        elif direction != "CENTER" and not warned:
            t2 = time.time()
            if t2 - t1 > 2:
                warn(img)
                warned = True

    # Warn the user if the face is off the screen for longer than 2 seconds
    else:
        if not warned:
            t2 = time.time()
            if t2 - t1 > 2:
                warn(img)
                warned = True

    cv2.imshow("ADHD Aid", img)
    cv2.waitKey(1)

    # Close the program when the window's "X" icon is clicked
    if cv2.getWindowProperty("ADHD Aid", cv2.WND_PROP_VISIBLE) < 1:
        break

cap.release()
cv2.destroyAllWindows()
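The loop above trusts the two eyes when they agree and otherwise takes whichever eye is not reporting CENTER. A small standalone check of that rule, using made-up centre coordinates purely for illustration:

# Illustrative check of eye_direction and the combining rule in the main loop.
# The centre coordinates are arbitrary example values, not real landmark output.
from util import eye_direction

left_iris, left_eye = (100, 50), (110, 50)    # iris 10 px left of the eye centre -> "LEFT"
right_iris, right_eye = (203, 50), (200, 50)  # within the 5 px dead zone -> "CENTER"

eye1 = eye_direction(left_iris, left_eye)
eye2 = eye_direction(right_iris, right_eye)
direction = eye1 if eye1 == eye2 else (eye1 if eye1 != "CENTER" else eye2)
print(direction)  # "LEFT" -- the eye that is not "CENTER" decides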
1 change: 1 addition & 0 deletions log.txt
@@ -0,0 +1 @@

49 changes: 49 additions & 0 deletions util.py
@@ -0,0 +1,49 @@
import cv2
import numpy as np
import winsound  # Windows-only audio playback
from datetime import datetime


# Determine the center point of the given set of landmarks
def calc_center(indexes, points, img, draw=False):
    (cx, cy), radius = cv2.minEnclosingCircle(points[indexes])

    center = np.array([cx, cy], dtype=np.int32)

    if draw:
        cv2.circle(img, center, int(radius),
                   (0, 0, 255), 1, cv2.LINE_AA)

    return center


# Determine the horizontal direction of the eye
def eye_direction(iris_center, eye_center):
    if abs(iris_center[0] - eye_center[0]) < 5:
        return "CENTER"
    elif iris_center[0] - eye_center[0] < 0:
        return "LEFT"
    elif iris_center[0] - eye_center[0] > 0:
        return "RIGHT"


# Play the warning audio file
def play_sound():
    audio_file = "warning.wav"
    winsound.PlaySound(audio_file, winsound.SND_FILENAME)


# Warn the user and the supervisor that their attention is elsewhere
# and that they should pay attention to the screen
def warn(img):
    h, w, c = img.shape
    cv2.rectangle(img, (0, 0), (w, h), (0, 0, 255), -1)
    cv2.putText(img, "You have lost focus", (150, 250),
                cv2.FONT_HERSHEY_PLAIN, 2, (255, 255, 255), 4)
    cv2.imshow("ADHD Aid", img)
    cv2.waitKey(1)

    play_sound()

    # Record when the user first loses focus in log.txt;
    # the main loop appends the time focus is regained
    now = datetime.now()
    cur_time = now.strftime("%H:%M:%S")

    with open("log.txt", "a") as f:
        f.write("User has lost focus from " + cur_time + " to ")
Binary file added warning.wav
Binary file not shown.
