# agegenderemotion_webcam.py
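#
# Webcam demo for the libfaceid library: detects faces in each frame, overlays
# facial landmarks, and labels each face with its estimated age, gender, and emotion.
# Press ESC to quit; press Enter to save a timestamped snapshot of the current frame.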
import sys
import argparse
import cv2
import datetime
from libfaceid.detector import FaceDetectorModels, FaceDetector
from libfaceid.encoder import FaceEncoderModels, FaceEncoder
from libfaceid.pose import FacePoseEstimatorModels, FacePoseEstimator
from libfaceid.age import FaceAgeEstimatorModels, FaceAgeEstimator
from libfaceid.gender import FaceGenderEstimatorModels, FaceGenderEstimator
from libfaceid.emotion import FaceEmotionEstimatorModels, FaceEmotionEstimator

# Set the window name
WINDOW_NAME = "Facial_Recognition"

# Set the input directories
INPUT_DIR_DATASET = "datasets"
INPUT_DIR_MODEL_DETECTION = "models/detection/"
INPUT_DIR_MODEL_ENCODING = "models/encoding/"
INPUT_DIR_MODEL_TRAINING = "models/training/"
INPUT_DIR_MODEL_ESTIMATION = "models/estimation/"

# Set the camera resolutions as (width, height)
RESOLUTION_QVGA = (320, 240)
RESOLUTION_VGA = (640, 480)
RESOLUTION_HD = (1280, 720)
RESOLUTION_FULLHD = (1920, 1080)
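
# Note: the capture properties requested in cam_init() below (FPS, frame width,
# frame height) are treated as hints by OpenCV; some cameras and drivers ignore
# them, so the actual capture resolution may differ from the one requested.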

def cam_init(cam_index, width, height):
    cap = cv2.VideoCapture(cam_index)
    if sys.version_info < (3, 0):
        cap.set(cv2.cv.CV_CAP_PROP_FPS, 30)
        cap.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, width)
        cap.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, height)
    else:
        cap.set(cv2.CAP_PROP_FPS, 30)
        cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)
        cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
    return cap

def label_face(frame, face_rect, face_id, confidence):
    (x, y, w, h) = face_rect
    cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 255, 255), 1)
    if face_id is not None:
        cv2.putText(frame, "{} {:.2f}%".format(face_id, confidence),
            (x+5, y+h-5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1, cv2.LINE_AA)

def process_facedetection(model_detector, model_poseestimator, model_ageestimator, model_genderestimator, model_emotionestimator, cam_resolution, cam_index):

    # Initialize the camera
    camera = cam_init(cam_index, cam_resolution[0], cam_resolution[1])

    try:
        # Initialize face detection
        face_detector = FaceDetector(model=model_detector, path=INPUT_DIR_MODEL_DETECTION, minfacesize=120)
        # Initialize face pose/age/gender/emotion estimation
        face_pose_estimator = FacePoseEstimator(model=model_poseestimator, path=INPUT_DIR_MODEL_ESTIMATION)
        face_age_estimator = FaceAgeEstimator(model=model_ageestimator, path=INPUT_DIR_MODEL_ESTIMATION)
        face_gender_estimator = FaceGenderEstimator(model=model_genderestimator, path=INPUT_DIR_MODEL_ESTIMATION)
        face_emotion_estimator = FaceEmotionEstimator(model=model_emotionestimator, path=INPUT_DIR_MODEL_ESTIMATION)
    except:
        print("Warning, check if the detection and estimation models exist!")
        camera.release()
        return

    (age, gender, emotion) = (None, None, None)

    while True:
        # Capture a frame from the webcam
        ret, frame = camera.read()
        if frame is None:
            print("Error, check if the camera is connected!")
            break

        # Detect faces in the frame
        faces = face_detector.detect(frame)
        for (index, face) in enumerate(faces):
            (x, y, w, h) = face

            # Estimate age, gender and emotion from the cropped face
            face_image = frame[y:y+h, x:x+w]
            age = face_age_estimator.estimate(frame, face_image)
            gender = face_gender_estimator.estimate(frame, face_image)
            emotion = face_emotion_estimator.estimate(frame, face_image)

            # Detect and draw the face pose landmarks
            shape = face_pose_estimator.detect(frame, face)
            face_pose_estimator.add_overlay(frame, shape)

            # Display age, gender, emotion
            if True:  # Added condition to easily disable text
                cv2.putText(frame, "Age: {}".format(age),
                    (x, y-45), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1, cv2.LINE_AA)
                cv2.putText(frame, "Gender: {}".format(gender),
                    (x, y-30), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1, cv2.LINE_AA)
                cv2.putText(frame, "Emotion: {}".format(emotion),
                    (x, y-15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1, cv2.LINE_AA)

        # Display the resulting frame
        cv2.imshow(WINDOW_NAME, frame)

        # Check for user actions: ESC to quit, Enter to save a snapshot
        keyPressed = cv2.waitKey(1) & 0xFF
        if keyPressed == 27:  # ESC
            break
        elif keyPressed == 13:  # Enter
            cv2.imwrite(WINDOW_NAME + "_" + datetime.datetime.now().strftime("%Y%m%d_%H%M%S") + ".jpg", frame)

    # Release the camera
    camera.release()
    cv2.destroyAllWindows()

def run(cam_index, cam_resolution):
    # detector=FaceDetectorModels.HAARCASCADE
    # detector=FaceDetectorModels.DLIBHOG
    # detector=FaceDetectorModels.DLIBCNN
    # detector=FaceDetectorModels.SSDRESNET
    detector = FaceDetectorModels.MTCNN
    # detector=FaceDetectorModels.FACENET

    # Note: encoder is selected here but not used by process_facedetection(),
    # since this demo performs no face recognition.
    encoder = FaceEncoderModels.LBPH
    # encoder=FaceEncoderModels.OPENFACE
    # encoder=FaceEncoderModels.DLIBRESNET
    # encoder=FaceEncoderModels.FACENET

    poseestimator = FacePoseEstimatorModels.DLIB68
    ageestimator = FaceAgeEstimatorModels.CV2CAFFE
    genderestimator = FaceGenderEstimatorModels.CV2CAFFE
    emotionestimator = FaceEmotionEstimatorModels.KERAS

    process_facedetection(
        detector,
        poseestimator,
        ageestimator,
        genderestimator,
        emotionestimator,
        cam_resolution,
        cam_index)

def main(args):
    if sys.version_info < (3, 0):
        print("Error: Python2 is slow. Use Python3 for max performance.")
        return

    cam_index = int(args.webcam)
    resolutions = [RESOLUTION_QVGA, RESOLUTION_VGA, RESOLUTION_HD, RESOLUTION_FULLHD]
    try:
        cam_resolution = resolutions[int(args.resolution)]
    except:
        cam_resolution = RESOLUTION_QVGA

    if args.detector:
        try:
            detector = FaceDetectorModels(int(args.detector))
            print("Parameters: {}".format(detector))
            process_facedetection(
                detector,
                FacePoseEstimatorModels.DEFAULT,
                FaceAgeEstimatorModels.DEFAULT,
                FaceGenderEstimatorModels.DEFAULT,
                FaceEmotionEstimatorModels.DEFAULT,
                cam_resolution,
                cam_index)
        except:
            print("Invalid parameter")
        return

    run(cam_index, cam_resolution)

def parse_arguments(argv):
    parser = argparse.ArgumentParser()
    parser.add_argument('--detector', required=False,
        help='Detector model to use. Options: 0-HAARCASCADE, 1-DLIBHOG, 2-DLIBCNN, 3-SSDRESNET, 4-MTCNN, 5-FACENET')
    parser.add_argument('--webcam', required=False, default=0,
        help='Camera index to use. Default is 0 (assumes only one camera is connected).')
    parser.add_argument('--resolution', required=False, default=0,
        help='Camera resolution to use. Default is 0. Options: 0-QVGA, 1-VGA, 2-HD, 3-FULLHD')
    return parser.parse_args(argv)
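
# Example usage (assumes the required model files are present under models/):
#   python agegenderemotion_webcam.py                              # uses the run() defaults (MTCNN detector)
#   python agegenderemotion_webcam.py --detector 4 --resolution 1  # MTCNN detector, VGA resolution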

if __name__ == '__main__':
    main(parse_arguments(sys.argv[1:]))