From 852149a21faf1a53009de6be4b7a6431a2dd9909 Mon Sep 17 00:00:00 2001
From: Ryan Cranfill
Date: Wed, 27 Sep 2017 22:13:07 -0500
Subject: [PATCH] moar python2 compatibility

---
 cena/recognition.py | 23 +++++++++++++++++------
 cena/settings.py    | 24 +++++++++++++++++-------
 face_detector.py    |  4 ++--
 feature_server.py   |  4 ++++
 4 files changed, 40 insertions(+), 15 deletions(-)

diff --git a/cena/recognition.py b/cena/recognition.py
index 6d31b83..94808bf 100644
--- a/cena/recognition.py
+++ b/cena/recognition.py
@@ -70,7 +70,7 @@ def recognize_faces(self, frame, list_o_faces):
             pred_prob = max(pred_probs)
             pred_names.append({pred_name: pred_prob})
 
-            if DEV and ANNOTATE_FRAME:
+            if ANNOTATE_FRAME:
                 pose_landmarks = self.face_pose_predictor(frame, rect)
                 cv2.putText(frame, '{}: {}'.format(pred_name, pred_prob), (x, y), cv2.FONT_HERSHEY_SIMPLEX, 1,
                             (102, 204, 102), thickness=2)
@@ -78,9 +78,20 @@ def recognize_faces(self, frame, list_o_faces):
                     x, y = point.x, point.y
                     cv2.circle(frame, (x, y), 5, (0, 255, 0), -1)
 
-                end = datetime.now()
-                return frame, pred_names, (end - start).microseconds / 1000
-            else:
-                end = datetime.now()
-                return pred_names, (end - start).microseconds / 1000
+        end = datetime.now()
+        return frame, pred_names, (end - start).microseconds / 1000
+
+        # if DEV and ANNOTATE_FRAME:
+        #     pose_landmarks = self.face_pose_predictor(frame, rect)
+        #     cv2.putText(frame, '{}: {}'.format(pred_name, pred_prob), (x, y), cv2.FONT_HERSHEY_SIMPLEX, 1,
+        #                 (102, 204, 102), thickness=2)
+        #     for point in pose_landmarks.parts():
+        #         x, y = point.x, point.y
+        #         cv2.circle(frame, (x, y), 5, (0, 255, 0), -1)
+        #
+        #     end = datetime.now()
+        #     return frame, pred_names, (end - start).microseconds / 1000
+        # else:
+        #     end = datetime.now()
+        #     return pred_names, (end - start).microseconds / 1000
 
diff --git a/cena/settings.py b/cena/settings.py
index bb4f248..170d2c7 100644
--- a/cena/settings.py
+++ b/cena/settings.py
@@ -1,4 +1,5 @@
 import os
+from ast import literal_eval
 
 ENVIRONMENT = 'dev'
 # ENVIRONMENT = 'nah dude'
@@ -7,8 +8,14 @@
 YOLO_MODE = True
 # YOLO_MODE = False
 
-ANNOTATE_FRAME = True
-# ANNOTATE_FRAME = False
+# ANNOTATE_FRAME = True
+ANNOTATE_FRAME = False
+
+CLIENT_ENV_VAR = os.getenv('FACE_CLIENT', True)
+if not isinstance(CLIENT_ENV_VAR, bool):
+    IS_CLIENT = literal_eval(CLIENT_ENV_VAR)
+else:
+    IS_CLIENT = CLIENT_ENV_VAR
 
 API_SERVER_NAME = 'face-api'
 
@@ -39,9 +46,12 @@
 RYAN_FILE_NAME = 'dun_dun_dun.mp3'
 RYAN_SONG_PATH = os.path.join(SONGS_DIR, RYAN_FILE_NAME)
 
-from cena.utils import get_api_server_ip_address
-# SERVER_URL = 'http://localhost:5000/recognize'
-# SERVER_IP = 'localhost'
-SERVER_IP = get_api_server_ip_address()
+if IS_CLIENT:
+    from cena.utils import get_api_server_ip_address
+    # SERVER_URL = 'http://localhost:5000/recognize'
+    SERVER_IP = get_api_server_ip_address()
+    # SERVER_URL = 'http://107.20.57.175:5000/recognize'
+else:
+    SERVER_IP = 'localhost'
+
 SERVER_URL = 'http://{}:5000/recognize'.format(SERVER_IP)
-# SERVER_URL = 'http://107.20.57.175:5000/recognize'
diff --git a/face_detector.py b/face_detector.py
index 60dac5d..f3e11cc 100755
--- a/face_detector.py
+++ b/face_detector.py
@@ -8,7 +8,6 @@
 from cena.utils import encode_image, decode_image, play_mp3
 
 
-
 def listen_for_quit():
     k = cv2.waitKey(1)
     if k == ord('q'):
@@ -59,7 +58,8 @@ def process_frame(video_capture, face_recognizer=None):
         # frame, people_list, time = face_recognizer.recognize_faces(frame, list_o_faces)
         frame, people_list, time = get_server_response(frame, list_o_faces)
     elif DEV:
-        people_list, time = face_recognizer.recognize_faces(frame, list_o_faces)
+        people_list, time = get_server_response(frame, list_o_faces)
+        # people_list, time = face_recognizer.recognize_faces(frame, list_o_faces)
     else:
         people_list, time = get_server_response(frame, list_o_faces)
         # play_mp3(RYAN_SONG_PATH)
diff --git a/feature_server.py b/feature_server.py
index 0170f70..30cf3a8 100644
--- a/feature_server.py
+++ b/feature_server.py
@@ -34,6 +34,10 @@ def recognize():
     list_o_faces = request.json['list_o_faces']
 
     frame, people_list, time = RECOGNIZER.recognize_faces(frame, list_o_faces)
+    # if ANNOTATE_FRAME:
+    #     frame, people_list, time = RECOGNIZER.recognize_faces(frame, list_o_faces)
+    # else:
+    #     people_list, time = RECOGNIZER.recognize_faces(frame, list_o_faces)
 
     response = {
         'people_list': people_list,
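
A note on the FACE_CLIENT toggle added to cena/settings.py above: os.getenv() returns a string whenever the variable is actually set, and bool('False') is True, so the value is routed through ast.literal_eval to recover a real boolean. A minimal sketch of that behaviour (the exported value shown is illustrative):

    import os
    from ast import literal_eval

    # Same parsing as the patched cena/settings.py. The fallback default
    # (True) is already a bool; anything read from the environment is a str.
    os.environ['FACE_CLIENT'] = 'False'   # e.g. exported on the API server box

    raw = os.getenv('FACE_CLIENT', True)
    is_client = literal_eval(raw) if not isinstance(raw, bool) else raw

    print(bool(raw))    # True  -- why a plain bool() cast would be wrong here
    print(is_client)    # False -- literal_eval('False') yields the boolean False

With IS_CLIENT falsy, the patched settings.py skips the get_api_server_ip_address() lookup entirely and pins SERVER_IP to 'localhost', so SERVER_URL resolves to 'http://localhost:5000/recognize'.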
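
For the request path itself: feature_server.py serves /recognize and reads request.json['list_o_faces'], while face_detector.py now calls get_server_response() in both the DEV and non-DEV branches. A rough, hypothetical sketch of that round trip follows; only the list_o_faces request key and the people_list response key are confirmed by this patch, and the 'image' and 'time' keys are assumptions about the surrounding code:

    import requests

    SERVER_URL = 'http://localhost:5000/recognize'  # what settings.py builds when IS_CLIENT is falsy

    def get_server_response_sketch(encoded_frame, list_o_faces):
        # Hypothetical stand-in for face_detector.py's get_server_response().
        payload = {
            'image': encoded_frame,        # assumed key; server would decode_image() it
            'list_o_faces': list_o_faces,  # confirmed: request.json['list_o_faces']
        }
        body = requests.post(SERVER_URL, json=payload).json()
        # The server builds response = {'people_list': ...}; the 'time' key is assumed.
        return body['people_list'], body.get('time')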