-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathfaceDetector.py
158 lines (115 loc) · 5.59 KB
/
faceDetector.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
import math
import os
from pathlib import Path
from time import time
import click
import cvzone
import cv2
import numpy as np
from cvzone.FaceDetectionModule import FaceDetector
@click.command()
@click.option("--debug", default=False, show_choices=True, type=bool,
              help="Which mode to use: release or debug")
@click.option("--save", default=False, show_choices=True, type=bool,
              help="Should result be saved or not so we can collect the data for training")
def detect_faces(debug: bool, save: bool):
    """Detect faces from the default webcam and optionally collect training data.

    For every frame, faces above a confidence threshold are expanded by a
    percentage offset, checked for blurriness (Laplacian variance), drawn on
    screen, and — when ``save`` is set and no detected face is blurred — the
    frame is written as a ``.jpg`` with a matching YOLO-format ``.txt`` label
    file into ``Datasets/DataCollected``.

    Args:
        debug: When True, print paths and draw annotations on the raw frame too.
        save: When True, persist sharp frames and their YOLO label files.

    Press 'q' in the image window to quit.
    """
    dir_path: Path = Path(os.path.dirname(os.path.realpath(__file__)))
    if debug:
        print(dir_path)
    # Use the '/' operator instead of nested Path.joinpath calls.
    output_folder_path: Path = dir_path / "Datasets" / "DataCollected"
    if debug:
        print(output_folder_path)
    class_id: int = 0  # 0 means fake and 1 means real
    offset_percentage_w: int = 10
    offset_percentage_h: int = 20
    confidence_threshold: float = 0.8
    frame_width: int = 640
    frame_height: int = 480
    precision: int = 6
    blur_threshold: int = 70  # larger means more focus
    capture: cv2.VideoCapture = cv2.VideoCapture(0)
    # Named constants instead of magic property ids 3 and 4.
    capture.set(cv2.CAP_PROP_FRAME_WIDTH, frame_width)
    capture.set(cv2.CAP_PROP_FRAME_HEIGHT, frame_height)
    detector: FaceDetector = FaceDetector()
    while True:
        success, image = capture.read()
        # BUG FIX: the original ignored `success` and crashed on image.copy()
        # when a frame grab failed; skip the frame instead.
        if not success:
            continue
        image_out: np.ndarray = image.copy()
        image, bboxes = detector.findFaces(image, draw=False)
        list_blur: list[bool] = []  # true if face is blur else false
        list_info: list[str] = []   # normalized values and a class name for the label text file
        if bboxes:
            # Frame dimensions are loop-invariant: compute them once per frame.
            image_height, image_width, _ = image.shape
            # bboxInfo - "id","bbox","score","center"
            for bbox in bboxes:
                x, y, w, h = bbox["bbox"]
                current_confidence: float = bbox["score"][0]
                # ----check the current confidence----
                if current_confidence > confidence_threshold:
                    # ----adding offset to the detected face----
                    offset_w: float = (offset_percentage_w / 100) * w
                    x -= math.ceil(offset_w)
                    w += math.ceil(offset_w * 2)
                    offset_h: float = (offset_percentage_h / 100) * h
                    y -= math.ceil(offset_h * 3)
                    h += math.ceil(offset_h * 3.5)
                    # ----to avoid values below 0----
                    x = max(x, 0)
                    y = max(y, 0)
                    w = max(w, 0)
                    h = max(h, 0)
                    # ----find blurriness----
                    face_crop: np.ndarray = image[y:y + h, x:x + w]
                    blur_value: int = math.ceil(cv2.Laplacian(face_crop, cv2.CV_64F).var())
                    list_blur.append(blur_value > blur_threshold)
                    # ----normalize values (clamped to 1 to avoid out-of-range boxes)----
                    x_center: float = x + w / 2
                    y_center: float = y + h / 2
                    x_center_normalized: float = min(round(x_center / image_width, precision), 1)
                    y_center_normalized: float = min(round(y_center / image_height, precision), 1)
                    w_normalized: float = min(round(w / image_width, precision), 1)
                    h_normalized: float = min(round(h / image_height, precision), 1)
                    # YOLO requires this format.
                    # BUG FIX: the original omitted the space between the x and y
                    # centers, producing corrupt labels like "0 0.50.5 ...".
                    list_info.append(f"{class_id} {x_center_normalized} "
                                     f"{y_center_normalized} {w_normalized} {h_normalized}\n")
                    # ----drawing----
                    cv2.rectangle(image_out, (x, y, w, h), (255, 0, 0), 3)
                    cvzone.putTextRect(image_out, f"Score: {math.floor(current_confidence*100)}%;" +
                                       f"Blur: {blur_value}", (x, y - 20), scale=1.5, thickness=3)
                    if debug:
                        cv2.rectangle(image, (x, y, w, h), (255, 0, 0), 3)
                        cvzone.putTextRect(image, f"Score: {math.floor(current_confidence * 100)}%;" +
                                           f"Blur: {blur_value}", (x, y - 20), scale=1.5, thickness=3)
        # ---- to save ----
        if save and list_blur and all(list_blur):  # faces detected and none blurred
            time_now: str = "".join(str(time()).split("."))
            # BUG FIX: the original called the non-existent Path.joinpathPath,
            # which raised AttributeError as soon as save=True.
            image_path: Path = output_folder_path / f"{time_now}.jpg"
            # BUG FIX: the original appended the label text to the .jpg itself,
            # corrupting the image; labels belong in a sibling .txt file.
            label_path: Path = output_folder_path / f"{time_now}.txt"
            if debug:
                print(image_path)
            # ---- save image ----
            cv2.imwrite(str(image_path), image)
            # ----save label text file----
            with open(label_path, "a") as output:
                output.writelines(list_info)
        cv2.imshow("Image", image_out)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    # Release the camera handle (the original leaked it) before tearing down windows.
    capture.release()
    cv2.destroyAllWindows()
# Script entry point: click supplies the --debug/--save CLI options.
if __name__ == "__main__":
    detect_faces()