Skip to content

Commit 16f91b3

Browse files
committed
Updated missing weights on conversion + typos + new normalization
1 parent be825ab commit 16f91b3

7 files changed

+387
-146
lines changed

Convert-Iris.ipynb

+232-143
Large diffs are not rendered by default.

README.md

+1-1
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,7 @@ I've made the conversion semi-manually using a similar method as available for b
99

1010

1111
#### Input for the model is expected to be cropped iris image normalized to -1.0 to 1.0. The cropped image should be 60x60 and centered at the center of the eye contour points as given by FaceMesh.
12-
To get the right scaling, simply use the 192x192 cropped face image used as input for the FaceMesh model, get the average eye contour position for each eyes, use a rect of 60x60 centered at the average position of one eye and use it to crop from the 192x192 image.
12+
To get the right scaling, simply use the 192x192 cropped face image used as input for the FaceMesh model, get the average eye contour position for each eye, use a rect of 64x64 centered at the average position of one eye and use it to crop from the 192x192 image.
1313

1414
However, the `predict_on_image` function normalizes your image itself, so you can even pass the resized image as a np.array input
1515

compare_dicts.py

+11
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,11 @@
1+
import ast


def load_dict(path):
    """Read *path* and safely parse its contents as a Python dict literal.

    Uses ast.literal_eval (not eval) so only literal structures are accepted.
    The file handle is closed deterministically via the context manager.
    """
    with open(path, "r") as f:
        return ast.literal_eval(f.read())


def compare_dicts(old, new):
    """Print every key whose value differs between *old* and *new*.

    Keys present only in *new* are reported explicitly instead of raising
    KeyError (the old conversion dict may be missing newly added weights).

    Returns:
        The number of differing / missing keys found.
    """
    diffs = 0
    for k in new:
        if k not in old:
            print("Key only in new dict: %s (value: %s)" % (k, new[k]))
            diffs += 1
        elif old[k] != new[k]:
            print("Difference at key: %s" % k)
            print("Old dict has value: %s" % old[k])
            print("New dict has value: %s" % new[k])
            diffs += 1
    return diffs


if __name__ == "__main__":
    # Compare the original conversion mapping against the regenerated one.
    compare_dicts(load_dict("conversion_dict.txt"),
                  load_dict("new_conv_dict.txt"))

conversion_dict.txt

+5-1
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,7 @@
1+
{
2+
'split_eye.0.convAct.0.weight': 'conv2d_21/Kernel',
3+
'split_eye.0.convAct.0.bias': 'conv2d_21/Bias',
4+
15
'split_eye.0.convAct.1.weight': 'p_re_lu_21/Alpha',
26
'split_eye.0.dwConvConv.0.weight': 'depthwise_conv2d_10/Kernel',
37
'split_eye.0.dwConvConv.0.bias': 'depthwise_conv2d_10/Bias',
@@ -127,4 +131,4 @@
127131
'split_iris.7.dwConvConv.1.bias': 'conv2d_52/Bias',
128132
'split_iris.7.act.weight': 'p_re_lu_52/Alpha',
129133
'split_iris.8.weight': 'conv_iris/Kernel',
130-
'split_iris.8.bias': 'conv_iris/Bias'
134+
'split_iris.8.bias': 'conv_iris/Bias'}

irislandmarks.pth

2.57 KB
Binary file not shown.

irislandmarks.py

+2-1
Original file line numberDiff line numberDiff line change
@@ -146,7 +146,8 @@ def load_weights(self, path):
146146

147147
def _preprocess(self, x):
148148
"""Converts the image pixels to the range [-1, 1]."""
149-
return x.float() / 127.5 - 1.0
149+
# return x.float() / 127.5 - 1.0
150+
return x.float() / 255.0 # NOTE: [0.0, 1.0] range seems to give better results
150151

151152
def predict_on_image(self, img):
152153
"""Makes a prediction on a single image.

new_conv_dict.txt

+136
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,136 @@
1+
{
2+
'split_eye.0.convAct.0.weight': 'conv2d_21/Kernel',
3+
'split_eye.0.convAct.0.bias': 'conv2d_21/Bias',
4+
5+
'split_eye.0.convAct.1.weight': 'p_re_lu_21/Alpha',
6+
'split_eye.0.dwConvConv.0.weight': 'depthwise_conv2d_10/Kernel',
7+
'split_eye.0.dwConvConv.0.bias': 'depthwise_conv2d_10/Bias',
8+
'split_eye.0.dwConvConv.1.weight': 'conv2d_22/Kernel',
9+
'split_eye.0.dwConvConv.1.bias': 'conv2d_22/Bias',
10+
'split_eye.0.act.weight': 'p_re_lu_22/Alpha',
11+
'split_eye.1.convAct.0.weight': 'conv2d_23/Kernel',
12+
'split_eye.1.convAct.0.bias': 'conv2d_23/Bias',
13+
'split_eye.1.convAct.1.weight': 'p_re_lu_23/Alpha',
14+
'split_eye.1.dwConvConv.0.weight': 'depthwise_conv2d_11/Kernel',
15+
'split_eye.1.dwConvConv.0.bias': 'depthwise_conv2d_11/Bias',
16+
'split_eye.1.dwConvConv.1.weight': 'conv2d_24/Kernel',
17+
'split_eye.1.dwConvConv.1.bias': 'conv2d_24/Bias',
18+
'split_eye.1.act.weight': 'p_re_lu_24/Alpha',
19+
'split_eye.2.convAct.0.weight': 'conv2d_25/Kernel',
20+
'split_eye.2.convAct.0.bias': 'conv2d_25/Bias',
21+
'split_eye.2.convAct.1.weight': 'p_re_lu_25/Alpha',
22+
'split_eye.2.dwConvConv.0.weight': 'depthwise_conv2d_12/Kernel',
23+
'split_eye.2.dwConvConv.0.bias': 'depthwise_conv2d_12/Bias',
24+
'split_eye.2.dwConvConv.1.weight': 'conv2d_26/Kernel',
25+
'split_eye.2.dwConvConv.1.bias': 'conv2d_26/Bias',
26+
'split_eye.2.act.weight': 'p_re_lu_26/Alpha',
27+
'split_eye.3.convAct.0.weight': 'conv2d_27/Kernel',
28+
'split_eye.3.convAct.0.bias': 'conv2d_27/Bias',
29+
'split_eye.3.convAct.1.weight': 'p_re_lu_27/Alpha',
30+
'split_eye.3.dwConvConv.0.weight': 'depthwise_conv2d_13/Kernel',
31+
'split_eye.3.dwConvConv.0.bias': 'depthwise_conv2d_13/Bias',
32+
'split_eye.3.dwConvConv.1.weight': 'conv2d_28/Kernel',
33+
'split_eye.3.dwConvConv.1.bias': 'conv2d_28/Bias',
34+
'split_eye.3.act.weight': 'p_re_lu_28/Alpha',
35+
'split_eye.4.convAct.0.weight': 'conv2d_29/Kernel',
36+
'split_eye.4.convAct.0.bias': 'conv2d_29/Bias',
37+
'split_eye.4.convAct.1.weight': 'p_re_lu_29/Alpha',
38+
'split_eye.4.dwConvConv.0.weight': 'depthwise_conv2d_14/Kernel',
39+
'split_eye.4.dwConvConv.0.bias': 'depthwise_conv2d_14/Bias',
40+
'split_eye.4.dwConvConv.1.weight': 'conv2d_30/Kernel',
41+
'split_eye.4.dwConvConv.1.bias': 'conv2d_30/Bias',
42+
'split_eye.4.act.weight': 'p_re_lu_30/Alpha',
43+
'split_eye.5.convAct.0.weight': 'conv2d_31/Kernel',
44+
'split_eye.5.convAct.0.bias': 'conv2d_31/Bias',
45+
'split_eye.5.convAct.1.weight': 'p_re_lu_31/Alpha',
46+
'split_eye.5.dwConvConv.0.weight': 'depthwise_conv2d_15/Kernel',
47+
'split_eye.5.dwConvConv.0.bias': 'depthwise_conv2d_15/Bias',
48+
'split_eye.5.dwConvConv.1.weight': 'conv2d_32/Kernel',
49+
'split_eye.5.dwConvConv.1.bias': 'conv2d_32/Bias',
50+
'split_eye.5.act.weight': 'p_re_lu_32/Alpha',
51+
'split_eye.6.convAct.0.weight': 'conv2d_33/Kernel',
52+
'split_eye.6.convAct.0.bias': 'conv2d_33/Bias',
53+
'split_eye.6.convAct.1.weight': 'p_re_lu_33/Alpha',
54+
'split_eye.6.dwConvConv.0.weight': 'depthwise_conv2d_16/Kernel',
55+
'split_eye.6.dwConvConv.0.bias': 'depthwise_conv2d_16/Bias',
56+
'split_eye.6.dwConvConv.1.weight': 'conv2d_34/Kernel',
57+
'split_eye.6.dwConvConv.1.bias': 'conv2d_34/Bias',
58+
'split_eye.6.act.weight': 'p_re_lu_34/Alpha',
59+
'split_eye.7.convAct.0.weight': 'conv2d_35/Kernel',
60+
'split_eye.7.convAct.0.bias': 'conv2d_35/Bias',
61+
'split_eye.7.convAct.1.weight': 'p_re_lu_35/Alpha',
62+
'split_eye.7.dwConvConv.0.weight': 'depthwise_conv2d_17/Kernel',
63+
'split_eye.7.dwConvConv.0.bias': 'depthwise_conv2d_17/Bias',
64+
'split_eye.7.dwConvConv.1.weight': 'conv2d_36/Kernel',
65+
'split_eye.7.dwConvConv.1.bias': 'conv2d_36/Bias',
66+
'split_eye.7.act.weight': 'p_re_lu_36/Alpha',
67+
'split_eye.8.weight': 'conv_eyes_contours_and_brows/Kernel',
68+
'split_eye.8.bias': 'conv_eyes_contours_and_brows/Bias',
69+
70+
'split_iris.0.convAct.0.weight': 'conv2d_37/Kernel',
71+
'split_iris.0.convAct.0.bias': 'conv2d_37/Bias',
72+
'split_iris.0.convAct.1.weight': 'p_re_lu_37/Alpha',
73+
'split_iris.0.dwConvConv.0.weight': 'depthwise_conv2d_18/Kernel',
74+
'split_iris.0.dwConvConv.0.bias': 'depthwise_conv2d_18/Bias',
75+
'split_iris.0.dwConvConv.1.weight': 'conv2d_38/Kernel',
76+
'split_iris.0.dwConvConv.1.bias': 'conv2d_38/Bias',
77+
'split_iris.0.act.weight': 'p_re_lu_38/Alpha',
78+
'split_iris.1.convAct.0.weight': 'conv2d_39/Kernel',
79+
'split_iris.1.convAct.0.bias': 'conv2d_39/Bias',
80+
'split_iris.1.convAct.1.weight': 'p_re_lu_39/Alpha',
81+
'split_iris.1.dwConvConv.0.weight': 'depthwise_conv2d_19/Kernel',
82+
'split_iris.1.dwConvConv.0.bias': 'depthwise_conv2d_19/Bias',
83+
'split_iris.1.dwConvConv.1.weight': 'conv2d_40/Kernel',
84+
'split_iris.1.dwConvConv.1.bias': 'conv2d_40/Bias',
85+
'split_iris.1.act.weight': 'p_re_lu_40/Alpha',
86+
'split_iris.2.convAct.0.weight': 'conv2d_41/Kernel',
87+
'split_iris.2.convAct.0.bias': 'conv2d_41/Bias',
88+
'split_iris.2.convAct.1.weight': 'p_re_lu_41/Alpha',
89+
'split_iris.2.dwConvConv.0.weight': 'depthwise_conv2d_20/Kernel',
90+
'split_iris.2.dwConvConv.0.bias': 'depthwise_conv2d_20/Bias',
91+
'split_iris.2.dwConvConv.1.weight': 'conv2d_42/Kernel',
92+
'split_iris.2.dwConvConv.1.bias': 'conv2d_42/Bias',
93+
'split_iris.2.act.weight': 'p_re_lu_42/Alpha',
94+
'split_iris.3.convAct.0.weight': 'conv2d_43/Kernel',
95+
'split_iris.3.convAct.0.bias': 'conv2d_43/Bias',
96+
'split_iris.3.convAct.1.weight': 'p_re_lu_43/Alpha',
97+
'split_iris.3.dwConvConv.0.weight': 'depthwise_conv2d_21/Kernel',
98+
'split_iris.3.dwConvConv.0.bias': 'depthwise_conv2d_21/Bias',
99+
'split_iris.3.dwConvConv.1.weight': 'conv2d_44/Kernel',
100+
'split_iris.3.dwConvConv.1.bias': 'conv2d_44/Bias',
101+
'split_iris.3.act.weight': 'p_re_lu_44/Alpha',
102+
'split_iris.4.convAct.0.weight': 'conv2d_45/Kernel',
103+
'split_iris.4.convAct.0.bias': 'conv2d_45/Bias',
104+
'split_iris.4.convAct.1.weight': 'p_re_lu_45/Alpha',
105+
'split_iris.4.dwConvConv.0.weight': 'depthwise_conv2d_22/Kernel',
106+
'split_iris.4.dwConvConv.0.bias': 'depthwise_conv2d_22/Bias',
107+
'split_iris.4.dwConvConv.1.weight': 'conv2d_46/Kernel',
108+
'split_iris.4.dwConvConv.1.bias': 'conv2d_46/Bias',
109+
'split_iris.4.act.weight': 'p_re_lu_46/Alpha',
110+
'split_iris.5.convAct.0.weight': 'conv2d_47/Kernel',
111+
'split_iris.5.convAct.0.bias': 'conv2d_47/Bias',
112+
'split_iris.5.convAct.1.weight': 'p_re_lu_47/Alpha',
113+
'split_iris.5.dwConvConv.0.weight': 'depthwise_conv2d_23/Kernel',
114+
'split_iris.5.dwConvConv.0.bias': 'depthwise_conv2d_23/Bias',
115+
'split_iris.5.dwConvConv.1.weight': 'conv2d_48/Kernel',
116+
'split_iris.5.dwConvConv.1.bias': 'conv2d_48/Bias',
117+
'split_iris.5.act.weight': 'p_re_lu_48/Alpha',
118+
'split_iris.6.convAct.0.weight': 'conv2d_49/Kernel',
119+
'split_iris.6.convAct.0.bias': 'conv2d_49/Bias',
120+
'split_iris.6.convAct.1.weight': 'p_re_lu_49/Alpha',
121+
'split_iris.6.dwConvConv.0.weight': 'depthwise_conv2d_24/Kernel',
122+
'split_iris.6.dwConvConv.0.bias': 'depthwise_conv2d_24/Bias',
123+
'split_iris.6.dwConvConv.1.weight': 'conv2d_50/Kernel',
124+
'split_iris.6.dwConvConv.1.bias': 'conv2d_50/Bias',
125+
'split_iris.6.act.weight': 'p_re_lu_50/Alpha',
126+
'split_iris.7.convAct.0.weight': 'conv2d_51/Kernel',
127+
'split_iris.7.convAct.0.bias': 'conv2d_51/Bias',
128+
'split_iris.7.convAct.1.weight': 'p_re_lu_51/Alpha',
129+
'split_iris.7.dwConvConv.0.weight': 'depthwise_conv2d_25/Kernel',
130+
'split_iris.7.dwConvConv.0.bias': 'depthwise_conv2d_25/Bias',
131+
'split_iris.7.dwConvConv.1.weight': 'conv2d_52/Kernel',
132+
'split_iris.7.dwConvConv.1.bias': 'conv2d_52/Bias',
133+
'split_iris.7.act.weight': 'p_re_lu_52/Alpha',
134+
'split_iris.8.weight': 'conv_iris/Kernel',
135+
'split_iris.8.bias': 'conv_iris/Bias'
136+
}

0 commit comments

Comments
 (0)