
Commit 8fb4615

Minor fixes linked to deprecated types and dependencies (#122)
* Replace deprecated np types
* Comment out python dependencies in package.xml, as they no longer work
1 parent aef31be · commit 8fb4615
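For context on the first bullet: the bare np.float and np.int aliases were deprecated in NumPy 1.20 and removed in NumPy 1.24, so code that still uses them raises an AttributeError on current NumPy; the Python builtins float and int are the drop-in replacements (they map to 64-bit dtypes, so spots that previously used np.float32 now produce float64 arrays). A minimal sketch of the substitution, assuming NumPy 1.24 or newer:

import numpy as np

# Before (fails on NumPy >= 1.24): np.zeros(4, dtype=np.float)
a = np.zeros(4, dtype=float)          # builtin float -> float64 array
b = np.zeros(4, dtype=int)            # builtin int -> platform default integer dtype
c = np.array([1, 2, 3]).astype(float)

print(a.dtype, b.dtype, c.dtype)      # float64 int64 float64 (int32 for b on Windows)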

12 files changed: +43 additions, −41 deletions

rt_bene_model_training/pytorch/rtbene_dataset.py

Lines changed: 1 addition & 1 deletion
@@ -65,7 +65,7 @@ def __getitem__(self, index):
        with h5py.File(self._h5_file, mode="r") as h5_file:
            left_img = h5_file[sample[0] + "/left"][sample[1]][()][0]
            right_img = h5_file[sample[0] + "/right"][sample[1]][()][0]
-           label = h5_file[sample[0] + "/label"][()].astype(np.float32)
+           label = h5_file[sample[0] + "/label"][()].astype(float)

            # Load data and get label
            transformed_left_img = self._transform(Image.fromarray(left_img, 'RGB'))

rt_gene/package.xml

Lines changed: 4 additions & 2 deletions
@@ -42,15 +42,17 @@
  <depend>uvc_camera</depend>
  <depend>dynamic_reconfigure</depend>

- <depend>python-opencv</depend>
+ <!-- Python dependencies, no longer installable this way -->
+
+ <!-- <depend>python-opencv</depend>
  <depend>python-dlib</depend>
  <depend>python-scipy</depend>
  <depend>python-numpy</depend>
  <depend>python-tensorflow-gpu-pip</depend>
  <depend>python-tqdm</depend>
  <depend>python-imaging</depend>
  <depend>python-pytorch-pip</depend>
- <depend>python-matplotlib</depend>
+ <depend>python-matplotlib</depend> -->

  <exec_depend>rviz</exec_depend>
  <exec_depend>message_runtime</exec_depend>

rt_gene/src/rt_gene/ThreeDDFA/io.py

Lines changed: 5 additions & 5 deletions
@@ -88,19 +88,19 @@ def load_bfm(model_path):
    model = model[0, 0]

    model_new = {}
-   w_shp = model['w'].astype(np.float32)
+   w_shp = model['w'].astype(float)
    model_new['w_shp_sim'] = w_shp[:, :40]
-   w_exp = model['w_exp'].astype(np.float32)
+   w_exp = model['w_exp'].astype(float)
    model_new['w_exp_sim'] = w_exp[:, :10]

    u_shp = model['mu_shape']
    u_exp = model['mu_exp']
-   u = (u_shp + u_exp).astype(np.float32)
+   u = (u_shp + u_exp).astype(float)
    model_new['mu'] = u
-   model_new['tri'] = model['tri'].astype(np.int32) - 1
+   model_new['tri'] = model['tri'].astype(int) - 1

    # flatten it, pay attention to index value
-   keypoints = model['keypoints'].astype(np.int32) - 1
+   keypoints = model['keypoints'].astype(int) - 1
    keypoints = np.concatenate((3 * keypoints, 3 * keypoints + 1, 3 * keypoints + 2), axis=0)

    model_new['keypoints'] = keypoints.T.flatten()

rt_gene/src/rt_gene/estimate_gaze_tensorflow.py

Lines changed: 1 addition & 1 deletion
@@ -70,7 +70,7 @@ def input_from_image(self, cv_image):
        """This method converts an eye_img_msg provided by the landmark estimator, and converts it to a format
        suitable for the gaze network."""
        currimg = cv_image.reshape(36, 60, 3, order='F')
-       currimg = currimg.astype(np.float32)
+       currimg = currimg.astype(float)
        testimg = np.zeros((36, 60, 3))
        testimg[:, :, 0] = currimg[:, :, 0] - 103.939
        testimg[:, :, 1] = currimg[:, :, 1] - 116.779

rt_gene/src/rt_gene/extract_landmarks_method_base.py

Lines changed: 2 additions & 2 deletions
@@ -65,7 +65,7 @@ def get_full_model_points(self, model_points_file=None):
        with open(model_points_file) as f:
            for line in f:
                raw_value.append(line)
-       model_points = np.array(raw_value, dtype=np.float32)
+       model_points = np.array(raw_value, dtype=float)
        model_points = np.reshape(model_points, (3, -1)).T

        # index the expansion of the model based.

@@ -115,7 +115,7 @@ def ddfa_forward_pass(self, color_img, roi_box_list):
        _input = torch.cat([facial_landmark_transform(img).unsqueeze(0) for img in img_step], 0)
        with torch.no_grad():
            _input = _input.to(self.device)
-           param = self.facial_landmark_nn(_input).cpu().numpy().astype(np.float32)
+           param = self.facial_landmark_nn(_input).cpu().numpy().astype(float)

        return [predict_68pts(p.flatten(), roi_box) for p, roi_box in zip(param, roi_box_list)]

rt_gene/src/rt_gene/kalman_stabilizer.py

Lines changed: 18 additions & 18 deletions
@@ -51,44 +51,44 @@ def __init__(self,
        self.filter = cv2.KalmanFilter(state_num, measure_num, 0)

        # Store the state.
-       self.state = np.zeros((state_num, 1), dtype=np.float32)
+       self.state = np.zeros((state_num, 1), dtype=float)

        # Store the measurement result.
-       self.measurement = np.array((measure_num, 1), np.float32)
+       self.measurement = np.array((measure_num, 1), float)

        # Store the prediction.
-       self.prediction = np.zeros((state_num, 1), np.float32)
+       self.prediction = np.zeros((state_num, 1), float)

        # Kalman parameters setup for scalar.
        if self.measure_num == 1:
            self.filter.transitionMatrix = np.array([[1, 1],
-                                                     [0, 1]], np.float32)
+                                                     [0, 1]], float)

-           self.filter.measurementMatrix = np.array([[1, 1]], np.float32)
+           self.filter.measurementMatrix = np.array([[1, 1]], float)

            self.filter.processNoiseCov = np.array([[1, 0],
-                                                    [0, 1]], np.float32) * cov_process
+                                                    [0, 1]], float) * cov_process

            self.filter.measurementNoiseCov = np.array(
-               [[1]], np.float32) * cov_measure
+               [[1]], float) * cov_measure

        # Kalman parameters setup for point.
        if self.measure_num == 2:
            self.filter.transitionMatrix = np.array([[1, 0, 1, 0],
                                                     [0, 1, 0, 1],
                                                     [0, 0, 1, 0],
-                                                    [0, 0, 0, 1]], np.float32)
+                                                    [0, 0, 0, 1]], float)

            self.filter.measurementMatrix = np.array([[1, 0, 0, 0],
-                                                     [0, 1, 0, 0]], np.float32)
+                                                     [0, 1, 0, 0]], float)

            self.filter.processNoiseCov = np.array([[1, 0, 0, 0],
                                                    [0, 1, 0, 0],
                                                    [0, 0, 1, 0],
-                                                   [0, 0, 0, 1]], np.float32) * cov_process
+                                                   [0, 0, 0, 1]], float) * cov_process

            self.filter.measurementNoiseCov = np.array([[1, 0],
-                                                       [0, 1]], np.float32) * cov_measure
+                                                       [0, 1]], float) * cov_measure

    def update(self, measurement):
        """Update the filter"""

@@ -97,10 +97,10 @@ def update(self, measurement):

        # Get new measurement
        if self.measure_num == 1:
-           self.measurement = np.array([[np.float32(measurement[0])]])
+           self.measurement = np.array([[float(measurement[0])]])
        else:
-           self.measurement = np.array([[np.float32(measurement[0])],
-                                        [np.float32(measurement[1])]])
+           self.measurement = np.array([[float(measurement[0])],
+                                        [float(measurement[1])]])

        # Correct according to mesurement
        self.filter.correct(self.measurement)

@@ -112,13 +112,13 @@ def set_q_r(self, cov_process=0.1, cov_measure=0.001):
        """Set new value for processNoiseCov and measurementNoiseCov."""
        if self.measure_num == 1:
            self.filter.processNoiseCov = np.array([[1, 0],
-                                                    [0, 1]], np.float32) * cov_process
+                                                    [0, 1]], float) * cov_process
            self.filter.measurementNoiseCov = np.array(
-               [[1]], np.float32) * cov_measure
+               [[1]], float) * cov_measure
        else:
            self.filter.processNoiseCov = np.array([[1, 0, 0, 0],
                                                    [0, 1, 0, 0],
                                                    [0, 0, 1, 0],
-                                                   [0, 0, 0, 1]], np.float32) * cov_process
+                                                   [0, 0, 0, 1]], float) * cov_process
            self.filter.measurementNoiseCov = np.array([[1, 0],
-                                                       [0, 1]], np.float32) * cov_measure
+                                                       [0, 1]], float) * cov_measure

rt_gene/src/rt_gene/tracker_face_encoding.py

Lines changed: 1 addition & 1 deletion
@@ -55,7 +55,7 @@ def __add_new_element(self, element):

        for untracked_encoding_id in list_to_check:
            previous_encoding = self.__encoding_list[untracked_encoding_id]
-           previous_encoding = np.fromstring(previous_encoding[1:-1], dtype=np.float, sep=",")
+           previous_encoding = np.fromstring(previous_encoding[1:-1], dtype=float, sep=",")
            distance = np.linalg.norm(previous_encoding - encoding, axis=0)

            # the new element and the previous encoding are the same person

rt_gene/src/rt_gene/tracker_generic.py

Lines changed: 2 additions & 2 deletions
@@ -58,13 +58,13 @@ def get_eye_image_from_landmarks(subject, eye_image_size):
        # Now compute the bounding boxes
        # The left / right x-coordinates are computed as the landmark position plus/minus the margin
        # The bottom / top y-coordinates are computed according to the desired ratio, as the width of the image is known
-       left_bb = np.zeros(4, dtype=np.int)
+       left_bb = np.zeros(4, dtype=int)
        left_bb[0] = transformed_eye_landmarks[2][0] - lefteye_margin / 2.0
        left_bb[1] = lefteye_center_y - (lefteye_width + lefteye_margin) * desired_ratio
        left_bb[2] = transformed_eye_landmarks[3][0] + lefteye_margin / 2.0
        left_bb[3] = lefteye_center_y + (lefteye_width + lefteye_margin) * desired_ratio

-       right_bb = np.zeros(4, dtype=np.int)
+       right_bb = np.zeros(4, dtype=int)
        right_bb[0] = transformed_eye_landmarks[0][0] - righteye_margin / 2.0
        right_bb[1] = righteye_center_y - (righteye_width + righteye_margin) * desired_ratio
        right_bb[2] = transformed_eye_landmarks[1][0] + righteye_margin / 2.0

rt_gene_inpainting/GlassesCompletion.py

Lines changed: 2 additions & 2 deletions
@@ -116,10 +116,10 @@ def image_completion_random_search(self, nIter=1000, GPU_ID="0"):
                continue

            data_face = imread_PRL(filename_face, is_grayscale=False)
-           image_face = np.array(data_face).astype(np.float32)
+           image_face = np.array(data_face).astype(float)

            data_mask = imread_PRL(filename_mask, is_grayscale=True)
-           image_mask = np.array(data_mask).astype(np.float32)
+           image_mask = np.array(data_mask).astype(float)

            # Sample index
            sample_num = 1

rt_gene_inpainting/utils.py

Lines changed: 3 additions & 3 deletions
@@ -11,16 +11,16 @@

def imread_PRL(path, is_grayscale=False):
    if is_grayscale:
-       return scipy.misc.imread(path, flatten=True).astype(np.float) / 127.5 - 1.
+       return scipy.misc.imread(path, flatten=True).astype(float) / 127.5 - 1.
    else:
-       return scipy.misc.imread(path).astype(np.float) / 127.5 - 1.
+       return scipy.misc.imread(path).astype(float) / 127.5 - 1.


def PRL_data_image_load(data, sample_idx=0):
    data_files = map(lambda i: data[i], sample_idx)

    data = [imread_PRL(data_file, is_grayscale=False) for data_file in data_files]
-   data_images = np.array(data).astype(np.float32)
+   data_images = np.array(data).astype(float)

    return data_images
