8 changes: 4 additions & 4 deletions mmdet/core/evaluation/mean_ap.py
@@ -84,8 +84,8 @@ def tpfp_imagenet(
     # an indicator of ignored gts
     gt_ignore_inds = np.concatenate(
         (
-            np.zeros(gt_bboxes.shape[0], dtype=np.bool),
-            np.ones(gt_bboxes_ignore.shape[0], dtype=np.bool),
+            np.zeros(gt_bboxes.shape[0], dtype=bool),
+            np.ones(gt_bboxes_ignore.shape[0], dtype=bool),
         )
     )
     # stack gt_bboxes and gt_bboxes_ignore for convenience
@@ -179,8 +179,8 @@ def tpfp_default(
     # an indicator of ignored gts
     gt_ignore_inds = np.concatenate(
         (
-            np.zeros(gt_bboxes.shape[0], dtype=np.bool),
-            np.ones(gt_bboxes_ignore.shape[0], dtype=np.bool),
+            np.zeros(gt_bboxes.shape[0], dtype=bool),
+            np.ones(gt_bboxes_ignore.shape[0], dtype=bool),
         )
     )
     # stack gt_bboxes and gt_bboxes_ignore for convenience
2 changes: 1 addition & 1 deletion mmdet/core/mask/structures.py
@@ -860,5 +860,5 @@ def polygon_to_bitmap(polygons, height, width):
     """
     rles = maskUtils.frPyObjects(polygons, height, width)
     rle = maskUtils.merge(rles)
-    bitmap_mask = maskUtils.decode(rle).astype(np.bool)
+    bitmap_mask = maskUtils.decode(rle).astype(bool)
     return bitmap_mask
8 changes: 4 additions & 4 deletions mmdet/models/detectors/pa_predictor.py
@@ -51,7 +51,7 @@ def palette2filter(palette, neighbor_sizes=None, bidirection=True):
     """

     def generate_cross_filter(number_of_neighbors, dist):
-        cross_filter = np.zeros((number_of_neighbors,) + palette.shape, dtype=np.bool)
+        cross_filter = np.zeros((number_of_neighbors,) + palette.shape, dtype=bool)
         cross_filter[0, dist:, :] = np.logical_or(palette[dist:, :], palette[:-dist, :])
         cross_filter[1, :, dist:] = np.logical_or(palette[:, dist:], palette[:, :-dist])
         cross_filter[2, dist:, dist:] = np.logical_or(
@@ -81,7 +81,7 @@ def generate_cross_filter(number_of_neighbors, dist):
     number_of_span = len(neighbor_sizes)
     number_of_neighbors_per_span = 8 if bidirection else 4
     potential_filter = np.zeros(
-        (number_of_neighbors_per_span * number_of_span,) + palette.shape, dtype=np.bool
+        (number_of_neighbors_per_span * number_of_span,) + palette.shape, dtype=bool
     )
     for neighbor_idx, dist in enumerate(neighbor_sizes):
         offset = neighbor_idx * number_of_neighbors_per_span
@@ -103,7 +103,7 @@ def palette2weight(
     """

     def generate_cross_weight(number_of_neighbors, dist):
-        cross_filter = np.zeros((number_of_neighbors,) + palette.shape, dtype=np.bool)
+        cross_filter = np.zeros((number_of_neighbors,) + palette.shape, dtype=bool)
         cross_filter[0, dist:, :] = np.maximum(palette[dist:, :], palette[:-dist, :])
         cross_filter[1, :, dist:] = np.maximum(palette[:, dist:], palette[:, :-dist])
         cross_filter[2, dist:, dist:] = np.maximum(
@@ -152,7 +152,7 @@ def generate_cross_weight(number_of_neighbors, dist):
     number_of_span = len(neighbor_sizes)
     number_of_neighbors_per_span = 8 if bidirection else 4
     potential_weight = np.zeros(
-        (number_of_neighbors_per_span * number_of_span,) + palette.shape, dtype=np.bool
+        (number_of_neighbors_per_span * number_of_span,) + palette.shape, dtype=bool
     )
     for neighbor_idx, dist in enumerate(neighbor_sizes):
         offset = neighbor_idx * number_of_neighbors_per_span
4 changes: 2 additions & 2 deletions pa_lib/evaluate_helper.py
@@ -61,8 +61,8 @@ def rank_by_variance(image, masks, weight_by_size=False, nms=1.0):
 def compute_iou(annotation, segmentation, mask_threshold=0.0):
     if type(annotation) == torch.Tensor:
         annotation = annotation.numpy()
-    annotation = annotation.astype(np.bool)
-    segmentation = (segmentation > mask_threshold).astype(np.bool)
+    annotation = annotation.astype(bool)
+    segmentation = (segmentation > mask_threshold).astype(bool)

     if np.isclose(np.sum(annotation), 0) and np.isclose(np.sum(segmentation), 0):
         return 1
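
For context on why every `dtype=np.bool` becomes `dtype=bool`: `np.bool` was only a deprecated alias for Python's built-in `bool` (deprecated in NumPy 1.20 and removed in 1.24), so any of these call sites fails on current NumPy. A minimal sketch of the behavior, assuming NumPy >= 1.24:

```python
import numpy as np

# np.bool was a deprecated alias for the builtin bool; on NumPy >= 1.24
# accessing the attribute raises AttributeError, so dtype=np.bool breaks.
try:
    np.zeros(3, dtype=np.bool)
except AttributeError as err:
    print(err)  # e.g. "module 'numpy' has no attribute 'bool'"

# The builtin bool (equivalently np.bool_) is the drop-in replacement.
mask = np.zeros(3, dtype=bool)
print(mask.dtype)  # bool
```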