Update mAP calculation
rcorrero committed Dec 11, 2020
1 parent 4199460 commit b6b8b07
Showing 4 changed files with 146 additions and 23 deletions.
30 changes: 16 additions & 14 deletions models/vessel_detector/vessel_detector.py
@@ -475,8 +475,8 @@ def calculate_map(gt_boxes,
         if pr_boxes.shape[0] == 0:
             return 1.0
         return 0.0
-    if pr_boxes.shape[0] == 0:
-        return 0.0
+    #if pr_boxes.shape[0] == 0:
+    #    return 0.0
# sorting
pr_boxes = pr_boxes[scores.argsort().flip(-1)]
iou_mat = calculate_iou(gt_boxes,pr_boxes,form)
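Note: the `scores.argsort().flip(-1)` line above sorts predictions by descending confidence before matching. A quick illustration with made-up tensors:

```
import torch

scores = torch.tensor([0.2, 0.9, 0.5])
pr_boxes = torch.tensor([[0., 0., 1., 1.],
                         [1., 1., 2., 2.],
                         [2., 2., 3., 3.]])

# argsort() is ascending; flip(-1) reverses it, so indexing with the
# flipped order arranges the boxes from highest to lowest score.
order = scores.argsort().flip(-1)   # tensor([1, 2, 0])
pr_boxes_sorted = pr_boxes[order]
```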
@@ -520,16 +520,18 @@ def evaluate(model, data_loader, device, thresh_list):
     for thresh in thresh_list:
         mAP_dict[thresh] = np.mean(mAP_dict[thresh])
     # Create metrics dict
-    metrics = mAP_dict
-    metrics['eval_time'] = end - start
-    return metrics
+    #metrics = mAP_dict
+    #metrics['eval_time'] = end - start
+    #return metrics
+    mAP = np.mean(list(mAP_dict.values()))
+    return mAP


-def print_metrics(metrics: dict, epoch: int, thresh_list) -> None:
+def print_metrics(mAP: float, epoch: int, thresh_list) -> None:
     print('[Epoch %-2.d] Evaluation results:' % (epoch + 1))
-    for thresh in thresh_list:
-        mAP = metrics[thresh]
-        print(' IoU (>) Threshold: %-3.3f | mAP: %-3.3f' % (thresh, mAP))
+    #for thresh in thresh_list:
+    #    mAP = metrics[thresh]
+    print(' IoU (>) Thresholds: %s | mAP: %-5.5f' % (thresh_list, mAP))
     print('\n')


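Note: `evaluate` now collapses the per-threshold APs into a single COCO-style mAP averaged over the IoU thresholds, and `print_metrics` reports it once. A minimal sketch with made-up AP values:

```
import numpy as np

# Hypothetical per-threshold AP values (illustration only; the real values
# come from running the model over the validation loader).
mAP_dict = {0.5: 0.71, 0.55: 0.66, 0.6: 0.61, 0.65: 0.55, 0.7: 0.48, 0.75: 0.40}

mAP = np.mean(list(mAP_dict.values()))
print(' IoU (>) Thresholds: %s | mAP: %-5.5f' % (list(mAP_dict.keys()), mAP))
# -> IoU (>) Thresholds: [0.5, 0.55, 0.6, 0.65, 0.7, 0.75] | mAP: 0.56833
```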
@@ -550,13 +552,13 @@ def main(savepath, backbone_state_dict=None):
         'shuffle': True,
         'batch_size': 16,
         'num_epochs': 30,
-        'print_every': 100,
+        'print_every': 500,
         # Increase number of detections since there may be many vessels in an image
         'box_detections_per_img': 256,
         # Use small anchor boxes since targets are small
-        'anchor_sizes': (8, 16, 32, 64, 128),
+        'anchor_sizes': (8, 16, 32, 64, 128, 256),
         # IoU thresholds for mAP calculation
-        'thresh_list': [0.5, 0.75, 1.0]
+        'thresh_list': np.arange(0.5, 0.76, 0.05).round(8)
     }

seed = params['seed']
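Note: the new `thresh_list` expression replaces the hand-written list (which included an IoU threshold of 1.0 that no box can exceed under a strict `>` comparison) with six evenly spaced thresholds; `.round(8)` trims the floating-point drift `np.arange` can introduce. A quick check:

```
import numpy as np

thresh_list = np.arange(0.5, 0.76, 0.05).round(8)
print(thresh_list)  # [0.5  0.55 0.6  0.65 0.7  0.75]
```

The notebook cell later in this commit runs the same check.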
@@ -653,8 +655,8 @@ def main(savepath, backbone_state_dict=None):
             num_epochs = num_epochs
         )
         print('Epoch %d completed. Running validation...\n' % (epoch + 1))
-        metrics = evaluate(model, valid_loader, device, thresh_list)
-        print_metrics(metrics, epoch, thresh_list)
+        mAP = evaluate(model, valid_loader, device, thresh_list)
+        print_metrics(mAP, epoch, thresh_list)
         print('Saving Model...\n')
         torch.save(model.state_dict(), savepath)
         print('Model Saved.\n')
64 changes: 55 additions & 9 deletions notebooks/object_detector_01.ipynb
@@ -2,7 +2,7 @@
"cells": [
{
"cell_type": "code",
"execution_count": 25,
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
@@ -1878,7 +1878,7 @@
},
{
"cell_type": "code",
"execution_count": 70,
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
@@ -2328,7 +2328,7 @@
},
{
"cell_type": "code",
"execution_count": 167,
"execution_count": 20,
"metadata": {},
"outputs": [],
"source": [
@@ -2407,13 +2407,17 @@
" return mappings\n",
"\n",
"\n",
"def calculate_map(gt_boxes,pr_boxes,scores,thresh=0.5,form='pascal_voc'):\n",
"def calculate_map(gt_boxes,\n",
" pr_boxes,scores,\n",
" thresh=0.5,\n",
" form='pascal_voc'):\n",
" # sorting\n",
" pr_boxes = pr_boxes[scores.argsort().flip(-1)]\n",
" iou_mat = calculate_iou(gt_boxes,pr_boxes,form) \n",
" \n",
" # thresholding\n",
" iou_mat = iou_mat.where(iou_mat>thresh,tensor(0.))\n",
" thresh_mask = iou_mat >= thresh\n",
" iou_mat = iou_mat.where(thresh_mask,tensor(0.))\n",
" \n",
" mappings = get_mappings(iou_mat)\n",
" \n",
Expand All @@ -2428,22 +2432,64 @@
},
{
"cell_type": "code",
"execution_count": 169,
"execution_count": 21,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"tensor(0.)"
"tensor(0.6552)"
]
},
"execution_count": 169,
"execution_count": 21,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"calculate_map(target['boxes'],outputs[0]['boxes'],outputs[0]['scores'],form='pascal_voc')"
"calculate_map(targs, preds, scores,form='pascal_voc')"
]
},
+{
+"cell_type": "code",
+"execution_count": 29,
+"metadata": {},
+"outputs": [
+{
+"name": "stdout",
+"output_type": "stream",
+"text": [
+"0.5\n",
+"0.55\n",
+"0.6\n",
+"0.65\n",
+"0.7\n",
+"0.75\n"
+]
+}
+],
+"source": [
+"thresholds = np.arange(0.5, 0.76, 0.05).round(8)\n",
+"for threshold in thresholds:\n",
+" print(threshold)"
+]
+},
+{
+"cell_type": "code",
+"execution_count": 41,
+"metadata": {},
+"outputs": [
+{
+"name": "stdout",
+"output_type": "stream",
+"text": [
+"deez: [1, 10] cuz\n"
+]
+}
+],
+"source": [
+"dez = {'a':1, 'b':10}\n",
+"print('deez: %s cuz' % list(dez.values()))"
+]
+},
{
5 changes: 5 additions & 0 deletions notes/dev_log.txt
@@ -1,3 +1,8 @@
+2020/12/10
+TODO:
+    - Verify mAP calculation in `vessel_detector.py` is correct.
+
+
2020/19/9
TODO:
- Figure out how to load model from state_dict
70 changes: 70 additions & 0 deletions notes/object_detection_dev.txt
@@ -136,6 +136,76 @@ for i in range(10):
cv2.rectangle(img_1, (prop.bbox[1], prop.bbox[0]), (prop.bbox[3], prop.bbox[2]), (255, 0, 0), 2)
```


```
# Code for loading full model from state dict:
import torch
import torch.nn as nn
import torchvision
from torchvision.models.detection.faster_rcnn import FasterRCNN, FastRCNNPredictor
from torchvision.models.detection.rpn import AnchorGenerator


def make_model(state_dict_path, backbone_state_dict_path, num_trainable_backbone_layers):
    num_classes = 2
    anchor_sizes = (8, 16, 32, 64, 128)
    box_detections_per_img = 256

    # Adapted from https://discuss.pytorch.org/t/faster-rcnn-with-inceptionv3-backbone-very-slow/91455
    def _make_model(backbone_state_dict,
                    num_classes,
                    anchor_sizes: tuple,
                    box_detections_per_img: int,
                    num_trainable_backbone_layers: int):
        inception = torchvision.models.inception_v3(pretrained=False, progress=False,
                                                    num_classes=num_classes, aux_logits=False)
        inception.load_state_dict(torch.load(backbone_state_dict))
        modules = list(inception.children())[:-1]  # drop the final fc layer
        backbone = nn.Sequential(*modules)

        #for layer in backbone:
        #    for p in layer.parameters():
        #        p.requires_grad = False # Freezes the backbone layers

        # Freeze everything except the last `num_trainable_backbone_layers`
        # layers, counting back from the end (offset by 3).
        num_layers = len(backbone)
        trainable_layers = [num_layers - (3 + i) for i in range(num_trainable_backbone_layers)]
        print('Trainable layers: \n')
        for layer_idx, layer in enumerate(backbone):
            if layer_idx not in trainable_layers:
                for p in layer.parameters():
                    p.requires_grad = False # Freezes the backbone layers
            else:
                print(layer, '\n\n')
                print('=================================\n\n')

        backbone.out_channels = 2048

        # Use smaller anchor boxes since targets are relatively small
        anchor_generator = AnchorGenerator(sizes=(anchor_sizes,),
                                           aspect_ratios=((0.5, 1.0, 2.0),))

        model = FasterRCNN(backbone,
                           min_size=299, # Backbone expects 299x299 inputs
                           max_size=299, # so you don't need to rescale
                           rpn_anchor_generator=anchor_generator,
                           box_predictor=FastRCNNPredictor(1024, num_classes),
                           box_detections_per_img=box_detections_per_img
                           )

        return model

    model = _make_model(backbone_state_dict_path,
                        num_classes,
                        anchor_sizes,
                        box_detections_per_img,
                        num_trainable_backbone_layers
                        )
    model_dict = model.state_dict()
    state_dict = torch.load(state_dict_path)
    new_state_dict = {}
    # Remap keys by position: this assumes the saved state dict and the freshly
    # built model enumerate parameters in the same order and differ only in key names.
    for sd_key, model_key in list(zip(state_dict.keys(), model_dict.keys())):
        new_state_dict[model_key] = state_dict[sd_key]
    assert len(new_state_dict) == len(model_dict)
    for nsd_key, model_key in list(zip(new_state_dict.keys(), model_dict.keys())):
        assert nsd_key == model_key
    model.load_state_dict(new_state_dict)
    return model
```
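
A minimal usage sketch (paths are hypothetical placeholders, not from the repo):

```
# Hypothetical paths, for illustration only:
model = make_model(
    state_dict_path='vessel_detector_state_dict.pt',
    backbone_state_dict_path='inception_v3_backbone.pt',
    num_trainable_backbone_layers=3,
)
model.eval()  # ready for inference
```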


=======================TODO=======================
TRAINER:
***Change training sample distributions to favor positive samples
