Skip to content

Commit

Permalink
Reformat repo
Browse files Browse the repository at this point in the history
  • Loading branch information
rcorrero committed Dec 7, 2020
1 parent d722d8e commit bac804d
Show file tree
Hide file tree
Showing 22 changed files with 225 additions and 22 deletions.
3 changes: 1 addition & 2 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -6,10 +6,9 @@

# Irrelevant background files #
###############################
/dev/
/documents/
/notes/
/papers/
/private/
*.egg-info/


Expand Down
Binary file added dev/imgs/0002756f7.jpg
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Binary file added dev/imgs/000592296.jpg
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Binary file added dev/imgs/0005d01c8.jpg
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Binary file added dev/imgs/0006c52e8.jpg
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Binary file added dev/imgs/000d26c17.jpg
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Binary file added dev/imgs/000d42241.jpg
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Binary file added dev/imgs/000f7e728.jpg
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Binary file added dev/imgs/0014b1235.jpg
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Binary file added dev/imgs/0017c19d6.jpg
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
13 changes: 13 additions & 0 deletions dev/train_ship_segmentations_v2.csv
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
,ImageId,EncodedPixels
18,0002756f7.jpg,255784 2 256552 4 257319 7 258087 9 258854 12 259622 14 260389 16 261159 16 261929 16 262699 16 263469 16 264239 16 265009 14 265779 11 266549 9 267319 6 268089 4 268859 1
19,0002756f7.jpg,248878 1 249645 4 250413 6 251180 9 251948 10 252715 13 253482 16 254250 18 255019 18 255789 18 256559 18 257329 18 258099 17 258869 15 259639 12 260409 10 261179 7 261949 5 262719 2 263488 1 264255 1
28,000592296.jpg,
29,0005d01c8.jpg,56010 1 56777 3 57544 6 58312 7 59079 9 59846 11 60613 14 61380 16 62148 17 62915 19 63682 22 64449 24 65216 26 65984 27 66751 30 67518 32 68285 34 69052 36 69820 38 70587 40 71354 42 72121 42 72888 42 73656 42 74423 42 75190 42 75957 42 76724 42 77492 42 78259 42 79026 42 79793 42 80560 42 81328 42 82095 42 82862 42 83629 42 84396 42 85164 42 85931 42 86698 42 87465 42 88232 42 89000 42 89767 42 90534 42 91301 42 92068 42 92836 42 93603 42 94370 42 95137 42 95904 42 96672 42 97439 42 98206 42 98973 42 99740 42 100508 42 101275 42 102042 42 102809 42 103576 42 104344 42 105111 42 105878 42 106645 42 107412 42 108180 42 108947 42 109714 42 110481 42 111249 41 112016 42 112783 42 113550 42 114317 42 115085 41 115852 42 116619 42 117386 42 118153 42 118921 41 119688 42 120455 42 121222 42 121989 42 122757 41 123524 42 124291 42 125058 42 125825 42 126593 42 127360 42 128127 42 128894 42 129661 42 130429 42 131196 42 131963 42 132730 42 133497 42 134265 42 135032 42 135799 42 136566 42 137333 42 138101 42 138868 42 139635 42 140402 42 141169 42 141937 42 142704 42 143471 42 144238 42 145005 42 145773 42 146540 42 147307 42 148074 42 148841 42 149609 42 150376 42 151143 42 151910 42 152677 42 153445 42 154212 42 154979 42 155746 42 156513 42 157281 42 158048 42 158815 42 159582 42 160349 42 161117 42 161884 42 162651 42 163418 42 164185 42 164953 42 165720 42 166487 42 167256 40 168025 38 168795 36 169564 34 170333 32 171102 30 171872 27 172641 26 173410 24 174179 22 174949 19 175718 17 176487 16 177256 14 178026 11 178795 9 179564 7 180333 6 181103 3 181872 1
30,0005d01c8.jpg,365871 1 366638 3 367405 6 368173 7 368940 9 369707 12 370474 14 371242 15 372009 17 372776 20 373543 22 374310 24 375078 26 375845 28 376612 30 377379 30 378147 29 378914 30 379681 30 380448 30 381215 30 381983 30 382750 30 383517 30 384284 30 385051 31 385819 30 386586 30 387353 30 388120 31 388888 30 389655 30 390422 30 391189 31 391956 31 392724 30 393491 30 394258 31 395025 31 395793 30 396560 30 397327 31 398094 31 398861 31 399629 30 400396 31 401163 31 401930 31 402698 30 403465 31 404232 31 404999 31 405766 31 406534 31 407301 31 408068 31 408835 31 409602 32 410370 31 411137 31 411904 31 412671 32 413439 31 414206 31 414973 31 415742 30 416511 28 417281 25 418050 23 418819 22 419588 20 420358 17 421127 15 421896 14 422666 11 423435 9 424204 7 424973 6 425743 3 426512 1
32,0006c52e8.jpg,146366 1 147132 4 147899 5 148666 7 149432 10 150199 12 150966 13 151732 16 152499 18 153265 20 154032 22 154799 24 155565 27 156332 28 157099 30 157865 33 158632 35 159398 37 160165 39 160932 41 161698 43 162465 45 163231 48 163998 50 164765 51 165531 54 166298 56 167065 57 167831 60 168598 62 169364 65 170131 66 170898 68 171664 71 172431 73 173198 74 173964 77 174731 77 175497 77 176264 77 177031 77 177797 77 178564 77 179331 77 180097 77 180864 77 181630 77 182397 77 183164 77 183930 77 184697 77 185464 77 186230 77 186997 77 187763 77 188530 77 189297 77 190063 77 190830 77 191597 76 192363 77 193130 77 193896 77 194663 77 195430 77 196196 77 196963 77 197729 77 198496 77 199263 77 200029 77 200796 77 201563 77 202329 77 203096 77 203862 77 204629 77 205396 77 206162 77 206929 77 207696 77 208462 77 209229 77 209995 77 210762 77 211529 77 212295 77 213062 77 213829 77 214595 77 215362 77 216128 77 216895 77 217662 77 218428 77 219195 77 219962 77 220728 77 221495 77 222261 77 223028 77 223795 77 224561 77 225328 77 226095 76 226861 77 227628 77 228394 77 229161 77 229928 77 230694 77 231461 77 232227 77 232994 77 233761 77 234527 77 235294 77 236061 77 236827 77 237594 77 238360 77 239127 77 239894 77 240660 77 241427 77 242194 77 242960 77 243727 77 244493 77 245260 77 246027 77 246793 77 247560 77 248327 77 249093 77 249860 77 250626 77 251393 77 252160 77 252926 77 253693 77 254460 77 255226 77 255993 77 256759 77 257526 77 258293 77 259059 77 259826 77 260593 76 261359 77 262126 77 262892 77 263659 77 264426 77 265192 77 265959 77 266725 77 267492 77 268259 77 269025 77 269792 77 270559 77 271325 77 272092 77 272858 77 273625 77 274392 77 275158 77 275925 77 276692 77 277458 77 278225 77 278991 77 279758 77 280525 77 281291 77 282058 77 282825 77 283591 77 284358 77 285127 74 285895 73 286664 71 287433 68 288202 66 288970 65 289739 62 290508 60 291276 58 292045 56 292814 54 293583 51 294351 50 295120 47 295889 45 296658 43 297426 41 298195 
39 298964 37 299732 35 300501 33 301270 30 302039 28 302807 27 303576 24 304345 22 305113 21 305882 18 306651 16 307420 13 308188 12 308957 10 309726 7 310495 5 311263 4 312032 1
47,000d26c17.jpg,
48,000d42241.jpg,369226 3 369992 5 370760 5 371528 5 372296 5 373065 4 373833 4 374601 2
54,000f7e728.jpg,
78,0014b1235.jpg,
89,0017c19d6.jpg,329228 1 329995 3 330762 4 331529 6 332296 8 333064 7 333831 7 334598 7 335365 7 336132 8 336899 8 337668 6 338438 3 339207 1
90,0017c19d6.jpg,405963 1 406730 3 407497 5 408264 7 409031 10 409798 12 410565 14 411332 16 412099 18 412866 20 413635 18 414404 16 415173 14 415942 12 416711 11 417480 9 418249 7 419018 5 419787 3 420556 1
Original file line number Diff line number Diff line change
Expand Up @@ -106,7 +106,7 @@ def __getitem__(self, idx):
else:
return img, img_file_name

ship_dir = '../data/airbus-ship-detection/'
ship_dir = '../../../data/airbus-ship-detection/'
train_image_dir = os.path.join(ship_dir, 'train_v2/')
valid_image_dir = os.path.join(ship_dir, 'train_v2/')
masks = pd.read_csv(os.path.join(ship_dir,'train_ship_segmentations_v2.csv'))
Expand All @@ -130,7 +130,7 @@ def __getitem__(self, idx):


def test_load_state_dict(self):
state_dict = r'../data/vessel_classifier_state_dict-01.pth'
state_dict = r'../../../data/vessel_classifier_state_dict-01.pth'
model = torchvision.models.inception_v3(pretrained=False, progress=True, num_classes=2,
aux_logits=False)
model.load_state_dict(torch.load(state_dict))
Expand All @@ -139,7 +139,7 @@ def test_load_state_dict(self):


def test_vessel_dataset(self):
ship_dir = '../data/airbus-ship-detection/'
ship_dir = '../../../data/airbus-ship-detection/'
train_image_dir = os.path.join(ship_dir, 'train_v2/')
masks = pd.read_csv(os.path.join(ship_dir,
'train_ship_segmentations_v2.csv'))
Expand All @@ -163,7 +163,7 @@ def test_vessel_dataset(self):
def test_validation(self):
criterion = nn.CrossEntropyLoss()

ship_dir = '../data/airbus-ship-detection/'
ship_dir = '../../../data/airbus-ship-detection/'
valid_image_dir = os.path.join(ship_dir, 'train_v2/')
masks = pd.read_csv(os.path.join(ship_dir,
'train_ship_segmentations_v2.csv'))
Expand Down Expand Up @@ -200,13 +200,13 @@ def test_io(self):
model = torchvision.models.inception_v3(pretrained=False, progress=True, num_classes=2,
aux_logits=False)
model = model.to('cuda')
savepath = '../data/test_vessel_classifier_state_dict.pth'
savepath = '../../../data/test_vessel_classifier_state_dict.pth'
torch.save(model.state_dict(), savepath)


def test_full_training_loop(self):
ImageFile.LOAD_TRUNCATED_IMAGES = True
state_dict = r'../data/vessel_classifier_state_dict-01.pth'
state_dict = r'../../../data/vessel_classifier_state_dict-01.pth'
model = torchvision.models.inception_v3(pretrained=False, progress=True, num_classes=2,
aux_logits=False)
model.load_state_dict(torch.load(state_dict))
Expand All @@ -218,7 +218,7 @@ def test_full_training_loop(self):
lr = 1e-4
optimizer = optim.Adam(model.parameters(), lr=lr)

ship_dir = '../data/airbus-ship-detection/'
ship_dir = '../../../data/airbus-ship-detection/'
train_image_dir = os.path.join(ship_dir, 'train_v2/')
valid_image_dir = os.path.join(ship_dir, 'train_v2/')
masks = pd.read_csv(os.path.join(ship_dir,
Expand Down Expand Up @@ -300,11 +300,11 @@ def test_full_training_loop(self):
print('[Epoch %d] Validation Accuracy: %.3f | Validation Loss: %.3f\n' %
((epoch + 1), metrics['valid_acc'], metrics['valid_loss']))
print('Saving Model...\n')
savepath = '../data/test_vessel_classifier_state_dict.pth'
savepath = '../../../data/test_vessel_classifier_state_dict.pth'
torch.save(model.state_dict(), savepath)
print('Finished Training.')
print('Saving Model...\n')
savepath = '../data/test_vessel_classifier_state_dict.pth'
savepath = '../../../data/test_vessel_classifier_state_dict.pth'
torch.save(model.state_dict(), savepath)
print('Done.')

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -141,7 +141,7 @@ def main(savepath, load_state_dict=False, state_dict=None):
weight_decay = 1e-7 # Default should be 1e-5
optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=weight_decay)

ship_dir = '../data/airbus-ship-detection/'
ship_dir = '../../../data/airbus-ship-detection/'
train_image_dir = os.path.join(ship_dir, 'train_v2/')
valid_image_dir = os.path.join(ship_dir, 'train_v2/')
masks = pd.read_csv(os.path.join(ship_dir,
Expand Down Expand Up @@ -240,6 +240,6 @@ def main(savepath, load_state_dict=False, state_dict=None):
# - Decrease weight decay from 1e-5 to 1e-7
# - Use state_dict from previous run
load_state_dict = True
loadpath = r'../data/vessel_classifier_state_dict.pth'
loadpath = r'../../../data/vessel_classifier_state_dict.pth'
savepath = r'vessel_classifier_state_dict.pth'
main(savepath, load_state_dict, loadpath)
Original file line number Diff line number Diff line change
Expand Up @@ -157,7 +157,7 @@ def test(model, criterion, test_loader):

def make_test_loader():
seed = 0
ship_dir = '../data/test/'
ship_dir = '../../../data/test/'
test_image_dir = os.path.join(ship_dir, 'imgs/')
labels = pd.read_csv(os.path.join(ship_dir, 'labels.csv'))
print("Test Size: %d" % len(labels['sample_id'].tolist()))
Expand All @@ -181,7 +181,7 @@ def make_test_loader():

if __name__ == '__main__':
ImageFile.LOAD_TRUNCATED_IMAGES = True
state_dict = r'../data/vessel_classifier_state_dict.pth'
state_dict = r'../../../data/vessel_classifier_state_dict.pth'
model = torchvision.models.inception_v3(pretrained=False, progress=True, num_classes=2,
aux_logits=False)
model.load_state_dict(torch.load(state_dict))
Expand Down
171 changes: 171 additions & 0 deletions models/vessel_detector/test_vessel_detector.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,171 @@
import unittest

from vessel_detector import *

import os
import time
import torch
import math
import numpy as np
import pandas as pd
import torch.nn as nn
import torch.optim as optim
import torchvision

from torch import tensor
from torch.autograd import Variable
from torch.utils.data import DataLoader
from torch.utils.data import Dataset
from torchvision import transforms
from torchvision.models.detection.rpn import AnchorGenerator
from torchvision.models.detection.faster_rcnn import FasterRCNN, FastRCNNPredictor
from torchvision.ops.boxes import box_iou
from torchvision.transforms import ToTensor, Compose, RandomHorizontalFlip,\
RandomVerticalFlip, Normalize
from sklearn.model_selection import train_test_split
from PIL import Image, ImageFile, ImageFilter

import pathlib
from typing import Callable, Iterator, Union, Optional, List, Tuple, Dict
from torchvision.transforms.functional import resize


class TestVesselDetector(unittest.TestCase):
    """Smoke tests for the vessel_detector pipeline.

    Both tests drive the same helper, `main`, which builds the Faster R-CNN
    detector, the train/valid data loaders and the SGD optimizer, then runs a
    single epoch of either training or evaluation.
    """

    def test_training(self):
        """Run one epoch through the training path."""
        self.main(train = True)


    def test_evaluation(self):
        """Run one epoch through the evaluation path."""
        self.main(train = False)


    def main(self, train: bool):
        """Build model, data and optimizer, then train or evaluate once.

        Args:
            train: when True run `train_one_epoch`, otherwise run `evaluate`.
        """
        backbone_state_dict = r'../../../data/vessel_classifier_state_dict.pth'
        # Single configuration dict so every training assumption is visible
        # in one place.
        cfg = {
            # optimizer params from: https://arxiv.org/pdf/1506.01497.pdf
            'seed': 0,
            'num_classes': 2,
            'lr': 0.001,
            'momentum': 0.9,
            'weight_decay': 0.0005,
            # All samples have at least one ground truth bbox
            'no_null_samples': True,
            'test_size': 1,
            'shuffle': True,
            'batch_size': 2,
            'num_epochs': 1,
            'print_every': 1,
            # Increase number of detections since there may be many vessels in an image
            'box_detections_per_img': 256,
            # Use small anchor boxes since targets are small
            'anchor_sizes': (8, 16, 32, 64, 128),
            # IoU thresholds for mAP calculation
            'thresh_list': [0.5, 0.75, 1.0]
        }

        # Seed both frameworks for deterministic runs.
        torch.manual_seed(cfg['seed'])
        np.random.seed(cfg['seed'])
        ImageFile.LOAD_TRUNCATED_IMAGES = True # Necessary for PIL to work correctly

        # NOTE: InceptionV3 backbone requires input samples of size 299x299x3
        model = make_model(backbone_state_dict,
                           num_classes=cfg['num_classes'],
                           anchor_sizes=cfg['anchor_sizes'],
                           box_detections_per_img=cfg['box_detections_per_img']
                           )

        device = torch.device('cuda')
        model = model.to(device)

        # Params from: https://arxiv.org/pdf/1506.01497.pdf
        optimizer = optim.SGD(model.parameters(),
                              lr=cfg['lr'],
                              momentum=cfg['momentum'],
                              weight_decay=cfg['weight_decay'])

        ship_dir = '../../../data/dev/'
        train_image_dir = os.path.join(ship_dir, 'imgs/')
        valid_image_dir = os.path.join(ship_dir, 'imgs/')
        masks = get_masks(ship_dir, train_image_dir, valid_image_dir)

        image_names, filtered_masks = filter_masks(
            masks, no_null_samples=cfg['no_null_samples'])

        train_ids, train_masks, valid_ids, valid_masks = get_train_valid_dfs(
            filtered_masks, cfg['seed'], test_size=cfg['test_size']
        )

        train_dataset = VesselDataset(train_masks,
                                      train_ids,
                                      image_names,
                                      train_image_dir=train_image_dir,
                                      mode='train')
        valid_dataset = VesselDataset(valid_masks,
                                      valid_ids,
                                      image_names,
                                      valid_image_dir=valid_image_dir,
                                      mode='valid')

        def collate(batch):
            # Detection targets vary per image, so keep batches as tuples
            # instead of stacking into tensors.
            return tuple(zip(*batch))

        # Both loaders share every option except the dataset.
        loader_kwargs = dict(shuffle=cfg['shuffle'],
                             batch_size=cfg['batch_size'],
                             collate_fn=collate,
                             pin_memory=torch.cuda.is_available())
        train_loader = DataLoader(dataset=train_dataset, **loader_kwargs)
        valid_loader = DataLoader(dataset=valid_dataset, **loader_kwargs)

        for epoch in range(cfg['num_epochs']):
            if train:
                train_one_epoch(model,
                                optimizer,
                                train_loader,
                                device,
                                epoch,
                                lr_scheduler = None,
                                batch_size=cfg['batch_size'],
                                print_every=cfg['print_every'],
                                num_epochs = cfg['num_epochs']
                                )
            else:
                evaluate(model, valid_loader, device, cfg['thresh_list'])




# Run the full test suite when this file is executed as a script.
if __name__ == '__main__':
    unittest.main()
Loading

0 comments on commit bac804d

Please sign in to comment.