98 changes: 98 additions & 0 deletions data.py
@@ -0,0 +1,98 @@
import os
import warnings

import numpy as np


def load_one_label_seq(path):
    # Load a single label sequence stored as a .npy file.
    return np.load(path)


def load_label_seqs(path, mode, index):
    # index is generated by gen_index, e.g. [('lab', 1), ('lab', 2), ('lab', 3), ...]
    labels = []
    for loc, idx in index:  # loc is a location name ('lab', 'office', 'house'), idx its sequence number
        labelnpy = os.path.join(path, loc, mode + "_left" + str(idx) + '.npy')
        labels.append(load_one_label_seq(labelnpy))
    for loc, idx in index:
        labelnpy = os.path.join(path, loc, mode + "_right" + str(idx) + '.npy')
        labels.append(load_one_label_seq(labelnpy))
    return labels  # all left sequences first, then all right sequences

def gen_index(setting_index):
    train_index = []
    test_index = []
    if setting_index == 0:
        # Split within every location:
        #   train: house1~3, lab1~4, office1~3
        #   test : house4~6, lab5~8, office4~6
        for i in range(1, 7):
            if i <= 3:
                train_index.append(('house', i))
            else:
                test_index.append(('house', i))
        for i in range(1, 9):
            if i <= 4:
                train_index.append(('lab', i))
            else:
                test_index.append(('lab', i))
        for i in range(1, 7):
            if i <= 3:
                train_index.append(('office', i))
            else:
                test_index.append(('office', i))

    elif setting_index == 1:
        # Cross-location split: train on lab and office, test on house.
        for i in range(1, 9):
            train_index.append(('lab', i))
        for i in range(1, 7):
            train_index.append(('office', i))
        for i in range(1, 7):
            test_index.append(('house', i))
    else:
        raise ValueError('invalid setting_index: expected 0 or 1')

    return train_index, test_index



def gen_index_process(index=None, setting_index=None):
    # Only meant to be called when no explicit index is given; builds one from setting_index.
    if index is None:
        if setting_index is None:
            raise ValueError('setting_index cannot be None when no index is given')
        train_index, test_index = gen_index(setting_index)
        return train_index, test_index


def load_train_labels(path, mode, index=None, setting_index=None):
    if index is None:
        index, _ = gen_index_process(index, setting_index)
    elif setting_index is not None:
        warnings.warn('setting_index has no effect when an explicit index is given')
    labels = load_label_seqs(path, mode, index)
    return labels


def load_test_labels(path, mode, index=None, setting_index=None):
    if index is None:
        _, index = gen_index_process(index, setting_index)
    elif setting_index is not None:
        warnings.warn('setting_index has no effect when an explicit index is given')
    labels = load_label_seqs(path, mode, index)
    return labels


def load_all_labels(path, mode, setting_index):
    train_index, test_index = gen_index(setting_index)
    train_labels = load_train_labels(path, mode, train_index)
    test_labels = load_test_labels(path, mode, test_index)
    return train_labels, test_labels
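A hypothetical usage sketch of the loaders added above; `labels_root` and the mode string `'obj'` are placeholders, and the label files are assumed to live at `<path>/<loc>/<mode>_left<idx>.npy` and `<path>/<loc>/<mode>_right<idx>.npy` as the code expects.

```
from data import gen_index, load_all_labels

# Inspect the split produced by setting 0 (per-location train/test split).
train_index, test_index = gen_index(setting_index=0)
print(train_index[:3])  # [('house', 1), ('house', 2), ('house', 3)]

# Load every label sequence for both splits; 'labels_root' and 'obj' are placeholders.
train_labels, test_labels = load_all_labels('labels_root', mode='obj', setting_index=0)
print(len(train_labels), len(test_labels))  # left sequences followed by right sequences
```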
64 changes: 30 additions & 34 deletions results/index.md
@@ -1,47 +1,43 @@
# Your Name <span style="color:red">104061213 林倢愷</span>

# Project 1: Deep Classification

## Overview
This project fine-tunes a pretrained VGG16 network for the deep classification task.

References:
* https://gist.github.com/omoindrot/dedc857cdc0e680dfb1be99762990c9c
* https://github.com/bgshih/vgg16.tf

## Implementation
1. Load the data.
2. Load the pretrained VGG16:
   * restore the pretrained VGG16 weights
   * drop the original FC8 layer and add a new one sized for our own classification task
3. Fine-tune from the pretrained VGG16 weights (a minimal sketch is given below).

VGG16 architecture:
![](http://book.paddlepaddle.org/03.image_classification/image/vgg16.png)

Original VGG paper: https://arxiv.org/pdf/1409.1556.pdf
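Below is a minimal sketch of the fine-tuning setup, in the spirit of the omoindrot gist referenced above, assuming TensorFlow 1.x with `tf.contrib.slim`; the checkpoint path `vgg_16.ckpt`, the number of classes, and the choice of layers to unfreeze are placeholders, not values taken from this repository.

```
import tensorflow as tf
from tensorflow.contrib.slim.nets import vgg

num_classes = 24  # placeholder: number of classes in our task
images = tf.placeholder(tf.float32, [None, 224, 224, 3])
labels = tf.placeholder(tf.int64, [None])

# Build VGG16 with FC8 sized for our task instead of ImageNet's 1000 classes.
logits, _ = vgg.vgg_16(images, num_classes=num_classes, is_training=True)

# Restore every pretrained variable except the new FC8 layer.
variables_to_restore = tf.contrib.framework.get_variables_to_restore(exclude=['vgg_16/fc8'])
init_fn = tf.contrib.framework.assign_from_checkpoint_fn('vgg_16.ckpt', variables_to_restore)

# Fine-tune only the chosen layers (here FC8 plus the conv5 block).
loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
train_vars = (tf.contrib.framework.get_variables('vgg_16/fc8')
              + tf.contrib.framework.get_variables('vgg_16/conv5'))
train_op = tf.train.AdamOptimizer(1e-4).minimize(loss, var_list=train_vars)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    init_fn(sess)  # load the pretrained VGG16 weights
    # ... run train_op on batches of (images, labels) ...
```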

## Installation
Required packages: TensorFlow, NumPy, SciPy, etc.

### Results
Test accuracy: 0.48 (I will train a pretrained ResNet50 to try to surpass 0.6).

### Discussion
I originally expected that simply using a pretrained model would give reasonably good results, but the final result was poor, so let me analyze why. For most pure classification tasks, a pretrained model plus fine-tuning reaches decent accuracy; I see two likely reasons why mine did not.
1. The dataset differs too much from ImageNet. An analysis I read noted that models trained on ImageNet have clear weaknesses on small, thin, and semi-transparent objects, and most objects in the instructor's dataset are very small, so the model fails to recognize them, which leads to poor results.
2. I did not fine-tune deep enough: I froze every layer except FC8 and the last two conv layers. Given point 1, I should fine-tune further back, perhaps half of the network, to get a better result. The reason I did not fine-tune deeper originally was that the dataset is not very large, so I chose to fine-tune only the last few layers (a sketch of unfreezing more layers follows below).
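As a follow-up to point 2, here is a hypothetical sketch of unfreezing roughly the deeper half of the network (conv4, conv5, and the fully-connected layers) instead of only FC8 and the last conv block; it reuses `tf` and `loss` from the fine-tuning sketch above, and the scope names follow the slim `vgg_16` variable naming.

```
# Widen the set of trainable variables: conv4, conv5, and all fc layers.
scopes_to_train = ['vgg_16/conv4', 'vgg_16/conv5',
                   'vgg_16/fc6', 'vgg_16/fc7', 'vgg_16/fc8']
train_vars = []
for scope in scopes_to_train:
    train_vars += tf.contrib.framework.get_variables(scope)

# A smaller learning rate is usually safer when more pretrained layers are unfrozen.
train_op = tf.train.AdamOptimizer(1e-5).minimize(loss, var_list=train_vars)
```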



