diff --git a/data.py b/data.py
new file mode 100644
index 0000000..8011419
--- /dev/null
+++ b/data.py
@@ -0,0 +1,98 @@
+import os
+import warnings
+
+import numpy as np
+
+def load_one_label_seq(path):
+    # Load one label sequence stored as a .npy file.
+    return np.load(path)
+
+
+def load_label_seqs(path, mode, index):
+    # `index` comes from gen_index and looks like [(loc, 1), (loc, 2), ...],
+    # where loc is a scene such as 'lab', 'office', or 'house'.
+    # Load every left-hand sequence first, then every right-hand one, so the
+    # result is ordered left1~N followed by right1~N.
+    labels = []
+    for loc, idx in index:
+        labelnpy = os.path.join(path, loc, mode + "_left" + str(idx) + '.npy')
+        labels.append(load_one_label_seq(labelnpy))
+    for loc, idx in index:
+        labelnpy = os.path.join(path, loc, mode + "_right" + str(idx) + '.npy')
+        labels.append(load_one_label_seq(labelnpy))
+    return labels
+
+def gen_index(setting_index):
+    train_index = []
+    test_index = []
+    if setting_index == 0:
+        # Per-scene split:
+        # train : lab1~4, office1~3, house1~3
+        # test  : lab5~8, office4~6, house4~6
+ for i in range(1,7):
+ if i <= 3:
+ train_index.append(('house',i))
+ else:
+ test_index.append(('house',i))
+ for i in range(1,9):
+ if i <= 4:
+ train_index.append(('lab',i))
+ else:
+ test_index.append(('lab',i))
+ for i in range(1,7):
+ if i <= 3:
+ train_index.append(('office',i))
+ else:
+ test_index.append(('office',i))
+
+ elif setting_index == 1:
+ for i in range(1,9):
+ train_index.append(('lab',i))
+ for i in range(1,7):
+ train_index.append(('office',i))
+ for i in range(1,7):
+ test_index.append(('house',i))
+ else:
+        raise ValueError('invalid setting_index')
+
+ return train_index, test_index
+
+
+
+def gen_index_process(index=None, setting_index=None):
+    # Derive train/test indices from setting_index when no explicit
+    # index is given.
+    if index is None:
+        if setting_index is None:
+            raise ValueError('setting_index cannot be None when index is None')
+        train_index, test_index = gen_index(setting_index)
+        return train_index, test_index
+
+
+def load_train_labels(path, mode, index=None, setting_index=None):
+    if index is None:
+        index, _ = gen_index_process(index, setting_index)
+    elif setting_index is not None:
+        warnings.warn('setting_index has no effect when an explicit index is given')
+    labels = load_label_seqs(path, mode, index)
+    return labels
+
+def load_test_labels(path, mode, index=None, setting_index=None):
+    if index is None:
+        _, index = gen_index_process(index, setting_index)
+    elif setting_index is not None:
+        warnings.warn('setting_index has no effect when an explicit index is given')
+    labels = load_label_seqs(path, mode, index)
+    return labels
+
+
+def load_all_labels(path, mode, setting_index):
+    train_index, test_index = gen_index(setting_index)
+    train_labels = load_train_labels(path, mode, train_index)
+    test_labels = load_test_labels(path, mode, test_index)
+    return train_labels, test_labels
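+
+
+if __name__ == '__main__':
+    # Minimal usage sketch (illustrative only): build the setting-0 split
+    # and inspect which (scene, video) pairs land in train vs. test.
+    train_index, test_index = gen_index(0)
+    print('train videos:', train_index)
+    print('test videos:', test_index)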
\ No newline at end of file
diff --git a/results/index.md b/results/index.md
index 96ce61c..bb787a5 100644
--- a/results/index.md
+++ b/results/index.md
@@ -1,47 +1,43 @@
-# Your Name (id)
+# 林倢愷 (104061213)
-#Project 5: Deep Classification
+# Project 1: Deep Classification
## Overview
-The project is related to
-> quote
-
+This project finetunes a pretrained VGG16 for the classification task.
+
+References:
+* https://gist.github.com/omoindrot/dedc857cdc0e680dfb1be99762990c9c
+* https://github.com/bgshih/vgg16.tf
## Implementation
-1. One
- * item
- * item
+1. Load the data: frame filenames and the corresponding label sequences.
-2. Two
-
-```
-Code highlights
-```
+2. Load the pretrained VGG16, restore its ImageNet weights, and replace
+   the FC8 layer with a new one for our own classification task.
+3. Finetune from the pretrained VGG16 (see the sketch below).
+
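+A minimal sketch of the two-phase finetuning (condensed from train.py;
+`images`, `labels`, `num_classes`, and `is_training` come from the input
+pipeline defined there):
+
+```
+import tensorflow as tf
+import tensorflow.contrib.slim as slim
+import tensorflow.contrib.slim.nets
+
+vgg = tf.contrib.slim.nets.vgg
+with slim.arg_scope(vgg.vgg_arg_scope(weight_decay=5e-4)):
+    # num_classes replaces the 1000-way ImageNet FC8 with our own layer
+    logits, _ = vgg.vgg_16(images, num_classes=num_classes,
+                           is_training=is_training)
+
+# Restore every pretrained layer except the new FC8
+variables_to_restore = tf.contrib.framework.get_variables_to_restore(
+    exclude=['vgg_16/fc8'])
+init_fn = tf.contrib.framework.assign_from_checkpoint_fn(
+    'vgg_16.ckpt', variables_to_restore)
+fc8_variables = tf.contrib.framework.get_variables('vgg_16/fc8')
+
+loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
+# Phase 1: train only the new FC8; phase 2: finetune the whole network
+fc8_train_op = tf.train.GradientDescentOptimizer(1e-3).minimize(
+    loss, var_list=fc8_variables)
+full_train_op = tf.train.GradientDescentOptimizer(1e-5).minimize(loss)
+```
+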
+VGG16 architecture: see the original VGG paper, https://arxiv.org/pdf/1409.1556.pdf
## Installation
-* Other required packages.
-* How to compile from source?
+TensorFlow, NumPy, SciPy, etc.
### Results
+Test accuracy: 0.48 (I will finetune a pretrained ResNet-50 to surpass 0.6; a sketch of the planned swap follows).
+
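+The sketch below assumes the `resnet_v1` model from
+`tf.contrib.slim.nets` and the public `resnet_v1_50.ckpt` checkpoint;
+the exact arg-scope arguments may differ across TF versions:
+
+```
+import tensorflow as tf
+import tensorflow.contrib.slim as slim
+from tensorflow.contrib.slim.nets import resnet_v1
+
+with slim.arg_scope(resnet_v1.resnet_arg_scope()):
+    # `images`, `num_classes`, `is_training` as in train.py
+    logits, _ = resnet_v1.resnet_v1_50(images, num_classes=num_classes,
+                                       is_training=is_training)
+logits = tf.squeeze(logits, [1, 2])  # [N, 1, 1, C] -> [N, C]
+
+# Restore everything except the new logits layer
+variables_to_restore = tf.contrib.framework.get_variables_to_restore(
+    exclude=['resnet_v1_50/logits'])
+init_fn = tf.contrib.framework.assign_from_checkpoint_fn(
+    'resnet_v1_50.ckpt', variables_to_restore)
+```
+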
+### Discussion
+I originally expected that a pretrained model would be enough to reach
+decent results, but the final numbers were poor, so here is my analysis.
+Most pure classification tasks reach reasonably good results with a
+pretrained model plus finetuning; I see two possible reasons why mine
+did not.
+1. The dataset differs too much from ImageNet. An analysis I read noted
+   that models trained on ImageNet are clearly weak on small, thin, and
+   translucent objects, and most objects in this dataset are small, so
+   the model cannot recognize them and the results suffer.
+2. I did not finetune deep enough. I froze every layer except FC8 and
+   the last two conv layers. Given point 1, I should finetune further
+   back, perhaps half of the network, to reach better results. I kept
+   the finetuning shallow because the dataset is not very large, which
+   is why I only finetuned the last few layers (see the sketch below).
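+
+A hedged sketch of a deeper finetuning setup (scope names follow
+TF-Slim's `vgg_16` naming; `loss` as in train.py):
+
+```
+# Hypothetical: also train conv4/conv5 and the FC layers instead of
+# only the last few layers. Everything outside var_list stays frozen
+# at its pretrained values.
+scopes_to_train = ['vgg_16/conv4', 'vgg_16/conv5',
+                   'vgg_16/fc6', 'vgg_16/fc7', 'vgg_16/fc8']
+variables_to_train = []
+for scope in scopes_to_train:
+    variables_to_train += tf.contrib.framework.get_variables(scope)
+
+deep_train_op = tf.train.GradientDescentOptimizer(1e-5).minimize(
+    loss, var_list=variables_to_train)
+```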
+
-
-
-
-
-
-
-
- |
-
-
-
-
-
-
-
-
- |
-
-
-
diff --git a/train.py b/train.py
new file mode 100644
index 0000000..6027beb
--- /dev/null
+++ b/train.py
@@ -0,0 +1,320 @@
+import argparse
+import os
+import numpy as np
+import tensorflow as tf
+import tensorflow.contrib.slim as slim
+import tensorflow.contrib.slim.nets
+from data import load_all_labels  # label-loading helpers defined in data.py
+
+label_path = '../labels'
+
+parser = argparse.ArgumentParser()
+parser.add_argument('--train_dir', default='../frames/train')
+parser.add_argument('--val_dir', default='../frames/test')
+parser.add_argument('--model_path', default='vgg_16.ckpt', type=str)
+parser.add_argument('--batch_size', default=32, type=int)
+parser.add_argument('--num_workers', default=4, type=int)
+parser.add_argument('--num_epochs1', default=15, type=int)
+parser.add_argument('--num_epochs2', default=5, type=int)
+parser.add_argument('--learning_rate1', default=1e-3, type=float)
+parser.add_argument('--learning_rate2', default=1e-5, type=float)
+parser.add_argument('--dropout_keep_prob', default=0.5, type=float)
+parser.add_argument('--weight_decay', default=5e-4, type=float)
+
+VGG_MEAN = [123.68, 116.78, 103.94]
+
+def list_images(directory):
+    # Collect every frame under directory/<scene>/<video>/{Lhand,Rhand}.
+    # Listings are sorted so the filename order is deterministic across
+    # runs (os.listdir returns entries in arbitrary order).
+    filenames = []
+    for scene in sorted(os.listdir(directory)):
+        scene_path = os.path.join(directory, scene)
+        for video in sorted(os.listdir(scene_path)):
+            video_path = os.path.join(scene_path, video)
+            for hand in ('Lhand', 'Rhand'):
+                hand_path = os.path.join(video_path, hand)
+                for f in sorted(os.listdir(hand_path)):
+                    filenames.append(os.path.join(hand_path, f))
+    return filenames
+
+
+def check_accuracy(sess, correct_prediction, is_training, dataset_init_op):
+ """
+ Check the accuracy of the model on either train or val (depending on dataset_init_op).
+ """
+ # Initialize the correct dataset
+ sess.run(dataset_init_op)
+ num_correct, num_samples = 0, 0
+ while True:
+ try:
+ correct_pred = sess.run(correct_prediction, {is_training: False})
+ num_correct += correct_pred.sum()
+ num_samples += correct_pred.shape[0]
+ except tf.errors.OutOfRangeError:
+ break
+
+ # Return the fraction of datapoints that were correctly classified
+ acc = float(num_correct) / num_samples
+ return acc
+
+def list_flatten(label):
+    # Concatenate a list of 1-D label arrays into one flat Python list.
+    label_f = []
+    for arr in label:
+        label_f += arr.tolist()
+    return label_f
+
+def fit_batch(data, label, batch_size):
+    # Drop the tail so the sample count is a multiple of batch_size.
+    rm = len(data) % batch_size
+    if rm != 0:
+        data = data[:-rm]
+        label = label[:-rm]
+    return data, label
+
+def label_to_int(labels):
+    # Map each distinct label to an integer id. Sorting keeps the mapping
+    # deterministic across runs (plain set iteration order is not).
+    unique_labels = sorted(set(labels))
+    mapping = {label: i for i, label in enumerate(unique_labels)}
+    return [mapping[l] for l in labels]
+
+def main(args):
+    # Get the list of filenames and corresponding list of labels for training and validation
+ train_filenames = list_images(args.train_dir)
+ train_labels, val_labels = load_all_labels(label_path, 'obj',0)
+ train_labels = list_flatten(train_labels)
+ train_labels = label_to_int(train_labels)
+ train_filenames,train_labels = fit_batch(train_filenames,train_labels,args.batch_size)
+ num_classes = len(set(train_labels))
+
+
+ # --------------------------------------------------------------------------
+ # In TensorFlow, you first want to define the computation graph with all the
+ # necessary operations: loss, training op, accuracy...
+ # Any tensor created in the `graph.as_default()` scope will be part of `graph`
+ graph = tf.Graph()
+ with graph.as_default():
+ # Standard preprocessing for VGG on ImageNet taken from here:
+ # https://github.com/tensorflow/models/blob/master/slim/preprocessing/vgg_preprocessing.py
+ # Also see the VGG paper for more details: https://arxiv.org/pdf/1409.1556.pdf
+
+ # Preprocessing (for both training and validation):
+        # (1) Decode the image from PNG format
+ # (2) Resize the image so its smaller side is 256 pixels long
+ def _parse_function(filename, label):
+ image_string = tf.read_file(filename)
+ image_decoded = tf.image.decode_png(image_string, channels=3) # (1)
+ image = tf.cast(image_decoded, tf.float32)
+
+ smallest_side = 256.0
+ height, width = tf.shape(image)[0], tf.shape(image)[1]
+ height = tf.to_float(height)
+ width = tf.to_float(width)
+
+ scale = tf.cond(tf.greater(height, width),
+ lambda: smallest_side / width,
+ lambda: smallest_side / height)
+ new_height = tf.to_int32(height * scale)
+ new_width = tf.to_int32(width * scale)
+
+ resized_image = tf.image.resize_images(image, [new_height, new_width]) # (2)
+ return resized_image, label
+
+ # Preprocessing (for training)
+        # (3) Take a random 224x224 crop of the scaled image
+        # (4) Horizontally flip the image with probability 1/2
+        # (5) Subtract the per-color mean `VGG_MEAN`
+ # Note: we don't normalize the data here, as VGG was trained without normalization
+ def training_preprocess(image, label):
+ crop_image = tf.random_crop(image, [224, 224, 3]) # (3)
+ flip_image = tf.image.random_flip_left_right(crop_image) # (4)
+
+ means = tf.reshape(tf.constant(VGG_MEAN), [1, 1, 3])
+ centered_image = flip_image - means # (5)
+
+ return centered_image, label
+
+ # Preprocessing (for validation)
+        # (3) Take a central 224x224 crop of the scaled image
+        # (4) Subtract the per-color mean `VGG_MEAN`
+ # Note: we don't normalize the data here, as VGG was trained without normalization
+ def val_preprocess(image, label):
+ crop_image = tf.image.resize_image_with_crop_or_pad(image, 224, 224) # (3)
+
+ means = tf.reshape(tf.constant(VGG_MEAN), [1, 1, 3])
+ centered_image = crop_image - means # (4)
+
+ return centered_image, label
+
+ # ----------------------------------------------------------------------
+ # DATASET CREATION using tf.contrib.data.Dataset
+ # https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/data
+
+ # The tf.contrib.data.Dataset framework uses queues in the background to feed in
+ # data to the model.
+ # We initialize the dataset with a list of filenames and labels, and then apply
+ # the preprocessing functions described above.
+ # Behind the scenes, queues will load the filenames, preprocess them with multiple
+ # threads and apply the preprocessing in parallel, and then batch the data
+
+ # Training dataset
+ train_filenames = tf.constant(train_filenames)
+ train_labels = tf.constant(train_labels)
+ train_dataset = tf.contrib.data.Dataset.from_tensor_slices((train_filenames, train_labels))
+ train_dataset = train_dataset.map(_parse_function,
+ num_threads=args.num_workers, output_buffer_size=args.batch_size)
+ train_dataset = train_dataset.map(training_preprocess,
+ num_threads=args.num_workers, output_buffer_size=args.batch_size)
+ train_dataset = train_dataset.shuffle(buffer_size=10000) # don't forget to shuffle
+ batched_train_dataset = train_dataset.batch(args.batch_size)
+
+        # Validation dataset (disabled: val_filenames is not wired up yet)
+ """
+ val_filenames = tf.constant(val_filenames)
+ val_labels = tf.constant(val_labels)
+ val_dataset = tf.contrib.data.Dataset.from_tensor_slices((val_filenames, val_labels))
+ val_dataset = val_dataset.map(_parse_function,
+ num_threads=args.num_workers, output_buffer_size=args.batch_size)
+ val_dataset = val_dataset.map(val_preprocess,
+ num_threads=args.num_workers, output_buffer_size=args.batch_size)
+ batched_val_dataset = val_dataset.batch(args.batch_size)
+ """
+
+        # Now we define an iterator that can operate on either dataset.
+        # The iterator can be reinitialized by calling:
+        # - sess.run(train_init_op) for 1 epoch on the training set
+        # - sess.run(val_init_op) for 1 epoch on the validation set
+ # Once this is done, we don't need to feed any value for images and labels
+ # as they are automatically pulled out from the iterator queues.
+
+ # A reinitializable iterator is defined by its structure. We could use the
+ # `output_types` and `output_shapes` properties of either `train_dataset`
+ # or `validation_dataset` here, because they are compatible.
+ iterator = tf.contrib.data.Iterator.from_structure(batched_train_dataset.output_types,
+ batched_train_dataset.output_shapes)
+ images, labels = iterator.get_next()
+
+ train_init_op = iterator.make_initializer(batched_train_dataset)
+ #val_init_op = iterator.make_initializer(batched_val_dataset)
+
+ # Indicates whether we are in training or in test mode
+ is_training = tf.placeholder(tf.bool)
+
+ # ---------------------------------------------------------------------
+ # Now that we have set up the data, it's time to set up the model.
+ # For this example, we'll use VGG-16 pretrained on ImageNet. We will remove the
+        # last fully connected layer (fc8) and replace it with our own, with
+        # an output size of `num_classes` (computed from the training labels).
+ # We will first train the last layer for a few epochs.
+ # Then we will train the entire model on our dataset for a few epochs.
+
+ # Get the pretrained model, specifying the num_classes argument to create a new
+        # fully connected layer replacing the last one, called "vgg_16/fc8"
+ # Each model has a different architecture, so "vgg_16/fc8" will change in another model.
+ # Here, logits gives us directly the predicted scores we wanted from the images.
+ # We pass a scope to initialize "vgg_16/fc8" weights with he_initializer
+ vgg = tf.contrib.slim.nets.vgg
+ with slim.arg_scope(vgg.vgg_arg_scope(weight_decay=args.weight_decay)):
+ logits, _ = vgg.vgg_16(images, num_classes=num_classes, is_training=is_training,
+ dropout_keep_prob=args.dropout_keep_prob)
+
+ # Specify where the model checkpoint is (pretrained weights).
+ model_path = args.model_path
+ assert(os.path.isfile(model_path))
+
+ # Restore only the layers up to fc7 (included)
+ # Calling function `init_fn(sess)` will load all the pretrained weights.
+ variables_to_restore = tf.contrib.framework.get_variables_to_restore(exclude=['vgg_16/fc8'])
+ init_fn = tf.contrib.framework.assign_from_checkpoint_fn(model_path, variables_to_restore)
+
+ # Initialization operation from scratch for the new "fc8" layers
+ # `get_variables` will only return the variables whose name starts with the given pattern
+ fc8_variables = tf.contrib.framework.get_variables('vgg_16/fc8')
+ fc8_init = tf.variables_initializer(fc8_variables)
+
+ # ---------------------------------------------------------------------
+ # Using tf.losses, any loss is added to the tf.GraphKeys.LOSSES collection
+ # We can then call the total loss easily
+ tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
+ loss = tf.losses.get_total_loss()
+
+ # First we want to train only the reinitialized last layer fc8 for a few epochs.
+        # We minimize the loss only with respect to the fc8 variables (weights and biases).
+ fc8_optimizer = tf.train.GradientDescentOptimizer(args.learning_rate1)
+ fc8_train_op = fc8_optimizer.minimize(loss, var_list=fc8_variables)
+
+ # Then we want to finetune the entire model for a few epochs.
+        # We minimize the loss with respect to all the variables.
+ full_optimizer = tf.train.GradientDescentOptimizer(args.learning_rate2)
+ full_train_op = full_optimizer.minimize(loss)
+
+ # Evaluation metrics
+ prediction = tf.to_int32(tf.argmax(logits, 1))
+ correct_prediction = tf.equal(prediction, labels)
+ accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
+        # Optionally create a saver here to checkpoint the model:
+        # saver = tf.train.Saver()
+ tf.get_default_graph().finalize()
+
+ # --------------------------------------------------------------------------
+ # Now that we have built the graph and finalized it, we define the session.
+ # The session is the interface to *run* the computational graph.
+ # We can call our training operations with `sess.run(train_op)` for instance
+ with tf.Session(graph=graph) as sess:
+ init_fn(sess) # load the pretrained weights
+ sess.run(fc8_init) # initialize the new fc8 layer
+
+ # Update only the last layer for a few epochs.
+ for epoch in range(args.num_epochs1):
+ # Run an epoch over the training data.
+ print('Starting epoch %d / %d' % (epoch + 1, args.num_epochs1))
+ # Here we initialize the iterator with the training set.
+ # This means that we can go through an entire epoch until the iterator becomes empty.
+ sess.run(train_init_op)
+            while True:
+                try:
+                    _ = sess.run(fc8_train_op, {is_training: True})
+                except tf.errors.OutOfRangeError:
+                    break
+            # Optionally checkpoint after every epoch:
+            # saver.save(sess, './saved_model/model.ckpt')
+ train_acc = check_accuracy(sess, correct_prediction, is_training, train_init_op)
+ #val_acc = check_accuracy(sess, correct_prediction, is_training, val_init_op)
+ print('Train accuracy: %f' % train_acc)
+ #print('Val accuracy: %f\n' % val_acc)
+
+
+ # Train the entire model for a few more epochs, continuing with the *same* weights.
+ for epoch in range(args.num_epochs2):
+ print('Starting epoch %d / %d' % (epoch + 1, args.num_epochs2))
+ sess.run(train_init_op)
+ while True:
+ try:
+ _ = sess.run(full_train_op, {is_training: True})
+ except tf.errors.OutOfRangeError:
+ break
+            # Optionally checkpoint after every epoch:
+            # saver.save(sess, './saved_model/model.ckpt')
+ # Check accuracy on the train and val sets every epoch
+ train_acc = check_accuracy(sess, correct_prediction, is_training, train_init_op)
+ #val_acc = check_accuracy(sess, correct_prediction, is_training, val_init_op)
+ print('Train accuracy: %f' % train_acc)
+ #print('Val accuracy: %f\n' % val_acc)
+
+
+if __name__ == '__main__':
+ args = parser.parse_args()
+ main(args)
\ No newline at end of file