Also MTCNN failed.\")\n", + " if label == 0:\n", + " np.savez(processed_dataset_path + 'train/Negative/' + train_filelist[i] , a=np.zeros(1), b=np.zeros(1))\n", + " elif label == 1:\n", + " np.savez(processed_dataset_path + 'train/Neutral/' + train_filelist[i] , a=np.zeros(1), b=np.zeros(1))\n", + " else:\n", + " np.savez(processed_dataset_path + 'train/Positive/' + train_filelist[i] , a=np.zeros(1), b=np.zeros(1))\n", + " continue" + ] + }, { "cell_type": "code", "execution_count": null, diff --git a/MTCNN/Face_Extractor_BB_Landmarks_Test.ipynb b/MTCNN/Face_Extractor_BB_Landmarks_Test.ipynb new file mode 100644 index 0000000..81ac812 --- /dev/null +++ b/MTCNN/Face_Extractor_BB_Landmarks_Test.ipynb @@ -0,0 +1,166 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Import Modules" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "import warnings\n", + "warnings.filterwarnings('ignore')" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "from src import detect_faces, show_bboxes\n", + "from PIL import Image\n", + "\n", + "import torch\n", + "from torchvision import transforms, datasets\n", + "import numpy as np\n", + "import os" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Path Definitions" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "dataset_path = '../Dataset/emotiw/'\n", + "\n", + "processed_dataset_path = '../Dataset/FaceCoordinates/'" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Load Test Dataset" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "test = sorted(os.listdir(dataset_path + 'test_shared/test/'))" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "test_filelist = [x.split('.')[0] for x in test]" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "['test_1', 'test_10', 'test_100', 'test_1000', 'test_1001', 'test_1002', 'test_1003', 'test_1004', 'test_1005', 'test_1006']\n" + ] + } + ], + "source": [ + "print(test_filelist[:10])" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "3011\n" + ] + } + ], + "source": [ + "print(len(test_filelist))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Extract Faces from Image using MTCNN" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "for i in range(len(test_filelist)):\n", + " print(test_filelist[i])\n", + " img_name = os.path.join(dataset_path, 'test_shared/test/', test_filelist[i]+ '.jpg')\n", + " image = Image.open(img_name)\n", + " try:\n", + " if os.path.isfile(processed_dataset_path + 'test/' + test_filelist[i] + '.npz'):\n", + " print(test_filelist[i] + ' Already present')\n", + " continue\n", + " bounding_boxes, landmarks = detect_faces(image)\n", + " bounding_boxes = np.asarray(bounding_boxes)\n", + " if bounding_boxes.size == 0:\n", + " print('MTCNN model handling empty face condition at ' + test_filelist[i])\n", + " np.savez(processed_dataset_path + 'test/' + test_filelist[i] , a=bounding_boxes, b=landmarks)\n", + " 
\n", + " except ValueError:\n", + " print('No faces detected for ' + test_filelist[i] + \". Also MTCNN failed.\")\n", + " np.savez(processed_dataset_path + 'test/' + test_filelist[i] , a=np.zeros(1), b=np.zeros(1))" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.4" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/MTCNN/Face_Extractor_BB_Landmarks.ipynb b/MTCNN/Face_Extractor_BB_Landmarks_Train.ipynb similarity index 93% rename from MTCNN/Face_Extractor_BB_Landmarks.ipynb rename to MTCNN/Face_Extractor_BB_Landmarks_Train.ipynb index 03f7eee..8f73eaf 100644 --- a/MTCNN/Face_Extractor_BB_Landmarks.ipynb +++ b/MTCNN/Face_Extractor_BB_Landmarks_Train.ipynb @@ -211,6 +211,57 @@ "# Extract Faces from Image using MTCNN" ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "for i in range(len(training_dataset)):\n", + " image, label = training_dataset[i]\n", + " print(train_filelist[i])\n", + " try:\n", + " if label == 0:\n", + " if os.path.isfile(processed_dataset_path + 'train/Negative/' + train_filelist[i] + '.npz'):\n", + " print(train_filelist[i] + ' Already present')\n", + " continue\n", + " bounding_boxes, landmarks = detect_faces(image)\n", + " bounding_boxes = np.asarray(bounding_boxes)\n", + " if bounding_boxes.size == 0:\n", + " print('MTCNN model handling empty face condition at ' + train_filelist[i])\n", + " np.savez(processed_dataset_path + 'train/Negative/' + train_filelist[i] , a=bounding_boxes, b=landmarks)\n", + "\n", + " elif label == 1:\n", + " if os.path.isfile(processed_dataset_path + 'train/Neutral/' + train_filelist[i] + '.npz'):\n", + " print(train_filelist[i] + ' Already present')\n", + " continue\n", + " bounding_boxes, landmarks = detect_faces(image)\n", + " bounding_boxes = np.asarray(bounding_boxes)\n", + " if bounding_boxes.size == 0:\n", + " print('MTCNN model handling empty face condition at ' + train_filelist[i]) \n", + " np.savez(processed_dataset_path + 'train/Neutral/' + train_filelist[i] , a=bounding_boxes, b=landmarks)\n", + "\n", + " else:\n", + " if os.path.isfile(processed_dataset_path + 'train/Positive/' + train_filelist[i] + '.npz'):\n", + " print(train_filelist[i] + ' Already present')\n", + " continue\n", + " bounding_boxes, landmarks = detect_faces(image)\n", + " bounding_boxes = np.asarray(bounding_boxes)\n", + " if bounding_boxes.size == 0:\n", + " print('MTCNN model handling empty face condition at ' + train_filelist[i])\n", + " np.savez(processed_dataset_path + 'train/Positive/' + train_filelist[i] , a=bounding_boxes, b=landmarks)\n", + " \n", + " except ValueError:\n", + " print('No faces detected for ' + train_filelist[i] + \". 
Also MTCNN failed.\")\n", + " if label == 0:\n", + " np.savez(processed_dataset_path + 'train/Negative/' + train_filelist[i] , a=np.zeros(1), b=np.zeros(1))\n", + " elif label == 1:\n", + " np.savez(processed_dataset_path + 'train/Neutral/' + train_filelist[i] , a=np.zeros(1), b=np.zeros(1))\n", + " else:\n", + " np.savez(processed_dataset_path + 'train/Positive/' + train_filelist[i] , a=np.zeros(1), b=np.zeros(1))\n", + " continue" + ] + }, { "cell_type": "code", "execution_count": null, diff --git a/MTCNN/src/__pycache__/__init__.cpython-36.pyc b/MTCNN/src/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000..1fb85c1 Binary files /dev/null and b/MTCNN/src/__pycache__/__init__.cpython-36.pyc differ diff --git a/MTCNN/src/__pycache__/box_utils.cpython-36.pyc b/MTCNN/src/__pycache__/box_utils.cpython-36.pyc new file mode 100644 index 0000000..fcc713d Binary files /dev/null and b/MTCNN/src/__pycache__/box_utils.cpython-36.pyc differ diff --git a/MTCNN/src/__pycache__/detector.cpython-36.pyc b/MTCNN/src/__pycache__/detector.cpython-36.pyc new file mode 100644 index 0000000..3242f3d Binary files /dev/null and b/MTCNN/src/__pycache__/detector.cpython-36.pyc differ diff --git a/MTCNN/src/__pycache__/first_stage.cpython-36.pyc b/MTCNN/src/__pycache__/first_stage.cpython-36.pyc new file mode 100644 index 0000000..d6c85d3 Binary files /dev/null and b/MTCNN/src/__pycache__/first_stage.cpython-36.pyc differ diff --git a/MTCNN/src/__pycache__/get_nets.cpython-36.pyc b/MTCNN/src/__pycache__/get_nets.cpython-36.pyc new file mode 100644 index 0000000..083925b Binary files /dev/null and b/MTCNN/src/__pycache__/get_nets.cpython-36.pyc differ diff --git a/MTCNN/src/__pycache__/visualization_utils.cpython-36.pyc b/MTCNN/src/__pycache__/visualization_utils.cpython-36.pyc new file mode 100644 index 0000000..23662aa Binary files /dev/null and b/MTCNN/src/__pycache__/visualization_utils.cpython-36.pyc differ