diff --git a/.gitignore b/.gitignore index 7a94321..7fa6a32 100755 --- a/.gitignore +++ b/.gitignore @@ -1,7 +1,159 @@ build dist dannce.egg-info -__pycache__ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# poetry +# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 
+# This is especially recommended for binary packages to ensure reproducibility, and is more +# commonly ignored for libraries. +# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control +#poetry.lock + +# PEP 582; used by e.g. github.com/David-OConnor/pyflow +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ + +# PyCharm +# JetBrains specific template is maintainted in a separate JetBrains.gitignore that can +# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore +# and can be added to the global gitignore or merged into this file. For a more nuclear +# option (not recommended) you can uncomment the following to ignore the entire idea folder. 
+#.idea/ + dannce/engine/__pycache__/ dannce/labeling/__pycache__/ old_repo/ @@ -31,6 +183,11 @@ demo/markerless_mouse_1/DANNCE/predict_results/ demo/markerless_mouse_1/DANNCE/train_results/AVG/weights* demo/markerless_mouse_1/DANNCE/predict_test/ +demo/markerless_mouse_1/DANNCE/train_test_ln/ +demo/markerless_mouse_1/DANNCE/train_test_in/ +demo/markerless_mouse_1/DANNCE/train_test_in_dgp/ +demo/markerless_mouse_1/DANNCE/train_test_ln_dgp/ + demo/markerless_mouse_1/COM/train_test/ demo/markerless_mouse_1/COM/train_test/logs/* demo/markerless_mouse_1/COM/train_test/*.pickle @@ -54,3 +211,8 @@ demo/markerless_mouse_2/DANNCE/predict_results/copy* demo/markerless_mouse_1/videos/ demo/markerless_mouse_2/videos/ + +tests/configs/alabel3d_temp_dannce.mat +tests/configs/log_file.txt + +.vscode/ \ No newline at end of file diff --git a/README.md b/README.md index 844c61e..81ba8a4 100644 --- a/README.md +++ b/README.md @@ -21,8 +21,8 @@ DANNCE (3-Dimensional Aligned Neural Network for Computational Ethology) is a co | OS | Python | TensorFlow | CUDA | cuDNN | PyTorch | |:---------------------:|:------:|:----------:|:----:|:-----:|:-------:| -| Ubuntu 16.04 or 18.04 | 3.7.x | 2.2.0 - 2.3.0 | 10.1 | 7.6 | 1.5.0 - 1.7.0 | -| Windows 10 | 3.7.x | 2.2.0 - 2.3.0 | 10.1 | 7.6 | 1.5.0 - 1.7.0 | +| Ubuntu 16.04 or 18.04 | 3.7.x | 2.6.0 | 11.1 | 8.1 | 1.9.1 | +| Windows 10 **_Not yet retested_** | 3.7.x | 2.6.0 | 11.1 | 8.1 | 1.9.1 | We recommend installing DANNCE using the following steps: @@ -35,13 +35,13 @@ cd dannce 2. If you do not already have it, install [Anaconda](https://www.anaconda.com/products/individual). 3. Set up a new Anaconda environment with the following configuration: \ -`conda create -n dannce python=3.7 cudatoolkit=10.1 cudnn ffmpeg` +`conda create -n dannce python=3.7 cudatoolkit=11.1 cudnn=8.1 ffmpeg -c nvidia -c conda-forge` 4. Activate the new Anaconda environment: \ `conda activate dannce` 5. 
Install PyTorch: \ -`conda install pytorch=1.7 -c pytorch` +`conda install pytorch=1.9.1 -c pytorch` 6. Update setuptools: \ `pip install -U setuptools` @@ -98,9 +98,9 @@ The demo should take less than 2 minutes to run on an NVIDIA Titan X, Titan V, T Please see the *Wiki* for more details on running DANNCE and customizing configuration files. -## Using DANNCE on your data +# Using DANNCE on your data -### Camera Calibration +## 1. Camera Calibration To use DANNCE, acquisition cameras must calibrated. Ideally, the acquired data will also be compressed. Synchronization is best done with a frametime trigger and a supplementary readout of frame times. Calibration is the process of determining the distortion introduced into an image from the camera lens (camera intrinsics) and the position and orientation of cameras relative to one another in space (camera extrinsics). When acquiring our data, we typically calibrated cameras in a two-step process. We first used a checkerboard to find the camera intrinsics. We then used an 'L-frame' to determine the camera extrinsics. The L-frame is a calibrated grid of four or more points that are labeled in each camera. A checkerboard can also be used for both procedures. We have included two examples of calibration using MATLAB (in `Calibration/`). Some tips: @@ -120,12 +120,12 @@ Cameras tested: 3. Basler ace aca1920-155uc -## Formatting The Data +## 2. Formatting The Data DANNCE requires a set of videos across multiple views and a `*dannce.mat` file that contains camera calibration parameters, a structure that synchronizes frames across views, and in the case of training, the 3D labels. We recommend setting up individual project folders for each video recording session, as in the dannce demos (`./demo`), although the dannce configuration files are flexible enough to support more custom file and directory organizations. The demo project folders also contain examples of all of the following formatting information. 
-**video directories**. +**2.1-video directories**. DANNCE requires a parent video directory with *n* sub-directories, one for each of *n* cameras. Within each subdirectory, videos must be named according the frame index of the first frame in the file. For example, for a three-camera system, the video directory must look like: @@ -144,7 +144,7 @@ DANNCE requires a parent video directory with *n* sub-directories, one for each |\_\_+--0.mp4 -**configuration files**. +**2.2-configuration files**. `DANNCE` uses two configuration files and one data file. @@ -152,36 +152,69 @@ DANNCE requires a parent video directory with *n* sub-directories, one for each - *io config*, e.g. `demo/markerless_mouse_1/io.yaml`. This file defines input data and ouput directories. It is used for a single experiment. - *dannce.mat*, e.g. `demo/markerless_mouse_1/label3d_dannce.mat`. This file contains three cell arrays of matlab structures. `params` stores the camera parameters for each camera. `sync` stores a vector that synchronizes all cameras. `labelData` stores the frame identities and 3d labels for hand-labeled frames. This file can be produced automatically with `Label3D.exportDannce()`. -**camera calibration parameters**. +**2.3-camera calibration parameters**. Dannce requires structs for each camera containing the camera's rotation matrix, translation vector, intrinsic matrix, radial distortion, and tangential distortion. If you use our included calibration scripts, you can convert the output to the required format with `utils/convert_calibration.m`. A properly formatted calibration struct has the following fields, `['R','t','K','RDistort','TDistort']`. -**synchronization files**. +**2.4-synchronization files**. DANNCE requires a set of sync structs, one for each camera, which define frame synchrony across the different cameras over time. If you know your cameras are reliably synchronized at all times (e.g. 
via hardware triggering), these files can be generated with the aid of `dannce/utils/makeSyncFiles.py`. Once your video directories are set up correctly, sync files can get generated by running `python dannce/utils/makeSyncFiles.py {path_to_videos} {acquisition_frame_rate} {number_tracked_landmarks}`, where {.} denotes variables you must replace with relevant values. See the `makeSyncFiles.py` docstring for more information. If your cameras are not natively synchronized, but you can collect timestaps for each frame, sync files should be generated by `dannce/utils/preprocess_data.m`, which will generate sync files from a properly formatted `.mat` file listing the frameID for each camera at each timepoint. See `/dannce/utils/example_matchedframs.mat` file for how these timestamp data should be formatted before running `preprocess_data.m`. -## Hand-Labeling +## 3. Hand-Labeling For fine-tuning DANNCE to work with your animal and system, we developed a labeling GUI, which can be found in a separate repo: https://github.com/diegoaldarondo/Label3D. The `Label3D` repository should be cloned with DANNCE automatically as a submodule when using `git clone --recursive https://github.com/spoonsso/dannce` When labeling is completed, the labels can be used to train DANNCE and the COMfinder network (see below) after converting the Label3D files to DANNCE format using `Label3D.exportDannce()`. -### Training and Predicting with the COMfinder U-Net +## 4. Training and Predicting with the COMfinder U-Net DANNCE requires a reasonable estimate of the 3D position of the animal in each frame. We obtain this by triangulating the 2D center of mass (COM) of the animal in each frame. You can use your favorite method to find an estimate of the animal COM in each frame, but we trained a 2D U-Net to do it. Our U-Net typically requires some additional training data to get it working on new views, new environments, and new species. 
If working with hand-labeled data, your same data structures can be used to train both the COMfinder network and the DANNCE network. -Given formatted data, a properly organized directory structure, and a config file (see config and demo folder, and wiki), navigate to your project folder and run -`com-train /path/to/main_com_config.yaml` +### 4.1 Training COMfinder U-Net +1. Get the data formatted and directory structure organized as described in section 2.1 +2. Get the main config and other config files set as described in 2.2. Note, different _io.yaml_ and _dannce.mat_ config files need to be present in each project directory.(see config and demo folder, and wiki) +3. Navigate to your project folder and run `com-train /path/to/main_com_config.yaml` The trained COM network weights will go into `com_train_dir` defined in the io.yaml. +### 4.2 Generating COM predictions After training, run `com-predict /path/to/main_com_config.yaml` -to generate center of mass predictions. +to generate center of mass predictions. -### Training and Predicting with DANNCE +The COM predictions may be stored in a separate file or appended to the `label3d_dannce.mat` file. -Once the COM is found, the main DANNCE network can be trained by running: +## 5. Training and Predicting with DANNCE + +### 5.1 Training DANNCE +1. Get the COM predictions for your experiments (Section 4.1 and 4.2) +2. If you want to run Dannce from your experiments directory, you can simply train dannce by running +`dannce-train /path/to/main_config.yaml` +3. The training step for DANNCE can be run from a directory other than the project directory. To do this one needs to define the `label3d_dannce.mat` file and corresponding video directories using the `exp` option. Check the expanded io.yaml +4. Once the io.yaml is set appropriately, dannce can be trined by running `dannce-train /path/to/main_config.yaml` -After training, run +### 5.2 Generating DANNCE Predictions +1. Navigate to the project folder +2. 
Make sure that the label3d_dannce.mat and io config files are present and properly configured. The io file should point to the weights file which needs to be loaded. +3. Also verify the directory structure for videos. Note, path to videos can be modified in the io to point to exact video file location, and will default to `./videos`. +4. Run `dannce-predict /path/to/main_config.yaml` from within an experiment/project folder to make 3D predictions using the trained model. -`dannce-predict /path/to/main_config.yaml` from within an experiment folder to make 3D predictions using the trained model. Consult the demo folder for directory and config file formatting examples + +__*Note*__: All the dannce scripts should be ideally run from the project folders. This is because, dannce scripts look for all config files (except main_config), videos and `dannce.mat` file inside the directory from which they are called. `dannce-train` could be run from a separate folder, but the directory paths need to be adjusted accordingly in both io config and main config. + +__*Note*__: If you are running dannce scripts on a slurm cluster, there are additional config files that can be set according to your node configurations. Please refer to Wiki section Dannce on Slurm HPC for more details + +## Contributing + +Before you push into the repository, make sure to run the standard tests on the code. The `tests` directory contains all the standard tests to be run. You will need to have the weights and videos in order to run the tests. + +After your changes are done, the tests can be run by using the following simple steps: + - Go to the dannce install directory + - Do a pip install + - run `tests/tests.sh` +``` + cd dannce + pip install -e . 
+ sh tests/tests.sh +``` + +If you think the functionality you have included needs to be tested separately, you can include a test for the same under `tests/cli_test.py` diff --git a/cluster/com.sh b/cluster/com.sh deleted file mode 100755 index 7c72cd9..0000000 --- a/cluster/com.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/bin/bash -# Script to run all steps of dannce in a single job. -# -# Inputs: com_config - path to com config. -# Example: sbatch com.sh /path/to/com_config.yaml -#SBATCH --job-name=com -#SBATCH --mem=5000 -#SBATCH -t 5-00:00 -#SBATCH -N 1 -#SBATCH -c 1 -#SBATCH -p olveczky -set -e -sbatch --wait holy_com_train.sh $1 -wait -sbatch --wait holy_com_predict.sh $1 diff --git a/cluster/com_and_dannce.sh b/cluster/com_and_dannce.sh deleted file mode 100755 index b15213b..0000000 --- a/cluster/com_and_dannce.sh +++ /dev/null @@ -1,20 +0,0 @@ -#!/bin/bash -# Script to run all steps of dannce in a single job. -# -# Inputs: com_config - path to com config. -# dannce_config - path to com config. -# Example: sbatch com_and_dannce.sh /path/to/com_config.yaml /path/to/dannce_config.yaml -#SBATCH --job-name=com_and_dannce -#SBATCH --mem=5000 -#SBATCH -t 5-00:00 -#SBATCH -N 1 -#SBATCH -c 1 -#SBATCH -p olveczky -set -e -sbatch --wait holy_com_train.sh $1 -wait -sbatch --wait holy_com_predict.sh $1 -wait -sbatch --wait holy_dannce_train.sh $2 -wait -sbatch --wait holy_dannce_predict.sh $2 diff --git a/cluster/com_and_dannce_multi_gpu.sh b/cluster/com_and_dannce_multi_gpu.sh deleted file mode 100755 index cb74393..0000000 --- a/cluster/com_and_dannce_multi_gpu.sh +++ /dev/null @@ -1,34 +0,0 @@ -#!/bin/bash -# Script to run all steps of dannce in a single job using multi-gpu prediction. -# -# Inputs: com_config - path to com config. -# dannce_config - path to com config. 
-# Example: sbatch com_and_dannce_multi_gpu.sh /path/to/com_config.yaml /path/to/dannce_config.yaml -#SBATCH --job-name=com_and_dannce -#SBATCH --mem=10000 -#SBATCH -t 5-00:00 -#SBATCH -N 1 -#SBATCH -c 1 -#SBATCH -p olveczky -set -e - -# Setup the dannce environment -module load Anaconda3/5.0.1-fasrc02 -module load ffmpeg/4.0.2-fasrc01 -source activate dannce - -# Train com network -sbatch --wait holy_com_train.sh $1 -wait - -# Predict with com network in parallel and merge results -com-predict-multi-gpu $1 -com-merge $1 - -# Train dannce network -sbatch --wait holy_dannce_train.sh $2 -wait - -# Predict with dannce network in parallel and merge results -dannce-predict-multi-gpu $2 -dannce-merge $2 diff --git a/cluster/com_multi_gpu.sh b/cluster/com_multi_gpu.sh deleted file mode 100755 index 46d1352..0000000 --- a/cluster/com_multi_gpu.sh +++ /dev/null @@ -1,27 +0,0 @@ -#!/bin/bash -# Script to run all steps of com in a single job using multi-gpu prediction. -# -# Inputs: com_config - path to com config. -# -# Example: sbatch com_multi_gpu.sh /path/to/com_config.yaml -#SBATCH --job-name=com_multi_gpu -#SBATCH --mem=10000 -#SBATCH -t 5-00:00 -#SBATCH -N 1 -#SBATCH -c 1 -#SBATCH -p olveczky -set -e - -# Setup the dannce environment -module load Anaconda3/5.0.1-fasrc02 -module load ffmpeg/4.0.2-fasrc01 -source activate dannce - -# Train com network -sbatch --wait holy_com_train.sh $1 -wait - -# Predict with com network in parallel and merge results -com-predict-multi-gpu $1 -com-merge $1 - diff --git a/cluster/dannce.sh b/cluster/dannce.sh deleted file mode 100755 index 233dc40..0000000 --- a/cluster/dannce.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/bin/bash -# Script to run all steps of dannce in a single job. -# -# Inputs: dannce_config - path to com config. 
-# Example: sbatch dannce.sh /path/to/dannce_config.yaml -#SBATCH --job-name=dannce -#SBATCH --mem=5000 -#SBATCH -t 5-00:00 -#SBATCH -N 1 -#SBATCH -c 1 -#SBATCH -p olveczky -set -e -sbatch --wait holy_dannce_train.sh $1 -wait -sbatch --wait holy_dannce_predict.sh $1 diff --git a/cluster/dannce_multi_gpu.sh b/cluster/dannce_multi_gpu.sh deleted file mode 100755 index 9b44274..0000000 --- a/cluster/dannce_multi_gpu.sh +++ /dev/null @@ -1,25 +0,0 @@ -#!/bin/bash -# Script to run all steps of dannce in a single job using multi-gpu prediction. -# -# Inputs: dannce_config - path to com config. -# Example: sbatch dannce_multi_gpu.sh /path/to/dannce_config.yaml -#SBATCH --job-name=dannce_multi_gpu -#SBATCH --mem=10000 -#SBATCH -t 5-00:00 -#SBATCH -N 1 -#SBATCH -c 1 -#SBATCH -p olveczky -set -e - -# Setup the dannce environment -module load Anaconda3/5.0.1-fasrc02 -module load ffmpeg/4.0.2-fasrc01 -source activate dannce - -# Train dannce network -sbatch --wait holy_dannce_train.sh $1 -wait - -# Predict with dannce network in parallel and merge results -dannce-predict-multi-gpu $1 -dannce-merge $1 diff --git a/cluster/duke.yaml b/cluster/duke.yaml new file mode 100644 index 0000000..4eadac9 --- /dev/null +++ b/cluster/duke.yaml @@ -0,0 +1,16 @@ +# Dannce slurm configuration +dannce_train: "--job-name=trainDannce -p gpu-common,dsplus-gpu --mem=80000 -t 3-00:00 --gres=gpu:1 -N 1 -c 16 --account=plusds" +# dannce_train: "--job-name=trainDannce -p gpu-common,tdunn --mem=80000 -t 3-00:00 --gres=gpu:4 -N 1 -c 16 " +dannce_train_grid: "--job-name=trainDannce -p gpu-common,dsplus-gpu --mem=80000 -t 3-00:00 --gres=gpu:1 -N 1 -c 16 --account=plusds" +# dannce_train_grid: "--job-name=trainDannce -p gpu-common,tdunn --mem=80000 -t 3-00:00 --gres=gpu:1 -N 1 -c 16" +dannce_predict: "--job-name=predictDannce -p gpu-common,dsplus-gpu --mem=30000 -t 1-00:00 --gres=gpu:1 -N 1 -c 8 --account=plusds" +dannce_multi_predict: "--job-name=predictDannce -p gpu-common,dsplus-gpu --mem=30000 -t 
0-03:00 --gres=gpu:1 -N 1 -n 8 --account=plusds" + +# Com slurm configuration +com_train: "--job-name=trainCom -p gpu-common,dsplus-gpu --mem=30000 -t 3-00:00 --gres=gpu:1 -N 1 -c 8 --account=plusds" +com_predict: "--job-name=predictCom -p gpu-common,dsplus-gpu --mem=10000 -t 3-00:00 --gres=gpu:1 -N 1 -c 8 --account=plusds" +com_multi_predict: "--job-name=predictCom -p gpu-common,dsplus-gpu --mem=10000 -t 0-03:00 --gres=gpu:1 -N 1 -n 8 --account=plusds" + +# Setup functions (optional, set to "" if no setup is required. Trailing ; is required) +inference: '--job-name=inference gpu-common,dsplus-gpu --mem=30000 -t 3-00:00 -N 1 -n 8 --account=plusds' +setup: ". ~/.bashrc; conda activate dannce_aux;" diff --git a/cluster/grid.py b/cluster/grid.py index e5a6488..d60acff 100755 --- a/cluster/grid.py +++ b/cluster/grid.py @@ -1,75 +1,157 @@ -import numpy as np import sys import pickle import os import yaml import argparse import ast -from scipy.io import savemat -from dannce.engine.io import load_sync, load_com -from dannce.engine.processing import prepare_save_metadata -from dannce import ( - _param_defaults_shared, - _param_defaults_dannce, - _param_defaults_com, -) +from typing import Text, List, Tuple +from cluster.multi_gpu import build_params_from_config_and_batch, load_params +import subprocess +import time +import logging + +FILE_PATH = "dance.cluster.grid" + class GridHandler: def __init__( self, - config, - grid_config, - verbose=True, - test=False, - dannce_file=None, + config: Text, + grid_config: Text, + verbose: bool = True, + test: bool = False, + dannce_file: Text = None, ): + """Initialize grid search handler + + Args: + config (Text): Path to base config .yaml file. + grid_config (Text): Path to grid search config .yaml file. + verbose (bool, optional): If True, print out batch parameters. Defaults to True. + test (bool, optional): If True, print out system command, but do not run. Defaults to False. 
+ dannce_file (Text, optional): Path to dannce.mat file. Defaults to None. + """ self.config = config self.grid_config = grid_config self.batch_param_file = "_grid_params.p" self.verbose = verbose self.test = test - def load_params(self, param_path): - """Load a params file""" + def load_params(self, param_path: Text) -> List: + """Load the training parameters + + Args: + param_path (Text): Path to parameters file. + + Returns: + List: Training parameters for each batch + """ with open(param_path, "rb") as file: params = yaml.safe_load(file) return params["batch_params"] - def save_batch_params(self, batch_params): - """Save the batch_param dictionary to the batch_param file""" + def save_batch_params(self, batch_params: List): + """Save the batch_param dictionary to the batch_param file + + Args: + batch_params (List): List of batch training parameters + """ out_dict = {"batch_params": batch_params} with open(self.batch_param_file, "wb") as file: pickle.dump(out_dict, file) - def load_batch_params(self): + def load_batch_params(self) -> List: + """Load the batch parameters + + Returns: + List: Batch training parameters + """ with open(self.batch_param_file, "rb") as file: in_dict = pickle.load(file) return in_dict["batch_params"] - def generate_batch_params_dannce(self): + def generate_batch_params_dannce(self) -> List: + """Generate the batch parameters + + Returns: + List: Training parameters for each batch + """ return self.load_params(self.grid_config) - def submit_jobs(self, batch_params, cmd): - """Print out description of command and issue system command""" + def submit_jobs(self, batch_params: List, cmd: Text): + """Print out description of command and issue system command + + Args: + batch_params (List): List of batch training parameters. + cmd (Text): System command to be issued. 
+ """ + # Set logging prepend + prepend_log_msg = FILE_PATH + ".GridHandler.submit_jobs " + if self.verbose: for batch_param in batch_params: - print(batch_param) - print("Command issued: ", cmd) + logging.info(prepend_log_msg + str(batch_param.values())) + logging.info(prepend_log_msg + "Command issued: " + cmd) if not self.test: - os.system(cmd) + if isinstance(cmd, list): + for i in range(len(cmd)): + os.environ["SLURM_ARRAY_TASK_ID"] = str(i) + os.system(cmd[i]) + time.sleep(0.05) + elif isinstance(cmd, str): + os.system(cmd) + + def get_parent_job_id(self): + """Return the job id in the last row of squeue -u slurm command. + The assumption here is that the last line of the squeue command would + be the job_id of the parent sbatch job from which the array of jobs has + to be called. This job_id will be used while iterating over the number + of jobs to set customized output file names. + """ + + get_user_cmd = "whoami" + get_user_process = subprocess.Popen(get_user_cmd.split(), stdout=subprocess.PIPE) + slurm_uname = get_user_process.communicate()[0].decode("utf-8").rstrip() + + get_queue_cmd = "squeue -u " + slurm_uname + get_queue_process = subprocess.Popen(get_queue_cmd.split(), stdout=subprocess.PIPE) + queue = get_queue_process.communicate()[0].decode("utf-8").split('\n') + current_job_row = queue[-2].strip() + job_id = current_job_row.split(' ')[0] + + return job_id, slurm_uname + - def submit_dannce_train_grid(self): + def submit_dannce_train_grid(self) -> Tuple[List, Text]: """Submit dannce grid search. Submit a training job with parameter modifications - listed in self.grid_config. + listed in grid_config. 
+ + Returns: + Tuple[List, Text]: Batch parameters list, system command """ batch_params = self.generate_batch_params_dannce() - cmd = "sbatch --array=0-%d holy_dannce_train_grid.sh %s %s" % ( - len(batch_params) - 1, - self.config, - self.grid_config, + + # import pdb; pdb.set_trace() + # Setup Logging for dannce_train_single_batch + if not os.path.exists(os.path.dirname(load_params(self.config)["log_dest"])): + os.makedirs(os.path.dirname(load_params(self.config)["log_dest"])) + logging.basicConfig(filename=load_params(self.config)["log_dest"], level=load_params(self.config)["log_level"], + format='%(asctime)s %(levelname)s:%(message)s', datefmt='%m/%d/%Y %I:%M:%S %p') + + slurm_config = load_params(load_params(self.config)["slurm_config"]) + + cmd = ( + 'sbatch --wait --array=0-%d %s --wrap="%s dannce-train-single-batch %s %s"' + % ( + len(batch_params) - 1, + slurm_config["dannce_train_grid"], + slurm_config["setup"], + self.config, + self.grid_config, + ) ) if len(batch_params) > 0: self.save_batch_params(batch_params) @@ -77,31 +159,8 @@ def submit_dannce_train_grid(self): return batch_params, cmd -def build_params_from_config_and_batch(config, batch_param, dannce_net=True): - from dannce.interface import build_params - from dannce.engine.processing import infer_params - - # Build final parameter dictionary - params = build_params(config, dannce_net=dannce_net) - for key, value in batch_param.items(): - params[key] = value - if dannce_net: - for key, value in _param_defaults_dannce.items(): - if key not in params: - params[key] = value - else: - for key, value in _param_defaults_com.items(): - if key not in params: - params[key] = value - for key, value in _param_defaults_shared.items(): - if key not in params: - params[key] = value - - params = infer_params(params, dannce_net=dannce_net, prediction=False) - return params - - def dannce_train_single_batch(): + """CLI entrypoint to train a single batch.""" from dannce.interface import dannce_train # Load in 
parameters to modify @@ -111,16 +170,27 @@ def dannce_train_single_batch(): batch_params = handler.load_batch_params() task_id = int(os.getenv("SLURM_ARRAY_TASK_ID")) batch_param = batch_params[task_id] - print(batch_param) + # Build final parameter dictionary params = build_params_from_config_and_batch(config, batch_param) + # Setup Logging for dannce_train_single_batch + if not os.path.exists(os.path.dirname(params["log_dest"])): + os.makedirs(os.path.dirname(params["log_dest"])) + logging.basicConfig(filename=params["log_dest"], level=params["log_level"], + format='%(asctime)s %(levelname)s:%(message)s', datefmt='%m/%d/%Y %I:%M:%S %p') + prepend_log_msg = FILE_PATH + ".dannce_train_single_batch " + + logging.info(prepend_log_msg + "Task ID = " + str(task_id)) + logging.info(prepend_log_msg + str(batch_param)) + # Train dannce_train(params) def dannce_train_grid(): + """CLI entrypoint to submit a set of training parameters.""" # Load in parameters to modify args = cmdline_args() handler = GridHandler(**args.__dict__) @@ -128,15 +198,18 @@ def dannce_train_grid(): def cmdline_args(): + """Parse command line arguments + + Returns: + [type]: Argparser values + """ # Make parser object p = argparse.ArgumentParser( description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter, ) p.add_argument("config", help="Path to .yaml configuration file") - p.add_argument( - "grid_config", help="Path to .yaml grid search configuration file" - ) + p.add_argument("grid_config", help="Path to .yaml grid search configuration file") p.add_argument( "--verbose", dest="verbose", diff --git a/cluster/holy_com_predict.sh b/cluster/holy_com_predict.sh deleted file mode 100755 index 116de88..0000000 --- a/cluster/holy_com_predict.sh +++ /dev/null @@ -1,14 +0,0 @@ -#!/bin/bash -#SBATCH --job-name=predCOM -# Job name -#SBATCH --mem=30000 -# Job memory request -#SBATCH -t 3-00:00 -# Time limit hrs:min:sec -#SBATCH -N 1 -#SBATCH -c 8 -#SBATCH -p olveczkygpu,gpu -#SBATCH --gres=gpu:1 
-module load Anaconda3/5.0.1-fasrc02 -source activate dannce -com-predict "$@" diff --git a/cluster/holy_com_predict_multi_gpu.sh b/cluster/holy_com_predict_multi_gpu.sh deleted file mode 100755 index 7817a23..0000000 --- a/cluster/holy_com_predict_multi_gpu.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/bin/bash -#SBATCH --job-name=predictCom -# Job name -#SBATCH --mem=10000 -# Job memory request -#SBATCH -t 0-03:00 -# Time limit hrs:min:sec -#SBATCH -N 1 -#SBATCH -n 8 -#SBATCH -p olveczkygpu,gpu,cox,gpu_requeue -#SBATCH --gres=gpu:1 -#SBATCH --constraint=cc5.2 -module load Anaconda3/5.0.1-fasrc02 -source activate dannce -com-predict-single-batch "$@" diff --git a/cluster/holy_com_train.sh b/cluster/holy_com_train.sh deleted file mode 100755 index 8e3ae19..0000000 --- a/cluster/holy_com_train.sh +++ /dev/null @@ -1,14 +0,0 @@ -#!/bin/bash -#SBATCH --job-name=trainCOM -# Job name -#SBATCH --mem=30000 -# Job memory request -#SBATCH -t 3-00:00 -# Time limit hrs:min:sec -#SBATCH -N 1 -#SBATCH -c 8 -#SBATCH -p olveczkygpu,gpu -#SBATCH --gres=gpu:1 -module load Anaconda3/5.0.1-fasrc02 -source activate dannce -com-train "$@" diff --git a/cluster/holy_dannce_predict.sh b/cluster/holy_dannce_predict.sh deleted file mode 100755 index 32c00ba..0000000 --- a/cluster/holy_dannce_predict.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/bin/bash -#SBATCH --job-name=predictDannce -# Job name -#SBATCH --mem=10000 -# Job memory request -#SBATCH -t 1-00:00 -# Time limit hrs:min:sec -#SBATCH -N 1 -#SBATCH -c 8 -#SBATCH -p olveczkygpu,gpu -#SBATCH --gres=gpu:1 -#SBATCH --constraint=cc5.2 -module load Anaconda3/5.0.1-fasrc02 -source activate dannce -dannce-predict "$@" \ No newline at end of file diff --git a/cluster/holy_dannce_predict_multi_gpu.sh b/cluster/holy_dannce_predict_multi_gpu.sh deleted file mode 100755 index 9375346..0000000 --- a/cluster/holy_dannce_predict_multi_gpu.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/bin/bash -#SBATCH --job-name=predictDannce -# Job name -#SBATCH --mem=10000 -# Job 
memory request -#SBATCH -t 0-03:00 -# Time limit hrs:min:sec -#SBATCH -N 1 -#SBATCH -n 8 -#SBATCH -p olveczkygpu,gpu,cox,gpu_requeue -#SBATCH --gres=gpu:1 -#SBATCH --constraint=cc5.2 -module load Anaconda3/5.0.1-fasrc02 -source activate dannce -dannce-predict-single-batch "$@" diff --git a/cluster/holy_dannce_train.sh b/cluster/holy_dannce_train.sh deleted file mode 100755 index 013a25e..0000000 --- a/cluster/holy_dannce_train.sh +++ /dev/null @@ -1,14 +0,0 @@ -#!/bin/bash -#SBATCH --job-name=trainDannce -# Job name -#SBATCH --mem=60000 -# Job memory request -#SBATCH -t 2-00:00 -# Time limit hrs:min:sec -#SBATCH -N 1 -#SBATCH -c 16 -#SBATCH -p olveczkygpu,gpu -#SBATCH --gres=gpu:1 -module load Anaconda3/5.0.1-fasrc02 -source activate dannce -dannce-train "$@" diff --git a/cluster/holy_dannce_train_grid.sh b/cluster/holy_dannce_train_grid.sh deleted file mode 100755 index b67f799..0000000 --- a/cluster/holy_dannce_train_grid.sh +++ /dev/null @@ -1,14 +0,0 @@ -#!/bin/bash -#SBATCH --job-name=trainDannce -# Job name -#SBATCH --mem=60000 -# Job memory request -#SBATCH -t 2-00:00 -# Time limit hrs:min:sec -#SBATCH -N 1 -#SBATCH -c 16 -#SBATCH -p olveczkygpu,gpu -#SBATCH --gres=gpu:1 -module load Anaconda3/5.0.1-fasrc02 -source activate dannce -dannce-train-single-batch "$@" diff --git a/cluster/holyoke.yaml b/cluster/holyoke.yaml new file mode 100644 index 0000000..73d5cab --- /dev/null +++ b/cluster/holyoke.yaml @@ -0,0 +1,16 @@ +# Dannce slurm configuration +dannce_train: "--job-name=trainDannce -p olveczkygpu,gpu --mem=80000 -t 3-00:00 --gres=gpu:1 -N 1 -n 8 " +dannce_train_grid: "--job-name=trainDannce -p olveczkygpu,gpu --mem=80000 -t 3-00:00 --gres=gpu:1 -N 1 -n 8 " +dannce_predict: "--job-name=predictDannce -p olveczkygpu,gpu,cox,gpu_requeue --mem=30000 -t 1-00:00 --gres=gpu:1 -N 1 -n 8 " +dannce_multi_predict: "--job-name=predictDannce -p olveczkygpu,gpu,cox,gpu_requeue --mem=30000 -t 0-03:00 --gres=gpu:1 -N 1 -n 8 " + +# Com slurm configuration +com_train: 
"--job-name=trainCom -p olveczkygpu,gpu --mem=30000 -t 3-00:00 --gres=gpu:1 -N 1 -n 8 " +com_predict: "--job-name=predictCom -p olveczkygpu,gpu,cox,gpu_requeue --mem=10000 -t 1-00:00 --gres=gpu:1 -N 1 -n 8 " +com_multi_predict: "--job-name=predictCom -p olveczkygpu,gpu,cox,gpu_requeue --mem=10000 -t 0-03:00 --gres=gpu:1 -N 1 -n 8 " + +# Inference +inference: '--job-name=inference -p olveczky,shared --mem=30000 -t 3-00:00 -N 1 -n 8 --constraint="intel&avx2"' +# Setup functions (optional, set to "" if no setup is required. Trailing ; is required) +# setup: "module load Anaconda3/2020.11; source activate dannce;" +setup: "module load Anaconda3/2020.11; source activate dannce; module load cuda/11.0.3-fasrc01; module load cudnn/8.0.4.30_cuda11.0-fasrc01;" diff --git a/cluster/multi_gpu.py b/cluster/multi_gpu.py index 027adde..f8bdc22 100755 --- a/cluster/multi_gpu.py +++ b/cluster/multi_gpu.py @@ -7,33 +7,42 @@ import ast from scipy.io import savemat from dannce.engine.io import load_sync, load_com -from dannce.engine.processing import prepare_save_metadata from dannce import ( _param_defaults_shared, _param_defaults_dannce, _param_defaults_com, ) import scipy.io as spio +from typing import Dict, List, Text +import logging -DANNCE_PRED_FILE_BASE_NAME = "save_data_AVG" -COM_PRED_FILE_BASE_NAME = "com3d" +DANNCE_BASE_NAME = "save_data_AVG" +COM_BASE_NAME = "com3d" +FILE_PATH = "dannce.cluster.multi_gpu" -def loadmat(filename): - """ - this function should be called instead of direct spio.loadmat +def loadmat(filename: Text) -> Dict: + """Wrapper to spio loadmat. + + This function should be called instead of direct spio.loadmat as it cures the problem of not properly recovering python dictionaries from mat files. 
It calls the function check keys to cure all entries which are still mat-objects + + Returns: + Dict: Matlab file contents """ data = spio.loadmat(filename, struct_as_record=False, squeeze_me=True) return _check_keys(data) -def _check_keys(dict): - """ - checks if entries in dictionary are mat-objects. If yes - todict is called to change them to nested dictionaries +def _check_keys(dict: Dict) -> Dict: + """Checks if entries in dictionary are mat-objects. + + If yes, todict is called to change them to nested dictionaries + + Returns: + Dict: Matlab file contents """ for key in dict: if isinstance(dict[key], spio.matlab.mio5_params.mat_struct): @@ -55,19 +64,45 @@ def _todict(matobj): return dict +def load_params(param_path: Text) -> Dict: + """Load a params file + + Args: + param_path (Text): Path to parameters file + + Returns: + Dict: Parameters dictionary + """ + with open(param_path, "rb") as file: + params = yaml.safe_load(file) + return params + + class MultiGpuHandler: def __init__( self, - config, - n_samples_per_gpu=5000, - only_unfinished=False, - predict_path=None, - com_file=None, + config: Text, + n_samples_per_gpu: int = 5000, + only_unfinished: bool = False, + predict_path: Text = None, + com_file: Text = None, # batch_param_file="_batch_params.p", - verbose=True, - test=False, - dannce_file=None, + verbose: bool = True, + test: bool = False, + dannce_file: Text = None, ): + """Initialize multi-gpu handler + + Args: + config (Text): Path to base configuration .yaml file + n_samples_per_gpu (int, optional): Number of samples to evaluate for each job. Defaults to 5000. + only_unfinished (bool, optional): If True, only evaluate unfinished jobs. Defaults to False. + predict_path (Text, optional): Path to prediction folder. Defaults to None. + com_file (Text, optional): Path to com file. Defaults to None. + verbose (bool, optional): If True, print out job details. Defaults to True. 
+ test (bool, optional): If True, only print system commands, but do not run them. Defaults to False. + dannce_file (Text, optional): Path to *dannce.mat file. Defaults to None. + """ self.config = config self.n_samples_per_gpu = n_samples_per_gpu self.only_unfinished = only_unfinished @@ -76,38 +111,82 @@ def __init__( self.verbose = verbose self.com_file = com_file self.test = test + self.io_config = load_params(self.config)["io_config"] if dannce_file is None: self.dannce_file = self.load_dannce_file() else: self.dannce_file = dannce_file + + self.setup_logging() - def load_params(self, param_path): - """Load a params file""" + def load_params(self, param_path: Text) -> Dict: + """Load a params file + + Args: + param_path (Text): Path to parameters file + + Returns: + Dict: Parameters dictionary + """ with open(param_path, "rb") as file: params = yaml.safe_load(file) return params - - def save_batch_params(self, batch_params): - """Save the batch_param dictionary to the batch_param file""" + + def setup_logging(self): + + params = self.load_params(self.config) + if not os.path.exists(os.path.dirname(params["log_dest"])): + os.makedirs(os.path.dirname(params["log_dest"])) + logging.basicConfig(filename=params["log_dest"], level=params["log_level"], + format='%(asctime)s %(levelname)s:%(message)s', datefmt='%m/%d/%Y %I:%M:%S %p') + + def save_batch_params(self, batch_params: List): + """Save the batch_param dictionary to the batch_param file. + + Args: + batch_params (List): List of batch parameters. 
+ """ out_dict = {"batch_params": batch_params} with open(self.batch_param_file, "wb") as file: pickle.dump(out_dict, file) - def load_batch_params(self): + def load_batch_params(self) -> List: + """Load the batch parameters + + Returns: + List: batch parameters + """ with open(self.batch_param_file, "rb") as file: in_dict = pickle.load(file) return in_dict["batch_params"] - def load_dannce_file(self, path="."): - """Return the path to the first dannce.mat file in a project folder.""" + def load_dannce_file(self, path: Text = ".") -> Text: + """Return the path to the first dannce.mat file in a project folder. + + Args: + path (Text, optional): Path to folder in which to search for dannce file. Defaults to ".". + + Raises: + FileNotFoundError: If no dannce.mat file is found. + + Returns: + Text: Name of dannce.mat file + """ files = os.listdir(path) dannce_file = [f for f in files if "dannce.mat" in f] if len(dannce_file) == 0: raise FileNotFoundError("No dannce.mat file found.") return dannce_file[0] - def load_com_length_from_file(self): - """Return the length of a com file.""" + def load_com_length_from_file(self) -> int: + """Return the length of a com file. + + Raises: + ValueError: If file extension is not pickle or mat + + Returns: + int: Number of samples. + """ _, file_extension = os.path.splitext(self.com_file) if file_extension == ".pickle": @@ -121,42 +200,82 @@ def load_com_length_from_file(self): raise ValueError("com_file must be a .pickle or .mat file") return n_com_samples - def get_n_samples(self, dannce_file, use_com=False): + def get_n_samples(self, dannce_file: Text, use_com=False) -> int: """Get the number of samples in a project - :param dannce_file: Path to dannce.mat file containing sync and com for current project. + Args: + dannce_file (Text): Path to dannce.mat file containing sync and com for current project. + use_com (bool, optional): If True, get n_samples from the com file. Defaults to False. 
+ + Returns: + int: Number of samples + """ + n_samples = self._n_samples_from_sync(dannce_file) + + if use_com: + com_samples = self._n_samples_from_com(dannce_file) + n_samples = np.min([com_samples, n_samples]) + return n_samples + + def _n_samples_from_com(self, dannce_file: Text) -> int: + """Get the number of samples from com estimates + + Args: + dannce_file (Text): Path to dannce file + + Raises: + KeyError: dannce.mat file needs com field or com_file needs to be specified in io.yaml. + + Returns: + int: Number of com samples + """ + # If a com file is specified, use it + if self.com_file is not None: + com_samples = self.load_com_length_from_file() + else: + # Try to use the com in the dannce .mat, otherwise error. + try: + com = load_com(dannce_file) + com_samples = len(com["sampleID"][0]) + except KeyError: + try: + params = load_params(self.io_config) + self.com_file = params["com_file"] + com_samples = self.load_com_length_from_file() + except: + raise KeyError( + "dannce.mat file needs com field or com_file needs to be specified in io.yaml." + ) + return com_samples + + def _n_samples_from_sync(self, dannce_file: Text) -> int: + """Get the number of samples from the sync field of a dannce.mat file. + + Args: + dannce_file (Text): Path to dannce.mat file + + Returns: + int: Number of samples """ sync = load_sync(dannce_file) n_samples = len(sync[0]["data_frame"]) if n_samples == 1: n_samples = len(sync[0]["data_frame"][0]) + return n_samples - if use_com: - # If a com file is specified, use it - if self.com_file is not None: - com_samples = self.load_com_length_from_file() - else: - # Try to use the com in the dannce .mat, otherwise error. 
- try: - com = load_com(dannce_file) - com_samples = len(com["sampleID"][0]) - except KeyError: - try: - params = self.load_params("io.yaml") - self.com_file = params["com_file"] - com_samples = self.load_com_length_from_file() - except: - raise KeyError( - "dannce.mat file needs com field or com_file needs to be specified in io.yaml." - ) - n_samples = np.min([com_samples, n_samples]) + def generate_batch_params_com(self, n_samples: int) -> List: + """Generate batch parameters list for com inference - return n_samples + Args: + n_samples (int): n_samples in the recording. - def generate_batch_params_com(self, n_samples): - start_samples = np.arange( - 0, n_samples, self.n_samples_per_gpu, dtype=np.int - ) + Raises: + ValueError: If predict_path or com_predict_dir are not specified. + + Returns: + List: Batch parameters list of dictionaries. + """ + start_samples = np.arange(0, n_samples, self.n_samples_per_gpu, dtype=np.int) max_samples = start_samples + self.n_samples_per_gpu batch_params = [ {"start_sample": sb, "max_num_samples": self.n_samples_per_gpu} @@ -164,9 +283,8 @@ def generate_batch_params_com(self, n_samples): ] if self.only_unfinished: - if self.predict_path is None: - params = self.load_params("io.yaml") + params = load_params(self.io_config) if params["com_predict_dir"] is None: raise ValueError( "Either predict_path (clarg) or com_predict_dir (in io.yaml) must be specified for merge" @@ -176,96 +294,184 @@ def generate_batch_params_com(self, n_samples): if not os.path.exists(self.predict_path): os.makedirs(self.predict_path) pred_files = [ - f - for f in os.listdir(self.predict_path) - if COM_PRED_FILE_BASE_NAME in f + f for f in os.listdir(self.predict_path) if COM_BASE_NAME in f ] pred_files = [ f for f in pred_files - if f != (COM_PRED_FILE_BASE_NAME + ".mat") + if not ( + f.endswith(COM_BASE_NAME + ".mat") + or f.endswith(COM_BASE_NAME + ".pickle") + ) ] if len(pred_files) > 1: - params = self.load_params(self.config) - pred_ids = [ - 
int(f.split(".")[0].split("3d")[1]) for f in pred_files - ] + params = load_params(self.config) + pred_ids = [int(f.split(".")[0].split("3d")[1]) for f in pred_files] for i, batch_param in reversed(list(enumerate(batch_params))): if batch_param["start_sample"] in pred_ids: del batch_params[i] return batch_params - def generate_batch_params_dannce(self, n_samples): - start_samples = np.arange( - 0, n_samples, self.n_samples_per_gpu, dtype=np.int - ) + def generate_batch_params_dannce(self, n_samples: int) -> List: + """Generate batch parameters list for dannce inference + + Args: + n_samples (int): n_samples in the recording. + + Raises: + ValueError: If predict_path or com_predict_dir are not specified. + + Returns: + List: Batch parameters list of dictionaries. + """ + start_samples = np.arange(0, n_samples, self.n_samples_per_gpu, dtype=np.int) max_samples = start_samples + self.n_samples_per_gpu max_samples[-1] = n_samples - batch_params = [ - {"start_sample": sb, "max_num_samples": mb} - for sb, mb in zip(start_samples, max_samples) - ] - # Delete batch_params that were already finished - if self.only_unfinished: - - if self.predict_path is None: - params = self.load_params("io.yaml") - if params["dannce_predict_dir"] is None: - raise ValueError( - "Either predict_path (clarg) or dannce_predict_dir (in io.yaml) must be specified for merge" + params = load_params(self.config) + params = {**params, **load_params(self.io_config)} + if "n_instances" not in params: + params["n_instances"] = 1 + + # If multi-instance, set the com_file and dannce predict path automatically + if params["n_instances"] >= 2: + batch_params = [] + for n_instance in range(params["n_instances"]): + com_file = os.path.join( + params["com_predict_dir"], "instance%dcom3d.mat" % (n_instance) + ) + dannce_predict_dir = os.path.join( + params["dannce_predict_dir"], "instance%d" % (n_instance) + ) + os.makedirs(dannce_predict_dir, exist_ok=True) + for sb, mb in zip(start_samples, max_samples): + 
batch_params.append( + { + "start_sample": sb, + "max_num_samples": mb, + "com_file": com_file, + "dannce_predict_dir": dannce_predict_dir, + } ) - else: - self.predict_path = params["dannce_predict_dir"] - if not os.path.exists(self.predict_path): - os.makedirs(self.predict_path) - pred_files = [ - f - for f in os.listdir(self.predict_path) - if DANNCE_PRED_FILE_BASE_NAME in f - ] - pred_files = [ - f - for f in pred_files - if f != (DANNCE_PRED_FILE_BASE_NAME + ".mat") + # Delete batch_params that were already finished + if self.only_unfinished: + batch_params = self.remove_finished_batches_multi_instance(batch_params) + else: + batch_params = [ + {"start_sample": sb, "max_num_samples": mb} + for sb, mb in zip(start_samples, max_samples) ] + + # Delete batch_params that were already finished + if self.only_unfinished: + batch_params = self.remove_finished_batches(batch_params) + return batch_params + + def remove_finished_batches_multi_instance(self, batch_params: List) -> List: + """Remove finished batches from parameters list. + + Args: + batch_params (List): Batch parameters list. + + Returns: + (List): Updated batch parameters list. + """ + dannce_predict_dirs = [param["dannce_predict_dir"] for param in batch_params] + dannce_predict_dirs = list(set(dannce_predict_dirs)) + + # For each instance directory, find the completed batches and delete the params. + for pred_dir in dannce_predict_dirs: + # Get all of the files + pred_files = [f for f in os.listdir(pred_dir) if DANNCE_BASE_NAME in f] + + # Remove any of the default merged files. 
+ pred_files = [f for f in pred_files if f != (DANNCE_BASE_NAME + ".mat")] if len(pred_files) > 1: - params = self.load_params(self.config) + params = load_params(self.config) pred_ids = [ int(f.split(".")[0].split("AVG")[1]) * params["batch_size"] for f in pred_files ] for i, batch_param in reversed(list(enumerate(batch_params))): - if batch_param["start_sample"] in pred_ids: + if ( + batch_param["start_sample"] in pred_ids + and batch_param["dannce_predict_dir"] == pred_dir + ): del batch_params[i] return batch_params - def submit_jobs(self, batch_params, cmd): - """Print out description of command and issue system command""" + def remove_finished_batches(self, batch_params: List) -> List: + """Remove finished batches from parameters list. + + Args: + batch_params (List): Batch parameters list. + + Returns: + (List): Updated batch parameters list. + """ + if self.predict_path is None: + params = load_params(self.io_config) + if params["dannce_predict_dir"] is None: + raise ValueError( + "Either predict_path (clarg) or dannce_predict_dir (in io.yaml) must be specified for merge" + ) + else: + self.predict_path = params["dannce_predict_dir"] + if not os.path.exists(self.predict_path): + os.makedirs(self.predict_path) + + # Get all of the files + pred_files = [f for f in os.listdir(self.predict_path) if DANNCE_BASE_NAME in f] + + # Remove any of the default merged files. 
+ pred_files = [f for f in pred_files if f != (DANNCE_BASE_NAME + ".mat")] + if len(pred_files) > 1: + params = load_params(self.config) + pred_ids = [ + int(f.split(".")[0].split("AVG")[1]) * params["batch_size"] + for f in pred_files + ] + for i, batch_param in reversed(list(enumerate(batch_params))): + if batch_param["start_sample"] in pred_ids: + del batch_params[i] + return batch_params + + def submit_jobs(self, batch_params: List, cmd: str): + """Print out description of command and issue system command + + Args: + batch_params (List): Batch parameters list + cmd (str): System command + """ + prepend_log_msg = FILE_PATH + ".MultiGpuHandler.submit_jobs " if self.verbose: for batch_param in batch_params: - print("Start sample:", batch_param["start_sample"]) - print("End sample:", batch_param["max_num_samples"]) - print("Command issued: ", cmd) + logging.debug("Start sample:", batch_param["start_sample"]) + logging.debug("End sample:", batch_param["max_num_samples"]) + logging.info(prepend_log_msg + "Command issued: ", cmd) if not self.test: - sys.exit(os.WEXITSTATUS(os.system(cmd))) + return os.WEXITSTATUS(os.system(cmd)) def submit_dannce_predict_multi_gpu(self): """Predict dannce over multiple gpus in parallel. Divide project into equal chunks of n_samples_per_gpu samples. Submit an array job that predicts over each chunk in parallel. 
- """ n_samples = self.get_n_samples(self.dannce_file, use_com=True) batch_params = self.generate_batch_params_dannce(n_samples) + slurm_config = load_params(load_params(self.config)["slurm_config"]) + cmd = ( - "sbatch --wait --array=0-%d holy_dannce_predict_multi_gpu.sh %s" + 'sbatch --wait --array=0-%d %s --wrap="%s dannce-predict-single-batch %s"' % ( len(batch_params) - 1, + slurm_config["dannce_multi_predict"], + slurm_config["setup"], self.config, ) ) + if len(batch_params) > 0: self.save_batch_params(batch_params) self.submit_jobs(batch_params, cmd) @@ -276,14 +482,21 @@ def submit_com_predict_multi_gpu(self): Divide project into equal chunks of n_samples_per_gpu samples. Submit an array job that predicts over each chunk in parallel. - """ + prepend_log_msg = FILE_PATH + ".MultiGpuHandler.submit_com_predict_multi_gpu " + n_samples = self.get_n_samples(self.dannce_file, use_com=False) - print(n_samples) + logging.info(prepend_log_msg + str(n_samples)) batch_params = self.generate_batch_params_com(n_samples) - cmd = "sbatch --wait --array=0-%d holy_com_predict_multi_gpu.sh %s" % ( - len(batch_params) - 1, - self.config, + slurm_config = load_params(load_params(self.config)["slurm_config"]) + cmd = ( + 'sbatch --wait --array=0-%d %s --wrap="%s com-predict-single-batch %s"' + % ( + len(batch_params) - 1, + slurm_config["com_multi_predict"], + slurm_config["setup"], + self.config, + ) ) if len(batch_params) > 0: self.save_batch_params(batch_params) @@ -291,10 +504,16 @@ def submit_com_predict_multi_gpu(self): return batch_params, cmd def com_merge(self): + """Merge com chunks into a single file. + + Raises: + ValueError: If predict_path or com_predict_dir are not specified. 
+ FileNotFoundError: If no prediction files were found in the prediction dir + """ # Get all of the paths if self.predict_path is None: # Try to get it from io.yaml - params = self.load_params("io.yaml") + params = load_params(self.io_config) if params["com_predict_dir"] is None: raise ValueError( "Either predict_path (clarg) or com_predict_dir (in io.yaml) must be specified for merge" @@ -304,17 +523,14 @@ def com_merge(self): pred_files = [ f for f in os.listdir(self.predict_path) - if COM_PRED_FILE_BASE_NAME in f and ".mat" in f + if COM_BASE_NAME in f and ".mat" in f ] pred_files = [ f for f in pred_files - if f != (COM_PRED_FILE_BASE_NAME + ".mat") and "instance" not in f - ] - pred_inds = [ - int(f.split(COM_PRED_FILE_BASE_NAME)[-1].split(".")[0]) - for f in pred_files + if f != (COM_BASE_NAME + ".mat") and "instance" not in f ] + pred_inds = [int(f.split(COM_BASE_NAME)[-1].split(".")[0]) for f in pred_files] pred_files = [pred_files[i] for i in np.argsort(pred_inds)] if len(pred_files) == 0: @@ -336,14 +552,12 @@ def com_merge(self): metadata["start_sample"] = 0 metadata["max_num_samples"] = "max" + # if len(com.shape == 3), there are multiple instanes if len(com.shape) == 3: for n_instance in range(com.shape[2]): fn = os.path.join( self.predict_path, - "instance" - + str(n_instance) - + COM_PRED_FILE_BASE_NAME - + ".mat", + "instance" + str(n_instance) + COM_BASE_NAME + ".mat", ) savemat( fn, @@ -355,35 +569,30 @@ def com_merge(self): ) # save to a single file. else: - fn = os.path.join( - self.predict_path, COM_PRED_FILE_BASE_NAME + ".mat" - ) - savemat( - fn, {"com": com, "sampleID": sampleID, "metadata": metadata} - ) + fn = os.path.join(self.predict_path, COM_BASE_NAME + ".mat") + savemat(fn, {"com": com, "sampleID": sampleID, "metadata": metadata}) def dannce_merge(self): + """Merge dannce chunks into a single file. + + Raises: + ValueError: If predict_path or com_predict_dir are not specified. 
+ FileNotFoundError: If no prediction files were found in the prediction dir + """ # Get all of the paths if self.predict_path is None: # Try to get it from io.yaml - params = self.load_params("io.yaml") + params = load_params(self.io_config) if params["dannce_predict_dir"] is None: raise ValueError( "Either predict_path (clarg) or dannce_predict_dir (in io.yaml) must be specified for merge" ) else: self.predict_path = params["dannce_predict_dir"] - pred_files = [ - f - for f in os.listdir(self.predict_path) - if DANNCE_PRED_FILE_BASE_NAME in f - ] - pred_files = [ - f for f in pred_files if f != (DANNCE_PRED_FILE_BASE_NAME + ".mat") - ] + pred_files = [f for f in os.listdir(self.predict_path) if DANNCE_BASE_NAME in f] + pred_files = [f for f in pred_files if f != (DANNCE_BASE_NAME + ".mat")] pred_inds = [ - int(f.split(DANNCE_PRED_FILE_BASE_NAME)[-1].split(".")[0]) - for f in pred_files + int(f.split(DANNCE_BASE_NAME)[-1].split(".")[0]) for f in pred_files ] pred_files = [pred_files[i] for i in np.argsort(pred_inds)] if len(pred_files) == 0: @@ -409,9 +618,7 @@ def dannce_merge(self): metadata["max_num_samples"] = "max" # save to a single file. - fn = os.path.join( - self.predict_path, DANNCE_PRED_FILE_BASE_NAME + ".mat" - ) + fn = os.path.join(self.predict_path, DANNCE_BASE_NAME + ".mat") savemat( fn, { @@ -424,7 +631,19 @@ def dannce_merge(self): ) -def build_params_from_config_and_batch(config, batch_param, dannce_net=True): +def build_params_from_config_and_batch( + config: Text, batch_param: Dict, dannce_net: bool = True +) -> Dict: + """Build parameters from configuration file and batch parameters + + Args: + config (Text): Path to base config .yaml file. + batch_param (Dict): batch parameters dictionary + dannce_net (bool, optional): If True, treat with defaults for dannce nets. Defaults to True. 
+ + Returns: + Dict: Parameters dictionary + """ from dannce.interface import build_params from dannce.engine.processing import infer_params @@ -449,15 +668,23 @@ def build_params_from_config_and_batch(config, batch_param, dannce_net=True): def dannce_predict_single_batch(): + """CLI entrypoint to predict a single batch.""" from dannce.interface import dannce_predict + prepend_log_msg = FILE_PATH + "dannce_predict_single_batch" + # Load in parameters to modify config = sys.argv[1] handler = MultiGpuHandler(config) batch_params = handler.load_batch_params() task_id = int(os.getenv("SLURM_ARRAY_TASK_ID")) batch_param = batch_params[task_id] - print(batch_param) + if not os.path.exists(os.path.dirname(handler.load_params(handler.config)["log_dest"])): + os.makedirs(os.path.dirname(handler.load_params(handler.config)["log_dest"])) + logging.basicConfig(filename=handler.load_params(handler.config)["log_dest"], + level=handler.load_params(handler.config)["log_level"], + format='%(asctime)s %(levelname)s:%(message)s', datefmt='%m/%d/%Y %I:%M:%S %p') + logging.info(prepend_log_msg + str(batch_param)) # Build final parameter dictionary params = build_params_from_config_and_batch(config, batch_param) @@ -467,8 +694,11 @@ def dannce_predict_single_batch(): def com_predict_single_batch(): + """CLI entrypoint to predict a single batch.""" from dannce.interface import com_predict + prepend_log_msg = FILE_PATH + "com_predict_single_batch" + # Load in parameters to modify config = sys.argv[1] handler = MultiGpuHandler(config) @@ -476,12 +706,15 @@ def com_predict_single_batch(): task_id = int(os.getenv("SLURM_ARRAY_TASK_ID")) # task_id = 0 batch_param = batch_params[task_id] - print(batch_param) + if not os.path.exists(os.path.dirname(handler.load_params(handler.config)["log_dest"])): + os.makedirs(os.path.dirname(handler.load_params(handler.config)["log_dest"])) + logging.basicConfig(filename=handler.load_params(handler.config)["log_dest"], + 
level=handler.load_params(handler.config)["log_level"], + format='%(asctime)s %(levelname)s:%(message)s', datefmt='%m/%d/%Y %I:%M:%S %p') + logging.info(prepend_log_msg + str(batch_param)) # Build final parameter dictionary - params = build_params_from_config_and_batch( - config, batch_param, dannce_net=False - ) + params = build_params_from_config_and_batch(config, batch_param, dannce_net=False) # Predict try: @@ -492,7 +725,131 @@ def com_predict_single_batch(): com_predict(params) +def inference(): + """CLI entrypoint to coordinate full inference job.""" + # Make parser object + args = inference_clargs() + + # Load in parameters to modify + handler = MultiGpuHandler( + args["com_config"], only_unfinished=True, test=args["test"] + ) + handler.submit_com_predict_multi_gpu() + handler.submit_com_predict_multi_gpu() + handler.submit_com_predict_multi_gpu() + handler.submit_com_predict_multi_gpu() + handler.submit_com_predict_multi_gpu() + if args["test"]: + print("Skipping com merge during test.") + else: + handler.com_merge() + + handler = MultiGpuHandler( + args["dannce_config"], only_unfinished=True, test=args["test"] + ) + handler.submit_dannce_predict_multi_gpu() + handler.submit_dannce_predict_multi_gpu() + handler.submit_dannce_predict_multi_gpu() + handler.submit_dannce_predict_multi_gpu() + handler.submit_dannce_predict_multi_gpu() + if args["test"]: + print("Skipping dannce merge during test.") + else: + handler.dannce_merge() + + +def submit_inference(): + """CLI entrypoint to submit jobs to coordinate full inference.""" + # Make parser object + args = inference_clargs() + com_config = load_params(args["com_config"]) + dannce_config = load_params(args["dannce_config"]) + slurm_config = load_params(dannce_config["slurm_config"]) + io_config = load_params(dannce_config["io_config"]) + + # Determine whether running multi instance or single instance + for config in [com_config, dannce_config, io_config]: + if "n_instances" in config: + if 
config["n_instances"] >= 2: + inference_command = "dannce-multi-instance-inference" + break + else: + inference_command = "dannce-inference" + else: + inference_command = "dannce-inference" + + cmd = 'sbatch %s --wrap="%s %s %s %s"' % ( + slurm_config["inference"], + slurm_config["setup"], + inference_command, + args["com_config"], + args["dannce_config"], + ) + print(cmd) + if not args["test"]: + os.system(cmd) + + +def inference_clargs(): + p = argparse.ArgumentParser( + description=__doc__, + formatter_class=argparse.RawDescriptionHelpFormatter, + ) + p.add_argument("com_config", help="Path to .yaml configuration file") + p.add_argument("dannce_config", help="Path to .yaml configuration file") + p.add_argument( + "--test", + dest="test", + type=ast.literal_eval, + default=False, + help="If True, print out submission command and info, but do not submit jobs.", + ) + return p.parse_args().__dict__ + + +def multi_instance_inference(): + args = inference_clargs() + # Load in parameters to modify + handler = MultiGpuHandler( + args["com_config"], only_unfinished=True, test=args["test"] + ) + handler.submit_com_predict_multi_gpu() + handler.submit_com_predict_multi_gpu() + handler.submit_com_predict_multi_gpu() + if args["test"]: + print("Skipping com merge during test.") + else: + handler.com_merge() + + handler = MultiGpuHandler( + args["dannce_config"], only_unfinished=True, test=args["test"] + ) + handler.submit_dannce_predict_multi_gpu() + handler.submit_dannce_predict_multi_gpu() + handler.submit_dannce_predict_multi_gpu() + + params = load_params(handler.io_config) + instance_0_path = os.path.join(params["dannce_predict_dir"], "instance0") + instance_1_path = os.path.join(params["dannce_predict_dir"], "instance1") + + handler = MultiGpuHandler( + args["dannce_config"], predict_path=instance_0_path, test=args["test"] + ) + if args["test"]: + print("Skipping dannce merge during test.") + else: + handler.dannce_merge() + handler = MultiGpuHandler( + 
args["dannce_config"], predict_path=instance_1_path, test=args["test"] + ) + if args["test"]: + print("Skipping dannce merge during test.") + else: + handler.dannce_merge() + + def dannce_predict_multi_gpu(): + """CLI entrypoint to submit batch jobs.""" # Load in parameters to modify args = cmdline_args() handler = MultiGpuHandler(**args.__dict__) @@ -500,6 +857,7 @@ def dannce_predict_multi_gpu(): def com_predict_multi_gpu(): + """CLI entrypoint to submit batch jobs.""" # Load in parameters to modify args = cmdline_args() handler = MultiGpuHandler(**args.__dict__) @@ -507,18 +865,25 @@ def com_predict_multi_gpu(): def com_merge(): + """CLI entrypoint to merge batch jobs.""" args = cmdline_args() handler = MultiGpuHandler(**args.__dict__) handler.com_merge() def dannce_merge(): + """CLI entrypoint to merge batch jobs.""" args = cmdline_args() handler = MultiGpuHandler(**args.__dict__) handler.dannce_merge() def cmdline_args(): + """Handle command line arguments + + Returns: + [type]: argparse parser values + """ # Make parser object p = argparse.ArgumentParser( description=__doc__, diff --git a/cluster/multi_gpu_test.py b/cluster/multi_gpu_test.py index 6c4d300..8c38af4 100755 --- a/cluster/multi_gpu_test.py +++ b/cluster/multi_gpu_test.py @@ -1,15 +1,18 @@ """Tests for locomotion.tasks.two_tap.""" -import cluster.multi_gpu as multi_gpu +import multi_gpu import functools +from unittest.mock import patch from absl.testing import absltest import numpy as np import os -DEMO_PATH = "../demo/markerless_mouse1" -CONFIG_PATH = "../tests/configs/dannce_mouse_config.yaml" -DANNCE_PATH = "../tests/configs/label3d_dannce.mat" +DEMO_PATH = "../demo/markerless_mouse_1" +os.chdir(DEMO_PATH) +CONFIG_PATH = "../../tests/configs/config_mousetest.yaml" +MULTI_INSTANCE_CONFIG_PATH = "../../tests/configs/config_mousetest_multi_instance.yaml" +DANNCE_PATH = "../../tests/configs/label3d_dannce.mat" class MultiGpuTest(absltest.TestCase): @@ -31,6 +34,22 @@ def 
test_dannce_predict_batch_params(self): self.assertTrue(os.path.exists(handler.batch_param_file)) self.assertTrue(len(batch_params) == 10) + def test_dannce_predict_batch_params_multi_instance(self): + handler = multi_gpu.MultiGpuHandler( + MULTI_INSTANCE_CONFIG_PATH, + n_samples_per_gpu=100, + verbose=False, + test=True, + dannce_file=DANNCE_PATH, + ) + batch_params, _ = handler.submit_dannce_predict_multi_gpu() + print(batch_params) + self.assertTrue(len(batch_params) == 20) + + def test_dannce_inference_submission(self): + with patch("sys.argv", ["dannce-inference", MULTI_INSTANCE_CONFIG_PATH, MULTI_INSTANCE_CONFIG_PATH, "--test=True"]): + multi_gpu.submit_inference() + def test_com_predict_batch_params(self): handler = multi_gpu.MultiGpuHandler( CONFIG_PATH, @@ -44,22 +63,30 @@ def test_com_predict_batch_params(self): self.assertTrue(len(batch_params) == 180) def test_raises_error_if_no_dannce_file(self): + # Move to a directory in which there is no dannce.mat file + os.chdir("..") with self.assertRaises(FileNotFoundError): handler = multi_gpu.MultiGpuHandler( CONFIG_PATH, n_samples_per_gpu=100, verbose=False, test=True ) def test_dannce_predict_multi_gpu_cli(self): - cmd = "dannce-predict-multi-gpu %s --test=True --verbose=False --dannce-file=%s" % ( - CONFIG_PATH, - DANNCE_PATH, + cmd = ( + "dannce-predict-multi-gpu %s --test=True --verbose=False --dannce-file=%s" + % ( + CONFIG_PATH, + DANNCE_PATH, + ) ) os.system(cmd) def test_com_predict_multi_gpu_cli(self): - cmd = "com-predict-multi-gpu %s --test=True --verbose=False --dannce-file=%s" % ( - CONFIG_PATH, - DANNCE_PATH, + cmd = ( + "com-predict-multi-gpu %s --test=True --verbose=False --dannce-file=%s" + % ( + CONFIG_PATH, + DANNCE_PATH, + ) ) os.system(cmd) diff --git a/configs/dannce_mouse_config.yaml b/configs/dannce_mouse_config.yaml index 73473b6..797f4f3 100755 --- a/configs/dannce_mouse_config.yaml +++ b/configs/dannce_mouse_config.yaml @@ -33,4 +33,7 @@ nvox: 64 max_num_samples: 1000 # By default, 
will load in the first hdf5 file at this location for fine-tuning. If training from scratch, set to None -dannce_finetune_weights: ./DANNCE/weights/weights.rat.MAX/ \ No newline at end of file +dannce_finetune_weights: ./DANNCE/weights/weights.rat.MAX/ + +log_level: DEBUG +# log_dest: ../logs/dannce-04-15-22.log \ No newline at end of file diff --git a/configs/dannce_rig_com_config.yaml b/configs/dannce_rig_com_config.yaml index 035efb6..5487608 100755 --- a/configs/dannce_rig_com_config.yaml +++ b/configs/dannce_rig_com_config.yaml @@ -50,3 +50,5 @@ max_num_samples: 'max' dsmode: dsm # Medianfilter window medfilt_window: 30 + +slurm_config: /n/holylfs02/LABS/olveczky_lab/Diego/code/dannce/cluster/holyoke.yaml \ No newline at end of file diff --git a/configs/dannce_rig_dannce_config.yaml b/configs/dannce_rig_dannce_config.yaml index 44ccf03..48ba242 100755 --- a/configs/dannce_rig_dannce_config.yaml +++ b/configs/dannce_rig_dannce_config.yaml @@ -63,7 +63,7 @@ train_mode: finetune n_layers_locked: 0 # DANNCE training. Metric to be monitored in addition to loss -metric: ['mse'] +metric: ['mse', 'euclidean_distance_3D'] #['euclidean_distance_3D','centered_euclidean_distance_3D'] # How many samples from each animal do you want to (randomly) set aside for a validation metric? 
@@ -115,3 +115,8 @@ predict_mode: 'torch' medfilt_window: 30 # debug_volume_tifdir: ./volumes +rand_view_replace: True + +n_rand_views: 6 + +slurm_config: /n/holylfs02/LABS/olveczky_lab/Diego/code/dannce/cluster/holyoke.yaml \ No newline at end of file diff --git a/configs/left_or_right_colormap.mat b/configs/left_or_right_colormap.mat new file mode 100644 index 0000000..2dde3d6 Binary files /dev/null and b/configs/left_or_right_colormap.mat differ diff --git a/dannce/__init__.py b/dannce/__init__.py index ebd175b..27dd114 100755 --- a/dannce/__init__.py +++ b/dannce/__init__.py @@ -1,14 +1,15 @@ +from datetime import datetime """Dannce module and default parameters""" # Default parameters, which can be superseded by CL arguments or # config files _param_defaults_shared = { "immode": "vid", "verbose": 1, - "gpu_id": "0", + "gpu_id": None, "loss": "mask_nan_keep_loss", "start_batch": 0, "exp": None, - "viddir": 'videos', + "viddir": "videos", "io_config": None, "crop_height": None, "crop_width": None, @@ -30,6 +31,9 @@ "augment_hue_val": 0.05, "augment_bright_val": 0.05, "augment_rotation_val": 5, + "mirror_augmentation": False, + "right_keypoints": None, + "left_keypoints": None, "drop_landmark": None, "raw_im_h": None, "raw_im_w": None, @@ -41,6 +45,10 @@ "use_npy": False, "data_split_seed": None, "valid_exp": None, + "norm_method":"layer", + "slurm_config": None, + "log_level": "INFO", + "log_dest": "../../logs/dannce_"+datetime.now().strftime("%b%d_%Y")+ ".log", } _param_defaults_dannce = { "metric": ["euclidean_distance_3D"], @@ -89,6 +97,8 @@ "heatmap_reg": False, "heatmap_reg_coeff": 0.01, "save_pred_targets": False, + "huber-delta": 1.35, #Change Adapted from implementation by robb + "avg+max": None, } _param_defaults_com = { "dsmode": "nn", diff --git a/dannce/callbacks.py b/dannce/callbacks.py new file mode 100644 index 0000000..e13c21f --- /dev/null +++ b/dannce/callbacks.py @@ -0,0 +1,123 @@ +import tensorflow.keras as keras +from tensorflow.keras import 
backend as K +import os +import scipy.io as sio +import numpy as np +import dannce.engine.processing as processing +from dannce.engine import losses +import gc + +class savePredTargets(keras.callbacks.Callback): + def __init__( + self, total_epochs, td, tgrid, vd, vgrid, tID, vID, odir, tlabel, vlabel + ): + self.td = td + self.vd = vd + self.tID = tID + self.vID = vID + self.total_epochs = total_epochs + self.val_loss = 1e10 + self.odir = odir + self.tgrid = tgrid + self.vgrid = vgrid + self.tlabel = tlabel + self.vlabel = vlabel + + def on_epoch_end(self, epoch, logs=None): + lkey = "val_loss" if "val_loss" in logs else "loss" + if ( + epoch == self.total_epochs - 1 + or logs[lkey] < self.val_loss + ): + print( + "Saving predictions on train and validation data, after epoch {}".format( + epoch + ) + ) + self.val_loss = logs[lkey] + pred_t = self.model.predict([self.td, self.tgrid], batch_size=1) + pred_v = self.model.predict([self.vd, self.vgrid], batch_size=1) + ofile = os.path.join( + self.odir, "checkpoint_predictions_e{}.mat".format(epoch) + ) + sio.savemat( + ofile, + { + "pred_train": pred_t, + "pred_valid": pred_v, + "target_train": self.tlabel, + "target_valid": self.vlabel, + "train_sampleIDs": self.tID, + "valid_sampleIDs": self.vID, + }, + ) + +class saveMaxPreds(keras.callbacks.Callback): + """ + This callback fully evaluates MAX predictions and logs the euclidean + distance error to a file. 
+ """ + + def __init__(self, vID, vData, vLabel, odir, com, params): + self.vID = vID + self.vData = vData + self.odir = odir + self.com = com + self.param_mat = params + self.total_epochs = params["epochs"] + + fn = os.path.join(odir, "max_euclid_error.csv") + self.fn = fn + + self.vLabel = np.zeros((len(vID), 3, params["new_n_channels_out"])) + + # Now run thru sample IDs, pull out the correct COM, and add it in + for j in range(len(self.vID)): + id_ = self.vID[j] + self.vLabel[j] = vLabel[id_] + + with open(fn, "w") as fd: + fd.write("epoch,error\n") + + def on_epoch_end(self, epoch, logs=None): + pred_v = self.model.predict([self.vData], batch_size=1) + d_coords = np.zeros((pred_v.shape[0], 3, pred_v.shape[-1])) + for j in range(pred_v.shape[0]): + xcoord, ycoord, zcoord = processing.plot_markers_3d(pred_v[j]) + d_coords[j] = np.stack([xcoord, ycoord, zcoord]) + + vsize = (self.param_mat["vmax"] - self.param_mat["vmin"]) / self.param_mat[ + "nvox" + ] + # # First, need to move coordinates over to centers of voxels + pred_out_world = self.param_mat["vmin"] + d_coords * vsize + vsize / 2 + + # Now run thru sample IDs, pull out the correct COM, and add it in + for j in range(len(self.vID)): + id_ = self.vID[j] + tcom = self.com[id_] + pred_out_world[j] = pred_out_world[j] + tcom[:, np.newaxis] + + # Calculate euclidean_distance_3d + e3d = K.eval(losses.euclidean_distance_3D(self.vLabel, pred_out_world)) + + print("epoch {} euclidean_distance_3d: {}".format(epoch, e3d)) + with open(self.fn, "a") as fd: + fd.write("{},{}\n".format(epoch, e3d)) + +class saveCheckPoint(keras.callbacks.Callback): + def __init__(self, odir, total_epochs): + self.odir = odir + self.saveE = np.arange(0, total_epochs, 250) + + def on_epoch_end(self, epoch, logs=None): + lkey = "val_loss" if "val_loss" in logs else "loss" + val_loss = logs[lkey] + if epoch in self.saveE: + # Do a garbage collect to combat keras memory leak + gc.collect() + print("Saving checkpoint weights at epoch 
{}".format(epoch)) + savename = "weights.checkpoint.epoch{}.{}{:.5f}.hdf5".format( + epoch, lkey, val_loss + ) + self.model.save(os.path.join(self.odir, savename)) \ No newline at end of file diff --git a/dannce/cli.py b/dannce/cli.py index 463fc78..0466f34 100755 --- a/dannce/cli.py +++ b/dannce/cli.py @@ -12,10 +12,91 @@ _param_defaults_shared, _param_defaults_com, ) +import os import sys import ast import argparse +import yaml from typing import Dict, Text +import logging + + +def load_params(param_path: Text) -> Dict: + """Load a params file + + Args: + param_path (Text): Path to .yaml file + + Returns: + Dict: Parameters + """ + with open(param_path, "rb") as file: + params = yaml.safe_load(file) + return params + + +def parse_sbatch() -> Text: + """Parse sbatch call for base config + + Returns: + Text: Base config path + """ + parser = argparse.ArgumentParser( + description="Com predict CLI", + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + ) + parser.add_argument( + "base_config", metavar="base_config", help="Path to base config." 
+ ) + return parser.parse_args().base_config + + +def sbatch_dannce_predict_cli(): + """CLI to submit dannce prediction through sbatch using the slurm config specified in the base config.""" + base_config = parse_sbatch() + slurm_config = load_params(load_params(base_config)["slurm_config"]) + cmd = 'sbatch %s --wrap="%s dannce-predict %s"' % ( + slurm_config["dannce_predict"], + slurm_config["setup"], + base_config, + ) + os.system(cmd) + + +def sbatch_dannce_train_cli(): + """CLI to submit dannce training through sbatch using the slurm config specified in the base config.""" + base_config = parse_sbatch() + slurm_config = load_params(load_params(base_config)["slurm_config"]) + cmd = 'sbatch %s --wrap="%s dannce-train %s"' % ( + slurm_config["dannce_train"], + slurm_config["setup"], + base_config, + ) + os.system(cmd) + + +def sbatch_com_predict_cli(): + """CLI to submit com prediction through sbatch using the slurm config specified in the base config.""" + base_config = parse_sbatch() + slurm_config = load_params(load_params(base_config)["slurm_config"]) + cmd = 'sbatch %s --wrap="%s com-predict %s"' % ( + slurm_config["com_predict"], + slurm_config["setup"], + base_config, + ) + os.system(cmd) + + +def sbatch_com_train_cli(): + """CLI to submit com training through sbatch using the slurm config specified in the base config.""" + base_config = parse_sbatch() + slurm_config = load_params(load_params(base_config)["slurm_config"]) + cmd = 'sbatch %s --wrap="%s com-train %s"' % ( + slurm_config["com_train"], + slurm_config["setup"], + base_config, + ) + os.system(cmd) def com_predict_cli(): @@ -104,9 +185,7 @@ def add_shared_args( parser.add_argument( "base_config", metavar="base_config", help="Path to base config." ) - parser.add_argument( - "--viddir", dest="viddir", help="Directory containing videos."
- ) + parser.add_argument("--viddir", dest="viddir", help="Directory containing videos.") parser.add_argument( "--crop-height", dest="crop_height", @@ -126,9 +205,7 @@ def add_shared_args( help="List of ordered camera names.", ) - parser.add_argument( - "--io-config", dest="io_config", help="Path to io.yaml file." - ) + parser.add_argument("--io-config", dest="io_config", help="Path to io.yaml file.") parser.add_argument( "--n-channels-out", @@ -153,15 +230,11 @@ def add_shared_args( dest="verbose", help="verbose=0 prints nothing to std out. verbose=1 prints training summary to std out.", ) - parser.add_argument( - "--net", dest="net", help="Network architecture. See nets.py" - ) + parser.add_argument("--net", dest="net", help="Network architecture. See nets.py") parser.add_argument( "--gpu-id", dest="gpu_id", help="String identifying GPU to use." ) - parser.add_argument( - "--immode", dest="immode", help="Data format for images." - ) + parser.add_argument("--immode", dest="immode", help="Data format for images.") parser.add_argument( "--mono", @@ -177,6 +250,24 @@ def add_shared_args( help="If true, uses a single video file for multiple views.", ) + parser.add_argument( + "--norm-method", + dest="norm_method", + help="Normalization method to use, can be 'batch', 'instance', or 'layer'.", + ) + + parser.add_argument( + "--log-level", + dest="log_level", + help="Level of logging to use, can be 'DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'. Default is 'INFO'.", + ) + + parser.add_argument( + "--log-dest", + dest="log_dest", + help="Log File location to where logs are to be written to. By default, location is set to ../logs/dannce.log ", + ) + return parser @@ -202,6 +293,13 @@ def add_shared_train_args( dest="loss", help="Loss function to use during training. 
See losses.py.", ) + # Taken from changes by robb + parser.add_argument( + "--huber-delta", + dest="huber-delta", + type=float, + help="Delta Value if using huber loss", + ) parser.add_argument( "--epochs", dest="epochs", type=int, help="Number of epochs to train." ) @@ -422,7 +520,8 @@ def add_dannce_shared_args( "--n-views", dest="n_views", type=int, - help="Sets the absolute number of views (when using fewer than 6 views only)") + help="Sets the absolute number of views (when using fewer than 6 views only)", + ) parser.add_argument( "--train-mode", dest="train_mode", @@ -467,6 +566,12 @@ def add_dannce_train_args( type=ast.literal_eval, help="If True, rotate all images in each sample of the training set by a random value between [-5 and 5] degrees during training.", ) + parser.add_argument( + "--mirror-augmentation", + dest="mirror_augmentation", + type=ast.literal_eval, + help="If True, mirror the images in half of the samples of the training set.", + ) parser.add_argument( "--drop-landmark", dest="drop_landmark", @@ -477,19 +582,19 @@ def add_dannce_train_args( "--use-npy", dest="use_npy", type=ast.literal_eval, - help="If True, loads training data from npy files" + help="If True, loads training data from npy files", ) parser.add_argument( "--rand-view-replace", dest="rand_view_replace", type=ast.literal_eval, - help="If True, samples n_rand_views with replacement" + help="If True, samples n_rand_views with replacement", ) parser.add_argument( "--n-rand-views", dest="n_rand_views", type=ast.literal_eval, - help="Number of views to sample from the full viewset during training" + help="Number of views to sample from the full viewset during training", ) parser.add_argument( "--multi-gpu-train", @@ -515,6 +620,13 @@ def add_dannce_train_args( type=ast.literal_eval, help="If True, save predictions evaluated at checkpoints during training. 
Note that for large training datasets, this can cause memory issues.", ) + parser.add_argument( + "--avg-max", + dest="avg+max", + type=float, + help="Pass a floating point value here for DANNCE to enter AVG+MAX training mode, where the 3D maps are MAX-like regularized to be Gaussian. The avg+max value is used to weight the contribution of the MAX-like loss." + ) + return parser @@ -557,11 +669,11 @@ def add_dannce_predict_args( help="If True, attempt to load in a prediction model without requiring a full model file (i.e. just using weights). May fail for some model types.", ) parser.add_argument( - "--write-npy", - dest="write_npy", - help="If not None, uses this base path to write large dataset to npy files" + "--write-npy", + dest="write_npy", + help="If not None, uses this base path to write large dataset to npy files", ) - + return parser @@ -722,9 +834,7 @@ def parse_clargs( return parser.parse_args() -def combine( - base_params: Dict, clargs: argparse.Namespace, dannce_net: bool -) -> Dict: +def combine(base_params: Dict, clargs: argparse.Namespace, dannce_net: bool) -> Dict: """Combine command line, io, and base configurations. 
Args: @@ -749,7 +859,11 @@ def combine( base_params[k] = v elif v is not None: base_params[k] = v - + + if not os.path.exists(os.path.dirname(base_params["log_dest"])): + os.makedirs(os.path.dirname(base_params["log_dest"])) + logging.basicConfig(filename=base_params["log_dest"], level=base_params["log_level"], + format='%(asctime)s %(levelname)s:%(message)s', datefmt='%m/%d/%Y %I:%M:%S %p') for k, v in base_params.items(): - print("{} set to: {}".format(k, v)) + logging.info("{} set to: {}".format(k, v)) return base_params diff --git a/dannce/engine/generator.py b/dannce/engine/generator.py index 8245359..f5cfb75 100755 --- a/dannce/engine/generator.py +++ b/dannce/engine/generator.py @@ -11,14 +11,25 @@ import time import scipy.ndimage.interpolation import tensorflow as tf +import logging # from tensorflow_graphics.geometry.transformation.axis_angle import rotate from multiprocessing.dummy import Pool as ThreadPool from typing import List, Dict, Tuple, Text +MISSING_KEYPOINTS_MSG = ( + "If mirror augmentation is used, the right_keypoints indices and left_keypoints " + + "indices must be specified as well. " + + "For the skeleton, ['RHand', 'LHand', 'RFoot', 'LFoot'], " + + "set right_keypoints: [0, 2] and left_keypoints: [1, 3] in the config file" +) + +TF_GPU_MEMORY_FRACTION = 0.9 +FILE_PATH = "dannce.engine.generator" + class DataGenerator(keras.utils.Sequence): - """Generate data for Keras. + """Generate data for Keras. The object creating an instance of this class should have logging enabled. Attributes: batch_size (int): Batch size to generate @@ -115,10 +126,9 @@ def __init__( assert len(self.list_IDs) == len(self.clusterIDs) - self.load_frame = LoadVideoFrame(self._N_VIDEO_FRAMES, - self.vidreaders, - self.camnames, - self.predict_flag) + self.load_frame = LoadVideoFrame( + self._N_VIDEO_FRAMES, self.vidreaders, self.camnames, self.predict_flag + ) def __len__(self) -> int: """Denote the number of batches per epoch.
@@ -178,7 +188,7 @@ def random_rotate(self, X: np.ndarray, y_3d: np.ndarray, log: bool = False): class DataGenerator_3Dconv(DataGenerator): - """Update generator class to handle multiple experiments. + """Update generator class to handle multiple experiments. The object creator should have logging enabled. Attributes: camera_params (Dict): Camera parameters dictionary. @@ -322,7 +332,7 @@ def __init__( self.interp = interp self.depth = depth self.channel_combo = channel_combo - print(self.channel_combo) + logging.info(self.channel_combo) self.mode = mode self.immode = immode self.tifdirs = tifdirs @@ -547,7 +557,9 @@ def __data_generation(self, list_IDs_temp: List) -> Tuple: if self.mirror and self.camera_params[experimentID][camname]["m"] == 1: thisim = self.raw_im.copy() thisim = thisim[-1::-1] - elif self.mirror and self.camera_params[experimentID][camname]["m"] == 0: + elif ( + self.mirror and self.camera_params[experimentID][camname]["m"] == 0 + ): thisim = self.raw_im elif self.mirror: raise Exception("Invalid mirror parameter, m, must be 0 or 1") @@ -777,7 +789,7 @@ class DataGenerator_3Dconv_torch(DataGenerator): norm_im (bool): If True, normalize images. nvox (int): Number of voxels per box side rotation (bool): If True, use simple rotation augmentation. - session (tf.compat.v1.InteractiveSession): tensorflow session. + session (tf.compat.v1.Session): tensorflow session. threadpool (Threadpool): threadpool object for parallelizing video loading tifdirs (List): Directories of .tifs var_reg (bool): If True, adds a variance regularization term to the loss function. 
@@ -902,7 +914,7 @@ def __init__( self.interp = interp self.depth = depth self.channel_combo = channel_combo - print(self.channel_combo) + logging.info(FILE_PATH + ".DataGenerator_3Dconv_torch.__init__" + self.channel_combo if channel_combo is not None else "None") self.gpu_id = gpu_id self.mode = mode self.immode = immode @@ -928,9 +940,10 @@ def __init__( ts = time.time() # Limit GPU memory usage by Tensorflow to leave memory for PyTorch config = tf.compat.v1.ConfigProto() - config.gpu_options.per_process_gpu_memory_fraction = 0.45 + config.gpu_options.per_process_gpu_memory_fraction = TF_GPU_MEMORY_FRACTION config.gpu_options.allow_growth = True - self.session = tf.compat.v1.InteractiveSession(config=config, graph=tf.Graph()) + self.session = tf.compat.v1.Session(config=config, graph=tf.Graph()) + logging.info(FILE_PATH + ".DataGenerator_3Dconv_torch.__init__" + "Executing eagerly: " + str(tf.executing_eagerly()))#, flush=True) for i, ID in enumerate(list_IDs): experimentID = int(ID.split("_")[0]) for camname in self.camnames[experimentID]: @@ -943,7 +956,7 @@ def __init__( ) self.camera_params[experimentID][camname]["M"] = M - print("Init took {} sec.".format(time.time() - ts)) + logging.info(FILE_PATH + ".DataGenerator_3Dconv_torch.__init__" + "Init took {} sec.".format(time.time() - ts)) def __getitem__(self, index: int): """Generate one batch of data. 
@@ -953,7 +966,7 @@ def __getitem__(self, index: int): Returns: Tuple[np.ndarray, np.ndarray]: One batch of data X - (np.ndarray): Input volume y + (np.ndarray): Input volume y (np.ndarray): Target """ # Generate indexes of the batch @@ -1027,11 +1040,12 @@ def project_grid(self, X_grid, camname, ID, experimentID): camname, extension=self.extension, )[ - self.crop_height[0]: self.crop_height[1], - self.crop_width[0]: self.crop_width[1], + self.crop_height[0] : self.crop_height[1], + self.crop_width[0] : self.crop_width[1], ] - return self.pj_grid_post(X_grid, camname, ID, experimentID, - com, com_precrop, thisim) + return self.pj_grid_post( + X_grid, camname, ID, experimentID, com, com_precrop, thisim + ) def pj_grid_mirror(self, X_grid, camname, ID, experimentID, thisim): this_y = self.torch.as_tensor( @@ -1051,7 +1065,9 @@ def pj_grid_mirror(self, X_grid, camname, ID, experimentID, thisim): com = self.torch.mean(this_y, axis=1) if not self.mirror: - raise Exception("Trying to project onto mirrored images without mirror being set properly") + raise Exception( + "Trying to project onto mirrored images without mirror being set properly" + ) if self.camera_params[experimentID][camname]["m"] == 1: passim = thisim[-1::-1].copy() @@ -1060,12 +1076,11 @@ def pj_grid_mirror(self, X_grid, camname, ID, experimentID, thisim): else: raise Exception("Invalid mirror parameter, m, must be 0 or 1") + return self.pj_grid_post( + X_grid, camname, ID, experimentID, com, com_precrop, passim + ) - return self.pj_grid_post(X_grid, camname, ID, experimentID, - com, com_precrop, passim) - - def pj_grid_post(self, X_grid, camname, ID, experimentID, - com, com_precrop, thisim): + def pj_grid_post(self, X_grid, camname, ID, experimentID, com, com_precrop, thisim): # separate the porjection and sampling into its own function so that # when mirror == True, this can be called directly if self.crop_im: @@ -1242,12 +1257,18 @@ def __data_generation(self, list_IDs_temp): 
self.camnames[experimentID][0], extension=self.extension, )[ - self.crop_height[0]: self.crop_height[1], - self.crop_width[0]: self.crop_width[1], + self.crop_height[0] : self.crop_height[1], + self.crop_width[0] : self.crop_width[1], ] for c in range(num_cams): arglist.append( - [X_grid[i], self.camnames[experimentID][c], ID, experimentID, loadim] + [ + X_grid[i], + self.camnames[experimentID][c], + ID, + experimentID, + loadim, + ] ) result = self.threadpool.starmap(self.pj_grid_mirror, arglist) else: @@ -1407,6 +1428,7 @@ class DataGenerator_3Dconv_tf(DataGenerator): vsize (float): Side length of one voxel predict_flag (bool): If True, use imageio for reading videos, rather than OpenCV """ + def __init__( self, list_IDs, @@ -1523,7 +1545,7 @@ def __init__( self.interp = interp self.depth = depth self.channel_combo = channel_combo - print(self.channel_combo) + logging.info(FILE_PATH + ".DataGenerator_3Dconv_tf.__init__ " + self.channel_combo if channel_combo is not None else "None") self.gpu_id = gpu_id self.mode = mode self.immode = immode @@ -1563,7 +1585,7 @@ def __init__( ops.camera_matrix(K, R, t), dtype="float32" ) - print("Init took {} sec.".format(time.time() - ts)) + logging.info(FILE_PATH + ".DataGenerator_3Dconv_tf.__init__ " + "Init took {} sec.".format(time.time() - ts)) def __getitem__(self, index): """Generate one batch of data. @@ -1931,7 +1953,6 @@ def __data_generation(self, list_IDs_temp): (self.batch_size, self.nvox, self.nvox, self.nvox, 3), ) - X, X_grid = self.random_rotate(X, X_grid) # Need to reshape back to raveled version X_grid = tf.reshape(X_grid, (self.batch_size, -1, 3)) @@ -1999,6 +2020,7 @@ def __data_generation(self, list_IDs_temp): else: return X, y_3d + def random_continuous_rotation(X, y_3d, max_delta=5): """Rotates X and y_3d a random amount around z-axis. 
@@ -2013,9 +2035,7 @@ def random_continuous_rotation(X, y_3d, max_delta=5): """ rotangle = np.random.rand() * (2 * max_delta) - max_delta X = tf.reshape(X, [X.shape[0], X.shape[1], X.shape[2], -1]).numpy() - y_3d = tf.reshape( - y_3d, [y_3d.shape[0], y_3d.shape[1], y_3d.shape[2], -1] - ).numpy() + y_3d = tf.reshape(y_3d, [y_3d.shape[0], y_3d.shape[1], y_3d.shape[2], -1]).numpy() for i in range(X.shape[0]): X[i] = tf.keras.preprocessing.image.apply_affine_transform( X[i], @@ -2046,6 +2066,7 @@ def random_continuous_rotation(X, y_3d, max_delta=5): return X, y_3d + # TODO(inherit): Several methods are repeated, consider inheriting from parent class DataGenerator_3Dconv_frommem(keras.utils.Sequence): """Generate 3d conv data from memory. @@ -2072,6 +2093,7 @@ class DataGenerator_3Dconv_frommem(keras.utils.Sequence): xgrid (np.ndarray): For the AVG network, this contains the 3D grid coordinates n_rand_views (int): Number of reviews to sample randomly from the full set replace (bool): If True, samples n_rand_views with replacement + aux_labels (np.ndarray): If not None, contains the 3D MAX training targets for AVG+MAX training. """ def __init__( @@ -2091,6 +2113,9 @@ def __init__( augment_brightness=True, augment_hue=True, augment_continuous_rotation=True, + mirror_augmentation=False, + right_keypoints=None, + left_keypoints=None, bright_val=0.05, hue_val=0.05, rotation_val=5, @@ -2098,6 +2123,7 @@ def __init__( n_rand_views=None, heatmap_reg=False, heatmap_reg_coeff=0.01, + aux_labels=None, ): """Initialize data generator. @@ -2122,6 +2148,7 @@ def __init__( rotation_val (float, optional): Range of angles used for continuous rotation augmentation n_rand_views (int, optional): Number of reviews to sample randomly from the full set replace (bool, optional): If True, samples n_rand_views with replacement + aux_labels (np.ndarray, optional): If not None, contains the 3D MAX training targets for AVG+MAX training. 
""" self.list_IDs = list_IDs self.data = data @@ -2135,6 +2162,13 @@ def __init__( self.augment_hue = augment_hue self.augment_continuous_rotation = augment_continuous_rotation self.augment_brightness = augment_brightness + self.mirror_augmentation = mirror_augmentation + self.right_keypoints = right_keypoints + self.left_keypoints = left_keypoints + if self.mirror_augmentation and ( + self.right_keypoints is None or self.left_keypoints is None + ): + raise Exception(MISSING_KEYPOINTS_MSG) self.var_reg = var_reg self.xgrid = xgrid self.nvox = nvox @@ -2145,6 +2179,7 @@ def __init__( self.replace = replace self.heatmap_reg = heatmap_reg self.heatmap_reg_coeff = heatmap_reg_coeff + self.aux_labels = aux_labels self.on_epoch_end() def __len__(self): @@ -2196,6 +2231,17 @@ def rot90(self, X): X = X[:, ::-1, :, :] return X + def mirror(self, X, y_3d, X_grid): + # Flip the image and x coordinates about the x axis + X = X[:, ::-1, ...] + X_grid = X_grid[:, ::-1, ...] + + # Flip the left and right keypoints. + temp = y_3d[..., self.left_keypoints].copy() + y_3d[..., self.left_keypoints] = y_3d[..., self.right_keypoints] + y_3d[..., self.right_keypoints] = temp + return X, y_3d, X_grid + def rot180(self, X): """Rotate X by 180 degrees. @@ -2208,13 +2254,13 @@ def rot180(self, X): X = X[::-1, ::-1, :, :] return X - def random_rotate(self, X, y_3d): + def random_rotate(self, X, y_3d, aux=None): """Rotate each sample by 0, 90, 180, or 270 degrees. 
Args: X (np.ndarray): Image volumes y_3d (np.ndarray): 3D grid coordinates (AVG) or training target volumes (MAX) - + aux (np.ndarray or None): Populated in MAX+AVG mode with the training target volumes Returns: X (np.ndarray): Rotated image volumes y_3d (np.ndarray): Rotated 3D grid coordinates (AVG) or training target volumes (MAX) @@ -2227,18 +2273,28 @@ def random_rotate(self, X, y_3d): # Rotate180 X[i] = self.rot180(X[i]) y_3d[i] = self.rot180(y_3d[i]) + if aux is not None: + aux[i] = self.rot180(aux[i]) elif rots[i] == 2: # Rotate90 X[i] = self.rot90(X[i]) y_3d[i] = self.rot90(y_3d[i]) + if aux is not None: + aux[i] = self.rot90(aux[i]) elif rots[i] == 3: # Rotate -90/270 X[i] = self.rot90(X[i]) X[i] = self.rot180(X[i]) y_3d[i] = self.rot90(y_3d[i]) y_3d[i] = self.rot180(y_3d[i]) + if aux is not None: + aux[i] = self.rot90(aux[i]) + aux[i] = self.rot180(aux[i]) - return X, y_3d + if aux is not None: + return X, y_3d, aux + else: + return X, y_3d def visualize(self, original, augmented): """Plots example image after augmentation @@ -2260,13 +2316,14 @@ def visualize(self, original, augmented): plt.show() input("Press Enter to continue...") - def do_augmentation(self, X, X_grid, y_3d): + def do_augmentation(self, X, X_grid, y_3d, aux=None): """Applies augmentation Args: X (np.ndarray): image volumes X_grid (np.ndarray): 3D grid coordinates y_3d (np.ndarray): training targets + aux (np.ndarray or None): additional target volumes if using MAX+AVG mode Returns: X (np.ndarray): Augemented image volumes @@ -2280,13 +2337,16 @@ def do_augmentation(self, X, X_grid, y_3d): X_grid, (self.batch_size, self.nvox, self.nvox, self.nvox, 3), ) - X, X_grid = self.random_rotate(X.copy(), X_grid.copy()) + if aux is not None: + X, X_grid, aux = self.random_rotate(X.copy(), X_grid.copy(), aux.copy()) + else: + X, X_grid = self.random_rotate(X.copy(), X_grid.copy()) # Need to reshape back to raveled version X_grid = np.reshape(X_grid, (self.batch_size, -1, 3)) else: X, y_3d = 
self.random_rotate(X.copy(), y_3d.copy()) - if self.augment_continuous_rotation: + if self.augment_continuous_rotation and aux is None: if self.expval: # First make X_grid 3d X_grid = np.reshape( @@ -2327,7 +2387,20 @@ def do_augmentation(self, X, X_grid, y_3d): X[..., channel_ids], self.bright_val ) - return X, X_grid, y_3d + if self.mirror_augmentation and self.expval and aux is None: + if np.random.rand() > 0.5: + X_grid = np.reshape( + X_grid, + (self.batch_size, self.nvox, self.nvox, self.nvox, 3), + ) + # Flip the image and the symmetric keypoints + X, y_3d, X_grid = self.mirror(X.copy(), y_3d.copy(), X_grid.copy()) + X_grid = np.reshape(X_grid, (self.batch_size, -1, 3)) + else: + pass + ##TODO: implement mirror augmentation for max and avg+max modes + + return X, X_grid, y_3d, aux def do_random(self, X): """Randomly re-order camera views @@ -2339,45 +2412,50 @@ def do_random(self, X): X (np.ndarray): Shuffled image volumes """ if self.random: - X = np.reshape(X, - (X.shape[0], - X.shape[1], - X.shape[2], - X.shape[3], - self.chan_num, - -1), - order='F') + X = np.reshape( + X, + (X.shape[0], X.shape[1], X.shape[2], X.shape[3], self.chan_num, -1), + order="F", + ) X = X[:, :, :, :, :, np.random.permutation(X.shape[-1])] - X = np.reshape(X, - (X.shape[0], - X.shape[1], - X.shape[2], - X.shape[3], - X.shape[4]*X.shape[5]), order='F') + X = np.reshape( + X, + ( + X.shape[0], + X.shape[1], + X.shape[2], + X.shape[3], + X.shape[4] * X.shape[5], + ), + order="F", + ) if self.n_rand_views is not None: # Select a set of cameras randomly with replacement. 
- X = np.reshape(X, - (X.shape[0], - X.shape[1], - X.shape[2], - X.shape[3], - self.chan_num, - -1), - order='F') + X = np.reshape( + X, + (X.shape[0], X.shape[1], X.shape[2], X.shape[3], self.chan_num, -1), + order="F", + ) if self.replace: X = X[..., np.random.randint(X.shape[-1], size=(self.n_rand_views,))] else: if not self.random: - raise Exception("For replace=False for n_rand_views, random must be turned on") - X = X[:, :, :, :, :, :self.n_rand_views] - X = np.reshape(X, - (X.shape[0], - X.shape[1], - X.shape[2], - X.shape[3], - X.shape[4]*X.shape[5]), - order='F') + raise Exception( + "For replace=False for n_rand_views, random must be turned on" + ) + X = X[:, :, :, :, :, : self.n_rand_views] + X = np.reshape( + X, + ( + X.shape[0], + X.shape[1], + X.shape[2], + X.shape[3], + X.shape[4] * X.shape[5], + ), + order="F", + ) return X @@ -2386,13 +2464,14 @@ def get_max_gt_ind(self, X_grid, y_3d): Used for heatmap regularization. """ - diff = np.sum((X_grid[:, :, :, np.newaxis] - y_3d[:, np.newaxis, :, :])**2, axis=2) + diff = np.sum( + (X_grid[:, :, :, np.newaxis] - y_3d[:, np.newaxis, :, :]) ** 2, axis=2 + ) inds = np.argmin(diff, axis=1) - grid_d = int(np.round(X_grid.shape[1]**(1/3))) + grid_d = int(np.round(X_grid.shape[1] ** (1 / 3))) inds = np.unravel_index(inds, (grid_d, grid_d, grid_d)) return np.stack(inds, axis=1) - def __data_generation(self, list_IDs_temp): """Generate data containing batch_size samples. 
X : (n_samples, *dim, n_channels) @@ -2412,19 +2491,27 @@ def __data_generation(self, list_IDs_temp): X = np.zeros((self.batch_size, *self.data.shape[1:])) y_3d = np.zeros((self.batch_size, *self.labels.shape[1:])) - # Only used when + # Only used for AVG mode if self.expval: X_grid = np.zeros((self.batch_size, *self.xgrid.shape[1:])) else: X_grid = None - + + # Only used for AVG+MAX mode + if self.aux_labels is not None: + aux = np.zeros((*X.shape[:4], y_3d.shape[-1])) + else: + aux = None + for i, ID in enumerate(list_IDs_temp): X[i] = self.data[ID].copy() y_3d[i] = self.labels[ID] if self.expval: X_grid[i] = self.xgrid[ID] + if aux is not None: + aux[i] = self.aux_labels[ID] - X, X_grid, y_3d = self.do_augmentation(X, X_grid, y_3d) + X, X_grid, y_3d, aux = self.do_augmentation(X, X_grid, y_3d, aux) # Randomly re-order, if desired X = self.do_random(X) @@ -2433,71 +2520,78 @@ def __data_generation(self, list_IDs_temp): if self.heatmap_reg: return [X, X_grid, self.get_max_gt_ind(X_grid, y_3d)], [y_3d, self.heatmap_reg_coeff*np.ones((self.batch_size, y_3d.shape[-1]), dtype='float32')] + elif aux is not None: + return [X, X_grid], [y_3d, aux] return [X, X_grid], y_3d else: return X, y_3d + class DataGenerator_3Dconv_npy(DataGenerator_3Dconv_frommem): """Generates 3d conv data from npy files. 
- Attributes: - augment_brightness (bool): If True, applies brightness augmentation - augment_continuous_rotation (bool): If True, applies rotation augmentation in increments smaller than 90 degrees - augment_hue (bool): If True, applies hue augmentation - batch_size (int): Batch size - bright_val (float): Brightness augmentation range (-bright_val, bright_val), as fraction of raw image brightness - chan_num (int): Number of input channels - labels_3d (Dict): training targets - expval (bool): If True, crafts input for an AVG network - hue_val (float): Hue augmentation range (-hue_val, hue_val), as fraction of raw image hue range - indexes (np.ndarray): Sample indices used for batch generation - list_IDs (List): List of sampleIDs - nvox (int): Number of voxels in each grid dimension - random (bool): If True, shuffles camera order for each batch - rotation (bool): If True, applies rotation augmentation in 90 degree increments - rotation_val (float): Range of angles used for continuous rotation augmentation - shuffle (bool): If True, shuffle the samples before each epoch - var_reg (bool): If True, returns input used for variance regularization - n_rand_views (int): Number of reviews to sample randomly from the full set - replace (bool): If True, samples n_rand_views with replacement - imdir (Text): Name of image volume npy subfolder - griddir (Text): Name of grid volumw npy subfolder - mono (bool): If True, return monochrome image volumes - sigma (float): For MAX network, size of target Gaussian (mm) - cam1 (bool): If True, prepares input for training a single camea network - prefeat (bool): If True, prepares input for a network performing volume feature extraction before fusion - npydir (Dict): path to each npy volume folder for each recording (i.e. 
experiment) + Attributes: + augment_brightness (bool): If True, applies brightness augmentation + augment_continuous_rotation (bool): If True, applies rotation augmentation in increments smaller than 90 degrees + augment_hue (bool): If True, applies hue augmentation + batch_size (int): Batch size + bright_val (float): Brightness augmentation range (-bright_val, bright_val), as fraction of raw image brightness + chan_num (int): Number of input channels + labels_3d (Dict): training targets + expval (bool): If True, crafts input for an AVG network + hue_val (float): Hue augmentation range (-hue_val, hue_val), as fraction of raw image hue range + indexes (np.ndarray): Sample indices used for batch generation + list_IDs (List): List of sampleIDs + nvox (int): Number of voxels in each grid dimension + random (bool): If True, shuffles camera order for each batch + rotation (bool): If True, applies rotation augmentation in 90 degree increments + rotation_val (float): Range of angles used for continuous rotation augmentation + shuffle (bool): If True, shuffle the samples before each epoch + var_reg (bool): If True, returns input used for variance regularization + n_rand_views (int): Number of reviews to sample randomly from the full set + replace (bool): If True, samples n_rand_views with replacement + imdir (Text): Name of image volume npy subfolder + griddir (Text): Name of grid volumw npy subfolder + mono (bool): If True, return monochrome image volumes + sigma (float): For MAX network, size of target Gaussian (mm) + cam1 (bool): If True, prepares input for training a single camea network + prefeat (bool): If True, prepares input for a network performing volume feature extraction before fusion + npydir (Dict): path to each npy volume folder for each recording (i.e. 
experiment) """ - def __init__(self, - list_IDs, - labels_3d, - npydir, - batch_size, - rotation=True, - random=False, - chan_num=3, - shuffle=True, - expval=False, - var_reg=False, - imdir='image_volumes', - griddir='grid_volumes', - nvox=64, - n_rand_views=None, - mono=False, - cam1=False, - replace=True, - prefeat=False, - sigma=10, - augment_brightness=True, - augment_hue=True, - augment_continuous_rotation=True, - bright_val=0.05, - hue_val=0.05, - rotation_val=5, - heatmap_reg=False, - heatmap_reg_coeff=0.01, - ): + def __init__( + self, + list_IDs, + labels_3d, + npydir, + batch_size, + rotation=True, + random=False, + chan_num=3, + shuffle=True, + expval=False, + var_reg=False, + imdir="image_volumes", + griddir="grid_volumes", + nvox=64, + n_rand_views=None, + mono=False, + cam1=False, + replace=True, + prefeat=False, + sigma=10, + augment_brightness=True, + augment_hue=True, + augment_continuous_rotation=True, + mirror_augmentation=False, + right_keypoints=None, + left_keypoints=None, + bright_val=0.05, + hue_val=0.05, + rotation_val=5, + heatmap_reg=False, + heatmap_reg_coeff=0.01, + ): """Generates 3d conv data from npy files. 
Args: @@ -2549,6 +2643,13 @@ def __init__(self, self.augment_hue = augment_hue self.augment_continuous_rotation = augment_continuous_rotation self.augment_brightness = augment_brightness + self.mirror_augmentation = mirror_augmentation + self.right_keypoints = right_keypoints + self.left_keypoints = left_keypoints + if self.mirror_augmentation and ( + self.right_keypoints is None or self.left_keypoints is None + ): + raise Exception(MISSING_KEYPOINTS_MSG) self.bright_val = bright_val self.hue_val = hue_val self.rotation_val = rotation_val @@ -2576,7 +2677,7 @@ def __getitem__(self, index): y (np.ndarray): Target """ # Generate indexes of the batch - indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size] + indexes = self.indexes[index * self.batch_size : (index + 1) * self.batch_size] # Find list of IDs list_IDs_temp = [self.list_IDs[k] for k in indexes] @@ -2590,7 +2691,7 @@ def on_epoch_end(self): """Update indexes after each epoch.""" self.indexes = np.arange(len(self.list_IDs)) if self.shuffle == True: - print("SHUFFLING DATA INDICES") + logging.info( FILE_PATH + ".DataGenerator_3Dconv_npy.on_epoch_end " + "SHUFFLING DATA INDICES") np.random.shuffle(self.indexes) def rot90(self, X): @@ -2600,8 +2701,8 @@ def rot90(self, X): return X - def rot180(self,X): - #Rotate 180 + def rot180(self, X): + # Rotate 180 X = X[::-1, ::-1, :, :] return X @@ -2613,18 +2714,18 @@ def random_rotate(self, X, y_3d): rots = np.random.choice(np.arange(4), X.shape[0]) for i in range(X.shape[0]): - if rots[i]==0: + if rots[i] == 0: pass - elif rots[i]==1: - #Rotate180 + elif rots[i] == 1: + # Rotate180 X[i] = self.rot180(X[i]) y_3d[i] = self.rot180(y_3d[i]) - elif rots[i]==2: - #Rotate90 + elif rots[i] == 2: + # Rotate90 X[i] = self.rot90(X[i]) y_3d[i] = self.rot90(y_3d[i]) - elif rots[i]==3: - #Rotate -90/270 + elif rots[i] == 3: + # Rotate -90/270 X[i] = self.rot90(X[i]) X[i] = self.rot180(X[i]) y_3d[i] = self.rot90(y_3d[i]) @@ -2661,41 +2762,43 @@ def 
__data_generation(self, list_IDs_temp): eID = int(IDkey[0]) sID = IDkey[1] - X.append(np.load(os.path.join(self.npydir[eID], - self.imdir, - '0_' + sID + '.npy')).astype('float32')) + X.append( + np.load( + os.path.join(self.npydir[eID], self.imdir, "0_" + sID + ".npy") + ).astype("float32") + ) y_3d.append(self.labels_3d[ID]) - X_grid.append(np.load(os.path.join(self.npydir[eID], - self.griddir, - '0_' + sID + '.npy'))) + X_grid.append( + np.load( + os.path.join(self.npydir[eID], self.griddir, "0_" + sID + ".npy") + ) + ) X = np.stack(X) y_3d = np.stack(y_3d) X_grid = np.stack(X_grid) if not self.expval: - y_3d_max = np.zeros((self.batch_size, - self.nvox, - self.nvox, - self.nvox, - y_3d.shape[-1])) + y_3d_max = np.zeros( + (self.batch_size, self.nvox, self.nvox, self.nvox, y_3d.shape[-1]) + ) if not self.expval: - X_grid = np.reshape(X_grid, (-1, - self.nvox, - self.nvox, - self.nvox, - 3)) + X_grid = np.reshape(X_grid, (-1, self.nvox, self.nvox, self.nvox, 3)) for gridi in range(X_grid.shape[0]): x_coord_3d = X_grid[gridi, :, :, :, 0] y_coord_3d = X_grid[gridi, :, :, :, 1] z_coord_3d = X_grid[gridi, :, :, :, 2] for j in range(y_3d_max.shape[-1]): - y_3d_max[gridi, :, :, :, j] = \ - np.exp(-((y_coord_3d-y_3d[gridi, 1, j])**2 + - (x_coord_3d-y_3d[gridi, 0, j])**2 + - (z_coord_3d-y_3d[gridi, 2, j])**2)/(2*self.sigma**2)) + y_3d_max[gridi, :, :, :, j] = np.exp( + -( + (y_coord_3d - y_3d[gridi, 1, j]) ** 2 + + (x_coord_3d - y_3d[gridi, 0, j]) ** 2 + + (z_coord_3d - y_3d[gridi, 2, j]) ** 2 + ) + / (2 * self.sigma ** 2) + ) if self.mono and self.chan_num == 3: # Convert from RGB to mono using the skimage formula. Drop the duplicated frames. 
@@ -2718,30 +2821,22 @@ def __data_generation(self, list_IDs_temp): + X[:, :, :, :, 2] * 0.0721 ) - ncam = int(X.shape[-1]//self.chan_num) + ncam = int(X.shape[-1] // self.chan_num) - X, X_grid, y_3d = self.do_augmentation(X, X_grid, y_3d) + X, X_grid, y_3d, aux = self.do_augmentation(X, X_grid, y_3d) # Randomly re-order, if desired X = self.do_random(X) if self.cam1: # collapse the cameras to the batch dimensions. - X = np.reshape(X, - (X.shape[0], - X.shape[1], - X.shape[2], - X.shape[3], - self.chan_num, - -1), - order='F') + X = np.reshape( + X, + (X.shape[0], X.shape[1], X.shape[2], X.shape[3], self.chan_num, -1), + order="F", + ) X = np.transpose(X, [0, 5, 1, 2, 3, 4]) - X = np.reshape(X, - (-1, - X.shape[2], - X.shape[3], - X.shape[4], - X.shape[5])) + X = np.reshape(X, (-1, X.shape[2], X.shape[3], X.shape[4], X.shape[5])) if self.expval: y_3d = np.tile(y_3d, [ncam, 1, 1]) X_grid = np.tile(X_grid, [ncam, 1, 1]) @@ -2753,7 +2848,7 @@ def __data_generation(self, list_IDs_temp): XX = [] if self.prefeat: for ix in range(ncam): - XX.append(X[..., ix*self.chan_num:(ix+1)*self.chan_num]) + XX.append(X[..., ix * self.chan_num : (ix + 1) * self.chan_num]) X = XX if self.expval: @@ -2763,8 +2858,11 @@ def __data_generation(self, list_IDs_temp): if self.expval: if self.heatmap_reg: - return [X, X_grid, self.get_max_gt_ind(X_grid, y_3d)], [y_3d, - self.heatmap_reg_coeff*np.ones((self.batch_size, y_3d.shape[-1]), dtype='float32')] + return [X, X_grid, self.get_max_gt_ind(X_grid, y_3d)], [ + y_3d, + self.heatmap_reg_coeff + * np.ones((self.batch_size, y_3d.shape[-1]), dtype="float32"), + ] return X, y_3d else: return X, y_3d_max diff --git a/dannce/engine/inference.py b/dannce/engine/inference.py index a44bf4f..812371c 100755 --- a/dannce/engine/inference.py +++ b/dannce/engine/inference.py @@ -1,5 +1,6 @@ """Handle inference procedures for dannce and com networks. 
""" + import numpy as np import os import time @@ -12,29 +13,33 @@ import torch import matplotlib from dannce.engine.processing import savedata_tomat, savedata_expval +import logging matplotlib.use("Agg") import matplotlib.pyplot as plt +FILE_PATH = "dannce.engine.inference" + def print_checkpoint( n_frame: int, start_ind: int, end_time: float, sample_save: int = 100 ) -> float: """Print checkpoint messages indicating frame and fps for inference. - + Args: n_frame (int): Frame number start_ind (int): Start index end_time (float): Timing reference sample_save (int, optional): Number of samples to use in fps estimation. - + No Longer Returned: float: New timing reference. """ - print("Predicting on sample %d" % (n_frame), flush=True) + prepend_log_msg = FILE_PATH + ".print_checkpoint " + logging.info(prepend_log_msg + "Predicting on sample %d" % (n_frame))# flush=True) if (n_frame - start_ind) % sample_save == 0 and n_frame != start_ind: - print(n_frame) - print("{} samples took {} seconds".format(sample_save, time.time() - end_time)) + logging.info(prepend_log_msg + str(n_frame)) + logging.info(prepend_log_msg + "{} samples took {} seconds".format(sample_save, time.time() - end_time)) end_time = time.time() return end_time @@ -43,13 +48,13 @@ def predict_batch( model: Model, generator: keras.utils.Sequence, n_frame: int, params: Dict ) -> np.ndarray: """Predict for a single batch and reformat output. - + Args: model (Model): interence model generator (keras.utils.Sequence): Data generator n_frame (int): Frame number params (Dict): Parameters dictionary. - + No Longer Returned: np.ndarray: n_batch x n_cam x h x w x c predictions """ @@ -74,7 +79,7 @@ def debug_com( n_cam: int, ): """Print useful figures for COM debugging. - + Args: params (Dict): Parameters dictionary. pred (np.ndarray): Reformatted batch predictions. 
@@ -85,6 +90,7 @@ def debug_com( n_batch (int): Batch number n_cam (int): Camera number """ + prepend_log_msg = FILE_PATH + ".debug_com " com_predict_dir = params["com_predict_dir"] cmapdir = os.path.join(com_predict_dir, "cmap") overlaydir = os.path.join(com_predict_dir, "overlay") @@ -92,9 +98,10 @@ def debug_com( os.makedirs(cmapdir) if not os.path.exists(overlaydir): os.makedirs(overlaydir) - print("Writing " + params["com_debug"] + " confidence maps to " + cmapdir) - print("Writing " + params["com_debug"] + "COM-image overlays to " + overlaydir) + logging.info(prepend_log_msg + "Writing " + params["com_debug"] + " confidence maps to " + cmapdir) + logging.info(prepend_log_msg + "Writing " + params["com_debug"] + "COM-image overlays to " + overlaydir) + batch_size = pred_batch.shape[0] # Write preds plt.figure(0) plt.cla() @@ -102,13 +109,13 @@ def debug_com( plt.savefig( os.path.join( cmapdir, - params["com_debug"] + str(n_frame + n_batch) + ".png", + params["com_debug"] + str(n_frame * batch_size + n_batch) + ".png", ) ) plt.figure(1) plt.cla() - im = generator.__getitem__(n_frame * n_batches + n_batch) + im = generator.__getitem__(n_frame * batch_size + n_batch) plt.imshow(processing.norm_im(im[0][n_cam])) plt.plot( (ind[0] - params["crop_width"][0]) / params["downfac"], @@ -118,7 +125,7 @@ def debug_com( plt.savefig( os.path.join( overlaydir, - params["com_debug"] + str(n_frame + n_batch) + ".png", + params["com_debug"] + str(n_frame * batch_size + n_batch) + ".png", ) ) @@ -136,7 +143,7 @@ def extract_multi_instance_single_channel( generator: keras.utils.Sequence, ) -> Dict: """Extract prediction indices for multi-instance single-channel tracking. - + Args: pred (np.ndarray): Reformatted batch predictions. pred_batch (np.ndarray): Batch prediction. @@ -148,7 +155,7 @@ def extract_multi_instance_single_channel( save_data (Dict): Saved data dictionary. 
cameras (Dict): Camera dictionary generator (keras.utils.Sequence): DataGenerator - + No Longer Returned: (Dict): Updated saved data dictionary. """ @@ -224,7 +231,7 @@ def extract_multi_instance_multi_channel( generator: keras.utils.Sequence, ) -> Dict: """Extract prediction indices for multi-instance multi-channel tracking. - + Args: pred (np.ndarray): Reformatted batch predictions. pred_batch (np.ndarray): Batch prediction. @@ -236,7 +243,7 @@ def extract_multi_instance_multi_channel( save_data (Dict): Saved data dictionary. cameras (Dict): Camera dictionary generator (keras.utils.Sequence): DataGenerator - + No Longer Returned: (Dict): Updated saved data dictionary. """ @@ -302,7 +309,7 @@ def extract_single_instance( generator: keras.utils.Sequence, ): """Extract prediction indices for single-instance tracking. - + Args: pred (np.ndarray): Reformatted batch predictions. pred_batch (np.ndarray): Batch prediction. @@ -314,7 +321,7 @@ def extract_single_instance( save_data (Dict): Saved data dictionary. cameras (Dict): Camera dictionary generator (keras.utils.Sequence): DataGenerator - + No Longer Returned: (Dict): Updated saved data dictionary. """ @@ -329,7 +336,7 @@ def extract_single_instance( # mirror flip each coord if indicated if params["mirror"] and cameras[params["camnames"][n_cam]]["m"] == 1: ind[1] = params["raw_im_h"] - ind[1] - 1 - + # now, the center of mass is (x,y) instead of (i,j) # now, we need to use camera calibration to triangulate # from 2D to 3D @@ -371,14 +378,14 @@ def triangulate_single_instance( n_cams: int, sample_id: Text, params: Dict, camera_mats: Dict, save_data: Dict ) -> Dict: """Triangulate for a single instance. - + Args: n_cams (int): Numver of cameras sample_id (Text): Sample identifier. params (Dict): Parameters dictionary. camera_mats (Dict): Camera matrices dictioanry. save_data (Dict): Saved data dictionary. - + No Longer Returned: Dict: Updated saved data dictionary. 
""" @@ -407,14 +414,14 @@ def triangulate_multi_instance_multi_channel( n_cams: int, sample_id: Text, params: Dict, camera_mats: Dict, save_data: Dict ) -> Dict: """Triangulate for multi-instance multi-channel. - + Args: n_cams (int): Numver of cameras sample_id (Text): Sample identifier. params (Dict): Parameters dictionary. camera_mats (Dict): Camera matrices dictioanry. save_data (Dict): Saved data dictionary. - + No Longer Returned: Dict: Updated saved data dictionary. """ @@ -465,7 +472,7 @@ def triangulate_multi_instance_single_channel( save_data: Dict, ) -> Dict: """Triangulate for multi-instance single-channel. - + Args: n_cams (int): Numver of cameras sample_id (Text): Sample identifier. @@ -473,7 +480,7 @@ def triangulate_multi_instance_single_channel( camera_mats (Dict): Camera matrices dictioanry. cameras (Dict): Camera dictionary. save_data (Dict): Saved data dictionary. - + No Longer Returned: Dict: Updated saved data dictionary. """ @@ -558,7 +565,7 @@ def infer_com( sample_save: int = 100, ): """Perform COM detection over a set of frames. - + Args: start_ind (int): Starting frame index end_ind (int): Ending frame index @@ -589,7 +596,9 @@ def infer_com( pred = pred_batch[n_batch, 0] pred = np.transpose(pred, (2, 0, 1)) elif params["mirror"]: - raise Exception("mirror mode with multiple animal instances not currently supported.") + raise Exception( + "mirror mode with multiple animal instances not currently supported." + ) elif params["n_instances"] > 1 and params["n_channels_out"] > 1: pred = pred_batch[n_batch, ...] else: @@ -636,18 +645,15 @@ def infer_com( def infer_dannce( - start_ind: int, - end_ind: int, generator: keras.utils.Sequence, params: Dict, model: Model, partition: Dict, - save_data: Dict, - device: Text, n_chn: int, + com_dict: Dict, ): """Perform dannce detection over a set of frames. 
- + Args: start_ind (int): Starting frame index end_ind (int): Ending frame index @@ -655,21 +661,23 @@ def infer_dannce( params (Dict): Parameters dictionary. model (Model): Inference model. partition (Dict): Partition dictionary - save_data (Dict): Saved data dictionary device (Text): Gpu device name n_chn (int): Number of output channels """ - + prepend_log_msg = FILE_PATH + ".infer_dannce " end_time = time.time() + save_data = {} + start_ind = params["start_batch"] + end_ind = params["maxbatch"] for idx, i in enumerate(range(start_ind, end_ind)): - print("Predicting on batch {}".format(i), flush=True) + logging.debug("Predicting on batch {}".format(i))#, flush=True) if (i - start_ind) % 10 == 0 and i != start_ind: - print(i) - print("10 batches took {} seconds".format(time.time() - end_time)) + logging.debug(i) + logging.debug("10 batches took {} seconds".format(time.time() - end_time)) end_time = time.time() if (i - start_ind) % 1000 == 0 and i != start_ind: - print("Saving checkpoint at {}th batch".format(i)) + logging.debug("Saving checkpoint at {}th batch".format(i)) if params["expval"]: p_n = savedata_expval( params["dannce_predict_dir"] + "save_data_AVG.mat", @@ -691,6 +699,7 @@ def infer_dannce( data=save_data, num_markers=n_chn, tcoord=False, + addCOM=com_dict, ) ims = generator.__getitem__(i) @@ -708,78 +717,24 @@ def infer_dannce( "sampleID": sampleID, } else: - predict_mode = ( - params["predict_mode"] - if params["predict_mode"] is not None - else "numpy" - ) - if predict_mode == "torch": - for j in range(pred.shape[0]): - preds = torch.as_tensor(pred[j], dtype=torch.float32, device=device) - pred_max = preds.max(0).values.max(0).values.max(0).values - pred_total = preds.sum((0, 1, 2)) - ( - xcoord, - ycoord, - zcoord, - ) = processing.plot_markers_3d_torch(preds) - coord = torch.stack([xcoord, ycoord, zcoord]) - pred_log = pred_max.log() - pred_total.log() - sampleID = partition["valid_sampleIDs"][i * pred.shape[0] + j] - - save_data[idx * 
pred.shape[0] + j] = { - "pred_max": pred_max.cpu().numpy(), - "pred_coord": coord.cpu().numpy(), - "true_coord_nogrid": ims[1][j], - "logmax": pred_log.cpu().numpy(), - "sampleID": sampleID, - } - - elif predict_mode == "tf": - # get coords for each map - with tf.device(device): - for j in range(pred.shape[0]): - preds = tf.constant(pred[j], dtype="float32") - pred_max = tf.math.reduce_max( - tf.math.reduce_max(tf.math.reduce_max(preds)) - ) - pred_total = tf.math.reduce_sum( - tf.math.reduce_sum(tf.math.reduce_sum(preds)) - ) - ( - xcoord, - ycoord, - zcoord, - ) = processing.plot_markers_3d_tf(preds) - coord = tf.stack([xcoord, ycoord, zcoord], axis=0) - pred_log = tf.math.log(pred_max) - tf.math.log(pred_total) - sampleID = partition["valid_sampleIDs"][i * pred.shape[0] + j] - - save_data[idx * pred.shape[0] + j] = { - "pred_max": pred_max.numpy(), - "pred_coord": coord.numpy(), - "true_coord_nogrid": ims[1][j], - "logmax": pred_log.numpy(), - "sampleID": sampleID, - } + for j in range(pred.shape[0]): + preds = torch.as_tensor(pred[j], dtype=torch.float32) + pred_max = preds.max(0).values.max(0).values.max(0).values + pred_total = preds.sum((0, 1, 2)) + ( + xcoord, + ycoord, + zcoord, + ) = processing.plot_markers_3d_torch(preds) + coord = torch.stack([xcoord, ycoord, zcoord]) + pred_log = pred_max.log() - pred_total.log() + sampleID = partition["valid_sampleIDs"][i * pred.shape[0] + j] - else: - # get coords for each map - for j in range(pred.shape[0]): - pred_max = np.max(pred[j], axis=(0, 1, 2)) - pred_total = np.sum(pred[j], axis=(0, 1, 2)) - xcoord, ycoord, zcoord = processing.plot_markers_3d( - pred[j, :, :, :, :] - ) - coord = np.stack([xcoord, ycoord, zcoord]) - pred_log = np.log(pred_max) - np.log(pred_total) - sampleID = partition["valid_sampleIDs"][i * pred.shape[0] + j] - - save_data[idx * pred.shape[0] + j] = { - "pred_max": pred_max, - "pred_coord": coord, - "true_coord_nogrid": ims[1][j], - "logmax": pred_log, - "sampleID": sampleID, - } - return 
save_data \ No newline at end of file + save_data[idx * pred.shape[0] + j] = { + "pred_max": pred_max.cpu().numpy(), + "pred_coord": coord.cpu().numpy(), + "true_coord_nogrid": ims[1][j], + "logmax": pred_log.cpu().numpy(), + "sampleID": sampleID, + } + return save_data diff --git a/dannce/engine/io.py b/dannce/engine/io.py index fbc888c..32dbeb1 100755 --- a/dannce/engine/io.py +++ b/dannce/engine/io.py @@ -2,6 +2,7 @@ import numpy as np import scipy.io as sio from typing import List, Dict, Text, Union +import mat73 def load_label3d_data(path: Text, key: Text): @@ -14,20 +15,24 @@ def load_label3d_data(path: Text, key: Text): Returns: TYPE: Data from field """ - d = sio.loadmat(path)[key] - dataset = [f[0] for f in d] - - # Data are loaded in this annoying structure where the array - # we want is at dataset[i][key][0,0], as a nested array of arrays. - # Simplify this structure (a numpy record array) here. - # Additionally, cannot use views here because of shape mismatches. Define - # new dict and return. - data = [] - for d in dataset: - d_ = {} - for key in d.dtype.names: - d_[key] = d[key][0, 0] - data.append(d_) + try: + d = sio.loadmat(path)[key] + dataset = [f[0] for f in d] + + # Data are loaded in this annoying structure where the array + # we want is at dataset[i][key][0,0], as a nested array of arrays. + # Simplify this structure (a numpy record array) here. + # Additionally, cannot use views here because of shape mismatches. Define + # new dict and return. 
+ data = [] + for d in dataset: + d_ = {} + for key in d.dtype.names: + d_[key] = d[key][0, 0] + data.append(d_) + except: + d = mat73.loadmat(path)[key] + data = [f[0] for f in d] return data @@ -88,7 +93,11 @@ def load_com(path: Text) -> Dict: Returns: Dict: Dictionary with com data """ - d = sio.loadmat(path)["com"] + try: + d = sio.loadmat(path)["com"] + except: + d = mat73.loadmat(path)["com"] + data = {} data["com3d"] = d["com3d"][0, 0] data["sampleID"] = d["sampleID"][0, 0].astype(int) @@ -104,13 +113,25 @@ def load_camnames(path: Text) -> Union[List, None]: Returns: Union[List, None]: List of cameranames """ - label_3d_file = sio.loadmat(path) - if "camnames" in label_3d_file: - names = label_3d_file["camnames"][:] - if len(names) != len(label_3d_file["labelData"]): - camnames = [name[0] for name in names[0]] + try: + label_3d_file = sio.loadmat(path) + if "camnames" in label_3d_file: + names = label_3d_file["camnames"][:] + if len(names) != len(label_3d_file["labelData"]): + camnames = [name[0] for name in names[0]] + else: + camnames = [name[0][0] for name in names] + else: + camnames = None + except: + label_3d_file = mat73.loadmat(path) + if "camnames" in label_3d_file: + names = label_3d_file["camnames"][:] + if len(names) != len(label_3d_file["labelData"]): + camnames = [name[0] for name in names[0]] + else: + camnames = names else: - camnames = [name[0][0] for name in names] - else: - camnames = None + camnames = None + return camnames diff --git a/dannce/engine/losses.py b/dannce/engine/losses.py index 430cf0e..22c146f 100755 --- a/dannce/engine/losses.py +++ b/dannce/engine/losses.py @@ -1,6 +1,7 @@ """Losses for tf models.""" import tensorflow as tf from tensorflow.keras import backend as K +import dannce.engine.processing as processing def mask_nan(y_true, y_pred): """Mask nans and return tensors for use by loss functions @@ -106,6 +107,26 @@ def K_nanmean_infmean(tensor): def euclidean_distance_3D(y_true, y_pred): + """ + Get voxelized 3D 
euclidean distance + + Assumes predictions of shape (batch_size, nvox, nvox, nvox, channels) + + Ignores NaN when necessary. But because K.sqrt(NaN) == inf, whenthere + are NaNs in the labels, the distance function returns inf + + """ + n_dim = len(y_pred.shape) + if n_dim < 4: + return euclidean_distance_3D_kps(y_true, y_pred) + else: + tf.print("shape of y_true", y_true.shape) + tf.print("shape of y_pred", y_pred.shape) + tf.print("shape of diff", (y_true - y_pred).shape) + ed3D = K.flatten(K.sqrt(K.sum(K.pow(y_true - y_pred, 2), axis=(1,2,3)))) + return K_nanmean_infmean(ed3D) + +def euclidean_distance_3D_kps(y_true, y_pred): """Get 3d Euclidean distance. Assumes predictions of shape (batch_size,3,num_markers) @@ -139,4 +160,102 @@ def heatmap_max_regularizer(y_true, y_pred): """ - return -1*K.mean(K.flatten(y_true)*K.log(K.flatten(y_pred))) \ No newline at end of file + return -1*K.mean(K.flatten(y_true)*K.log(K.flatten(y_pred))) + +# Huber and Cosh losses copied from implementation by robb +def huber_loss(delta): + def huber_model(y_true,y_pred): + y_pred, y_true, num_notnan = mask_nan(y_true, y_pred) + + model = tf.keras.losses.Huber(delta=delta,reduction=tf.keras.losses.Reduction.SUM) + h = model((y_true), (y_pred))/num_notnan + + loss = h + return tf.where(~tf.math.is_nan(loss), loss, 0) + + return huber_model + +def log_cosh_loss(y_true, y_pred): + y_pred, y_true, num_notnan = mask_nan(y_true, y_pred) + + lc_ = tf.keras.losses.LogCosh(reduction=tf.keras.losses.Reduction.SUM) + lc = lc_(y_true, y_pred)/num_notnan + + loss = lc + + return tf.where(~tf.math.is_nan(loss), loss, 0) + +def gaussian_cross_entropy_loss(y_true, y_pred): + """Get cross entropy loss of output distribution and Gaussian centered around target + + Assumes predictions of shape (batch_size,3,num_markers) + """ + y_pred, y_true, num_notnan = mask_nan(y_true, y_pred) + loss = K.sum(tf.nn.sigmoid_cross_entropy_with_logits(labels=K.flatten(y_true), logits=K.flatten(y_pred))) / num_notnan + 
return tf.where(~tf.math.is_nan(loss), loss, 0) + +def max_euclidean_distance(param_mat): + """ + Get the metric to log 3D euclidean distance metric from MAX model outputs + Assumes predictions of shape (batch_size,3,num_markers) + + param_mat: Parameters passed to the model for training + """ + + def max_euclidean_distance_metric(y_true, y_pred): + # print ("y_pred = ", y_pred.shape) + nvox = param_mat["nvox"] + + (x,y,z) = tf.meshgrid(tf.range(nvox), tf.range(nvox), tf.range(nvox)) + import pdb; pdb.set_trace() + d_coords = tf.map_fn(fn=processing.plot_markers_3d_tf, elems=y_pred, fn_output_signature=tf.int32) + + vsize = (param_mat["vmax"] - param_mat["vmin"]) / param_mat[ + "nvox" + ] + + + y_true_normed = y_true/tf.expand_dims(tf.expand_dims(tf.expand_dims(K.sum(y_true, [1,2,3]),1),2),3) + + + x_coord = tf.expand_dims(tf.broadcast_to(tf.expand_dims(K.cast(x, "float32"), + -1), + y_pred.shape[1:]), + axis=0) * y_true_normed + x_coord = K.sum(x_coord, [1,2,3]) + + y_coord = tf.expand_dims(tf.broadcast_to(tf.expand_dims(K.cast(y, "float32"), + -1), + y_pred.shape[1:]), + axis=0) * y_true_normed + y_coord = K.sum(y_coord, [1,2,3]) + + z_coord = tf.expand_dims(tf.broadcast_to(tf.expand_dims(K.cast(z, "float32"), + -1), + y_pred.shape[1:]), + axis=0) *y_true_normed + z_coord = K.sum(z_coord, [1,2,3]) + + y_trues = tf.stack([x_coord, y_coord, z_coord], axis=1) + # y_trues = tf.stack([y_coord, x_coord, z_coord], axis=1) + + + # import pdb; pdb.set_trace() + + pred_out_world = param_mat["vmin"] + K.cast(d_coords,"float32") * vsize + vsize / 2 + + true_out_world = param_mat["vmin"] + y_trues * vsize + vsize / 2 + + + # tf.print("Y_true: ", y_true[0]) + # tf.print("y_pred: ", y_pred) + # tf.print("Normalized y_true sum: ", y_true/K.sum(y_true, [1,2,3])) + # tf.print("\nVmin: ", param_mat["vmin"]) + # tf.print("Vmax: ", param_mat["vmax"]) + # tf.print("\ny_trues: ", y_trues[0]) + # tf.print("Pred_out_world= ", pred_out_world) + # tf.print("True Out World= ", 
true_out_world) + + return euclidean_distance_3D(true_out_world, pred_out_world) + + return max_euclidean_distance_metric \ No newline at end of file diff --git a/dannce/engine/nets.py b/dannce/engine/nets.py index bdb9487..42f280f 100644 --- a/dannce/engine/nets.py +++ b/dannce/engine/nets.py @@ -5,6 +5,8 @@ from tensorflow.keras.layers import MaxPooling3D, Conv3DTranspose from tensorflow.keras.layers import Add from tensorflow.keras.layers import Activation +from tensorflow.keras.layers import GlobalMaxPooling3D +from tensorflow.keras import activations from tensorflow.keras.optimizers import Adam from tensorflow.keras.layers import BatchNormalization from tensorflow.keras import backend as K @@ -14,6 +16,29 @@ import numpy as np import h5py import tensorflow as tf +import logging +import sys + +FILE_PATH = "dannce.engine.nets" + +def setup_logging(logfile_path, log_lvl, params=None): + import os + if logfile_path != None and log_lvl != None: + if not os.path.exists(os.path.dirname(params["log_dest"])): + os.makedirs(os.path.dirname(params["log_dest"])) + logging.basicConfig(filename= logfile_path, + level= log_lvl, + format='%(asctime)s %(levelname)s:%(message)s', + datefmt='%m/%d/%Y %I:%M:%S %p') + elif params != None: + if not os.path.exists(os.path.dirname(params["log_dest"])): + os.makedirs(os.path.dirname(params["log_dest"])) + logging.basicConfig(filename=params["log_dest"], + level=params["log_level"], + format='%(asctime)s %(levelname)s:%(message)s', + datefmt='%m/%d/%Y %I:%M:%S %p') + else: + print("log file path and log level, or params dictionary must be passed") def get_metrics(params): """ @@ -28,10 +53,45 @@ def get_metrics(params): return metrics +# TODO (JOSH). Move the if/else normalization block to its own function. And in this function +# add the correct InstanceNormalization() call, using the appropriate axis setting. 
+def norm_fun( + norm_method=None, +): + """ + method: Normalization method can be "batch", "instance", or "layer" + """ + prepend_log_msg = FILE_PATH + ".norm_fun " + + method_parse = norm_method.lower() + if method_parse.startswith("batch"): + logging.info(prepend_log_msg + "using batch normalization") + + def fun(inputs): + logging.info(prepend_log_msg + "calling batch norm fun") + return BatchNormalization()(inputs) + elif method_parse.startswith("layer"): + logging.info(prepend_log_msg + "using layer normalization") + + def fun(inputs): + logging.info(prepend_log_msg + "calling layer norm fun") + return ops.InstanceNormalization(axis=None)(inputs) + elif method_parse.startswith("instance"): + logging.info(prepend_log_msg + "using instance normalization") + + def fun(inputs): + logging.info(prepend_log_msg + "calling instance norm fun") + return ops.InstanceNormalization(axis=-1)(inputs) + else: + def fun(inputs): + return inputs + + return fun + def unet2d_fullbn( lossfunc, lr, input_dim, feature_num, metric="mse", include_top=True ): - """Initialize 2D U-net. + """Initialize 2D U-net with batch normalization Uses the Keras functional API to construct a U-Net. 
The net is fully convolutional, so it can be trained and tested on variable size input @@ -44,89 +104,14 @@ def unet2d_fullbn( outputs-- model: Keras model object """ - inputs = Input((None, None, input_dim)) - conv1 = Conv2D(32, (3, 3), padding="same")(inputs) - conv1 = Activation("relu")(BatchNormalization()(conv1)) - conv1 = Conv2D(32, (3, 3), padding="same")(conv1) - conv1 = Activation("relu")(BatchNormalization()(conv1)) - pool1 = MaxPooling2D(pool_size=(2, 2))(conv1) - - conv2 = Conv2D(64, (3, 3), padding="same")(pool1) - conv2 = Activation("relu")(BatchNormalization()(conv2)) - conv2 = Conv2D(64, (3, 3), padding="same")(conv2) - conv2 = Activation("relu")(BatchNormalization()(conv2)) - pool2 = MaxPooling2D(pool_size=(2, 2))(conv2) - - conv3 = Conv2D(128, (3, 3), padding="same")(pool2) - conv3 = Activation("relu")(BatchNormalization()(conv3)) - conv3 = Conv2D(128, (3, 3), padding="same")(conv3) - conv3 = Activation("relu")(BatchNormalization()(conv3)) - pool3 = MaxPooling2D(pool_size=(2, 2))(conv3) - - conv4 = Conv2D(256, (3, 3), padding="same")(pool3) - conv4 = Activation("relu")(BatchNormalization()(conv4)) - conv4 = Conv2D(256, (3, 3), padding="same")(conv4) - conv4 = Activation("relu")(BatchNormalization()(conv4)) - pool4 = MaxPooling2D(pool_size=(2, 2))(conv4) - - conv5 = Conv2D(512, (3, 3), padding="same")(pool4) - conv5 = Activation("relu")(BatchNormalization()(conv5)) - conv5 = Conv2D(512, (3, 3), padding="same")(conv5) - conv5 = Activation("relu")(BatchNormalization()(conv5)) - - up6 = concatenate( - [Conv2DTranspose(256, (2, 2), strides=(2, 2), padding="same")(conv5), conv4], - axis=3, - ) - conv6 = Conv2D(256, (3, 3), padding="same")(up6) - conv6 = Activation("relu")(BatchNormalization()(conv6)) - conv6 = Conv2D(256, (3, 3), padding="same")(conv6) - conv6 = Activation("relu")(BatchNormalization()(conv6)) - - up7 = concatenate( - [Conv2DTranspose(128, (2, 2), strides=(2, 2), padding="same")(conv6), conv3], - axis=3, - ) - conv7 = Conv2D(128, (3, 3), 
padding="same")(up7) - conv7 = Activation("relu")(BatchNormalization()(conv7)) - conv7 = Conv2D(128, (3, 3), padding="same")(conv7) - conv7 = Activation("relu")(BatchNormalization()(conv7)) - - up8 = concatenate( - [Conv2DTranspose(64, (2, 2), strides=(2, 2), padding="same")(conv7), conv2], - axis=3, - ) - conv8 = Conv2D(64, (3, 3), padding="same")(up8) - conv8 = Activation("relu")(BatchNormalization()(conv8)) - conv8 = Conv2D(64, (3, 3), padding="same")(conv8) - conv8 = Activation("relu")(BatchNormalization()(conv8)) - - up9 = concatenate( - [Conv2DTranspose(32, (2, 2), strides=(2, 2), padding="same")(conv8), conv1], - axis=3, - ) - conv9 = Conv2D(32, (3, 3), padding="same")(up9) - conv9 = Activation("relu")(BatchNormalization()(conv9)) - conv9 = Conv2D(32, (3, 3), padding="same")(conv9) - conv9 = Activation("relu")(BatchNormalization()(conv9)) - - conv10 = Conv2D(feature_num, (1, 1), activation="sigmoid")(conv9) - - if include_top: - model = Model(inputs=[inputs], outputs=[conv10]) - else: - model = Model(inputs=[inputs], outputs=[conv9]) - - model.compile(optimizer=Adam(lr=lr), loss=lossfunc, metrics=[metric]) - - return model - + return unet2d_full(lossfunc, lr, input_dim, feature_num, metric=metric, + include_top=include_top, norm_method="batch") def unet2d_fullIN( lossfunc, lr, input_dim, feature_num, metric="mse", include_top=True ): """ - Initialize 2D U-net + Initialize 2D U-net with instance normalization Uses the Keras functional API to construct a U-Net. The net is fully convolutional, so it can be trained and tested on variable size input (thus the x-y input dimensions are undefined) @@ -138,667 +123,128 @@ def unet2d_fullIN( outputs-- model: Keras model object """ + + return unet2d_full(lossfunc, lr, input_dim, feature_num, metric=metric, + include_top=include_top, norm_method="instance") + +def unet2d_full( + lossfunc, lr, input_dim, feature_num, metric="mse", include_top=True, + norm_method="layer" +): + """Initialize 2D U-net. 
+ + Uses the Keras functional API to construct a U-Net. The net is fully + convolutional, so it can be trained and tested on variable size input + (thus the x-y input dimensions are undefined) + inputs-- + lossfunc: loss function + lr: float; learning rate + input_dim: int; number of feature channels in input + feature_num: int; number of output features + norm_method: str; normalization method ("instance","batch","layer",None) + outputs-- + model: Keras model object + """ + fun = norm_fun(norm_method) + inputs = Input((None, None, input_dim)) conv1 = Conv2D(32, (3, 3), padding="same")(inputs) - conv1 = Activation("relu")(ops.InstanceNormalization()(conv1)) + conv1 = Activation("relu")(fun(conv1)) conv1 = Conv2D(32, (3, 3), padding="same")(conv1) - conv1 = Activation("relu")(ops.InstanceNormalization()(conv1)) + conv1 = Activation("relu")(fun(conv1)) pool1 = MaxPooling2D(pool_size=(2, 2))(conv1) conv2 = Conv2D(64, (3, 3), padding="same")(pool1) - conv2 = Activation("relu")(ops.InstanceNormalization()(conv2)) + conv2 = Activation("relu")(fun(conv2)) conv2 = Conv2D(64, (3, 3), padding="same")(conv2) - conv2 = Activation("relu")(ops.InstanceNormalization()(conv2)) + conv2 = Activation("relu")(fun(conv2)) pool2 = MaxPooling2D(pool_size=(2, 2))(conv2) conv3 = Conv2D(128, (3, 3), padding="same")(pool2) - conv3 = Activation("relu")(ops.InstanceNormalization()(conv3)) + conv3 = Activation("relu")(fun(conv3)) conv3 = Conv2D(128, (3, 3), padding="same")(conv3) - conv3 = Activation("relu")(ops.InstanceNormalization()(conv3)) + conv3 = Activation("relu")(fun(conv3)) pool3 = MaxPooling2D(pool_size=(2, 2))(conv3) conv4 = Conv2D(256, (3, 3), padding="same")(pool3) - conv4 = Activation("relu")(ops.InstanceNormalization()(conv4)) + conv4 = Activation("relu")(fun(conv4)) conv4 = Conv2D(256, (3, 3), padding="same")(conv4) - conv4 = Activation("relu")(ops.InstanceNormalization()(conv4)) + conv4 = Activation("relu")(fun(conv4)) pool4 = MaxPooling2D(pool_size=(2, 2))(conv4) conv5 = 
Conv2D(512, (3, 3), padding="same")(pool4) - conv5 = Activation("relu")(ops.InstanceNormalization()(conv5)) + conv5 = Activation("relu")(fun(conv5)) conv5 = Conv2D(512, (3, 3), padding="same")(conv5) - conv5 = Activation("relu")(ops.InstanceNormalization()(conv5)) + conv5 = Activation("relu")(fun(conv5)) up6 = concatenate( [Conv2DTranspose(256, (2, 2), strides=(2, 2), padding="same")(conv5), conv4], axis=3, ) conv6 = Conv2D(256, (3, 3), padding="same")(up6) - conv6 = Activation("relu")(ops.InstanceNormalization()(conv6)) + conv6 = Activation("relu")(fun(conv6)) conv6 = Conv2D(256, (3, 3), padding="same")(conv6) - conv6 = Activation("relu")(ops.InstanceNormalization()(conv6)) + conv6 = Activation("relu")(fun(conv6)) up7 = concatenate( [Conv2DTranspose(128, (2, 2), strides=(2, 2), padding="same")(conv6), conv3], axis=3, ) conv7 = Conv2D(128, (3, 3), padding="same")(up7) - conv7 = Activation("relu")(ops.InstanceNormalization()(conv7)) + conv7 = Activation("relu")(fun(conv7)) conv7 = Conv2D(128, (3, 3), padding="same")(conv7) - conv7 = Activation("relu")(ops.InstanceNormalization()(conv7)) + conv7 = Activation("relu")(fun(conv7)) up8 = concatenate( [Conv2DTranspose(64, (2, 2), strides=(2, 2), padding="same")(conv7), conv2], axis=3, ) - conv8 = Conv2D(64, (3, 3), padding="same")(up8) - conv8 = Activation("relu")(ops.InstanceNormalization()(conv8)) - conv8 = Conv2D(64, (3, 3), padding="same")(conv8) - conv8 = Activation("relu")(ops.InstanceNormalization()(conv8)) - - up9 = concatenate( - [Conv2DTranspose(32, (2, 2), strides=(2, 2), padding="same")(conv8), conv1], - axis=3, - ) - conv9 = Conv2D(32, (3, 3), padding="same")(up9) - conv9 = Activation("relu")(ops.InstanceNormalization()(conv9)) - conv9 = Conv2D(32, (3, 3), padding="same")(conv9) - conv9 = Activation("relu")(ops.InstanceNormalization()(conv9)) - - conv10 = Conv2D(feature_num, (1, 1), activation="sigmoid")(conv9) - - if include_top: - model = Model(inputs=[inputs], outputs=[conv10]) - else: - model = 
Model(inputs=[inputs], outputs=[conv9]) - - model.compile(optimizer=Adam(lr=lr), loss=lossfunc, metrics=[metric]) - - return model - -def unet2d_fullIN(lossfunc, lr, input_dim, feature_num, metric='mse', include_top = True): - """ - Initialize 2D U-net - - Uses the Keras functional API to construct a U-Net. The net is fully convolutional, so it can be trained - and tested on variable size input (thus the x-y input dimensions are undefined) - inputs-- - lossfunc: loss function - lr: float; learning rate - input_dim: int; number of feature channels in input - feature_num: int; number of output features - outputs-- - model: Keras model object - """ - inputs = Input((None, None, input_dim)) - conv1 = Conv2D(32, (3, 3), padding='same')(inputs) - conv1 = Activation('relu')(ops.InstanceNormalization()(conv1)) - conv1 = Conv2D(32, (3, 3), padding='same')(conv1) - conv1 = Activation('relu')(ops.InstanceNormalization()(conv1)) - pool1 = MaxPooling2D(pool_size=(2, 2))(conv1) - - conv2 = Conv2D(64, (3, 3), padding='same')(pool1) - conv2 = Activation('relu')(ops.InstanceNormalization()(conv2)) - conv2 = Conv2D(64, (3, 3), padding='same')(conv2) - conv2 = Activation('relu')(ops.InstanceNormalization()(conv2)) - pool2 = MaxPooling2D(pool_size=(2, 2))(conv2) - - conv3 = Conv2D(128, (3, 3), padding='same')(pool2) - conv3 = Activation('relu')(ops.InstanceNormalization()(conv3)) - conv3 = Conv2D(128, (3, 3), padding='same')(conv3) - conv3 = Activation('relu')(ops.InstanceNormalization()(conv3)) - pool3 = MaxPooling2D(pool_size=(2, 2))(conv3) - - conv4 = Conv2D(256, (3, 3), padding='same')(pool3) - conv4 = Activation('relu')(ops.InstanceNormalization()(conv4)) - conv4 = Conv2D(256, (3, 3), padding='same')(conv4) - conv4 = Activation('relu')(ops.InstanceNormalization()(conv4)) - pool4 = MaxPooling2D(pool_size=(2, 2))(conv4) - - conv5 = Conv2D(512, (3, 3), padding='same')(pool4) - conv5 = Activation('relu')(ops.InstanceNormalization()(conv5)) - conv5 = Conv2D(512, (3, 3), 
padding='same')(conv5) - conv5 = Activation('relu')(ops.InstanceNormalization()(conv5)) - - up6 = concatenate([Conv2DTranspose(256, (2, 2), strides=(2, 2), padding='same')(conv5), conv4], axis=3) - conv6 = Conv2D(256, (3, 3), padding='same')(up6) - conv6 = Activation('relu')(ops.InstanceNormalization()(conv6)) - conv6 = Conv2D(256, (3, 3), padding='same')(conv6) - conv6 = Activation('relu')(ops.InstanceNormalization()(conv6)) - - up7 = concatenate([Conv2DTranspose(128, (2, 2), strides=(2, 2), padding='same')(conv6), conv3], axis=3) - conv7 = Conv2D(128, (3, 3), padding='same')(up7) - conv7 = Activation('relu')(ops.InstanceNormalization()(conv7)) - conv7 = Conv2D(128, (3, 3), padding='same')(conv7) - conv7 = Activation('relu')(ops.InstanceNormalization()(conv7)) - - up8 = concatenate([Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same')(conv7), conv2], axis=3) - conv8 = Conv2D(64, (3, 3), padding='same')(up8) - conv8 = Activation('relu')(ops.InstanceNormalization()(conv8)) - conv8 = Conv2D(64, (3, 3), padding='same')(conv8) - conv8 = Activation('relu')(ops.InstanceNormalization()(conv8)) - - up9 = concatenate([Conv2DTranspose(32, (2, 2), strides=(2, 2), padding='same')(conv8), conv1], axis=3) - conv9 = Conv2D(32, (3, 3), padding='same')(up9) - conv9 = Activation('relu')(ops.InstanceNormalization()(conv9)) - conv9 = Conv2D(32, (3, 3), padding='same')(conv9) - conv9 = Activation('relu')(ops.InstanceNormalization()(conv9)) - - conv10 = Conv2D(feature_num, (1, 1), activation='sigmoid')(conv9) - - - if include_top: - model = Model(inputs=[inputs], outputs=[conv10]) - else: - model = Model(inputs=[inputs], outputs=[conv9]) - - model.compile(optimizer=Adam(lr=lr), loss=lossfunc, metrics=[metric]) - - return model - -def unet3d_big_expectedvalue( - lossfunc, - lr, - input_dim, - feature_num, - num_cams, - gridsize=(64, 64, 64), - batch_norm=False, - instance_norm=False, - include_top=True, - regularize_var=False, - loss_weights=None, - metric=["mse"], - 
out_kernel=(1, 1, 1), -): - - if batch_norm and not instance_norm: - print("using batch normalization") - - def fun(inputs): - return BatchNormalization()(inputs) - - elif instance_norm: - print("using instance normalization") - - def fun(inputs): - return ops.InstanceNormalization()(inputs) - - else: - - def fun(inputs): - return inputs - - inputs = Input((*gridsize, input_dim * num_cams), name="image_input") - conv1_layer = Conv3D(64, (3, 3, 3), padding="same") - - conv1 = conv1_layer(inputs) - conv1 = Activation("relu")(fun(conv1)) - conv1 = Conv3D(64, (3, 3, 3), padding="same")(conv1) - conv1 = Activation("relu")(fun(conv1)) - pool1 = MaxPooling3D(pool_size=(2, 2, 2))(conv1) - - conv2 = Conv3D(128, (3, 3, 3), padding="same")(pool1) - conv2 = Activation("relu")(fun(conv2)) - conv2 = Conv3D(128, (3, 3, 3), padding="same")(conv2) - conv2 = Activation("relu")(fun(conv2)) - pool2 = MaxPooling3D(pool_size=(2, 2, 2))(conv2) - - conv3 = Conv3D(256, (3, 3, 3), padding="same")(pool2) - conv3 = Activation("relu")(fun(conv3)) - conv3 = Conv3D(256, (3, 3, 3), padding="same")(conv3) - conv3 = Activation("relu")(fun(conv3)) - pool3 = MaxPooling3D(pool_size=(2, 2, 2))(conv3) - - conv4 = Conv3D(512, (3, 3, 3), padding="same")(pool3) - conv4 = Activation("relu")(fun(conv4)) - conv4 = Conv3D(512, (3, 3, 3), padding="same")(conv4) - conv4 = Activation("relu")(fun(conv4)) - - up6 = concatenate( - [ - Conv3DTranspose(256, (2, 2, 2), strides=(2, 2, 2), padding="same")(conv4), - conv3, - ], - axis=4, - ) - conv6 = Conv3D(256, (3, 3, 3), padding="same")(up6) - conv6 = Activation("relu")(fun(conv6)) - conv6 = Conv3D(256, (3, 3, 3), padding="same")(conv6) - conv6 = Activation("relu")(fun(conv6)) - - up7 = concatenate( - [ - Conv3DTranspose(128, (2, 2, 2), strides=(2, 2, 2), padding="same")(conv6), - conv2, - ], - axis=4, - ) - conv7 = Conv3D(128, (3, 3, 3), padding="same")(up7) - conv7 = Activation("relu")(fun(conv7)) - conv7 = Conv3D(128, (3, 3, 3), padding="same")(conv7) - conv7 = 
Activation("relu")(fun(conv7)) - - up8 = concatenate( - [ - Conv3DTranspose(64, (2, 2, 2), strides=(2, 2, 2), padding="same")(conv7), - conv1, - ], - axis=4, - ) - conv8 = Conv3D(64, (3, 3, 3), padding="same")(up8) - conv8 = Activation("relu")(fun(conv8)) - conv8 = Conv3D(64, (3, 3, 3), padding="same")(conv8) - conv8 = Activation("relu")(fun(conv8)) - - conv10 = Conv3D(feature_num, out_kernel, activation="linear", padding="same")(conv8) - - grid_centers = Input((None, 3), name="grid_input") - - conv10 = Lambda(lambda x: ops.spatial_softmax(x), name="normed_map")(conv10) - - output = Lambda(lambda x: ops.expected_value_3d(x[0], x[1]), name="final_output")([conv10, grid_centers]) - - # Because I think it is easier, use a layer to calculate the variance and return it as a second output to be used for variance loss - - output_var = Lambda(lambda x: ops.var_3d(x[0], x[1], x[2]))( - [conv10, grid_centers, output] - ) - - if include_top: - if regularize_var: - model = Model(inputs=[inputs, grid_centers], outputs=[output, output_var]) - else: - model = Model(inputs=[inputs, grid_centers], outputs=[output]) - else: - model = Model(inputs=[inputs], outputs=[conv8]) - - # model.compile(optimizer=Adam(lr=lr), loss=[lossfunc[0], lossfunc[1]], metrics=['mse']) - model.compile( - optimizer=Adam(lr=lr), loss=lossfunc, metrics=metric, loss_weights=loss_weights - ) - - return model - - -def slice_input(inp, k): - print(K.int_shape(inp)) - return inp[:, :, :, :, k * 3 : (k + 1) * 3] - - -def unet3d_big_tiedfirstlayer_expectedvalue( - lossfunc, - lr, - input_dim, - feature_num, - num_cams, - gridsize=(64, 64, 64), - batch_norm=False, - instance_norm=False, - include_top=True, - regularize_var=False, - loss_weights=None, - metric="mse", -): - - if batch_norm and not instance_norm: - print("using batch normalization") - - def fun(inputs): - return BatchNormalization()(inputs) - - elif instance_norm: - print("using instance normalization") - - def fun(inputs): - return 
ops.InstanceNormalization()(inputs) - - else: - - def fun(inputs): - return inputs - - def slice_input(inp, k): - print(K.int_shape(inp)) - return inp[:, :, :, :, k * input_dim : (k + 1) * input_dim] - - inputs = Input((*gridsize, input_dim * num_cams)) - conv1_layer = Conv3D(64, (3, 3, 3), padding="same") - - conv1_in = [] - for i in range(num_cams): - # conv1_in.append(conv1_layer(inputs[:,:,:,:,i*input_dim:(i+1)*input_dim])) - conv1_in.append(conv1_layer(Lambda(lambda x: slice_input(x, i))(inputs))) - - conv1 = Add()(conv1_in) - conv1 = Activation("relu")(fun(conv1)) - conv1 = Conv3D(64, (3, 3, 3), padding="same")(conv1) - conv1 = Activation("relu")(fun(conv1)) - pool1 = MaxPooling3D(pool_size=(2, 2, 2))(conv1) - - conv2 = Conv3D(128, (3, 3, 3), padding="same")(pool1) - conv2 = Activation("relu")(fun(conv2)) - conv2 = Conv3D(128, (3, 3, 3), padding="same")(conv2) - conv2 = Activation("relu")(fun(conv2)) - pool2 = MaxPooling3D(pool_size=(2, 2, 2))(conv2) - - conv3 = Conv3D(256, (3, 3, 3), padding="same")(pool2) - conv3 = Activation("relu")(fun(conv3)) - conv3 = Conv3D(256, (3, 3, 3), padding="same")(conv3) - conv3 = Activation("relu")(fun(conv3)) - pool3 = MaxPooling3D(pool_size=(2, 2, 2))(conv3) - - conv4 = Conv3D(512, (3, 3, 3), padding="same")(pool3) - conv4 = Activation("relu")(fun(conv4)) - conv4 = Conv3D(512, (3, 3, 3), padding="same")(conv4) - conv4 = Activation("relu")(fun(conv4)) - - up6 = concatenate( - [ - Conv3DTranspose(256, (2, 2, 2), strides=(2, 2, 2), padding="same")(conv4), - conv3, - ], - axis=4, - ) - conv6 = Conv3D(256, (3, 3, 3), padding="same")(up6) - conv6 = Activation("relu")(fun(conv6)) - conv6 = Conv3D(256, (3, 3, 3), padding="same")(conv6) - conv6 = Activation("relu")(fun(conv6)) - - up7 = concatenate( - [ - Conv3DTranspose(128, (2, 2, 2), strides=(2, 2, 2), padding="same")(conv6), - conv2, - ], - axis=4, - ) - conv7 = Conv3D(128, (3, 3, 3), padding="same")(up7) - conv7 = Activation("relu")(fun(conv7)) - conv7 = Conv3D(128, (3, 3, 3), 
padding="same")(conv7) - conv7 = Activation("relu")(fun(conv7)) - - up8 = concatenate( - [ - Conv3DTranspose(64, (2, 2, 2), strides=(2, 2, 2), padding="same")(conv7), - conv1, - ], - axis=4, - ) - conv8 = Conv3D(64, (3, 3, 3), padding="same")(up8) - conv8 = Activation("relu")(fun(conv8)) - conv8 = Conv3D(64, (3, 3, 3), padding="same")(conv8) - conv8 = Activation("relu")(fun(conv8)) - - conv10 = Conv3D(feature_num, (1, 1, 1), activation="linear")(conv8) - - grid_centers = Input((None, 3)) - - conv10 = Lambda(lambda x: ops.spatial_softmax(x))(conv10) - - output = Lambda(lambda x: ops.expected_value_3d(x[0], x[1]))([conv10, grid_centers]) - - # Because I think it is easier, use a layer to calculate the variance and return it as a second output to be used for variance loss - - output_var = Lambda(lambda x: ops.var_3d(x[0], x[1], x[2]))( - [conv10, grid_centers, output] - ) - - if include_top: - if regularize_var: - model = Model(inputs=[inputs, grid_centers], outputs=[output, output_var]) - else: - model = Model(inputs=[inputs, grid_centers], outputs=[output]) - else: - model = Model(inputs=[inputs], outputs=[conv8]) - - # model.compile(optimizer=Adam(lr=lr), loss=[lossfunc[0], lossfunc[1]], metrics=['mse']) - model.compile( - optimizer=Adam(lr=lr), - loss=lossfunc, - metrics=[metric], - loss_weights=loss_weights, - ) - - return model - - -def unet3d_big_1cam( - lossfunc, - lr, - input_dim, - feature_num, - num_cams, - batch_norm=False, - instance_norm=False, -): - - if batch_norm and not instance_norm: - print("using batch normalization") - - def fun(inputs): - return BatchNormalization()(inputs) - - elif instance_norm: - print("using instance normalization") - - def fun(inputs): - return ops.InstanceNormalization()(inputs) - - else: - - def fun(inputs): - return inputs - - inputs = Input((None, None, None, input_dim)) - conv1_layer = Conv3D(64, (3, 3, 3), padding="same") - - conv1 = conv1_layer(inputs) - conv1 = Activation("relu")(fun(conv1)) - conv1 = Conv3D(64, (3, 
3, 3), padding="same")(conv1) - conv1 = Activation("relu")(fun(conv1)) - pool1 = MaxPooling3D(pool_size=(2, 2, 2))(conv1) - - conv2 = Conv3D(128, (3, 3, 3), padding="same")(pool1) - conv2 = Activation("relu")(fun(conv2)) - conv2 = Conv3D(128, (3, 3, 3), padding="same")(conv2) - conv2 = Activation("relu")(fun(conv2)) - pool2 = MaxPooling3D(pool_size=(2, 2, 2))(conv2) - - conv3 = Conv3D(256, (3, 3, 3), padding="same")(pool2) - conv3 = Activation("relu")(fun(conv3)) - conv3 = Conv3D(256, (3, 3, 3), padding="same")(conv3) - conv3 = Activation("relu")(fun(conv3)) - pool3 = MaxPooling3D(pool_size=(2, 2, 2))(conv3) - - conv4 = Conv3D(512, (3, 3, 3), padding="same")(pool3) - conv4 = Activation("relu")(fun(conv4)) - conv4 = Conv3D(512, (3, 3, 3), padding="same")(conv4) - conv4 = Activation("relu")(fun(conv4)) - - up6 = concatenate( - [ - Conv3DTranspose(256, (2, 2, 2), strides=(2, 2, 2), padding="same")(conv4), - conv3, - ], - axis=4, - ) - conv6 = Conv3D(256, (3, 3, 3), padding="same")(up6) - conv6 = Activation("relu")(fun(conv6)) - conv6 = Conv3D(256, (3, 3, 3), padding="same")(conv6) - conv6 = Activation("relu")(fun(conv6)) - - up7 = concatenate( - [ - Conv3DTranspose(128, (2, 2, 2), strides=(2, 2, 2), padding="same")(conv6), - conv2, - ], - axis=4, - ) - conv7 = Conv3D(128, (3, 3, 3), padding="same")(up7) - conv7 = Activation("relu")(fun(conv7)) - conv7 = Conv3D(128, (3, 3, 3), padding="same")(conv7) - conv7 = Activation("relu")(fun(conv7)) - - up8 = concatenate( - [ - Conv3DTranspose(64, (2, 2, 2), strides=(2, 2, 2), padding="same")(conv7), - conv1, - ], - axis=4, - ) - conv8 = Conv3D(64, (3, 3, 3), padding="same")(up8) - conv8 = Activation("relu")(fun(conv8)) - conv8 = Conv3D(64, (3, 3, 3), padding="same")(conv8) - conv8 = Activation("relu")(fun(conv8)) - - conv10 = Conv3D(feature_num, (1, 1, 1), activation="sigmoid")(conv8) - - model = Model(inputs=[inputs], outputs=[conv10]) - - model.compile(optimizer=Adam(lr=lr), loss=lossfunc, metrics=["mse"]) - - return model - 
- -def unet3d_big_tiedfirstlayer( - lossfunc, - lr, - input_dim, - feature_num, - num_cams, - batch_norm=False, - instance_norm=False, - bs=6, -): - - if batch_norm and not instance_norm: - print("using batch normalization") - - def fun(inputs): - return BatchNormalization()(inputs) - - elif instance_norm: - print("using instance normalization") - - def fun(inputs): - return ops.InstanceNormalization()(inputs) - - else: - - def fun(inputs): - return inputs - - def slice_input(inp, k): - print(K.int_shape(inp)) - return inp[:, :, :, :, k * input_dim : (k + 1) * input_dim] - - inputs = Input((None, None, None, input_dim * num_cams)) - conv1_layer = Conv3D(64, (3, 3, 3), padding="same") - - conv1_in = [] - for i in range(num_cams): - # conv1_in.append(conv1_layer(inputs[:,:,:,:,i*input_dim:(i+1)*input_dim])) - conv1_in.append(conv1_layer(Lambda(lambda x: slice_input(x, i))(inputs))) - - conv1 = Add()(conv1_in) - conv1 = Activation("relu")(fun(conv1)) - conv1 = Conv3D(64, (3, 3, 3), padding="same")(conv1) - conv1 = Activation("relu")(fun(conv1)) - pool1 = MaxPooling3D(pool_size=(2, 2, 2))(conv1) - - conv2 = Conv3D(128, (3, 3, 3), padding="same")(pool1) - conv2 = Activation("relu")(fun(conv2)) - conv2 = Conv3D(128, (3, 3, 3), padding="same")(conv2) - conv2 = Activation("relu")(fun(conv2)) - pool2 = MaxPooling3D(pool_size=(2, 2, 2))(conv2) - - conv3 = Conv3D(256, (3, 3, 3), padding="same")(pool2) - conv3 = Activation("relu")(fun(conv3)) - conv3 = Conv3D(256, (3, 3, 3), padding="same")(conv3) - conv3 = Activation("relu")(fun(conv3)) - pool3 = MaxPooling3D(pool_size=(2, 2, 2))(conv3) - - conv4 = Conv3D(512, (3, 3, 3), padding="same")(pool3) - conv4 = Activation("relu")(fun(conv4)) - conv4 = Conv3D(512, (3, 3, 3), padding="same")(conv4) - conv4 = Activation("relu")(fun(conv4)) - - up6 = concatenate( - [ - Conv3DTranspose(256, (2, 2, 2), strides=(2, 2, 2), padding="same")(conv4), - conv3, - ], - axis=4, - ) - conv6 = Conv3D(256, (3, 3, 3), padding="same")(up6) - conv6 = 
Activation("relu")(fun(conv6)) - conv6 = Conv3D(256, (3, 3, 3), padding="same")(conv6) - conv6 = Activation("relu")(fun(conv6)) - - up7 = concatenate( - [ - Conv3DTranspose(128, (2, 2, 2), strides=(2, 2, 2), padding="same")(conv6), - conv2, - ], - axis=4, - ) - conv7 = Conv3D(128, (3, 3, 3), padding="same")(up7) - conv7 = Activation("relu")(fun(conv7)) - conv7 = Conv3D(128, (3, 3, 3), padding="same")(conv7) - conv7 = Activation("relu")(fun(conv7)) - - up8 = concatenate( - [ - Conv3DTranspose(64, (2, 2, 2), strides=(2, 2, 2), padding="same")(conv7), - conv1, - ], - axis=4, - ) - conv8 = Conv3D(64, (3, 3, 3), padding="same")(up8) + conv8 = Conv2D(64, (3, 3), padding="same")(up8) conv8 = Activation("relu")(fun(conv8)) - conv8 = Conv3D(64, (3, 3, 3), padding="same")(conv8) + conv8 = Conv2D(64, (3, 3), padding="same")(conv8) conv8 = Activation("relu")(fun(conv8)) - conv10 = Conv3D(feature_num, (1, 1, 1), activation="sigmoid")(conv8) + up9 = concatenate( + [Conv2DTranspose(32, (2, 2), strides=(2, 2), padding="same")(conv8), conv1], + axis=3, + ) + conv9 = Conv2D(32, (3, 3), padding="same")(up9) + conv9 = Activation("relu")(fun(conv9)) + conv9 = Conv2D(32, (3, 3), padding="same")(conv9) + conv9 = Activation("relu")(fun(conv9)) - model = Model(inputs=[inputs], outputs=[conv10]) + conv10 = Conv2D(feature_num, (1, 1), activation="sigmoid")(conv9) - model.compile(optimizer=Adam(lr=lr), loss=lossfunc, metrics=["mse"]) + if include_top: + model = Model(inputs=[inputs], outputs=[conv10]) + else: + model = Model(inputs=[inputs], outputs=[conv9]) - return model + model.compile(optimizer=Adam(lr=lr), loss=lossfunc, metrics=[metric]) + return model -def unet3d_big( +def unet3d_big_expectedvalue( lossfunc, lr, input_dim, feature_num, num_cams, - batch_norm=False, - instance_norm=False, + gridsize=(64, 64, 64), + norm_method="layer", include_top=True, - last_kern_size=(1, 1, 1), - gridsize=None, + regularize_var=False, + loss_weights=None, + metric=["mse"], + out_kernel=(1, 1, 1), ): 
- # Gridsize unused, necessary for argument consistency with other nets - if batch_norm and not instance_norm: - print("using batch normalization") - - def fun(inputs): - return BatchNormalization()(inputs) - - elif instance_norm: - print("using instance normalization") - - def fun(inputs): - return ops.InstanceNormalization()(inputs) - else: + fun = norm_fun(norm_method) - def fun(inputs): - return inputs + inputs = Input((*gridsize, input_dim * num_cams), name="image_input") + conv1_layer = Conv3D(64, (3, 3, 3), padding="same") - inputs = Input((None, None, None, input_dim * num_cams)) - conv1 = Conv3D(64, (3, 3, 3), padding="same")(inputs) + conv1 = conv1_layer(inputs) conv1 = Activation("relu")(fun(conv1)) conv1 = Conv3D(64, (3, 3, 3), padding="same")(conv1) conv1 = Activation("relu")(fun(conv1)) @@ -857,71 +303,77 @@ def fun(inputs): conv8 = Conv3D(64, (3, 3, 3), padding="same")(conv8) conv8 = Activation("relu")(fun(conv8)) - conv10 = Conv3D(feature_num, last_kern_size, activation="sigmoid")(conv8) + conv10 = Conv3D(feature_num, out_kernel, activation="linear", padding="same")(conv8) + + grid_centers = Input((None, 3), name="grid_input") + + conv10 = Lambda(lambda x: ops.spatial_softmax(x), name="normed_map")(conv10) + + output = Lambda(lambda x: ops.expected_value_3d(x[0], x[1]), name="final_output")([conv10, grid_centers]) + + # Because I think it is easier, use a layer to calculate the variance and return it as a second output to be used for variance loss + + output_var = Lambda(lambda x: ops.var_3d(x[0], x[1], x[2]))( + [conv10, grid_centers, output] + ) if include_top: - model = Model(inputs=[inputs], outputs=[conv10]) + if regularize_var: + model = Model(inputs=[inputs, grid_centers], outputs=[output, output_var]) + else: + model = Model(inputs=[inputs, grid_centers], outputs=[output]) else: model = Model(inputs=[inputs], outputs=[conv8]) - model.compile(optimizer=Adam(lr=lr), loss=lossfunc, metrics=["mse"]) + # model.compile(optimizer=Adam(lr=lr), 
loss=[lossfunc[0], lossfunc[1]], metrics=['mse']) + model.compile( + optimizer=Adam(lr=lr), loss=lossfunc, metrics=metric, loss_weights=loss_weights + ) return model -def unet3d_big_IN_BN( +def slice_input(inp, k): + prepend_log_msg = FILE_PATH + ".{} ".format(sys._getframe( ).f_code.co_name) + logging.info(prepend_log_msg + "{K.int_shape(inp)}") + return inp[:, :, :, :, k * 3 : (k + 1) * 3] + +def unet3d_big_1cam( lossfunc, lr, input_dim, feature_num, num_cams, - batch_norm=False, - instance_norm=False, - include_top=True, - last_kern_size=(1, 1, 1), - gridsize=None, + norm_method="layer", ): - # Gridsize unused, necessary for argument consistency with other nets - if batch_norm and not instance_norm: - print("using batch normalization") - - def fun(inputs): - return BatchNormalization()(inputs) - - elif instance_norm: - print("using instance normalization") - - def fun(inputs): - return ops.InstanceNormalization()(inputs) - else: + fun = norm_fun(norm_method) - def fun(inputs): - return inputs + inputs = Input((None, None, None, input_dim)) + conv1_layer = Conv3D(64, (3, 3, 3), padding="same") - inputs = Input((None, None, None, input_dim * num_cams)) - conv1 = Conv3D(64, (3, 3, 3), padding="same")(inputs) + conv1 = conv1_layer(inputs) conv1 = Activation("relu")(fun(conv1)) conv1 = Conv3D(64, (3, 3, 3), padding="same")(conv1) conv1 = Activation("relu")(fun(conv1)) pool1 = MaxPooling3D(pool_size=(2, 2, 2))(conv1) conv2 = Conv3D(128, (3, 3, 3), padding="same")(pool1) - conv2 = Activation("relu")(BatchNormalization()(conv2)) + conv2 = Activation("relu")(fun(conv2)) conv2 = Conv3D(128, (3, 3, 3), padding="same")(conv2) - conv2 = Activation("relu")(BatchNormalization()(conv2)) + conv2 = Activation("relu")(fun(conv2)) pool2 = MaxPooling3D(pool_size=(2, 2, 2))(conv2) conv3 = Conv3D(256, (3, 3, 3), padding="same")(pool2) - conv3 = Activation("relu")(BatchNormalization()(conv3)) + conv3 = Activation("relu")(fun(conv3)) conv3 = Conv3D(256, (3, 3, 3), padding="same")(conv3) 
- conv3 = Activation("relu")(BatchNormalization()(conv3)) + conv3 = Activation("relu")(fun(conv3)) pool3 = MaxPooling3D(pool_size=(2, 2, 2))(conv3) conv4 = Conv3D(512, (3, 3, 3), padding="same")(pool3) - conv4 = Activation("relu")(BatchNormalization()(conv4)) + conv4 = Activation("relu")(fun(conv4)) conv4 = Conv3D(512, (3, 3, 3), padding="same")(conv4) - conv4 = Activation("relu")(BatchNormalization()(conv4)) + conv4 = Activation("relu")(fun(conv4)) up6 = concatenate( [ @@ -931,9 +383,9 @@ def fun(inputs): axis=4, ) conv6 = Conv3D(256, (3, 3, 3), padding="same")(up6) - conv6 = Activation("relu")(BatchNormalization()(conv6)) + conv6 = Activation("relu")(fun(conv6)) conv6 = Conv3D(256, (3, 3, 3), padding="same")(conv6) - conv6 = Activation("relu")(BatchNormalization()(conv6)) + conv6 = Activation("relu")(fun(conv6)) up7 = concatenate( [ @@ -943,9 +395,9 @@ def fun(inputs): axis=4, ) conv7 = Conv3D(128, (3, 3, 3), padding="same")(up7) - conv7 = Activation("relu")(BatchNormalization()(conv7)) + conv7 = Activation("relu")(fun(conv7)) conv7 = Conv3D(128, (3, 3, 3), padding="same")(conv7) - conv7 = Activation("relu")(BatchNormalization()(conv7)) + conv7 = Activation("relu")(fun(conv7)) up8 = concatenate( [ @@ -955,87 +407,54 @@ def fun(inputs): axis=4, ) conv8 = Conv3D(64, (3, 3, 3), padding="same")(up8) - conv8 = Activation("relu")(BatchNormalization()(conv8)) + conv8 = Activation("relu")(fun(conv8)) conv8 = Conv3D(64, (3, 3, 3), padding="same")(conv8) - conv8 = Activation("relu")(BatchNormalization()(conv8)) + conv8 = Activation("relu")(fun(conv8)) - conv10 = Conv3D(feature_num, last_kern_size, activation="sigmoid")(conv8) + conv10 = Conv3D(feature_num, (1, 1, 1), activation="sigmoid")(conv8) - if include_top: - model = Model(inputs=[inputs], outputs=[conv10]) - else: - model = Model(inputs=[inputs], outputs=[conv8]) + model = Model(inputs=[inputs], outputs=[conv10]) model.compile(optimizer=Adam(lr=lr), loss=lossfunc, metrics=["mse"]) return model - -def 
unet3d_big_regularized( +def unet3d_big( lossfunc, lr, input_dim, feature_num, num_cams, - batch_norm=False, - instance_norm=False, + norm_method="layer", include_top=True, last_kern_size=(1, 1, 1), gridsize=None, - regularizer=regularizers.l2(0.005), ): # Gridsize unused, necessary for argument consistency with other nets - if batch_norm and not instance_norm: - print("using batch normalization") - - def fun(inputs): - return BatchNormalization()(inputs) - - elif instance_norm: - print("using instance normalization") + fun = norm_fun(norm_method) - def fun(inputs): - return ops.InstanceNormalization()(inputs) - - else: - - def fun(inputs): - return inputs - - inputs = Input((None, None, None, input_dim * num_cams)) + inputs = Input((64, 64, 64, input_dim * num_cams)) conv1 = Conv3D(64, (3, 3, 3), padding="same")(inputs) conv1 = Activation("relu")(fun(conv1)) conv1 = Conv3D(64, (3, 3, 3), padding="same")(conv1) conv1 = Activation("relu")(fun(conv1)) pool1 = MaxPooling3D(pool_size=(2, 2, 2))(conv1) - conv2 = Conv3D(128, (3, 3, 3), padding="same", kernel_regularizer=regularizer)( - pool1 - ) + conv2 = Conv3D(128, (3, 3, 3), padding="same")(pool1) conv2 = Activation("relu")(fun(conv2)) - conv2 = Conv3D(128, (3, 3, 3), padding="same", kernel_regularizer=regularizer)( - conv2 - ) + conv2 = Conv3D(128, (3, 3, 3), padding="same")(conv2) conv2 = Activation("relu")(fun(conv2)) pool2 = MaxPooling3D(pool_size=(2, 2, 2))(conv2) - conv3 = Conv3D(256, (3, 3, 3), padding="same", kernel_regularizer=regularizer)( - pool2 - ) + conv3 = Conv3D(256, (3, 3, 3), padding="same")(pool2) conv3 = Activation("relu")(fun(conv3)) - conv3 = Conv3D(256, (3, 3, 3), padding="same", kernel_regularizer=regularizer)( - conv3 - ) + conv3 = Conv3D(256, (3, 3, 3), padding="same")(conv3) conv3 = Activation("relu")(fun(conv3)) pool3 = MaxPooling3D(pool_size=(2, 2, 2))(conv3) - conv4 = Conv3D(512, (3, 3, 3), padding="same", kernel_regularizer=regularizer)( - pool3 - ) + conv4 = Conv3D(512, (3, 3, 3), 
padding="same")(pool3) conv4 = Activation("relu")(fun(conv4)) - conv4 = Conv3D(512, (3, 3, 3), padding="same", kernel_regularizer=regularizer)( - conv4 - ) + conv4 = Conv3D(512, (3, 3, 3), padding="same")(conv4) conv4 = Activation("relu")(fun(conv4)) up6 = concatenate( @@ -1045,11 +464,9 @@ def fun(inputs): ], axis=4, ) - conv6 = Conv3D(256, (3, 3, 3), padding="same", kernel_regularizer=regularizer)(up6) + conv6 = Conv3D(256, (3, 3, 3), padding="same")(up6) conv6 = Activation("relu")(fun(conv6)) - conv6 = Conv3D(256, (3, 3, 3), padding="same", kernel_regularizer=regularizer)( - conv6 - ) + conv6 = Conv3D(256, (3, 3, 3), padding="same")(conv6) conv6 = Activation("relu")(fun(conv6)) up7 = concatenate( @@ -1059,11 +476,9 @@ def fun(inputs): ], axis=4, ) - conv7 = Conv3D(128, (3, 3, 3), padding="same", kernel_regularizer=regularizer)(up7) + conv7 = Conv3D(128, (3, 3, 3), padding="same")(up7) conv7 = Activation("relu")(fun(conv7)) - conv7 = Conv3D(128, (3, 3, 3), padding="same", kernel_regularizer=regularizer)( - conv7 - ) + conv7 = Conv3D(128, (3, 3, 3), padding="same")(conv7) conv7 = Activation("relu")(fun(conv7)) up8 = concatenate( @@ -1073,12 +488,15 @@ def fun(inputs): ], axis=4, ) - conv8 = Conv3D(64, (3, 3, 3), padding="same", kernel_regularizer=regularizer)(up8) + conv8 = Conv3D(64, (3, 3, 3), padding="same")(up8) conv8 = Activation("relu")(fun(conv8)) - conv8 = Conv3D(64, (3, 3, 3), padding="same", kernel_regularizer=regularizer)(conv8) + conv8 = Conv3D(64, (3, 3, 3), padding="same")(conv8) conv8 = Activation("relu")(fun(conv8)) - conv10 = Conv3D(feature_num, last_kern_size, activation="sigmoid")(conv8) + if "gaussian_cross_entropy_loss" in str(lossfunc): + conv10 = Conv3D(feature_num, last_kern_size, activation="linear")(conv8) + else: + conv10 = Conv3D(feature_num, last_kern_size, activation="sigmoid")(conv8) if include_top: model = Model(inputs=[inputs], outputs=[conv10]) @@ -1089,7 +507,6 @@ def fun(inputs): return model - def finetune_AVG( lossfunc, lr, @@ 
-1100,8 +517,7 @@ def finetune_AVG( new_n_channels_out, weightspath, num_layers_locked=2, - batch_norm=False, - instance_norm=False, + norm_method="layer", gridsize=(64, 64, 64), ): """ @@ -1112,6 +528,9 @@ def finetune_AVG( that will be locked (non-trainable) during fine-tuning. """ + prepend_log_msg = ".finetune_AVG " + + # model = netobj() model = unet3d_big_expectedvalue( lossfunc, lr, @@ -1119,25 +538,23 @@ def finetune_AVG( feature_num, num_cams, gridsize, - batch_norm, - instance_norm, + norm_method, include_top=False, ) - pre = model.get_weights() # Load weights model = renameLayers(model, weightspath) post = model.get_weights() - print("evaluating weight deltas in the first conv layer") + logging.info(prepend_log_msg + "evaluating weight deltas in the first conv layer") - print("pre-weights") - print(pre[1][0]) - print("post-weights") - print(post[1][0]) - print("delta:") - print(np.sum(pre[1][0] - post[1][0])) + logging.info(prepend_log_msg + "pre-weights") + logging.info(prepend_log_msg + str(pre[1][0])) + logging.info(prepend_log_msg + "post-weights") + logging.info(prepend_log_msg + str(post[1][0])) + logging.info(prepend_log_msg + "delta:") + logging.info(prepend_log_msg + str(np.sum(pre[1][0] - post[1][0]))) # Lock desired number of layers for layer in model.layers[:num_layers_locked]: @@ -1175,8 +592,7 @@ def finetune_fullmodel_AVG( new_n_channels_out, weightspath, num_layers_locked=2, - batch_norm=False, - instance_norm=False, + norm_method="layer", gridsize=(64, 64, 64), ): """ @@ -1195,8 +611,12 @@ def finetune_fullmodel_AVG( "slice_input": slice_input, "mask_nan_keep_loss": losses.mask_nan_keep_loss, "mask_nan_l1_loss": losses.mask_nan_l1_loss, + "log_cosh_loss": losses.log_cosh_loss, + "huber_loss": losses.huber_loss, "euclidean_distance_3D": losses.euclidean_distance_3D, "centered_euclidean_distance_3D": losses.centered_euclidean_distance_3D, + "gaussian_cross_entropy_loss": losses.gaussian_cross_entropy_loss, + "max_euclidean_distance": 
losses.max_euclidean_distance, }, compile=False, ) @@ -1230,6 +650,41 @@ def finetune_fullmodel_AVG( return model +def add_exposed_heatmap(model): + """ + Given a normal AVG model, add an extra output for supervision of the penultimate heatmap representation + """ + lay = [l.name for l in model.layers] + if "exposed_heatmap" not in lay: + model.layers[-1]._name = "final_output" + model.layers[-4]._name = "exposed_heatmap" + sigmoid_output = Activation(activations.sigmoid, + name="sigmoid_exposed_hetmap") + model = Model( + inputs=[model.layers[0].input, model.layers[-2].input], + outputs=[model.layers[-1].output, sigmoid_output(model.layers[-4].output)], + ) + + return model + +def remove_exposed_heatmap(model): + """ + Given an AVG+MAX model, removes the exposes heatmap output so that only the continuous AVG output is + generated. + + To fully support "continued" mode training, this should only be called during dannce-predict, and before + the p_max output is added to the network. + """ + + lay = [l.name for l in model.layers] + if "exposed_heatmap" in lay: + model = Model( + inputs=[model.get_layer("image_input").input, model.get_layer("grid_input").input], + outputs=[model.get_layer("final_output").output], + ) + + return model + def heatmap_reg(hmap, inds): """ Returns the value of the 3D hmap at inds @@ -1247,11 +702,12 @@ def heatmap_reg(hmap, inds): def add_heatmap_output(model): """ - Given at AVG model, splice on a new input (GT voxel index) and output (amplitude of normalized heatmap at that GT index) + Given an AVG model, splice on a new input (GT voxel index) and output (amplitude of normalized heatmap at that GT index) """ + prepend_log_msg = FILE_PATH + ".{} ".format(sys._getframe( ).f_code.co_name) lay = [l.name for l in model.layers] if "heatmap_output" not in lay: - print("Adding heatmap regularization arm") + logging.info(prepend_log_msg + "Adding heatmap regularization arm") norm_layer = "normed_map" image_input_layer = "image_input" 
grid_input_layer = "grid_input" @@ -1285,10 +741,12 @@ def remove_heatmap_output(model, params): """ Remove any heatmap regularizer layers so saved models can be run normally with dannce predict. """ + prepend_log_msg = FILE_PATH + ".{} ".format(sys._getframe( ).f_code.co_name) + lay = [l.name for l in model.layers] if "heatmap_output" in lay: - print("Removing heatmap regularization arm") + logging.info(prepend_log_msg + "Removing heatmap regularization arm") opt = Adam(lr=float(params["lr"])) mets = get_metrics(params) @@ -1321,7 +779,7 @@ def load_attributes_from_hdf5_group(group, name): """ if name not in group.attrs: group = group["model_weights"] - data = [n.decode("utf8") for n in group.attrs[name]] + data = [n if isinstance(n, str) else n.decode("utf8") for n in group.attrs[name]] return data @@ -1331,6 +789,7 @@ def renameLayers(model, weightspath): Rename layers in the model if we detect differences from the layer names in the weights file. """ + prepend_log_msg = FILE_PATH + ".{} ".format(sys._getframe( ).f_code.co_name) with h5py.File(weightspath, "r") as f: lnames = load_attributes_from_hdf5_group(f, "layer_names") @@ -1338,14 +797,20 @@ def renameLayers(model, weightspath): for (i, layer) in enumerate(model.layers): tf2_names.append(layer.name) if layer.name != lnames[i]: - print( + logging.info( prepend_log_msg + "Correcting mismatch in layer name, model: {}, weights: {}".format( layer.name, lnames[i] ) ) layer._name = lnames[i] - model.load_weights(weightspath, by_name=True) + import traceback + try: + model.load_weights(weightspath, by_name=True, skip_mismatch=True) + except ValueError: + logging.warn(prepend_log_msg + "Loading model weights failed") + logging.warn(prepend_log_msg + str(traceback.format_exc())) + # We need to change the model layer names back to the TF2 version otherwise the model # won't save @@ -1366,8 +831,7 @@ def finetune_MAX( new_n_channels_out, weightspath, num_layers_locked=2, - batch_norm=False, - instance_norm=False, + 
norm_method="layer", gridsize=(64, 64, 64), ): """ @@ -1381,8 +845,7 @@ def finetune_MAX( input_dim, feature_num, num_cams, - batch_norm, - instance_norm, + norm_method="layer", include_top=False, ) @@ -1402,113 +865,14 @@ def finetune_MAX( old_out = model(input_) # Add new output conv. layer - new_conv = Conv3D( - new_n_channels_out, new_last_kern_size, activation="sigmoid", padding="same" - )(old_out) - - model = Model(inputs=[input_], outputs=[new_conv]) - - return model - - -def finetune_MAX_IN_BN( - lossfunc, - lr, - input_dim, - feature_num, - num_cams, - new_last_kern_size, - new_n_channels_out, - weightspath, - num_layers_locked=2, - batch_norm=False, - instance_norm=False, - gridsize=(64, 64, 64), -): - """ - makes necessary calls to network constructors to set up nets for fine-tuning - the argmax version of the network. - """ - - model = unet3d_big_IN_BN( - lossfunc, - lr, - input_dim, - feature_num, - num_cams, - batch_norm, - instance_norm, - include_top=False, - ) - - # Load weights - model.load_weights(weightspath, by_name=True) - - # Lock desired number of layers - for layer in model.layers[:num_layers_locked]: - layer.trainable = False - - # Do forward pass all the way until end - input_ = Input((None, None, None, input_dim * num_cams)) - - old_out = model(input_) - - # Add new output conv. layer - new_conv = Conv3D( - new_n_channels_out, new_last_kern_size, activation="sigmoid", padding="same" - )(old_out) - - model = Model(inputs=[input_], outputs=[new_conv]) - - return model - - -def finetune_MAX_regularized( - lossfunc, - lr, - input_dim, - feature_num, - num_cams, - new_last_kern_size, - new_n_channels_out, - weightspath, - num_layers_locked=2, - batch_norm=False, - instance_norm=False, - gridsize=(64, 64, 64), -): - """ - makes necessary calls to network constructors to set up nets for fine-tuning - the argmax version of the network. 
- """ - - model = unet3d_big_regularized( - lossfunc, - lr, - input_dim, - feature_num, - num_cams, - batch_norm, - instance_norm, - include_top=False, - ) - - # Load weights - model.load_weights(weightspath, by_name=True) - - # Lock desired number of layers - for layer in model.layers[:num_layers_locked]: - layer.trainable = False - - # Do forward pass all the way until end - input_ = Input((None, None, None, input_dim * num_cams)) - - old_out = model(input_) - - # Add new output conv. layer - new_conv = Conv3D( + if "gaussian_cross_entropy_loss" in str(lossfunc): + new_conv = Conv3D( + new_n_channels_out, new_last_kern_size, activation="linear", padding="same" + )(old_out) + else: + new_conv = Conv3D( new_n_channels_out, new_last_kern_size, activation="sigmoid", padding="same" - )(old_out) + )(old_out) model = Model(inputs=[input_], outputs=[new_conv]) diff --git a/dannce/engine/ops.py b/dannce/engine/ops.py index 030b6d7..2753cf1 100755 --- a/dannce/engine/ops.py +++ b/dannce/engine/ops.py @@ -708,6 +708,7 @@ def proj_slice( # Compute Xc - points in camera frame Xc = tf.matrix_triangular_solve(K_, rs_grid, lower=False, name="KinvX") + # Leaving this print statement as is. 
There are no calls going into this print(K.int_shape(Xc)) # Define z values of samples along ray diff --git a/dannce/engine/processing.py b/dannce/engine/processing.py index 695cd77..5714b09 100755 --- a/dannce/engine/processing.py +++ b/dannce/engine/processing.py @@ -10,7 +10,7 @@ import scipy.io as sio from scipy.ndimage.filters import maximum_filter -from dannce.engine import io +from dannce.engine import io, nets, ops, losses import matplotlib import warnings @@ -20,56 +20,129 @@ import yaml import shutil import time +from typing import Dict +from tensorflow.keras.models import Model, load_model +import logging +FILE_PATH = "dannce.engine.preprocessing.py" -def initialize_vids(CONFIG_PARAMS, datadict, e, vids, pathonly=True): + +def write_debug( + params: Dict, + ims_train: np.ndarray, + ims_valid: np.ndarray, + y_train: np.ndarray, + model: Model, + trainData: bool = True, +): + """Factoring re-used debug output code. + + Args: + params (Dict): Parameters dictionary + ims_train (np.ndarray): Training images + ims_valid (np.ndarray): Validation images + y_train (np.ndarray): Training targets + model (Model): Model + trainData (bool, optional): If True use training data for debug. Defaults to True. 
+ """ + + prepend_log_msg = FILE_PATH + ".write_debug " + + def plot_out(imo, lo, imn): + plot_markers_2d(norm_im(imo), lo, newfig=False) + plt.gca().xaxis.set_major_locator(plt.NullLocator()) + plt.gca().yaxis.set_major_locator(plt.NullLocator()) + + imname = imn + plt.savefig(os.path.join(debugdir, imname), bbox_inches="tight", pad_inches=0) + + if params["debug"] and not params["multi_mode"]: + + if trainData: + outdir = "debug_im_out" + ims_out = ims_train + label_out = y_train + else: + outdir = "debug_im_out_valid" + ims_out = ims_valid + label_out = model.predict(ims_valid, batch_size=1) + + # Plot all training images and save + # create new directory for images if necessary + debugdir = os.path.join(params["com_train_dir"], outdir) + + logging.info(prepend_log_msg + "Saving debug images to: " + debugdir) + if not os.path.exists(debugdir): + os.makedirs(debugdir) + + plt.figure() + + for i in range(ims_out.shape[0]): + plt.cla() + if params["mirror"]: + for j in range(label_out.shape[-1]): + plt.cla() + plot_out( + ims_out[i], + label_out[i, :, :, j : j + 1], + str(i) + "_cam_" + str(j) + ".png", + ) + else: + plot_out(ims_out[i], label_out[i], str(i) + ".png") + + elif params["debug"] and params["multi_mode"]: + logging.info( prepend_log_msg + "Note: Cannot output debug information in COM multi-mode") + + +def initialize_vids(params, datadict, e, vids, pathonly=True): """ Initializes video path dictionaries for a training session. 
This is different than a predict session because it operates over a single animal ("experiment") at a time """ - for i in range(len(CONFIG_PARAMS["experiment"][e]["camnames"])): + for i in range(len(params["experiment"][e]["camnames"])): # Rather than opening all vids, only open what is needed based on the # maximum frame ID for this experiment and Camera flist = [] for key in datadict.keys(): if int(key.split("_")[0]) == e: flist.append( - datadict[key]["frames"][ - CONFIG_PARAMS["experiment"][e]["camnames"][i] - ] + datadict[key]["frames"][params["experiment"][e]["camnames"][i]] ) flist = max(flist) # For COM prediction, we don't prepend experiment IDs # So detect this case and act accordingly. - basecam = CONFIG_PARAMS["experiment"][e]["camnames"][i] + basecam = params["experiment"][e]["camnames"][i] if "_" in basecam: basecam = basecam.split("_")[1] - if CONFIG_PARAMS["vid_dir_flag"]: + if params["vid_dir_flag"]: addl = "" else: addl = os.listdir( - os.path.join(CONFIG_PARAMS["experiment"][e]["viddir"], basecam,) + os.path.join( + params["experiment"][e]["viddir"], + basecam, + ) )[0] r = generate_readers( - CONFIG_PARAMS["experiment"][e]["viddir"], + params["experiment"][e]["viddir"], os.path.join(basecam, addl), maxopt=flist, # Large enough to encompass all videos in directory. 
- extension=CONFIG_PARAMS["experiment"][e]["extension"], + extension=params["experiment"][e]["extension"], pathonly=pathonly, ) - if "_" in CONFIG_PARAMS["experiment"][e]["camnames"][i]: - vids[CONFIG_PARAMS["experiment"][e]["camnames"][i]] = {} + if "_" in params["experiment"][e]["camnames"][i]: + vids[params["experiment"][e]["camnames"][i]] = {} for key in r: - vids[CONFIG_PARAMS["experiment"][e]["camnames"][i]][ - str(e) + "_" + key - ] = r[key] + vids[params["experiment"][e]["camnames"][i]][str(e) + "_" + key] = r[ + key + ] else: - vids[CONFIG_PARAMS["experiment"][e]["camnames"][i]] = r + vids[params["experiment"][e]["camnames"][i]] = r return vids @@ -79,6 +152,16 @@ def infer_params(params, dannce_net, prediction): Some parameters that were previously specified in configs can just be inferred from others, thus relieving config bloat """ + + curr_dir = os.path.dirname(__file__) + os.environ["DANNCE_HOME"] = os.path.dirname(curr_dir) + + if not os.path.exists(os.path.dirname(params["log_dest"])): + os.makedirs(os.path.dirname(params["log_dest"])) + # Setting up logging + logging.basicConfig(filename=params["log_dest"], level=params["log_level"], + format='%(asctime)s %(levelname)s:%(message)s', datefmt='%m/%d/%Y %I:%M:%S %p') + # Grab the camnames from *dannce.mat if not in config if params["camnames"] is None: f = grab_predict_label3d_file() @@ -117,13 +200,14 @@ def infer_params(params, dannce_net, prediction): intermediate_folder = os.listdir(camdir) camdir = os.path.join(camdir, intermediate_folder[0]) video_files = os.listdir(camdir) - video_files = [f for f in video_files if ".mp4" in f] + video_files = [f for f in video_files if extension in f] video_files = sorted(video_files, key=lambda x: int(x.split(".")[0])) chunks[name] = np.sort([int(x.split(".")[0]) for x in video_files]) print_and_set(params, "chunks", chunks) - camf = os.path.join(viddir, video_files[0]) + firstvid = str(chunks[params["camnames"][0]][0]) + params["extension"] + camf = 
os.path.join(viddir, firstvid) # Infer n_channels_in from the video info v = imageio.get_reader(camf) @@ -135,6 +219,12 @@ def infer_params(params, dannce_net, prediction): print_and_set(params, "raw_im_h", im.shape[0]) print_and_set(params, "raw_im_w", im.shape[1]) + if dannce_net and params["avg+max"] is not None: + # To use avg+max, need to start with an AVG network + # In case the net type is not properly specified, set it here + print_and_set(params, "expval", True) + print_and_set(params, "net_type", "AVG") + if dannce_net and params["net"] is None: # Here we assume that if the network and expval are specified by the user # then there is no reason to infer anything. net + expval compatibility @@ -235,7 +325,9 @@ def infer_params(params, dannce_net, prediction): print_and_set(params, "vmax", params["vol_size"] / 2) if params["heatmap_reg"] and not params["expval"]: - raise Exception("Heatmap regularization enabled only for AVG networks -- you are using MAX") + raise Exception( + "Heatmap regularization enabled only for AVG networks -- you are using MAX" + ) if params["n_rand_views"] == "None": print_and_set(params, "n_rand_views", None) @@ -249,13 +341,32 @@ def infer_params(params, dannce_net, prediction): ) warnings.warn(msg) + # Handle COM network name backwards compatibility + if params["net"].lower() == "unet2d_fullbn": + print_and_set(params, "norm_method", "batch") + elif params["net"] == "unet2d_fullIN": + print_and_set(params, "norm_method", "layer") + + if not dannce_net: + print_and_set(params, "net", "unet2d_full") + + # set GPU ID + # Because of issues with the Duke Compute Cluster, the default behavior is + # to not set any gpu_id and let --gres handle it. But for local workstatioins + # where specific GPUs need to be targeted, there is still an option to do so. + # Addign the CUDA_VISIBLE_DEVICES assignment here makes this extend to all downstream + # types of calls to interface.py (i.e. dannce-train, dannce-predict, etc..) 
+ if params["gpu_id"] is not None: + os.environ["CUDA_VISIBLE_DEVICES"] = params["gpu_id"] + return params def print_and_set(params, varname, value): # Should add new values to params in place, no need to return + prepend_log_msg = FILE_PATH + ".print_and_set " params[varname] = value - print("Setting {} to {}.".format(varname, params[varname])) + logging.info(prepend_log_msg + "Setting {} to {}.".format(varname, params[varname])) def check_config(params, dannce_net, prediction): @@ -336,30 +447,33 @@ def check_net_expval(params): raise Exception("expval is set to False but you are using an AVG network") -def copy_config(RESULTSDIR, main_config, io_config): +def copy_config(results_dir, main_config, io_config): """ Copies config files into the results directory, and creates results directory if necessary """ - print("Saving results to: {}".format(RESULTSDIR)) + # Leaving this print statement as is since there does not seem to be any references going out or into it + print("Saving results to: {}".format(results_dir)) - if not os.path.exists(RESULTSDIR): - os.makedirs(RESULTSDIR) + if not os.path.exists(results_dir): + os.makedirs(results_dir) mconfig = os.path.join( - RESULTSDIR, "copy_main_config_" + main_config.split(os.sep)[-1] + results_dir, "copy_main_config_" + main_config.split(os.sep)[-1] ) - dconfig = os.path.join(RESULTSDIR, "copy_io_config_" + io_config.split(os.sep)[-1]) + dconfig = os.path.join(results_dir, "copy_io_config_" + io_config.split(os.sep)[-1]) shutil.copyfile(main_config, mconfig) shutil.copyfile(io_config, dconfig) -def make_data_splits(samples, params, RESULTSDIR, num_experiments): +def make_data_splits(samples, params, results_dir, num_experiments): """ Make train/validation splits from list of samples, or load in a specific list of sampleIDs if desired. """ + # Setup prepend for log messages + prepend_log_msg = FILE_PATH + ".make_data_split " # TODO: Switch to .mat from .pickle so that these lists are easier to read # and change. 
@@ -386,7 +500,9 @@ def make_data_splits(samples, params, RESULTSDIR, num_experiments): ) valid_inds = list(np.sort(valid_inds)) - train_inds = list(set(all_inds) - set(all_valid_inds))#[i for i in all_inds if i not in all_valid_inds] + train_inds = list( + set(all_inds) - set(all_valid_inds) + ) # [i for i in all_inds if i not in all_valid_inds] elif params["num_validation_per_exp"] > 0: # if 0, do not perform validation for e in range(num_experiments): tinds = [ @@ -408,41 +524,48 @@ def make_data_splits(samples, params, RESULTSDIR, num_experiments): train_samples = samples[train_inds] train_inds = [] if params["valid_exp"] is not None: - train_expts = [f for f in range(num_experiments) if f not in params["valid_exp"]] + train_expts = [ + f for f in range(num_experiments) if f not in params["valid_exp"] + ] else: train_expts = np.arange(num_experiments) - print("TRAIN EXPTS: {}".format(train_expts)) + logging.info(prepend_log_msg + "TRAIN EXPTS: {}".format(train_expts)) if params["num_train_per_exp"] is not None: # Then sample randomly without replacement from training sampleIDs for e in train_expts: tinds = [ - i for i in range(len(train_samples)) if int(train_samples[i].split("_")[0]) == e - ] - print(e) - print(len(tinds)) + i + for i in range(len(train_samples)) + if int(train_samples[i].split("_")[0]) == e + ] + logging.debug(e) + logging.debug(len(tinds)) train_inds = train_inds + list( - np.random.choice(tinds, (params["num_train_per_exp"],), replace=False) + np.random.choice( + tinds, (params["num_train_per_exp"],), replace=False + ) ) train_inds = list(np.sort(train_inds)) else: train_inds = np.arange(len(train_samples)) - - partition["valid_sampleIDs"] = samples[valid_inds] partition["train_sampleIDs"] = train_samples[train_inds] # Save train/val inds - with open(os.path.join(RESULTSDIR, "val_samples.pickle"), "wb") as f: + with open(os.path.join(results_dir, "val_samples.pickle"), "wb") as f: cPickle.dump(partition["valid_sampleIDs"], f) - with 
open(os.path.join(RESULTSDIR, "train_samples.pickle"), "wb") as f: + with open(os.path.join(results_dir, "train_samples.pickle"), "wb") as f: cPickle.dump(partition["train_sampleIDs"], f) else: # Load validation samples from elsewhere - with open(os.path.join(params["load_valid"], "val_samples.pickle"), "rb",) as f: + with open( + os.path.join(params["load_valid"], "val_samples.pickle"), + "rb", + ) as f: partition["valid_sampleIDs"] = cPickle.load(f) partition["train_sampleIDs"] = [ f for f in samples if f not in partition["valid_sampleIDs"] @@ -454,32 +577,98 @@ def make_data_splits(samples, params, RESULTSDIR, num_experiments): return partition + +def __initAvgMax(t, g, o, params): + """ + Helper function for creating 3D targets + """ + gridsize = tuple([params["nvox"]] * 3) + g = np.reshape( + g, + (-1, *gridsize, 3), + ) + + for i in range(o.shape[0]): + for j in range(o.shape[-1]): + o[i, ..., j] = np.exp( + -( + (g[i, ..., 1] - t[i, 1, j]) ** 2 + + (g[i, ..., 0] - t[i, 0, j]) ** 2 + + (g[i, ..., 2] - t[i, 2, j]) ** 2 + ) + / (2 * params["sigma"] ** 2) + ) + + return o + + +def initAvgMax(y_train, y_valid, Xtg, Xvg, params): + """ + Converts 3D coordinate targets into 3D volumes, for AVG+MAX training + """ + gridsize = tuple([params["nvox"]] * 3) + y_train_aux = np.zeros( + ( + y_train.shape[0], + *gridsize, + params["new_n_channels_out"], + ), + dtype="float32", + ) + + y_valid_aux = np.zeros( + ( + y_valid.shape[0], + *gridsize, + params["new_n_channels_out"], + ), + dtype="float32", + ) + + return ( + __initAvgMax(y_train, Xtg, y_train_aux, params), + __initAvgMax(y_valid, Xvg, y_valid_aux, params), + ) + + def remove_samples_npy(npydir, samples, params): """ Remove any samples from sample list if they do not have corresponding volumes in the image or grid directories """ + prepend_log_msg = FILE_PATH + ".remove_samples_npy " # image_volumes # grid_volumes samps = [] for e in npydir.keys(): - imvol = os.path.join(npydir[e], 'image_volumes') - gridvol = 
os.path.join(npydir[e], 'grid_volumes') + imvol = os.path.join(npydir[e], "image_volumes") + gridvol = os.path.join(npydir[e], "grid_volumes") ims = os.listdir(imvol) grids = os.listdir(gridvol) - npysamps = ['0_' + f.split("_")[1] + '.npy' for f in samples if int(f.split("_")[0]) == e] + npysamps = [ + "0_" + f.split("_")[1] + ".npy" + for f in samples + if int(f.split("_")[0]) == e + ] goodsamps = list(set(npysamps) & set(ims) & set(grids)) - samps = samps + [str(e) + '_' + f.split("_")[1].split(".")[0] for f in goodsamps] + samps = samps + [ + str(e) + "_" + f.split("_")[1].split(".")[0] for f in goodsamps + ] sampdiff = len(npysamps) - len(goodsamps) - #import pdb; pdb.set_trace() - print("Removed {} samples from {} because corresponding image or grid files could not be found".format(sampdiff, params["experiment"][e]["label3d_file"])) + # import pdb; pdb.set_trace() + logging.info(prepend_log_msg + + "Removed {} samples from {} because corresponding image or grid files could not be found".format( + sampdiff, params["experiment"][e]["label3d_file"] + ) + ) return np.array(samps) + def rename_weights(traindir, kkey, mon): """ At the end of DANNCe or COM training, rename the best weights file with the epoch # @@ -490,12 +679,19 @@ def rename_weights(traindir, kkey, mon): e = r["epoch"] q = r[mon] minq = np.min(q) - beste = e[np.argmin(q)] + if e.size == 1: + beste = e + else: + beste = e[np.argmin(q)] newname = "weights." 
+ str(int(beste)) + "-" + "{:.5f}".format(minq) + ".hdf5" + newname_out = os.path.join(traindir, newname) + os.rename(os.path.join(traindir, kkey), newname_out) - os.rename(os.path.join(traindir, kkey), os.path.join(traindir, newname)) + outdict = {mon: minq, + 'beste': beste} + return newname_out, outdict def make_paths_safe(params): """Given a parameter dictionary, loops through the keys and replaces any \\ or / with os.sep @@ -551,6 +747,33 @@ def make_none_safe(pdict): return pdict return pdict +def save_pred_targets(best_pth, model, save_callback, bestdict, params): + + # set the callback to be the current model, which will eb the final model + save_callback.set_model(model) + + # then manually call the callback to make predictiosn and save + save_callback.on_epoch_end(epoch=save_callback.total_epochs-1, + logs={'val_loss': 1e10}) + + model = load_model( + best_pth, + custom_objects={ + "ops": ops, + "slice_input": nets.slice_input, + "mask_nan_keep_loss": losses.mask_nan_keep_loss, + "mask_nan_l1_loss": losses.mask_nan_l1_loss, + "euclidean_distance_3D": losses.euclidean_distance_3D, + "centered_euclidean_distance_3D": losses.centered_euclidean_distance_3D, + "gaussian_cross_entropy_loss": losses.gaussian_cross_entropy_loss, + }, + ) + + model = nets.remove_heatmap_output(model, params) + save_callback.set_model(model) + + save_callback.on_epoch_end(epoch=bestdict['beste'], + logs=bestdict) def prepare_save_metadata(params): """ @@ -583,13 +806,20 @@ def save_COM_dannce_mat(params, com3d, sampleID): Instead of saving 3D COM to com3d.mat, save it into the dannce.mat file, which streamlines subsequent dannce access. 
""" + # Setup Logging + if not os.path.exists(os.path.dirname(params["log_dest"])): + os.makedirs(os.path.dirname(params["log_dest"])) + logging.basicConfig(filename=params["log_dest"], level=params["log_level"], + format='%(asctime)s %(levelname)s:%(message)s', datefmt='%m/%d/%Y %I:%M:%S %p') + prepend_log_msg = FILE_PATH + ".save_COM_dannce_mat " + com = {} com["com3d"] = com3d com["sampleID"] = sampleID com["metadata"] = prepare_save_metadata(params) # Open dannce.mat file, add com and re-save - print("Saving COM predictions to " + params["label3d_file"]) + logging.info(prepend_log_msg + "Saving COM predictions to " + params["label3d_file"]) rr = sio.loadmat(params["label3d_file"]) # For safety, save old file to temp and delete it at the end sio.savemat(params["label3d_file"] + ".temp", rr) @@ -600,14 +830,17 @@ def save_COM_dannce_mat(params, com3d, sampleID): def save_COM_checkpoint( - save_data, RESULTSDIR, datadict_, cameras, params, file_name="com3d" + save_data, results_dir, datadict_, cameras, params, file_name="com3d" ): """ Saves COM pickle and matfiles """ + # Set Prepend log message + prepend_log_msg = FILE_PATH + ".save_COM_dannce_mat " + # Save undistorted 2D COMs and their 3D triangulations - f = open(os.path.join(RESULTSDIR, file_name + ".pickle"), "wb") + f = open(os.path.join(results_dir, file_name + ".pickle"), "wb") cPickle.dump(save_data, f) f.close() @@ -623,7 +856,7 @@ def save_COM_checkpoint( else: linking_method = "euclidean" _, com3d_dict = serve_data_DANNCE.prepare_COM_multi_instance( - os.path.join(RESULTSDIR, file_name + ".pickle"), + os.path.join(results_dir, file_name + ".pickle"), datadict_save, comthresh=0, weighted=False, @@ -633,7 +866,7 @@ def save_COM_checkpoint( else: prepare_func = serve_data_DANNCE.prepare_COM _, com3d_dict = serve_data_DANNCE.prepare_COM( - os.path.join(RESULTSDIR, file_name + ".pickle"), + os.path.join(results_dir, file_name + ".pickle"), datadict_save, comthresh=0, weighted=False, @@ -641,8 +874,8 @@ def 
save_COM_checkpoint( method="median", ) - cfilename = os.path.join(RESULTSDIR, file_name + ".mat") - print("Saving 3D COM to {}".format(cfilename)) + cfilename = os.path.join(results_dir, file_name + ".mat") + logging.info(prepend_log_msg + "Saving 3D COM to {}".format(cfilename)) samples_keys = list(com3d_dict.keys()) if params["n_instances"] > 1: @@ -674,6 +907,7 @@ def inherit_config(child, parent, keys): for key in keys: if key not in child.keys(): child[key] = parent[key] + # Leaving this print statement as is, since params are not built at this point print( "{} not found in io.yaml file, falling back to main config".format(key) ) @@ -685,6 +919,8 @@ def grab_predict_label3d_file(defaultdir=""): """ Finds the paths to the training experiment yaml files. """ + # Set Logging prepend message + prepend_log_msg = FILE_PATH + ".grab_predict_label3d_file " def_ep = os.path.join(".", defaultdir) label3d_files = os.listdir(def_ep) label3d_files = [ @@ -694,7 +930,7 @@ def grab_predict_label3d_file(defaultdir=""): if len(label3d_files) == 0: raise Exception("Did not find any *dannce.mat file in {}".format(def_ep)) - print("Using the following *dannce.mat files: {}".format(label3d_files[0])) + logging.debug(prepend_log_msg + "Using the following *dannce.mat files: {}".format(label3d_files[0])) return label3d_files[0] @@ -703,7 +939,10 @@ def load_expdict(params, e, expdict, _DEFAULT_VIDDIR): Load in camnames and video directories and label3d files for a single experiment during training. 
""" - _DEFAULT_NPY_DIR = 'npy_volumes' + # Set Logging prepend msg + prepend_log_msg = FILE_PATH + ".load_expdict " + + _DEFAULT_NPY_DIR = "npy_volumes" exp = params.copy() exp = make_paths_safe(exp) exp["label3d_file"] = expdict["label3d_file"] @@ -715,14 +954,14 @@ def load_expdict(params, e, expdict, _DEFAULT_VIDDIR): exp["viddir"] = os.path.join(exp["base_exp_folder"], _DEFAULT_VIDDIR) else: exp["viddir"] = expdict["viddir"] - print("Experiment {} using videos in {}".format(e, exp["viddir"])) + logging.debug(prepend_log_msg + "Experiment {} using videos in {}".format(e, exp["viddir"])) l3d_camnames = io.load_camnames(expdict["label3d_file"]) if "camnames" in expdict: exp["camnames"] = expdict["camnames"] elif l3d_camnames is not None: exp["camnames"] = l3d_camnames - print("Experiment {} using camnames: {}".format(e, exp["camnames"])) + logging.debug(prepend_log_msg + "Experiment {} using camnames: {}".format(e, exp["camnames"])) # Use the camnames to find the chunks for each video chunks = {} @@ -740,7 +979,7 @@ def load_expdict(params, e, expdict, _DEFAULT_VIDDIR): [int(x.split(".")[0]) for x in video_files] ) exp["chunks"] = chunks - print(chunks) + logging.debug(prepend_log_msg + str(chunks)) # For npy volume training if params["use_npy"]: @@ -866,6 +1105,9 @@ def generate_readers( viddir, camname, minopt=0, maxopt=300000, pathonly=False, extension=".mp4" ): """Open all mp4 objects with imageio, and return them in a dictionary.""" + # Set prepend message + prepend_log_msg = FILE_PATH + ".generate_readers " + out = {} mp4files = [ os.path.join(camname, f) @@ -892,7 +1134,7 @@ def generate_readers( if pathonly: out[mp4files_scrub[i]] = os.path.join(viddir, mp4files[i]) else: - print( + logging.info( "NOTE: Ignoring {} files numbered above {}".format(extensions, maxopt) ) out[mp4files_scrub[i]] = imageio.get_reader( @@ -937,14 +1179,14 @@ def cropcom(im, com, size=512): return out -def write_config(resultsdir, configdict, message, filename="modelconfig.cfg"): 
+def write_config(results_dir, configdict, message, filename="modelconfig.cfg"): """Write a dictionary of k-v pairs to file. A much more customizable configuration writer. Accepts a dictionary of key-value pairs and just writes them all to file, together with a custom message """ - f = open(resultsdir + filename, "w") + f = open(results_dir + filename, "w") for key in configdict: f.write("{}: {}\n".format(key, configdict[key])) f.write("message:" + message) @@ -956,9 +1198,9 @@ def read_config(filename): :param filename: Path to configuration file. """ with open(filename) as f: - CONFIG_PARAMS = yaml.safe_load(f) + params = yaml.safe_load(f) - return CONFIG_PARAMS + return params def plot_markers_2d(im, markers, newfig=True): @@ -1161,7 +1403,8 @@ def savedata_expval( } if write and data is None: sio.savemat( - fname.split(".pickle")[0] + ".mat", sdict, + fname.split(".pickle")[0] + ".mat", + sdict, ) elif write and data is not None: sio.savemat(fname, sdict) @@ -1213,11 +1456,13 @@ def savedata_tomat( if tcoord and tcoord_scale: t_coords = vmin + t_coords * vsize + vsize / 2 + # import pdb; pdb.set_trace() if addCOM is not None: # We use the passed comdict to add back in the com, this is useful # if one wnats to bootstrap on these values for COMnet or otherwise for i in range(len(sID)): - pred_out_world[i] = pred_out_world[i] + addCOM[int(sID)][:, np.newaxis] + # pred_out_world[i] = pred_out_world[i] + addCOM[int(sID)][:, np.newaxis] + pred_out_world[i] = pred_out_world[i] + addCOM['0_'+str(int(sID[i]))][:, np.newaxis] sdict = { "pred": pred_out_world, @@ -1229,11 +1474,13 @@ def savedata_tomat( } if write and data is None: sio.savemat( - fname.split(".pickle")[0] + ".mat", sdict, + fname.split(".pickle")[0] + ".mat", + sdict, ) elif write and data is not None: sio.savemat( - fname, sdict, + fname, + sdict, ) return pred_out_world, t_coords, p_max, log_p_max, sID @@ -1265,54 +1512,19 @@ def spatial_entropy(map_): return -1 * np.sum(map_ * np.log(map_)) -def 
dupe_params(exp, dupes, n_views): - """ - When The number of views (n_views) required - as input to the network is greater than the - number of actual cameras (e.g. when trying to - fine-tune a 6-camera network on data from a - 2-camera system), automatically duplicate necessary - parameters to match the required n_views. - """ - - for d in dupes: - val = exp[d] - if n_views % len(val) == 0: - num_reps = n_views // len(val) - exp[d] = val * num_reps - - else: - prompt = "The length of the {} list must divide evenly into {}. Duplicate a subset of the views starting from the first camera (y/n)?".format( - d, n_views - ) - val_in = input(prompt) - if val_in == "y": - num_reps = n_views // len(val) - num_extra = n_views % len(val) - duped = val * num_reps - for i in range(num_extra): - duped.append(duped[i]) - print("Duping {}. Changed from {} to {}".format(d, val, duped)) - exp[d] = duped - else: - raise Exception( - "The length of the {} list must divide evenly into {}. Exiting".format( - d, n_views - ) - ) - - return exp - def write_npy(uri, gen): """ Creates a new image folder and grid folder at the uri and uses the generator to generate samples and save them as npy files """ - imdir = os.path.join(uri, 'image_volumes') + # Set log prepend msg + prepend_log_msg = FILE_PATH + ".write_npy " + + imdir = os.path.join(uri, "image_volumes") if not os.path.exists(imdir): os.makedirs(imdir) - griddir = os.path.join(uri, 'grid_volumes') + griddir = os.path.join(uri, "grid_volumes") if not os.path.exists(griddir): os.makedirs(griddir) @@ -1328,15 +1540,16 @@ def write_npy(uri, gen): bs = gen.batch_size for i in range(len(gen)): if i % 1000 == 0: - print(i) + logging.debug(i) # Generate batch bch = gen.__getitem__(i) # loop over all examples in batch and save volume for j in range(bs): - #get the frame name / unique ID - fname = gen.list_IDs[gen.indexes[i*bs + j]] + # get the frame name / unique ID + fname = gen.list_IDs[gen.indexes[i * bs + j]] + + # and save + 
logging.debug(fname) + np.save(os.path.join(imdir, fname + ".npy"), bch[0][0][j].astype("uint8")) + np.save(os.path.join(griddir, fname + ".npy"), bch[0][1][j]) - #and save - print(fname) - np.save(os.path.join(imdir, fname + '.npy'), bch[0][0][j].astype('uint8')) - np.save(os.path.join(griddir, fname + '.npy'), bch[0][1][j]) \ No newline at end of file diff --git a/dannce/engine/serve_data_DANNCE.py b/dannce/engine/serve_data_DANNCE.py index 12fadf1..1443f2d 100755 --- a/dannce/engine/serve_data_DANNCE.py +++ b/dannce/engine/serve_data_DANNCE.py @@ -9,13 +9,15 @@ from scipy.ndimage import median_filter import warnings from copy import deepcopy +import logging + +FILE_PATH = "dannce.engine.serve_data_DANNCE" def prepare_data( - CONFIG_PARAMS, + params, com_flag=True, - nanflag=True, - multimode=False, + nanflag=False, prediction=False, return_cammat=False, ): @@ -28,29 +30,36 @@ def prepare_data( multimode: when this True, we output all 2D markers AND their 2D COM """ + # Set Log Prepend Msg + prepend_log_msg = FILE_PATH + ".prepare_data " + if prediction: - labels = load_sync(CONFIG_PARAMS["label3d_file"]) + labels = load_sync(params["label3d_file"]) nFrames = np.max(labels[0]["data_frame"].shape) - nKeypoints = CONFIG_PARAMS["n_channels_out"] - if "new_n_channels_out" in CONFIG_PARAMS.keys(): - if CONFIG_PARAMS["new_n_channels_out"] is not None: - nKeypoints = CONFIG_PARAMS["new_n_channels_out"] + nKeypoints = params["n_channels_out"] + if "new_n_channels_out" in params.keys(): + if params["new_n_channels_out"] is not None: + nKeypoints = params["new_n_channels_out"] for i in range(len(labels)): labels[i]["data_3d"] = np.zeros((nFrames, 3 * nKeypoints)) labels[i]["data_2d"] = np.zeros((nFrames, 2 * nKeypoints)) # import pdb # pdb.set_trace() else: - print(CONFIG_PARAMS["label3d_file"]) - labels = load_labels(CONFIG_PARAMS["label3d_file"]) + logging.info(prepend_log_msg + params["label3d_file"]) + labels = load_labels(params["label3d_file"]) - params = 
load_camera_params(CONFIG_PARAMS["label3d_file"]) - cameras = {name: params[i] for i, name in enumerate(CONFIG_PARAMS["camnames"])} + camera_params = load_camera_params(params["label3d_file"]) + cameras = {name: camera_params[i] for i, name in enumerate(params["camnames"])} - if "m" in params[0] and not CONFIG_PARAMS["mirror"]: - warnings.warn("found mirror field in camera params, but the network is not set to run in mirror mode") - elif CONFIG_PARAMS["mirror"] and "m" not in params[0]: - raise Exception("network set to run in mirror mode, but cannot find mirror (m) field in camera params") + if "m" in camera_params[0] and not params["mirror"]: + warnings.warn( + "found mirror field in camera params, but the network is not set to run in mirror mode" + ) + elif params["mirror"] and "m" not in camera_params[0]: + raise Exception( + "network set to run in mirror mode, but cannot find mirror (m) field in camera params" + ) samples = np.squeeze(labels[0]["data_sampleID"]) @@ -63,36 +72,28 @@ def prepare_data( # Collect data labels and matched frames info. We will keep the 2d labels # here just because we could in theory use this for training later. 
# No need to collect 3d data but it useful for checking predictions - if len(CONFIG_PARAMS["camnames"]) != len(labels): + if len(params["camnames"]) != len(labels): raise Exception("need an entry in label3d_file for every camera") framedict = {} ddict = {} for i, label in enumerate(labels): - framedict[CONFIG_PARAMS["camnames"][i]] = np.squeeze( - label["data_frame"] - ) + framedict[params["camnames"][i]] = np.squeeze(label["data_frame"]) data = label["data_2d"] # reshape data_2d so that it is shape (time points, 2, 20) - data = np.transpose( - np.reshape(data, [data.shape[0], -1, 2]), [0, 2, 1] - ) + data = np.transpose(np.reshape(data, [data.shape[0], -1, 2]), [0, 2, 1]) # Correct for Matlab "1" indexing data = data - 1 - if CONFIG_PARAMS["mirror"] and cameras[CONFIG_PARAMS["camnames"][i]]["m"] == 1: + if params["mirror"] and cameras[params["camnames"][i]]["m"] == 1: # then we need to flip the 2D coords -- for now assuemd only horizontal flipping - data[:, 1] = CONFIG_PARAMS["raw_im_h"] - data[:, 1] - 1 + data[:, 1] = params["raw_im_h"] - data[:, 1] - 1 - if multimode: - print( - "Entering multi-mode with {} + 1 targets".format( - data.shape[-1] - ) - ) + if params["multi_mode"]: + logging.debug(prepend_log_msg + "Entering multi-mode with {} + 1 targets".format(data.shape[-1])) if nanflag: dcom = np.mean(data, axis=2, keepdims=True) else: @@ -100,43 +101,39 @@ def prepare_data( data = np.concatenate((data, dcom), axis=-1) elif com_flag: # Convert to COM only if not already - if len(data.shape) == 3 and CONFIG_PARAMS["n_instances"] == 1: + if len(data.shape) == 3 and params["n_instances"] == 1: if nanflag: data = np.mean(data, axis=2) else: data = np.nanmean(data, axis=2) data = data[:, :, np.newaxis] - ddict[CONFIG_PARAMS["camnames"][i]] = data + ddict[params["camnames"][i]] = data data_3d = labels[0]["data_3d"] - data_3d = np.transpose( - np.reshape(data_3d, [data_3d.shape[0], -1, 3]), [0, 2, 1] - ) + data_3d = np.transpose(np.reshape(data_3d, [data_3d.shape[0], 
-1, 3]), [0, 2, 1]) - #If specific markers are set to be excluded, set them to NaN here. - if CONFIG_PARAMS["drop_landmark"] is not None and not prediction: - print("Setting landmarks {} to NaN. These landmarks will not be included in loss or metric evaluations".format(CONFIG_PARAMS["drop_landmark"])) - data_3d[:, :, CONFIG_PARAMS["drop_landmark"]] = np.nan + # If specific markers are set to be excluded, set them to NaN here. + if params["drop_landmark"] is not None and not prediction: + logging.debug( prepend_log_msg + + "Setting landmarks {} to NaN. These landmarks will not be included in loss or metric evaluations".format( + params["drop_landmark"] + ) + ) + data_3d[:, :, params["drop_landmark"]] = np.nan datadict = {} datadict_3d = {} for i in range(len(samples)): frames = {} data = {} - for j in range(len(CONFIG_PARAMS["camnames"])): - frames[CONFIG_PARAMS["camnames"][j]] = framedict[ - CONFIG_PARAMS["camnames"][j] - ][i] - data[CONFIG_PARAMS["camnames"][j]] = ddict[ - CONFIG_PARAMS["camnames"][j] - ][i] + for j in range(len(params["camnames"])): + frames[params["camnames"][j]] = framedict[params["camnames"][j]][i] + data[params["camnames"][j]] = ddict[params["camnames"][j]][i] datadict[samples[i]] = {"data": data, "frames": frames} datadict_3d[samples[i]] = data_3d[i] - params = load_camera_params(CONFIG_PARAMS["label3d_file"]) - cameras = { - name: params[i] for i, name in enumerate(CONFIG_PARAMS["camnames"]) - } + camera_params = load_camera_params(params["label3d_file"]) + cameras = {name: camera_params[i] for i, name in enumerate(params["camnames"])} if return_cammat: camera_mats = { name: ops.camera_matrix(cam["K"], cam["r"], cam["t"]) @@ -174,9 +171,7 @@ def prepare_COM_multi_instance( firstkey = list(com.keys())[0] - camnames = np.array( - list(datadict[list(datadict.keys())[0]]["data"].keys()) - ) + camnames = np.array(list(datadict[list(datadict.keys())[0]]["data"].keys())) # Because I repeat cameras to fill up 6 camera quota, I need grab only # the 
unique names @@ -254,22 +249,22 @@ def prepare_COM( detected by the generator to return nans such that bad camera frames do not get averaged in to image data """ + # Set log prepend msg + prepend_log_msg = FILE_PATH + ".prepare_COM " with open(comfile, "rb") as f: com = cPickle.load(f) com3d_dict = {} if method == "mean": - print("using mean to get 3D COM") + logging.debug(prepend_log_msg + "using mean to get 3D COM") elif method == "median": - print("using median to get 3D COM") + logging.debug(prepend_log_msg + "using median to get 3D COM") firstkey = list(com.keys())[0] - camnames = np.array( - list(datadict[list(datadict.keys())[0]]["data"].keys()) - ) + camnames = np.array(list(datadict[list(datadict.keys())[0]]["data"].keys())) # Because I repeat cameras to fill up 6 camera quota, I need grab only # the unique names @@ -290,18 +285,13 @@ def prepare_COM( if key in datadict.keys(): for k in range(len(camnames)): - datadict[key]["data"][camnames[k]] = this_com[camnames[k]][ - "COM" - ][:, np.newaxis].astype("float32") + datadict[key]["data"][camnames[k]] = this_com[camnames[k]]["COM"][ + :, np.newaxis + ].astype("float32") # Quick & dirty way to dynamically scale the confidence map output - if ( - conf_rescale is not None - and camnames[k] in conf_rescale.keys() - ): - this_com[camnames[k]]["pred_max"] *= conf_rescale[ - camnames[k] - ] + if conf_rescale is not None and camnames[k] in conf_rescale.keys(): + this_com[camnames[k]]["pred_max"] *= conf_rescale[camnames[k]] # then, set to nan if this_com[camnames[k]]["pred_max"] <= comthresh: @@ -413,6 +403,7 @@ def remove_samples(s, d3d, mode="clean", auxmode=None): sample_mask[i] = 0 if auxmode == "JDM52d2": + # Leaving this print statement as is, since there are no calls to this module print("removing bad JDM52d2 frames") for i in range(len(s)): if s[i] >= 20000 and s[i] <= 32000: @@ -468,20 +459,16 @@ def add_experiment( datadict_out[str(experiment) + "_" + str(int(key))] = datadict_in[key] for key in 
datadict_3d_in.keys(): - datadict_3d_out[ - str(experiment) + "_" + str(int(key)) - ] = datadict_3d_in[key] + datadict_3d_out[str(experiment) + "_" + str(int(key))] = datadict_3d_in[key] for key in com3d_dict_in.keys(): - com3d_dict_out[str(experiment) + "_" + str(int(key))] = com3d_dict_in[ - key - ] + com3d_dict_out[str(experiment) + "_" + str(int(key))] = com3d_dict_in[key] return samples_out, datadict_out, datadict_3d_out, com3d_dict_out def prepend_experiment( - CONFIG_PARAMS, + params, datadict, num_experiments, camnames, @@ -504,20 +491,18 @@ def prepend_experiment( cameras_[e][str(e) + "_" + key] = cameras[e][key] camnames[e] = [str(e) + "_" + f for f in camnames[e]] - CONFIG_PARAMS["experiment"][e]["camnames"] = camnames[e] + params["experiment"][e]["camnames"] = camnames[e] for n_cam, name in enumerate(camnames[e]): # print(name) - # print(CONFIG_PARAMS["experiment"][e]["chunks"][name]) + # print(params["experiment"][e]["chunks"][name]) if dannce_prediction: - new_chunks[name] = CONFIG_PARAMS["experiment"][e]["chunks"][ + new_chunks[name] = params["experiment"][e]["chunks"][ prev_camnames[e][n_cam] ] else: - new_chunks[name] = CONFIG_PARAMS["experiment"][e]["chunks"][ - name - ] - CONFIG_PARAMS["experiment"][e]["chunks"] = new_chunks + new_chunks[name] = params["experiment"][e]["chunks"][name] + params["experiment"][e]["chunks"] = new_chunks for key in datadict.keys(): enum = key.split("_")[0] @@ -525,11 +510,7 @@ def prepend_experiment( datadict_[key]["data"] = {} datadict_[key]["frames"] = {} for key_ in datadict[key]["data"]: - datadict_[key]["data"][enum + "_" + key_] = datadict[key]["data"][ - key_ - ] - datadict_[key]["frames"][enum + "_" + key_] = datadict[key][ - "frames" - ][key_] - - return cameras_, datadict_, CONFIG_PARAMS + datadict_[key]["data"][enum + "_" + key_] = datadict[key]["data"][key_] + datadict_[key]["frames"][enum + "_" + key_] = datadict[key]["frames"][key_] + + return cameras_, datadict_, params diff --git 
a/dannce/engine/video.py b/dannce/engine/video.py index ba47357..2650a18 100755 --- a/dannce/engine/video.py +++ b/dannce/engine/video.py @@ -1,4 +1,5 @@ """ Video reading and writing interfaces for different formats. """ +from copy import Error import os import cv2 import numpy as np @@ -7,6 +8,9 @@ import imageio import time from typing import List, Dict, Tuple, Text +import logging + +FILE_PATH = "dannce.engine.video" @attr.s(auto_attribs=True, eq=False, order=False) @@ -159,6 +163,7 @@ def get_frame(self, idx: int, grayscale: bool = None) -> np.ndarray: return frame + class LoadVideoFrame: """ This class generalized load_vid_frame for access by all generators @@ -169,12 +174,7 @@ class LoadVideoFrame: predict_flag: If True, uses imageio rather than OpenCV """ - def __init__( - self, - _N_VIDEO_FRAMES, - vidreaders, - camnames, - predict_flag): + def __init__(self, _N_VIDEO_FRAMES, vidreaders, camnames, predict_flag): self._N_VIDEO_FRAMES = _N_VIDEO_FRAMES self.vidreaders = vidreaders @@ -189,7 +189,6 @@ def __init__( self.currvideo[cc] = None self.currvideo_name[cc] = None - def load_vid_frame( self, ind: int, camname: Text, extension: Text = ".mp4" ) -> np.ndarray: @@ -205,6 +204,9 @@ def load_vid_frame( Returns: np.ndarray: Video frame as w x h x c numpy ndarray """ + # Set log prepend msg + prepend_log_msg = FILE_PATH + ".LoadVideoFrame.load_vid_frame " + chunks = self._N_VIDEO_FRAMES[camname] cur_video_id = np.nonzero([c <= ind for c in chunks])[0][-1] cur_first_frame = chunks[cur_video_id] @@ -220,26 +222,74 @@ def load_vid_frame( else: # use imageio for prediction, because linear seeking # is faster with imageio than opencv - vid = imageio.get_reader(thisvid_name) if self.predict_flag else \ - MediaVideo(thisvid_name, grayscale=False) - print("Loading new video: {} for {}".format(abname, camname)) + try: + vid = ( + imageio.get_reader(thisvid_name) + if self.predict_flag + else MediaVideo(thisvid_name, grayscale=False) + ) + except (OSError, IOError, 
RuntimeError): + time.sleep(2) + vid = ( + imageio.get_reader(thisvid_name) + if self.predict_flag + else MediaVideo(thisvid_name, grayscale=False) + ) + logging.info(prepend_log_msg + "Loading new video: {} for {}".format(abname, camname)) self.currvideo_name[camname] = abname # close current vid # Without a sleep here, ffmpeg can hang on video close time.sleep(0.25) - if self.currvideo[camname] is not None: - self.currvideo[camname].close() if self.predict_flag else \ - self.currvideo[camname]._reader_.release() + + # Close previously opened and unneeded videos by their camera name + # Assumes the camera names do not contain underscores other than the expid. + # previous_camera_name = "_".join(camname.split("_")[1:]) + previous_camera_name = camname.split("_")[-1] + for key, value in self.currvideo.items(): + if previous_camera_name in key: + if value is not None: + self.currvideo[ + key + ].close() if self.predict_flag else self.currvideo[ + key + ]._reader_.release() self.currvideo[camname] = vid + im = self._load_frame_multiple_attempts(frame_num, vid) + return im + + def _load_frame_multiple_attempts(self, frame_num, vid, n_attempts=10): + attempts = 0 + while attempts < n_attempts: + im = self._load_frame(frame_num, vid) + if im is None: + attempts += 1 + else: + break + else: + raise KeyError + return im - # This deals with a strange indexing error in the pup data. - try: + def _load_frame(self, frame_num, vid): + # Set log prepend msg + prepend_log_msg = FILE_PATH + ".LoadVideoFrame._load_frame " - im = vid.get_data(frame_num).astype("uint8") if self.predict_flag \ + im = None + try: + im = ( + vid.get_data(frame_num).astype("uint8") + if self.predict_flag else vid.get_frame(frame_num) + ) + # This deals with a strange indexing error in the pup data. 
except IndexError: - print("Indexing error, using previous frame") - im = vid.get_data(frame_num - 1).astype("uint8") if self.predict_flag \ + logging.error(prepend_log_msg + "Indexing error, using previous frame") + im = ( + vid.get_data(frame_num - 1).astype("uint8") + if self.predict_flag else vid.get_frame(frame_num - 1) - - return im \ No newline at end of file + ) + # Files can lock if other processes are also trying to access the data. + except KeyError: + time.sleep(5) + pass + return im diff --git a/dannce/interface.py b/dannce/interface.py index e56b39f..d09bbd8 100755 --- a/dannce/interface.py +++ b/dannce/interface.py @@ -10,20 +10,15 @@ import tensorflow as tf import tensorflow.keras as keras import tensorflow.keras.losses as keras_losses +from tensorflow.keras import backend as K from tensorflow.keras.models import load_model, Model +from tensorflow.keras.layers import GlobalMaxPooling3D from tensorflow.keras.optimizers import Adam from tensorflow.keras.callbacks import ModelCheckpoint, CSVLogger, TensorBoard +import dannce.callbacks as cb import dannce.engine.serve_data_DANNCE as serve_data_DANNCE -from dannce.engine.generator import DataGenerator_3Dconv -from dannce.engine.generator import DataGenerator_3Dconv_frommem -from dannce.engine.generator import DataGenerator_3Dconv_npy -from dannce.engine.generator import DataGenerator_3Dconv_torch -from dannce.engine.generator import DataGenerator_3Dconv_tf -from dannce.engine.generator_aux import ( - DataGenerator_downsample, - DataGenerator_downsample_multi_instance, -) -from dannce.engine.generator_aux import DataGenerator_downsample_frommem +import dannce.engine.generator as generator +import dannce.engine.generator_aux as generator_aux import dannce.engine.processing as processing from dannce.engine.processing import savedata_tomat, savedata_expval from dannce.engine import nets, losses, ops, io @@ -33,11 +28,12 @@ _param_defaults_com, ) import dannce.engine.inference as inference -import matplotlib 
- -matplotlib.use("Agg") -import matplotlib.pyplot as plt from typing import List, Dict, Text +import os, psutil +import logging + +process = psutil.Process(os.getpid()) +file_path = "dannce.interface" _DEFAULT_VIDDIR = "videos" _DEFAULT_COMSTRING = "COM" @@ -60,6 +56,7 @@ def check_unrecognized_params(params: Dict): in_com = key in _param_defaults_com in_dannce = key in _param_defaults_dannce in_shared = key in _param_defaults_shared + print (in_com, in_dannce, in_shared) if not (in_com or in_dannce or in_shared): invalid_keys.append(key) @@ -84,9 +81,7 @@ def build_params(base_config: Text, dannce_net: bool): base_params = processing.make_paths_safe(base_params) params = processing.read_config(base_params["io_config"]) params = processing.make_paths_safe(params) - params = processing.inherit_config( - params, base_params, list(base_params.keys()) - ) + params = processing.inherit_config(params, base_params, list(base_params.keys())) check_unrecognized_params(params) return params @@ -114,66 +109,18 @@ def com_predict(params: Dict): Args: params (Dict): Parameters dictionary. - """ - # Make the prediction directory if it does not exist. 
- make_folder("com_predict_dir", params) - - # Load the appropriate loss function and network - try: - params["loss"] = getattr(losses, params["loss"]) - except AttributeError: - params["loss"] = getattr(keras_losses, params["loss"]) - params["net"] = getattr(nets, params["net"]) - - os.environ["CUDA_VISIBLE_DEVICES"] = params["gpu_id"] - - # If params['n_channels_out'] is greater than one, we enter a mode in - # which we predict all available labels + the COM - MULTI_MODE = params["n_channels_out"] > 1 & params["n_instances"] == 1 - params["n_channels_out"] = params["n_channels_out"] + int(MULTI_MODE) - - # channels out is equal to the number of views when using a single video stream with mirrors - eff_n_channels_out = int(params["n_views"]) if params["mirror"] else params["n_channels_out"] - - # Grab the input file for prediction - params["label3d_file"] = processing.grab_predict_label3d_file() - - print("Using camnames: {}".format(params["camnames"])) + # Enable Logging for com_predict + if not os.path.exists(os.path.dirname(params["log_dest"])): + os.makedirs(os.path.dirname(params["log_dest"])) + logging.basicConfig(filename=params["log_dest"], level=params["log_level"], + format='%(asctime)s %(levelname)s:%(message)s', datefmt='%m/%d/%Y %I:%M:%S %p') + prepend_log_msg = file_path + ".{} ".format(sys._getframe( ).f_code.co_name) - # Also add parent params under the 'experiment' key for compatibility - # with DANNCE's video loading function - params["experiment"] = {} - params["experiment"][0] = params + params = setup_com_predict(params) - # For real mono training - params["chan_num"] = 1 if params["mono"] else params["n_channels_in"] - - # Build net - print("Initializing Network...") - model = params["net"]( - params["loss"], - float(params["lr"]), - params["chan_num"], - eff_n_channels_out, - ["mse"], - ) - - # If the weights are not specified, use the train directory. 
- if params["com_predict_weights"] is None: - wdir = params["com_train_dir"] - weights = os.listdir(wdir) - weights = [f for f in weights if ".hdf5" in f] - weights = sorted( - weights, key=lambda x: int(x.split(".")[1].split("-")[0]) - ) - weights = weights[-1] - params["com_predict_weights"] = os.path.join(wdir, weights) - - print("Loading weights from " + params["com_predict_weights"]) - model.load_weights(params["com_predict_weights"]) - - print("COMPLETE\n") + # Get the model + model = build_com_network(params) ( samples, @@ -183,10 +130,8 @@ def com_predict(params: Dict): camera_mats, ) = serve_data_DANNCE.prepare_data( params, - multimode=MULTI_MODE, prediction=True, return_cammat=True, - nanflag=False, ) # Zero any negative frames @@ -197,58 +142,31 @@ def com_predict(params: Dict): # The generator expects an experimentID in front of each sample key samples = ["0_" + str(f) for f in samples] - datadict_ = {} - for key in datadict.keys(): - datadict_["0_" + str(key)] = datadict[key] - - datadict = datadict_ + datadict = {"0_" + str(key): val for key, val in datadict.items()} # Initialize video dictionary. paths to videos only. 
vids = {} vids = processing.initialize_vids(params, datadict, 0, vids, pathonly=True) # Parameters - predict_params = { - "dim_in": ( - params["crop_height"][1] - params["crop_height"][0], - params["crop_width"][1] - params["crop_width"][0], - ), - "n_channels_in": params["n_channels_in"], - "batch_size": 1, - "n_channels_out": params["n_channels_out"], - "out_scale": params["sigma"], - "camnames": {0: params["camnames"]}, - "crop_width": params["crop_width"], - "crop_height": params["crop_height"], - "downsample": params["downfac"], - "labelmode": "coord", - "chunks": params["chunks"], - "shuffle": False, - "dsmode": params["dsmode"], - "mono": params["mono"], - "mirror": params["mirror"], - "predict_flag": True, - } - - partition = {} - partition["valid_sampleIDs"] = samples - labels = datadict + predict_params = get_com_predict_params(params) + partition = {"valid_sampleIDs": samples} save_data = {} # If multi-instance mode is on, use the correct generator # and eval function. if params["n_instances"] > 1: - predict_generator = DataGenerator_downsample_multi_instance( + predict_generator = generator_aux.DataGenerator_downsample_multi_instance( params["n_instances"], partition["valid_sampleIDs"], - labels, + datadict, vids, **predict_params ) else: - predict_generator = DataGenerator_downsample( - partition["valid_sampleIDs"], labels, vids, **predict_params + predict_generator = generator_aux.DataGenerator_downsample( + partition["valid_sampleIDs"], datadict, vids, **predict_params ) # If we just want to analyze a chunk of video... 
@@ -265,10 +183,10 @@ def com_predict(params: Dict): cameras, ) processing.save_COM_checkpoint( - save_data, params["com_predict_dir"], datadict_, cameras, params + save_data, params["com_predict_dir"], datadict, cameras, params ) else: - endIdx = np.min( + end_idx = np.min( [ params["start_sample"] + params["max_num_samples"], len(predict_generator), @@ -276,7 +194,7 @@ def com_predict(params: Dict): ) save_data = inference.infer_com( params["start_sample"], - endIdx, + end_idx, predict_generator, params, model, @@ -288,39 +206,146 @@ def com_predict(params: Dict): processing.save_COM_checkpoint( save_data, params["com_predict_dir"], - datadict_, + datadict, cameras, params, file_name="com3d%d" % (params["start_sample"]), ) - print("done!") + logging.info(prepend_log_msg+"done!") -def com_train(params: Dict): - """Train COM network +def setup_com_predict(params: Dict): + """Sets up the parameters dictionary for com prediction Args: - params (Dict): Parameters dictionary. + params (Dict): Parameters dictionary """ - # Make the train directory if it does not exist. - make_folder("com_train_dir", params) + # Prepend part for logging + prepend_log_msg = file_path + ".{} ".format(sys._getframe( ).f_code.co_name) - params["loss"] = getattr(losses, params["loss"]) + # Make the prediction directory if it does not exist. + make_folder("com_predict_dir", params) + + # Load the appropriate loss function and network + try: + params["loss"] = getattr(losses, params["loss"]) + except AttributeError: + params["loss"] = getattr(keras_losses, params["loss"]) params["net"] = getattr(nets, params["net"]) - os.environ["CUDA_VISIBLE_DEVICES"] = params["gpu_id"] + #os.environ["CUDA_VISIBLE_DEVICES"] = params["gpu_id"] - # MULTI_MODE is where the full set of markers is trained on, rather than - # the COM only. In some cases, this can help improve COMfinder performance. 
- MULTI_MODE = params["n_channels_out"] > 1 & params["n_instances"] == 1 - params["n_channels_out"] = params["n_channels_out"] + int(MULTI_MODE) + # If params['n_channels_out'] is greater than one, we enter a mode in + # which we predict all available labels + the COM + params["multi_mode"] = (params["n_channels_out"] > 1) & (params["n_instances"] == 1) + params["n_channels_out"] = params["n_channels_out"] + int(params["multi_mode"]) - samples = [] - datadict = {} - datadict_3d = {} - cameras = {} - camnames = {} + # Grab the input file for prediction + params["label3d_file"] = processing.grab_predict_label3d_file() + + logging.info(prepend_log_msg+"Using camnames: {}".format(params["camnames"])) + + # Also add parent params under the 'experiment' key for compatibility + # with DANNCE's video loading function + params["experiment"] = {} + params["experiment"][0] = params + + # For real mono training + params["chan_num"] = 1 if params["mono"] else params["n_channels_in"] + return params + + +def build_com_network(params: Dict) -> Model: + """Builds a com network for prediciton + + Args: + params (Dict): Parameters dictionary + + Returns: + Model: com network + """ + # Prepend Logging message for build_com_network + prepend_log_msg = file_path + ".build_com_network " + + # channels out is equal to the number of views when using a single video stream with mirrors + eff_n_channels_out = ( + int(params["n_views"]) if params["mirror"] else params["n_channels_out"] + ) + logging.info("Initializing Network...") + # Build net + model = params["net"]( + params["loss"], + float(params["lr"]), + params["chan_num"], + eff_n_channels_out, + norm_method=params["norm_method"], + metric=["mse"], + ) + + # If the weights are not specified, use the train directory. 
+ if params["com_predict_weights"] is None: + weights = os.listdir(params["com_train_dir"]) + weights = [f for f in weights if ".hdf5" in f] + weights = sorted(weights, key=lambda x: int(x.split(".")[1].split("-")[0])) + weights = weights[-1] + params["com_predict_weights"] = os.path.join(params["com_train_dir"], weights) + + logging.info(prepend_log_msg + "Loading weights from " + params["com_predict_weights"]) + model.load_weights(params["com_predict_weights"]) + + logging.info(prepend_log_msg+"COMPLETE\n") + return model + + +def get_com_predict_params(params: Dict) -> Dict: + """Helper to get com prediction parameters. + + Args: + params (Dict): Parameters dictionary. + + Returns: + Dict: Prediction parameters dictionary. + """ + predict_params = { + "dim_in": ( + params["crop_height"][1] - params["crop_height"][0], + params["crop_width"][1] - params["crop_width"][0], + ), + "n_channels_in": params["n_channels_in"], + "batch_size": 1, + "n_channels_out": params["n_channels_out"], + "out_scale": params["sigma"], + "camnames": {0: params["camnames"]}, + "crop_width": params["crop_width"], + "crop_height": params["crop_height"], + "downsample": params["downfac"], + "labelmode": "coord", + "chunks": params["chunks"], + "shuffle": False, + "dsmode": params["dsmode"], + "mono": params["mono"], + "mirror": params["mirror"], + "predict_flag": True, + } + return predict_params + + +def com_train(params: Dict): + """Train COM network + + Args: + params (Dict): Parameters dictionary. 
+ """ + + # Setup Logging for com_train + if not os.path.exists(os.path.dirname(params["log_dest"])): + os.makedirs(os.path.dirname(params["log_dest"])) + logging.basicConfig(filename=params["log_dest"], level=params["log_level"], + format='%(asctime)s %(levelname)s:%(message)s', datefmt='%m/%d/%Y %I:%M:%S %p') + prepend_log_msg = file_path + ".com_train " + + params = setup_com_train(params) # Use the same label files and experiment settings as DANNCE unless # indicated otherwise by using a 'com_exp' block in io.yaml. @@ -331,23 +356,22 @@ def com_train(params: Dict): else: exps = params["exp"] num_experiments = len(exps) + params["experiment"] = {} total_chunks = {} + cameras = {} + camnames = {} + datadict = {} + datadict_3d = {} + samples = [] for e, expdict in enumerate(exps): exp = processing.load_expdict(params, e, expdict, _DEFAULT_VIDDIR) params["experiment"][e] = exp - ( - samples_, - datadict_, - datadict_3d_, - cameras_, - ) = serve_data_DANNCE.prepare_data( + (samples_, datadict_, datadict_3d_, cameras_,) = serve_data_DANNCE.prepare_data( params["experiment"][e], - nanflag=False, - com_flag=not MULTI_MODE, - multimode=MULTI_MODE, + com_flag=not params["multi_mode"], ) # No need to prepare any COM file (they don't exist yet). @@ -369,10 +393,8 @@ def com_train(params: Dict): for name, chunk in exp["chunks"].items(): total_chunks[name] = chunk - com_train_dir = params["com_train_dir"] - # Dump the params into file for reproducibility - processing.save_params(com_train_dir, params) + processing.save_params(params["com_train_dir"], params) # Additionally, to keep videos unique across experiments, need to add # experiment labels in other places. E.g. 
experiment 0 CameraE's "camname" @@ -388,11 +410,9 @@ def com_train(params: Dict): # Initialize video objects vids = {} for e in range(num_experiments): - vids = processing.initialize_vids( - params, datadict, e, vids, pathonly=True - ) + vids = processing.initialize_vids(params, datadict, e, vids, pathonly=True) - print("Using {} downsampling".format(params["dsmode"])) + logging.info(prepend_log_msg + "Using {} downsampling".format(params["dsmode"])) train_params = { "dim_in": ( @@ -418,7 +438,7 @@ def com_train(params: Dict): valid_params["shuffle"] = False partition = processing.make_data_splits( - samples, params, com_train_dir, num_experiments + samples, params, params["com_train_dir"], num_experiments ) labels = datadict @@ -427,19 +447,22 @@ def com_train(params: Dict): params["chan_num"] = 1 if params["mono"] else params["n_channels_in"] # effective n_channels, which is different if using a mirror arena configuration - eff_n_channels_out = len(camnames[0]) if params["mirror"] else params["n_channels_out"] + eff_n_channels_out = ( + len(camnames[0]) if params["mirror"] else params["n_channels_out"] + ) # Build net - print("Initializing Network...") + logging.info(prepend_log_msg + "Initializing Network...") model = params["net"]( params["loss"], float(params["lr"]), params["chan_num"], eff_n_channels_out, - ["mse"], + norm_method=params["norm_method"], + metric=["mse"], ) - print("COMPLETE\n") + logging.info(prepend_log_msg + "COMPLETE\n") if params["com_finetune_weights"] is not None: weights = os.listdir(params["com_finetune_weights"]) @@ -447,16 +470,14 @@ def com_train(params: Dict): weights = weights[0] try: - model.load_weights( - os.path.join(params["com_finetune_weights"], weights) - ) + model.load_weights(os.path.join(params["com_finetune_weights"], weights)) except: - print( + logging.error( prepend_log_msg + "Note: model weights could not be loaded due to a mismatch in dimensions.\ Assuming that this is a fine-tune with a different number of 
outputs and removing \ the top of the net accordingly" ) - model.layers[-1].name = "top_conv" + model.layers[-1]._name = "top_conv" model.load_weights( os.path.join(params["com_finetune_weights"], weights), by_name=True, @@ -477,26 +498,27 @@ def com_train(params: Dict): # Create checkpoint and logging callbacks model_checkpoint = ModelCheckpoint( - os.path.join(com_train_dir, kkey), + os.path.join(params["com_train_dir"], kkey), monitor=mon, save_best_only=True, save_weights_only=True, ) - csvlog = CSVLogger(os.path.join(com_train_dir, "training.csv")) + csvlog = CSVLogger(os.path.join(params["com_train_dir"], "training.csv")) tboard = TensorBoard( - log_dir=os.path.join(com_train_dir, "logs"), write_graph=False, update_freq=100 + log_dir=os.path.join(params["com_train_dir"], "logs"), + write_graph=False, + update_freq=100, ) # Initialize data structures if params["mirror"]: - ncams = 1 # Effectively, for the purpose of batch indexing + ncams = 1 # Effectively, for the purpose of batch indexing else: ncams = len(camnames[0]) dh = (params["crop_height"][1] - params["crop_height"][0]) // params["downfac"] dw = (params["crop_width"][1] - params["crop_width"][0]) // params["downfac"] - ims_train = np.zeros( ( ncams * len(partition["train_sampleIDs"]), @@ -526,14 +548,14 @@ def com_train(params: Dict): # Set up generators if params["n_instances"] > 1: - train_generator = DataGenerator_downsample_multi_instance( + train_generator = generator_aux.DataGenerator_downsample_multi_instance( params["n_instances"], partition["train_sampleIDs"], labels, vids, **train_params ) - valid_generator = DataGenerator_downsample_multi_instance( + valid_generator = generator_aux.DataGenerator_downsample_multi_instance( params["n_instances"], partition["valid_sampleIDs"], labels, @@ -541,27 +563,27 @@ def com_train(params: Dict): **valid_params ) else: - train_generator = DataGenerator_downsample( + train_generator = generator_aux.DataGenerator_downsample( partition["train_sampleIDs"], 
labels, vids, **train_params ) - valid_generator = DataGenerator_downsample( + valid_generator = generator_aux.DataGenerator_downsample( partition["valid_sampleIDs"], labels, vids, **valid_params ) - print("Loading data") + logging.info(prepend_log_msg + "Loading data") for i in range(len(partition["train_sampleIDs"])): - print(i, end="\r") + logging.debug("%s\r", i) ims = train_generator.__getitem__(i) ims_train[i * ncams : (i + 1) * ncams] = ims[0] y_train[i * ncams : (i + 1) * ncams] = ims[1] for i in range(len(partition["valid_sampleIDs"])): - print(i, end="\r") + logging.debug("%s\r", i) ims = valid_generator.__getitem__(i) ims_valid[i * ncams : (i + 1) * ncams] = ims[0] y_valid[i * ncams : (i + 1) * ncams] = ims[1] - train_generator = DataGenerator_downsample_frommem( + train_generator = generator_aux.DataGenerator_downsample_frommem( np.arange(ims_train.shape[0]), ims_train, y_train, @@ -580,7 +602,8 @@ def com_train(params: Dict): zoom_val=params["augment_zoom_val"], chan_num=params["chan_num"], ) - valid_generator = DataGenerator_downsample_frommem( + + valid_generator = generator_aux.DataGenerator_downsample_frommem( np.arange(ims_valid.shape[0]), ims_valid, y_valid, @@ -589,61 +612,7 @@ def com_train(params: Dict): chan_num=params["chan_num"], ) - def write_debug(trainData=True): - """Factoring re-used debug output code. - - Writes training or validation images to an output directory, together - with the ground truth COM labels and predicted COM labels, respectively. - - Args: - trainData (bool, optional): If True use training data for debug. 
- """ - def plot_out(imo, lo, imn): - processing.plot_markers_2d( - processing.norm_im(imo), lo, newfig=False - ) - plt.gca().xaxis.set_major_locator(plt.NullLocator()) - plt.gca().yaxis.set_major_locator(plt.NullLocator()) - - imname = imn - plt.savefig( - os.path.join(debugdir, imname), bbox_inches="tight", pad_inches=0 - ) - - if params["debug"] and not MULTI_MODE: - - if trainData: - outdir = "debug_im_out" - ims_out = ims_train - label_out = y_train - else: - outdir = "debug_im_out_valid" - ims_out = ims_valid - label_out = model.predict(ims_valid, batch_size=1) - - # Plot all training images and save - # create new directory for images if necessary - debugdir = os.path.join(params["com_train_dir"], outdir) - print("Saving debug images to: " + debugdir) - if not os.path.exists(debugdir): - os.makedirs(debugdir) - - plt.figure() - - for i in range(ims_out.shape[0]): - plt.cla() - if params["mirror"]: - for j in range(label_out.shape[-1]): - plt.cla() - plot_out(ims_out[i], label_out[i, :, :, j:j+1], - str(i) + "_cam_" + str(j) + ".png") - else: - plot_out(ims_out[i], label_out[i], str(i) + ".png") - - elif params["debug"] and MULTI_MODE: - print("Note: Cannot output debug information in COM multi-mode") - - write_debug(trainData=True) + processing.write_debug(params, ims_train, ims_valid, y_train, model) model.fit( x=train_generator, @@ -652,22 +621,40 @@ def plot_out(imo, lo, imn): validation_steps=len(valid_generator), verbose=params["verbose"], epochs=params["epochs"], - workers=6, + # workers=6, callbacks=[csvlog, model_checkpoint, tboard], ) - write_debug(trainData=False) + processing.write_debug( + params, ims_train, ims_valid, y_train, model, trainData=False + ) - print("Renaming weights file with best epoch description") - processing.rename_weights(com_train_dir, kkey, mon) + logging.info(prepend_log_msg + "Renaming weights file with best epoch description") + processing.rename_weights(params["com_train_dir"], kkey, mon) - print("Saving full model at 
end of training") + logging.info(prepend_log_msg + "Saving full model at end of training") sdir = os.path.join(params["com_train_dir"], "fullmodel_weights") if not os.path.exists(sdir): os.makedirs(sdir) model.save(os.path.join(sdir, "fullmodel_end.hdf5")) +def setup_com_train(params: Dict) -> Dict: + # Make the train directory if it does not exist. + make_folder("com_train_dir", params) + + params["loss"] = getattr(losses, params["loss"]) + params["net"] = getattr(nets, params["net"]) + + #os.environ["CUDA_VISIBLE_DEVICES"] = params["gpu_id"] + + # MULTI_MODE is where the full set of markers is trained on, rather than + # the COM only. In some cases, this can help improve COMfinder performance. + params["multi_mode"] = (params["n_channels_out"] > 1) & (params["n_instances"] == 1) + params["n_channels_out"] = params["n_channels_out"] + int(params["multi_mode"]) + return params + + def dannce_train(params: Dict): """Train dannce network. @@ -677,32 +664,45 @@ def dannce_train(params: Dict): Raises: Exception: Error if training mode is invalid. """ + + # Setup Logging for com_train + if not os.path.exists(os.path.dirname(params["log_dest"])): + os.makedirs(os.path.dirname(params["log_dest"])) + logging.basicConfig(filename=params["log_dest"], level=params["log_level"], + format='%(asctime)s %(levelname)s:%(message)s', datefmt='%m/%d/%Y %I:%M:%S %p') + prepend_log_msg = file_path + ".dannce_train " + # Depth disabled until next release. params["depth"] = False - + params["multi_mode"] = False # Make the training directory if it does not exist. make_folder("dannce_train_dir", params) - params["loss"] = getattr(losses, params["loss"]) + # Adopted from implementation by robb + if "huber_loss" in params["loss"]: + params["loss"] = losses.huber_loss(params["huber-delta"]) + else: + params["loss"] = getattr(losses, params["loss"]) + params["net"] = getattr(nets, params["net"]) # Default to 6 views but a smaller number of views can be specified in the # DANNCE config. 
If the legnth of the camera files list is smaller than # n_views, relevant lists will be duplicated in order to match n_views, if # possible. - n_views = int(params["n_views"]) + params["n_views"] = int(params["n_views"]) + + # Pass delta value into huber loss function + if params["huber-delta"] is not None: + losses.huber_loss(params["huber-delta"]) # Convert all metric strings to objects metrics = nets.get_metrics(params) - # set GPU ID - if not params["multi_gpu_train"]: - os.environ["CUDA_VISIBLE_DEVICES"] = params["gpu_id"] - # find the weights given config path if params["dannce_finetune_weights"] is not None: params["dannce_finetune_weights"] = processing.get_ft_wt(params) - print("Fine-tuning from {}".format(params["dannce_finetune_weights"])) + logging.info(prepend_log_msg + "Fine-tuning from {}".format(params["dannce_finetune_weights"])) samples = [] datadict = {} @@ -726,9 +726,9 @@ def dannce_train(params: Dict): datadict_3d_, cameras_, com3d_dict_, - ) = do_COM_load(exp, expdict, n_views, e, params) + ) = do_COM_load(exp, expdict, e, params) - print("Using {} samples total.".format(len(samples_))) + logging.debug(prepend_log_msg + "Using {} samples total.".format(len(samples_))) ( samples, @@ -749,7 +749,7 @@ def dannce_train(params: Dict): cameras[e] = cameras_ camnames[e] = exp["camnames"] - print("Using the following cameras: {}".format(camnames[e])) + logging.debug(prepend_log_msg + "Using the following cameras: {}".format(camnames[e])) params["experiment"][e] = exp for name, chunk in exp["chunks"].items(): total_chunks[name] = chunk @@ -771,11 +771,33 @@ def dannce_train(params: Dict): if params["use_npy"]: # Add all npy volume directories to list, to be used by generator - npydir = {} + dirnames = ["image_volumes", "grid_volumes", "targets"] + npydir, missing_npydir = {}, {} for e in range(num_experiments): npydir[e] = params["experiment"][e]["npy_vol_dir"] + + if not os.path.exists(npydir[e]): + missing_npydir[e] = npydir[e] + for dir in 
dirnames: + os.makedirs(os.path.join(npydir[e], dir)) + else: + for dir in dirnames: + dirpath = os.path.join(npydir[e], dir) + if (not os.path.exists(dirpath)) or (len(os.listdir(dirpath)) == 0): + missing_npydir[e] = npydir[e] + os.makedirs(dirpath, exist_ok=True) + + # samples = processing.remove_samples_npy(npydir, samples, params) + missing_samples = np.array([samp for samp in samples if int(samp.split("_")[0]) in list(missing_npydir.keys())]) + if len(missing_samples) != 0: + logging.info(prepend_log_msg + "{} npy files for experiments {} are missing.".format(len(missing_samples), list(missing_npydir.keys()))) + + vids = {} + for e in range(num_experiments): + vids = processing.initialize_vids(params, datadict, e, vids, pathonly=True) + else: + logging.info(prepend_log_msg + "No missing npy files. Ready for training.") - samples = processing.remove_samples_npy(npydir, samples, params) else: # Initialize video objects vids = {} @@ -783,7 +805,7 @@ def dannce_train(params: Dict): if params["immode"] == "vid": vids = processing.initialize_vids( params, datadict, e, vids, pathonly=True - ) + ) # Parameters if params["expval"]: @@ -802,13 +824,74 @@ def dannce_train(params: Dict): cam3_train = False partition = processing.make_data_splits( - samples, params, dannce_train_dir, num_experiments - ) + samples, params, dannce_train_dir, num_experiments + ) if params["use_npy"]: # mono conversion will happen from RGB npy files, and the generator # needs to b aware that the npy files contain RGB content params["chan_num"] = params["n_channels_in"] + + if len(missing_samples) != 0: + valid_params = { + "dim_in": ( + params["crop_height"][1] - params["crop_height"][0], + params["crop_width"][1] - params["crop_width"][0], + ), + "n_channels_in": params["n_channels_in"], + "batch_size": 1, + "n_channels_out": params["new_n_channels_out"], + "out_scale": params["sigma"], + "crop_width": params["crop_width"], + "crop_height": params["crop_height"], + "vmin": params["vmin"], + 
"vmax": params["vmax"], + "nvox": params["nvox"], + "interp": params["interp"], + "depth": params["depth"], + "channel_combo": None, + "mode": "coordinates", + "camnames": camnames, + "immode": params["immode"], + "shuffle": False, + "rotation": False, + "vidreaders": vids, + "distort": True, + "expval": True, + "crop_im": False, + "chunks": total_chunks, + "mono": params["mono"], + "mirror": params["mirror"], + "predict_flag": False, + "norm_im": False + } + + tifdirs = [] + npy_generator = generator.DataGenerator_3Dconv_torch( + missing_samples, + datadict, + datadict_3d, + cameras, + missing_samples, + com3d_dict, + tifdirs, + **valid_params + ) + logging.debug(prepend_log_msg + "Generating missing npy files ...") + for i, samp in enumerate(missing_samples): + exp = int(samp.split("_")[0]) + save_root = missing_npydir[exp] + fname = "0_{}.npy".format(samp.split("_")[1]) + + rr = npy_generator.__getitem__(i) + logging.debug( prepend_log_msg + "{} + \r".format(i)) + np.save(os.path.join(save_root, "image_volumes", fname), rr[0][0][0].astype("uint8")) + np.save(os.path.join(save_root, "grid_volumes", fname), rr[0][1][0]) + np.save(os.path.join(save_root, "targets", fname), rr[1][0]) + + samples = processing.remove_samples_npy(npydir, samples, params) + logging.info("{} samples ready for npy training.".format(len(samples))) + else: # Used to initialize arrays for mono, and also in *frommem (the final generator) params["chan_num"] = 1 if params["mono"] else params["n_channels_in"] @@ -847,7 +930,7 @@ def dannce_train(params: Dict): # Setup a generator that will read videos and labels tifdirs = [] # Training from single images not yet supported in this demo - train_generator = DataGenerator_3Dconv( + train_generator = generator.DataGenerator_3Dconv( partition["train_sampleIDs"], datadict, datadict_3d, @@ -857,7 +940,7 @@ def dannce_train(params: Dict): tifdirs, **valid_params ) - valid_generator = DataGenerator_3Dconv( + valid_generator = 
generator.DataGenerator_3Dconv( partition["valid_sampleIDs"], datadict, datadict_3d, @@ -935,13 +1018,13 @@ def dannce_train(params: Dict): dtype="float32", ) - print( - "Loading training data into memory. This can take a while to seek through", - "large sets of video. This process is much faster if the frame indices", + logging.info( prepend_log_msg + + "Loading training data into memory. This can take a while to seek through" + + "large sets of video. This process is much faster if the frame indices" + "are sorted in ascending order in your label data file.", ) for i in range(len(partition["train_sampleIDs"])): - print(i, end="\r") + logging.debug("%s\r", i) rr = train_generator.__getitem__(i) if params["expval"]: X_train[i] = rr[0][0] @@ -956,7 +1039,7 @@ def dannce_train(params: Dict): # This can be used for debugging problems with calibration or # COM estimation tifdir = params["debug_volume_tifdir"] - print("Dump training volumes to {}".format(tifdir)) + logging.info(prepend_log_msg + "Dump training volumes to {}".format(tifdir)) for i in range(X_train.shape[0]): for j in range(len(camnames[0])): im = X_train[ @@ -973,12 +1056,11 @@ def dannce_train(params: Dict): partition["train_sampleIDs"][i] + "_cam" + str(j) + ".tif", ) imageio.mimwrite(of, np.transpose(im, [2, 0, 1, 3])) - print("Done! 
Exiting.") - sys.exit() + return - print("Loading validation data into memory") + logging.info(prepend_log_msg + "Loading validation data into memory") for i in range(len(partition["valid_sampleIDs"])): - print(i, end="\r") + logging.debug("%s\r", i) rr = valid_generator.__getitem__(i) if params["expval"]: X_valid[i] = rr[0][0] @@ -987,6 +1069,17 @@ def dannce_train(params: Dict): X_valid[i] = rr[0] y_valid[i] = rr[1] + # For AVG+MAX training or training with intermediate supervision, + # need to update the expval flag in the generators + # and re-generate the 3D training targets + # TODO: Add code to infer_params + y_train_aux = None + y_valid_aux = None + if params["avg+max"] is not None : + y_train_aux, y_valid_aux = processing.initAvgMax( + y_train, y_valid, X_train_grid, X_valid_grid, params + ) + # Now we can generate from memory with shuffling, rotation, etc. randflag = params["channel_combo"] == "random" @@ -996,84 +1089,113 @@ def dannce_train(params: Dict): randflag = True if params["n_rand_views"] == 0: - print("Using default n_rand_views augmentation with {} views and with replacement".format(n_views)) - print("To disable n_rand_views augmentation, set it to None in the config.") - params["n_rand_views"] = n_views + logging.info( prepend_log_msg + + "Using default n_rand_views augmentation with {} views and with replacement".format( + params["n_views"] + ) + ) + logging.info( prepend_log_msg + "To disable n_rand_views augmentation, set it to None in the config.") + params["n_rand_views"] = params["n_views"] params["rand_view_replace"] = True - - shared_args = {'chan_num': params["chan_num"], - 'expval': params["expval"], - 'nvox': params["nvox"], - 'heatmap_reg': params["heatmap_reg"], - 'heatmap_reg_coeff': params["heatmap_reg_coeff"]} - shared_args_train = {'batch_size': params["batch_size"], - 'rotation': params["rotate"], - 'augment_hue': params["augment_hue"], - 'augment_brightness': params["augment_brightness"], - 'augment_continuous_rotation': 
params["augment_continuous_rotation"], - 'bright_val': params["augment_bright_val"], - 'hue_val': params["augment_hue_val"], - 'rotation_val': params["augment_rotation_val"], - 'replace': params["rand_view_replace"], - 'random': randflag, - 'n_rand_views': params["n_rand_views"], - } - shared_args_valid = {'batch_size': 4, - 'rotation': False, - 'augment_hue': False, - 'augment_brightness': False, - 'augment_continuous_rotation': False, - 'shuffle': False, - 'replace': False, - 'n_rand_views': params["n_rand_views"] if cam3_train else None, - 'random': True if cam3_train else False} + shared_args = { + "chan_num": params["chan_num"], + "expval": params["expval"], + "nvox": params["nvox"], + "heatmap_reg": params["heatmap_reg"], + "heatmap_reg_coeff": params["heatmap_reg_coeff"], + } + shared_args_train = { + "batch_size": params["batch_size"], + "rotation": params["rotate"], + "augment_hue": params["augment_hue"], + "augment_brightness": params["augment_brightness"], + "augment_continuous_rotation": params["augment_continuous_rotation"], + "mirror_augmentation": params["mirror_augmentation"], + "right_keypoints": params["right_keypoints"], + "left_keypoints": params["left_keypoints"], + "bright_val": params["augment_bright_val"], + "hue_val": params["augment_hue_val"], + "rotation_val": params["augment_rotation_val"], + "replace": params["rand_view_replace"], + "random": randflag, + "n_rand_views": params["n_rand_views"], + } + shared_args_valid = { + "batch_size": 4, + "rotation": False, + "augment_hue": False, + "augment_brightness": False, + "augment_continuous_rotation": False, + "mirror_augmentation": False, + "shuffle": False, + "replace": False, + "n_rand_views": params["n_rand_views"] if cam3_train or params["n_rand_views"] is not None and params["n_rand_views"] < len(camnames[0]) else None, + "random": True if cam3_train or params["n_rand_views"] is not None and params["n_rand_views"] < len(camnames[0]) else False, + } if params["use_npy"]: - genfunc = 
DataGenerator_3Dconv_npy - args_train = {'list_IDs': partition["train_sampleIDs"], - 'labels_3d': datadict_3d, - 'npydir': npydir, - } - args_train = {**args_train, - **shared_args_train, - **shared_args, - 'sigma': params["sigma"], - 'mono': params["mono"]} - - args_valid = {'list_IDs': partition["valid_sampleIDs"], - 'labels_3d': datadict_3d, - 'npydir': npydir, - } - args_valid = {**args_valid, - **shared_args_valid, - **shared_args, - 'sigma': params["sigma"], - 'mono': params["mono"]} + genfunc = generator.DataGenerator_3Dconv_npy + args_train = { + "list_IDs": partition["train_sampleIDs"], + "labels_3d": datadict_3d, + "npydir": npydir, + } + args_train = { + **args_train, + **shared_args_train, + **shared_args, + "sigma": params["sigma"], + "mono": params["mono"], + } + + args_valid = { + "list_IDs": partition["valid_sampleIDs"], + "labels_3d": datadict_3d, + "npydir": npydir, + } + args_valid = { + **args_valid, + **shared_args_valid, + **shared_args, + "sigma": params["sigma"], + "mono": params["mono"], + } else: - genfunc = DataGenerator_3Dconv_frommem - args_train = {'list_IDs': np.arange(len(partition["train_sampleIDs"])), - 'data': X_train, - 'labels': y_train, - } - args_train = {**args_train, - **shared_args_train, - **shared_args, - 'xgrid': X_train_grid} - - args_valid = {'list_IDs': np.arange(len(partition["valid_sampleIDs"])), - 'data': X_valid, - 'labels': y_valid, - } - args_valid = {**args_valid, - **shared_args_valid, - **shared_args, - 'xgrid': X_valid_grid} + genfunc = generator.DataGenerator_3Dconv_frommem + args_train = { + "list_IDs": np.arange(len(partition["train_sampleIDs"])), + "data": X_train, + "labels": y_train, + } + args_train = { + **args_train, + **shared_args_train, + **shared_args, + "xgrid": X_train_grid, + "aux_labels": y_train_aux, + } + args_valid = { + "list_IDs": np.arange(len(partition["valid_sampleIDs"])), + "data": X_valid, + "labels": y_valid, + "aux_labels": y_valid_aux, + } + args_valid = { + **args_valid, + 
**shared_args_valid, + **shared_args, + "xgrid": X_valid_grid, + } train_generator = genfunc(**args_train) valid_generator = genfunc(**args_valid) + # if params["use_npy""]: + # processing.write_npy(params["write_npy"], train) + # samples = processing.remove_samples_npy(npydir, samples, params) + # Build net - print("Initializing Network...") + logging.info( prepend_log_msg + "Initializing Network...") # Currently, we expect four modes of use: # 1) Training a new network from scratch @@ -1082,51 +1204,49 @@ def dannce_train(params: Dict): # if params["multi_gpu_train"]: strategy = tf.distribute.MirroredStrategy() - print('Number of devices: {}'.format(strategy.num_replicas_in_sync)) + logging.info(prepend_log_msg + "Number of devices: {}".format(strategy.num_replicas_in_sync)) scoping = strategy.scope() - # else: - # scoping = True - print("NUM CAMERAS: {}".format(len(camnames[0]))) + logging.info(prepend_log_msg + "NUM CAMERAS: {}".format(len(camnames[0]))) with scoping: if params["train_mode"] == "new": model = params["net"]( - params["loss"], - float(params["lr"]), - params["chan_num"] + params["depth"], - params["n_channels_out"], - len(camnames[0]), - batch_norm=False, - instance_norm=True, + lossfunc=params["loss"], + lr=float(params["lr"]), + input_dim=params["chan_num"] + params["depth"], + feature_num=params["n_channels_out"], + num_cams=params["n_rand_views"] if params["n_rand_views"] is not None and params["n_rand_views"] < len(camnames[0]) else len(camnames[0]), + norm_method=params["norm_method"], include_top=True, gridsize=gridsize, ) elif params["train_mode"] == "finetune": - fargs = [params["loss"], - float(params["lr"]), - params["chan_num"] + params["depth"], - params["n_channels_out"], - len(camnames[0]), - params["new_last_kernel_size"], - params["new_n_channels_out"], - params["dannce_finetune_weights"], - params["n_layers_locked"], - False, - True, - gridsize] + fargs = [ + params["loss"], + float(params["lr"]), + params["chan_num"] + 
params["depth"], + params["n_channels_out"], + params["n_rand_views"] if params["n_rand_views"] is not None and params["n_rand_views"] < len(camnames[0]) else len(camnames[0]), + params["new_last_kernel_size"], + params["new_n_channels_out"], + params["dannce_finetune_weights"], + params["n_layers_locked"], + params["norm_method"], + gridsize, + ] try: - model = params["net"]( - *fargs - ) + model = params["net"](*fargs) except: if params["expval"]: - print("Could not load weights for finetune (likely because you are finetuning a previously finetuned network). Attempting to finetune from a full finetune model file.") - model = nets.finetune_fullmodel_AVG( - *fargs + logging.warning(prepend_log_msg + + "Could not load weights for finetune (likely because you are finetuning a previously finetuned network). Attempting to finetune from a full finetune model file." ) + model = nets.finetune_fullmodel_AVG(*fargs) else: - raise Exception("Finetuning from a previously finetuned model is currently possible only for AVG models") + raise Exception( + "Finetuning from a previously finetuned model is currently possible only for AVG models" + ) elif params["train_mode"] == "continued": model = load_model( params["dannce_finetune_weights"], @@ -1135,8 +1255,12 @@ def dannce_train(params: Dict): "slice_input": nets.slice_input, "mask_nan_keep_loss": losses.mask_nan_keep_loss, "mask_nan_l1_loss": losses.mask_nan_l1_loss, + "log_cosh_loss": losses.log_cosh_loss, + "huber_loss": losses.huber_loss, "euclidean_distance_3D": losses.euclidean_distance_3D, "centered_euclidean_distance_3D": losses.centered_euclidean_distance_3D, + "gaussian_cross_entropy_loss": losses.gaussian_cross_entropy_loss, + "max_euclidean_distance": losses.max_euclidean_distance, }, ) elif params["train_mode"] == "continued_weights_only": @@ -1147,9 +1271,8 @@ def dannce_train(params: Dict): float(params["lr"]), params["chan_num"] + params["depth"], params["n_channels_out"], - 3 if cam3_train else len(camnames[0]), 
- batch_norm=False, - instance_norm=True, + 3 if cam3_train else params["n_rand_views"] if params["n_rand_views"] < len(camnames[0]) and params["n_rand_views"] is not None else len(camnames[0]), + norm_method=params["norm_method"], include_top=True, gridsize=gridsize, ) @@ -1160,17 +1283,30 @@ def dannce_train(params: Dict): if params["heatmap_reg"]: model = nets.add_heatmap_output(model) - + if params["avg+max"] is not None and params["train_mode"] != "continued": + model = nets.add_exposed_heatmap(model) if params["heatmap_reg"] or params["train_mode"] != "continued": # recompiling a full model will reset the optimizer state model.compile( optimizer=Adam(lr=float(params["lr"])), - loss=params["loss"] if not params["heatmap_reg"] else [params["loss"], losses.heatmap_max_regularizer], + loss=params["loss"] + if not params["heatmap_reg"] + else [params["loss"], losses.heatmap_max_regularizer], + loss_weights=[1, params["avg+max"]] + if params["avg+max"] is not None + else None, metrics=metrics, ) - print("COMPLETE\n") + if params["lr"] != model.optimizer.learning_rate: + logging.debug(prepend_log_msg + "Changing learning rate to {}".format(params["lr"])) + K.set_value(model.optimizer.learning_rate, params["lr"]) + logging.debug( prepend_log_msg + + "Confirming new learning rate: {}".format(model.optimizer.learning_rate) + ) + + logging.info(prepend_log_msg + "COMPLETE\n") # Create checkpoint and logging callbacks kkey = "weights.hdf5" @@ -1184,69 +1320,54 @@ def dannce_train(params: Dict): ) csvlog = CSVLogger(os.path.join(dannce_train_dir, "training.csv")) tboard = TensorBoard( - log_dir=os.path.join(dannce_train_dir,"logs"), write_graph=False, update_freq=100 + log_dir=os.path.join(dannce_train_dir, "logs"), + write_graph=False, + update_freq=100, ) - class savePredTargets(keras.callbacks.Callback): - def __init__(self, total_epochs, td, tgrid, vd, vgrid, tID, vID, odir, tlabel, vlabel): - self.td = td - self.vd = vd - self.tID = tID - self.vID = vID - 
self.total_epochs = total_epochs - self.val_loss = 1e10 - self.odir = odir - self.tgrid = tgrid - self.vgrid = vgrid - self.tlabel = tlabel - self.vlabel = vlabel - def on_epoch_end(self, epoch, logs=None): - lkey = 'val_loss' if 'val_loss' in logs else 'loss' - if epoch == self.total_epochs-1 or logs[lkey] < self.val_loss and epoch > 25: - print("Saving predictions on train and validation data, after epoch {}".format(epoch)) - self.val_loss = logs[lkey] - pred_t = model.predict([self.td, self.tgrid], batch_size=1) - pred_v = model.predict([self.vd, self.vgrid], batch_size=1) - ofile = os.path.join(self.odir,'checkpoint_predictions_e{}.mat'.format(epoch)) - sio.savemat(ofile, {'pred_train': pred_t, - 'pred_valid': pred_v, - 'target_train': self.tlabel, - 'target_valid': self.vlabel, - 'train_sampleIDs': self.tID, - 'valid_sampleIDs': self.vID}) - - class saveCheckPoint(keras.callbacks.Callback): - def __init__(self, odir, total_epochs): - self.odir = odir - self.saveE = np.arange(0, total_epochs, 250) - def on_epoch_end(self, epoch, logs=None): - lkey = 'val_loss' if 'val_loss' in logs else 'loss' - val_loss = logs[lkey] - if epoch in self.saveE: - # Do a garbage collect to combat keras memory leak - gc.collect() - print("Saving checkpoint weights at epoch {}".format(epoch)) - savename = 'weights.checkpoint.epoch{}.{}{:.5f}.hdf5'.format(epoch, - lkey, - val_loss) - self.model.save(os.path.join(self.odir, savename)) - - - callbacks = [csvlog, model_checkpoint, tboard, saveCheckPoint(params['dannce_train_dir'], params["epochs"])] - - if params['expval'] and not params["use_npy"] and not params["heatmap_reg"] and params["save_pred_targets"]: - save_callback = savePredTargets(params['epochs'], + callbacks = [ + csvlog, + model_checkpoint, + tboard, + cb.saveCheckPoint(params["dannce_train_dir"], params["epochs"]), + ] + + if ( + params["expval"] + and not params["use_npy"] + and not params["heatmap_reg"] + ): + save_callback = cb.savePredTargets( + params["epochs"], 
X_train, X_train_grid, X_valid, X_valid_grid, - partition['train_sampleIDs'], - partition['valid_sampleIDs'], - params['dannce_train_dir'], + partition["train_sampleIDs"], + partition["valid_sampleIDs"], + params["dannce_train_dir"], y_train, - y_valid) + y_valid, + ) + elif not params["expval"] and not params["use_npy"] and not params["heatmap_reg"]: + save_callback = cb.saveMaxPreds( + partition["train_sampleIDs"], + X_train, + datadict_3d, + params["dannce_train_dir"], + com3d_dict, + params, + ) + + if params["save_pred_targets"]: callbacks = callbacks + [save_callback] + logging.debug(prepend_log_msg + "Model Architecture: ") + # model.summary(print_fn = logging.debug()) + model.summary() + + # import pdb; pdb.set_trace() + model.fit( x=train_generator, steps_per_epoch=len(train_generator), @@ -1255,21 +1376,30 @@ def on_epoch_end(self, epoch, logs=None): verbose=params["verbose"], epochs=params["epochs"], callbacks=callbacks, - workers=6, ) - print("Renaming weights file with best epoch description") - processing.rename_weights(dannce_train_dir, kkey, mon) + logging.info(prepend_log_msg + "Renaming weights file with best epoch description") + bestmodel_pth, bestdict = processing.rename_weights(dannce_train_dir, kkey, mon) - print("Saving full model at end of training") + logging.info(prepend_log_msg + "Saving full model at end of training") sdir = os.path.join(params["dannce_train_dir"], "fullmodel_weights") if not os.path.exists(sdir): os.makedirs(sdir) model = nets.remove_heatmap_output(model, params) - model.save(os.path.join(sdir, "fullmodel_end.hdf5")) + finalmodel_pth = os.path.join(sdir, "fullmodel_end.hdf5") + model.save(finalmodel_pth) + + logging.info(prepend_log_msg + "Saving predictions for {} and {}".format(bestmodel_pth, + finalmodel_pth)) + logging.info(prepend_log_msg + "done!") - print("done!") + if params["save_pred_targets"]: + processing.save_pred_targets(bestmodel_pth, + model, + save_callback, + bestdict, + params) def 
dannce_predict(params: Dict): @@ -1278,45 +1408,19 @@ def dannce_predict(params: Dict): Args: params (Dict): Paremeters dictionary. """ - # Depth disabled until next release. - params["depth"] = False - # Make the prediction directory if it does not exist. - make_folder("dannce_predict_dir", params) - # Load the appropriate loss function and network - try: - params["loss"] = getattr(losses, params["loss"]) - except AttributeError: - params["loss"] = getattr(keras_losses, params["loss"]) - netname = params["net"] - params["net"] = getattr(nets, params["net"]) - # Default to 6 views but a smaller number of views can be specified in the DANNCE config. - # If the legnth of the camera files list is smaller than n_views, relevant lists will be - # duplicated in order to match n_views, if possible. - n_views = int(params["n_views"]) - - os.environ["CUDA_VISIBLE_DEVICES"] = params["gpu_id"] - gpu_id = params["gpu_id"] - - # While we can use experiment files for DANNCE training, - # for prediction we use the base data files present in the main config - # Grab the input file for prediction - params["label3d_file"] = processing.grab_predict_label3d_file() - params["base_exp_folder"] = os.path.dirname(params["label3d_file"]) + # Setup Logging for com_train + if not os.path.exists(os.path.dirname(params["log_dest"])): + os.makedirs(os.path.dirname(params["log_dest"])) + logging.basicConfig(filename=params["log_dest"], level=params["log_level"], + format='%(asctime)s %(levelname)s:%(message)s', datefmt='%m/%d/%Y %I:%M:%S %p') + prepend_log_msg = file_path + ".dannce_predict " + + #os.environ["CUDA_VISIBLE_DEVICES"] = params["gpu_id"] - # default to slow numpy backend if there is no predict_mode in config file. I.e. 
legacy support - predict_mode = ( - params["predict_mode"] - if params["predict_mode"] is not None - else "numpy" - ) - print("Using {} predict mode".format(predict_mode)) + make_folder("dannce_predict_dir", params) - print("Using camnames: {}".format(params["camnames"])) - # Also add parent params under the 'experiment' key for compatibility - # with DANNCE's video loading function - params["experiment"] = {} - params["experiment"][0] = params + params = setup_dannce_predict(params) ( params["experiment"][0], @@ -1328,7 +1432,6 @@ def dannce_predict(params: Dict): ) = do_COM_load( params["experiment"][0], params["experiment"][0], - n_views, 0, params, training=False, @@ -1336,12 +1439,7 @@ def dannce_predict(params: Dict): # Write 3D COM to file. This might be different from the input com3d file # if arena thresholding was applied. - cfilename = os.path.join(params["dannce_predict_dir"], "com3d_used.mat") - print("Saving 3D COM to {}".format(cfilename)) - c3d = np.zeros((len(samples_), 3)) - for i in range(len(samples_)): - c3d[i] = com3d_dict_[samples_[i]] - sio.savemat(cfilename, {"sampleID": samples_, "com": c3d}) + write_com_file(params, samples_, com3d_dict_) # The library is configured to be able to train over multiple animals ("experiments") # at once. Because supporting code expects to see an experiment ID# prepended to @@ -1350,12 +1448,7 @@ def dannce_predict(params: Dict): datadict = {} datadict_3d = {} com3d_dict = {} - ( - samples, - datadict, - datadict_3d, - com3d_dict, - ) = serve_data_DANNCE.add_experiment( + (samples, datadict, datadict_3d, com3d_dict,) = serve_data_DANNCE.add_experiment( 0, samples, datadict, @@ -1380,17 +1473,12 @@ def dannce_predict(params: Dict): samples = np.array(samples) - # For real mono prediction - params["chan_num"] = 1 if params["mono"] else params["n_channels_in"] - # Initialize video dictionary. paths to videos only. 
# TODO: Remove this immode option if we decide not # to support tifs if params["immode"] == "vid": vids = {} - vids = processing.initialize_vids( - params, datadict, 0, vids, pathonly=True - ) + vids = processing.initialize_vids(params, datadict, 0, vids, pathonly=True) # Parameters valid_params = { @@ -1426,27 +1514,24 @@ def dannce_predict(params: Dict): } # Datasets - partition = {} valid_inds = np.arange(len(samples)) - partition["valid_sampleIDs"] = samples[valid_inds] + partition = {"valid_sampleIDs": samples[valid_inds]} # TODO: Remove tifdirs arguments, which are deprecated tifdirs = [] # Generators - if predict_mode == "torch": + if params["predict_mode"] == "torch": import torch # Because CUDA_VISBILE_DEVICES is already set to a single GPU, the gpu_id here should be "0" - device = "cuda:0" - genfunc = DataGenerator_3Dconv_torch - elif predict_mode == "tf": - device = "/GPU:0" - genfunc = DataGenerator_3Dconv_tf + genfunc = generator.DataGenerator_3Dconv_torch + elif params["predict_mode"] == "tf": + genfunc = generator.DataGenerator_3Dconv_tf else: - genfunc = DataGenerator_3Dconv + genfunc = generator.DataGenerator_3Dconv - valid_generator = genfunc( + predict_generator = genfunc( partition["valid_sampleIDs"], datadict, datadict_3d, @@ -1457,8 +1542,155 @@ def dannce_predict(params: Dict): **valid_params ) + model = build_model(params, camnames) + + if params["maxbatch"] != "max" and params["maxbatch"] > len(predict_generator): + logging.info( + prepend_log_msg + + "Maxbatch was set to a larger number of matches than exist in the video. Truncating" + ) + processing.print_and_set(params, "maxbatch", len(predict_generator)) + + if params["maxbatch"] == "max": + processing.print_and_set(params, "maxbatch", len(predict_generator)) + + if params["write_npy"] is not None: + # Instead of running inference, generate all samples + # from valid_generator and save them to npy files. 
Useful + # for working with large datasets (such as Rat 7M) because + # .npy files can be loaded in quickly with random access + # during training. + logging.info(prepend_log_msg + "Writing samples to .npy files") + processing.write_npy(params["write_npy"], predict_generator) + return + + save_data = inference.infer_dannce( + predict_generator, + params, + model, + partition, + params["n_markers"], + com_dict = com3d_dict, + ) + + if params["expval"]: + if params["save_tag"] is not None: + path = os.path.join( + params["dannce_predict_dir"], + "save_data_AVG%d.mat" % (params["save_tag"]), + ) + else: + path = os.path.join(params["dannce_predict_dir"], "save_data_AVG.mat") + p_n = savedata_expval( + path, + params, + write=True, + data=save_data, + tcoord=False, + num_markers=params["n_markers"], + pmax=True, + ) + else: + if params["save_tag"] is not None: + path = os.path.join( + params["dannce_predict_dir"], + "save_data_MAX%d.mat" % (params["save_tag"]), + ) + else: + path = os.path.join(params["dannce_predict_dir"], "save_data_MAX.mat") + # import pdb; pdb.set_trace() + p_n = savedata_tomat( + path, + params, + params["vmin"], + params["vmax"], + params["nvox"], + write=True, + data=save_data, + num_markers=params["n_markers"], + tcoord=False, + addCOM=com3d_dict, + ) + + +def setup_dannce_predict(params): + prepend_log_msg = file_path + ".setup_dannce_predict " + + # Depth disabled until next release. + params["depth"] = False + # Make the prediction directory if it does not exist. + + # Load the appropriate loss function and network + try: + params["loss"] = getattr(losses, params["loss"]) + except AttributeError: + params["loss"] = getattr(keras_losses, params["loss"]) + params["net_name"] = params["net"] + params["net"] = getattr(nets, params["net_name"]) + # Default to 6 views but a smaller number of views can be specified in the DANNCE config. 
+ # If the legnth of the camera files list is smaller than n_views, relevant lists will be + # duplicated in order to match n_views, if possible. + params["n_views"] = int(params["n_views"]) + + # While we can use experiment files for DANNCE training, + # for prediction we use the base data files present in the main config + # Grab the input file for prediction + params["label3d_file"] = processing.grab_predict_label3d_file() + params["base_exp_folder"] = os.path.dirname(params["label3d_file"]) + + # default to slow numpy backend if there is no predict_mode in config file. I.e. legacy support + params["predict_mode"] = ( + params["predict_mode"] if params["predict_mode"] is not None else "numpy" + ) + params["multi_mode"] = False + logging.info(prepend_log_msg + "Using {} predict mode".format(params["predict_mode"])) + + logging.info(prepend_log_msg + "Using camnames: {}".format(params["camnames"])) + # Also add parent params under the 'experiment' key for compatibility + # with DANNCE's video loading function + params["experiment"] = {} + params["experiment"][0] = params + + if params["start_batch"] is None: + params["start_batch"] = 0 + params["save_tag"] = None + else: + params["save_tag"] = params["start_batch"] + + if params["new_n_channels_out"] is not None: + params["n_markers"] = params["new_n_channels_out"] + else: + params["n_markers"] = params["n_channels_out"] + + # For real mono prediction + params["chan_num"] = 1 if params["mono"] else params["n_channels_in"] + + return params + + +def write_com_file(params, samples_, com3d_dict_): + prepend_log_msg = file_path + ".write_com_file " + cfilename = os.path.join(params["dannce_predict_dir"], "com3d_used.mat") + logging.info(prepend_log_msg + "Saving 3D COM to {}".format(cfilename)) + c3d = np.zeros((len(samples_), 3)) + for i in range(len(samples_)): + c3d[i] = com3d_dict_[samples_[i]] + sio.savemat(cfilename, {"sampleID": samples_, "com": c3d}) + + +def build_model(params: Dict, camnames: List) -> Model: 
+ """Build model for dannce prediction. + + Args: + params (Dict): Parameters dictionary. + camnames (List): Camera names. + + Returns: + (Model): Dannce model + """ # Build net - print("Initializing Network...") + prepend_log_msg = file_path + ".build_model " + logging.info(prepend_log_msg + "Initializing Network...") # This requires that the network be saved as a full model, not just weights. # As a precaution, we import all possible custom objects that could be used @@ -1470,9 +1702,7 @@ def dannce_predict(params: Dict): wdir = params["dannce_train_dir"] weights = os.listdir(wdir) weights = [f for f in weights if ".hdf5" in f and "checkpoint" not in f] - weights = sorted( - weights, key=lambda x: int(x.split(".")[1].split("-")[0]) - ) + weights = sorted(weights, key=lambda x: int(x.split(".")[1].split("-")[0])) weights = weights[-1] mdl_file = os.path.join(wdir, weights) @@ -1480,19 +1710,22 @@ def dannce_predict(params: Dict): # set this file to dannce_predict_model so that it will still get saved with metadata params["dannce_predict_model"] = mdl_file - print("Loading model from " + mdl_file) + logging.info(prepend_log_msg + "Loading model from " + mdl_file) if ( - netname == "unet3d_big_tiedfirstlayer_expectedvalue" + params["net_name"] == "unet3d_big_tiedfirstlayer_expectedvalue" or params["from_weights"] is not None ): gridsize = tuple([params["nvox"]] * 3) params["dannce_finetune_weights"] = processing.get_ft_wt(params) if params["train_mode"] == "finetune": - - print("Initializing a finetune network from {}, into which weights from {} will be loaded.".format( - params["dannce_finetune_weights"], mdl_file)) + logging.info( + prepend_log_msg + + "Initializing a finetune network from {}, into which weights from {} will be loaded.".format( + params["dannce_finetune_weights"], mdl_file + ) + ) model = params["net"]( params["loss"], float(params["lr"]), @@ -1503,8 +1736,7 @@ def dannce_predict(params: Dict): params["new_n_channels_out"], 
params["dannce_finetune_weights"], params["n_layers_locked"], - batch_norm=False, - instance_norm=True, + norm_method=params["norm_method"], gridsize=gridsize, ) else: @@ -1516,8 +1748,7 @@ def dannce_predict(params: Dict): params["chan_num"] + params["depth"], params["n_channels_out"], len(camnames[0]), - batch_norm=False, - instance_norm=True, + norm_method=params["norm_method"], include_top=True, gridsize=gridsize, ) @@ -1530,117 +1761,40 @@ def dannce_predict(params: Dict): "slice_input": nets.slice_input, "mask_nan_keep_loss": losses.mask_nan_keep_loss, "mask_nan_l1_loss": losses.mask_nan_l1_loss, + "log_cosh_loss": losses.log_cosh_loss, + "huber_loss": losses.huber_loss, "euclidean_distance_3D": losses.euclidean_distance_3D, "centered_euclidean_distance_3D": losses.centered_euclidean_distance_3D, + "gaussian_cross_entropy_loss": losses.gaussian_cross_entropy_loss, + "max_euclidean_distance": losses.max_euclidean_distance, }, ) # If there is a heatmap regularization i/o, remove it model = nets.remove_heatmap_output(model, params) + # If there was an exposed heatmap for AVG+MAX training, remove it + model = nets.remove_exposed_heatmap(model) + # To speed up expval prediction, rather than doing two forward passes: one for the 3d coordinate # and one for the probability map, here we splice on a new output layer after # the softmax on the last convolutional layer if params["expval"]: - from tensorflow.keras.layers import GlobalMaxPooling3D - o2 = GlobalMaxPooling3D()(model.layers[-3].output) model = Model( inputs=[model.layers[0].input, model.layers[-2].input], outputs=[model.layers[-1].output, o2], ) - save_data = {} - - max_eval_batch = params["maxbatch"] - - if max_eval_batch != "max" and max_eval_batch > len(valid_generator): - print("Maxbatch was set to a larger number of matches than exist in the video. 
Truncating") - max_eval_batch = len(valid_generator) - processing.print_and_set(params, "maxbatch", max_eval_batch) - - if max_eval_batch == "max": - max_eval_batch = len(valid_generator) - - if params["start_batch"] is not None: - start_batch = params["start_batch"] - else: - start_batch = 0 - - if params["new_n_channels_out"] is not None: - n_chn = params["new_n_channels_out"] - else: - n_chn = params["n_channels_out"] - - if params["write_npy"] is not None: - # Instead of running inference, generate all samples - # from valid_generator and save them to npy files. Useful - # for working with large datasets (such as Rat 7M) because - # .npy files can be loaded in quickly with random access - # during training. - print("Writing samples to .npy files") - processing.write_npy(params["write_npy"], valid_generator) - print("Done, exiting program") - sys.exit() - - - save_data = inference.infer_dannce( - start_batch, - max_eval_batch, - valid_generator, - params, - model, - partition, - save_data, - device, - n_chn, - ) - - if params["expval"]: - if params["start_batch"] is not None: - path = os.path.join( - params["dannce_predict_dir"], "save_data_AVG%d.mat" % (start_batch) - ) - else: - path = os.path.join(params["dannce_predict_dir"], "save_data_AVG.mat") - p_n = savedata_expval( - path, - params, - write=True, - data=save_data, - tcoord=False, - num_markers=n_chn, - pmax=True, - ) - else: - if params["start_batch"] is not None: - path = os.path.join( - params["dannce_predict_dir"], "save_data_MAX%d.mat" % (start_batch) - ) - else: - path = os.path.join(params["dannce_predict_dir"], "save_data_MAX.mat") - p_n = savedata_tomat( - path, - params, - params["vmin"], - params["vmax"], - params["nvox"], - write=True, - data=save_data, - num_markers=n_chn, - tcoord=False, - ) + return model -def do_COM_load( - exp: Dict, expdict: Dict, n_views: int, e, params: Dict, training=True -): +def do_COM_load(exp: Dict, expdict: Dict, e, params: Dict, training=True): """Load and 
process COMs. Args: exp (Dict): Parameters dictionary for experiment expdict (Dict): Experiment specific overrides (e.g. com_file, vid_dir) - n_views (int): Number of views e (TYPE): Description params (Dict): Parameters dictionary. training (bool, optional): If true, load COM for training frames. @@ -1652,37 +1806,28 @@ def do_COM_load( Raises: Exception: Exception when invalid com file format. """ + # Set Prepend logging message + prepend_log_msg = file_path + ".do_COM_load " + ( samples_, datadict_, datadict_3d_, cameras_, - ) = serve_data_DANNCE.prepare_data( - exp, prediction=False if training else True, nanflag=False - ) - - # If len(exp['camnames']) divides evenly into n_views, duplicate here - # This must come after loading in this experiment's data because there - # is an assertion that len(exp['camnames']) == the number of cameras - # in the label files (which will not be duplicated) - exp = processing.dupe_params(exp, ["camnames"], n_views) + ) = serve_data_DANNCE.prepare_data(exp, prediction=False if training else True) # If there is "clean" data (full marker set), can take the # 3D COM from the labels if exp["com_fromlabels"] and training: - print("For experiment {}, calculating 3D COM from labels".format(e)) + logging.info(prepend_log_msg + "For experiment {}, calculating 3D COM from labels".format(e)) com3d_dict_ = deepcopy(datadict_3d_) for key in com3d_dict_.keys(): - com3d_dict_[key] = np.nanmean( - datadict_3d_[key], axis=1, keepdims=True - ) + com3d_dict_[key] = np.nanmean(datadict_3d_[key], axis=1, keepdims=True) elif "com_file" in expdict and expdict["com_file"] is not None: exp["com_file"] = expdict["com_file"] if ".mat" in exp["com_file"]: c3dfile = sio.loadmat(exp["com_file"]) - com3d_dict_ = check_COM_load( - c3dfile, "com", datadict_, params["medfilt_window"] - ) + com3d_dict_ = check_COM_load(c3dfile, "com", params["medfilt_window"]) elif ".pickle" in exp["com_file"]: datadict_, com3d_dict_ = serve_data_DANNCE.prepare_COM( 
exp["com_file"], @@ -1702,14 +1847,13 @@ def do_COM_load( # Then load COM from the label3d file exp["com_file"] = expdict["label3d_file"] c3dfile = io.load_com(exp["com_file"]) - com3d_dict_ = check_COM_load( - c3dfile, "com3d", datadict_, params["medfilt_window"] - ) + com3d_dict_ = check_COM_load(c3dfile, "com3d", params["medfilt_window"]) - print("Experiment {} using com3d: {}".format(e, exp["com_file"])) + logging.info(prepend_log_msg + "Experiment {} using com3d: {}".format(e, exp["com_file"])) if params["medfilt_window"] is not None: - print( + logging.info( + prepend_log_msg + "Median filtering COM trace with window size {}".format( params["medfilt_window"] ) @@ -1726,38 +1870,37 @@ def do_COM_load( cthresh=exp["cthresh"], ) msg = "Removed {} samples from the dataset because they either had COM positions over cthresh, or did not have matching sampleIDs in the COM file" - print(msg.format(pre - len(samples_))) + logging.info(prepend_log_msg + msg.format(pre - len(samples_))) return exp, samples_, datadict_, datadict_3d_, cameras_, com3d_dict_ -def check_COM_load(c3dfile: Dict, kkey: Text, datadict_: Dict, wsize: int): +def check_COM_load(c3dfile: Dict, kkey: Text, win_size: int): """Check that the COM file is of the appropriate format, and filter it. Args: c3dfile (Dict): Loaded com3d dictionary. kkey (Text): Key to use for extracting com. - datadict_ (Dict): Dictionary containing data. wsize (int): Window size. Returns: Dict: Dictionary containing com data. 
""" + # Set logging prepend message + prepend_log_msg = file_path + ".check_COM_load " + c3d = c3dfile[kkey] # do a median filter on the COM traces if indicated - if wsize is not None: - if wsize % 2 == 0: - wsize += 1 - print("medfilt_window was not odd, changing to: {}".format(wsize)) + if win_size is not None: + if win_size % 2 == 0: + win_size += 1 + logging.info(prepend_log_msg + "medfilt_window was not odd, changing to: {}".format(win_size)) from scipy.signal import medfilt - c3d = medfilt(c3d, (wsize, 1)) + c3d = medfilt(c3d, (win_size, 1)) c3dsi = np.squeeze(c3dfile["sampleID"]) - com3d_dict_ = {} - for (i, s) in enumerate(c3dsi): - com3d_dict_[s] = c3d[i] - - return com3d_dict_ + com3d_dict = {s: c3d[i] for (i, s) in enumerate(c3dsi)} + return com3d_dict diff --git a/dannce/utils/pdb/__init__.py b/dannce/utils/pdb/__init__.py new file mode 100755 index 0000000..e69de29 diff --git a/dannce/utils/pdb/loadStructs.py b/dannce/utils/pdb/loadStructs.py new file mode 100644 index 0000000..0b78599 --- /dev/null +++ b/dannce/utils/pdb/loadStructs.py @@ -0,0 +1,53 @@ +import scipy.io as sio +import numpy as np + + +def load_data(path, key): + d = sio.loadmat(path,struct_as_record=False) + dataset = vars(d[key][0][0]) + + # Data are loaded in this annoying structure where the array + # we want is at dataset[i][key][0,0], as a nested array of arrays. + # Simplify this structure (a numpy record array) here. + # Additionally, cannot use views here because of shape mismatches. Define + # new dict and return. 
+ + import pdb;pdb.set_trace() + data = [] + for d in dataset: + d_ = {} + for key in d.dtype.names: + d_[key] = d[key][0, 0] + data.append(d_) + + return data + + +def load_cameras(path): + d = sio.loadmat(path,struct_as_record=False) + dataset = vars(d["cameras"][0][0]) + + camnames = dataset['_fieldnames'] + + cameras = {} + for i in range(len(camnames)): + cameras[camnames[i]] = {} + cam = vars(dataset[camnames[i]][0][0]) + fns = cam['_fieldnames'] + for fn in fns: + cameras[camnames[i]][fn] = cam[fn] + + return cameras + + +def load_mocap(path): + d = sio.loadmat(path,struct_as_record=False) + dataset = vars(d["mocap"][0][0]) + + markernames = dataset['_fieldnames'] + + mocap = [] + for i in range(len(markernames)): + mocap.append(dataset[markernames[i]]) + + return np.stack(mocap, axis=2) \ No newline at end of file diff --git a/dannce/utils/pdb/plot2DProjection.py b/dannce/utils/pdb/plot2DProjection.py new file mode 100644 index 0000000..0077c89 --- /dev/null +++ b/dannce/utils/pdb/plot2DProjection.py @@ -0,0 +1,335 @@ +""" +Example script for visualizing Parkinson DB data. 3D joint predictions are projected into each +camera view, plotted into a figure, and written into a video. 
+ +To load the correct video file, the input mocap data structure filename must contain the subject +number and recording day number as 'mocap-s{subject#}-d{day#}.mat' + +Usage: + python plot2DProjection.py [path_to_label3d_dannce.mat (str)] [ path_to_save_data_AVG.mat (str)] [path_to_video_directory (str)] + [path_to_skeleton.mat (str)] [path_to_save_video (str)] [start_ sample(int)] [max_samples (int)] +""" + +import numpy as np +import scipy.io as sio +import os +import sys +import imageio + +import dannce.engine.ops as dops +import dannce.engine.io as dio + + +import matplotlib.pyplot as plt +import matplotlib +matplotlib.use("Agg") +from matplotlib.animation import FFMpegWriter + +dannceMat_filepath = sys.argv[1] +preditcions_filepath = sys.argv[2] +videofle_path = sys.argv[3] +skeleton_path = sys.argv[4] +video_save_path = sys.argv[5] +start_sample = int(sys.argv[6]) +max_samples = int(sys.argv[7]) + +if len(sys.argv) > 8: + fps_setting = sys.argv[8] + +if len(sys.argv) > 9: + com3d_file = sys.argv[9] + print("Com3d file specified. COM will be plotted") +else: + print("Com3d file not specified. 
COM will not be plotted") + com3d_file = None + +COLOR_DICT = [ + (1.0000, 0, 0, 0.5000), + (0.9565, 1.0000, 0, 0.5000), + (1.0000, 0.5217, 0, 0.5000), + (0.6957, 1.0000, 0, 0.5000), + (1.0000, 0.2609, 0, 0.5000), + (1.0000, 0.7826, 0, 0.5000), + (0.4348, 1.0000, 0, 0.5000), + (0.1739, 1.0000, 0, 0.5000), + (0, 1.0000, 0.0870, 0.5000), + (0, 1.0000, 0.3478, 0.5000), + (0, 1.0000, 0.6087, 0.5000), + (0, 1.0000, 0.8696, 0.5000), + (0, 0.6087, 1.0000, 0.5000), + (0, 0.8696, 1.0000, 0.5000), + (0, 0.3478, 1.0000, 0.5000), + (0.4348, 0, 1.0000, 0.5000), + (0.1739, 0, 1.0000, 0.5000), + (0.6957, 0, 1.0000, 0.5000), + (1.0000, 0, 0.7826, 0.5000), + (0.9565, 0, 1.0000, 0.5000), + (1.0000, 0, 0.5217, 0.5000), + (1.0000, 0, 0.2609, 0.5000), + ] + +"""### Load Data""" + +def get_data(dannceMat_filepath: str, preditcions_filepath: str, skeleton_path: str, com3d_filepath = None): + """ + # Function to load data from different files, read them into suitable data structures and return. + # Entry point where validity of file paths are checked. + + dannceMat_filepath: label3d_dannce.mat file path + preditcions_filepath: save_data_AVG.mat file path + skeleton_path: file path for skeletons file + com3d_filepath: com3d_used.mat filepath + """ + if dannceMat_filepath == None or preditcions_filepath == None or skeleton_path == None: + print("One or more file paths is missing. 
Please provide all file paths.") + elif os.path.exists(dannceMat_filepath) and os.path.exists(preditcions_filepath) and os.path.exists(skeleton_path) : + cam_names = dio.load_camnames(dannceMat_filepath) + sync = dio.load_sync(dannceMat_filepath) + params = dio.load_camera_params(dannceMat_filepath) + + skeleton = sio.loadmat(skeleton_path) + skeleton = {k:v for k, v in skeleton.items() if k[0] != '_'} + skeleton["joint_names"] = skeleton["joint_names"][0] + + predictions = sio.loadmat(preditcions_filepath) + predictions = {k:v for k, v in predictions.items() if k[0] != '_'} + + if com3d_filepath is not None and os.path.exists(com3d_filepath) : + # com_3d = sio.loadmat(os.path.join(preditcions_filepath, 'com3d_used.mat'))['com'] + com_3d = sio.loadmat(com3d_filepath)['com'] + else: + print ("No filepath specified for com_3d. Returning None.") + com_3d = None + + return cam_names, sync, params, skeleton, predictions, com_3d + else: + print("Enter valid os path for dannceMat, predictions and skeleton files") + +def get_camParams(params, skeleton, exclude_joints): + """ + # Method to extract camera params and links from params and skeleton + + params: dict of params read from .mat file or otherwise + skeleton: dict of fields from the skeleton file read from .mat file or otherwise. + Contains joint names and their indices. 
+ exclude_joints: integer list of joints to exclude from plotting + """ + cameraParams = [] + rot = [] + trans = [] + mirror = [] + + for i in range(len(params)): + cameraParams.append({'IntrinsicMatrix': params[i]["K"], + 'RadialDistortion': params[i]["RDistort"], + 'TangentialDistortion': params[i]["TDistort"]}) + rot.append(params[i]['r']) + trans.append(params[i]['t']) + if 'm' in params[i].keys(): + mirror.append(params[i]['m']) + + links = skeleton["joints_idx"] + + goodmarks = list(range(1,len(skeleton["joint_names"])+1)) + for i in exclude_joints: + goodmarks.remove(i) # Drop specific joint from goodmarks + + return cameraParams, rot, trans, mirror, links, goodmarks + +def get_projected_points(predictions: dict, + params: dict, + cameraParams: list, + rot: list, + trans: list, + mirror = None, + com_3d = None ): + """ + ### Takes the predictions, com3d and camera parameters, and returns the projected points. + ### This assumes that cameras are in an indexed list (they are so in pdb data), and iterates over the length of params list + + predictions: dict of predictions read from .mat file or otherwise + params: dict of params read from .mat file or otherwise + cameraParams: list of dicts of camera parameters (Intrinsic Matrix, Radial Distort, and Tangential Distort) + rot: list of arrays of camera rotations + trans: list of arrays of camera translations + mirror: list of single element lists indicating whether the particular camera view is mirrored + com_3d: list of COM locations for each sample and view. 
+ + """ + pose_3d = np.transpose(predictions["pred"],(0,2,1)) + n_samples = pose_3d.shape[0] + + # If com_3d points need to be plotted, then they need to be added to imagePoints + if com_3d is None: + tpred_bulk = pose_3d + else: + com_3d = np.expand_dims(com_3d, 1) + tpred_bulk = np.concatenate((pose_3d,com_3d), axis=1) + + n_channels = tpred_bulk.shape[1] + + tpred_bulk = np.reshape(tpred_bulk,(-1,3)) + + imagePoints_agg = [] + com_2d_agg = [] + + # Calculate projections for each cam view for all of the predicted points + for ncam in range (len(params)): + camParam = cameraParams[ncam] + rotation = rot[ncam] + translation = trans[ncam] + + imagePoints_blk = dops.project_to2d(tpred_bulk, + camParam["IntrinsicMatrix"], + rotation, + translation)[:, :2] + + imagePoints_blk = dops.distortPoints(imagePoints_blk, + camParam["IntrinsicMatrix"], + np.squeeze(camParam["RadialDistortion"]), + np.squeeze(camParam["TangentialDistortion"])).T + + if mirror!=None : + if mirror[ncam] == 1: + imagePoints_blk[:,1] = 2048 - imagePoints_blk[:,1] + else: + imagePoints_blk = imagePoints_blk.T + + imagePoints_blk = np.reshape(imagePoints_blk, (-1, n_channels, 2)) + + if not (com_3d is None): + com_2d_blk = imagePoints_blk[:,-1:,:] + imagePoints_blk = imagePoints_blk[:, :n_channels-1, :] + com_2d_agg.append(com_2d_blk) + + imagePoints_agg.append(imagePoints_blk) + + + + return imagePoints_agg, com_2d_agg + +def plot_projected_points(predictions, + sync, + params, + imagePoints_agg, + com_2d_agg, + goodmarks, + links, + color_dict, + videofle_path, + video_save_path, + start_sample = 0, + max_samples = 1000, + fps = 30, + ): + """ + # Plots the projected points and saves them to the locations specified in video_save_path + # Both videofle_path and video_save_path except full paths with filename and extension. + # This method is called from driver with all the related arguments passed. 
+ + predictions: dict of predictions + sync: dict required to sync frames from the video with the preductions. + This is necessary to determine which predictions correspons to which sample + params: dict of params loaded from .mat file + imagePoints_agg: list of lists of projection points for each camera view + com_2d_agg: list of lists of projected Center of Mass for each camera view + goodmarks: List of joint indices to consider while plotting + links: List specifying which joint indices are connected to which with a bone + color_dict: List of tupples mentioning colors for each bone + videofle_path: Path from where to read video file + video_save_path: Path to save videos to + start_sample: SampleID to start reading frames from. + Default: 0 + max_samples: Max number of frames to read from video + Default: 1000 + """ + movie_reader = imageio.get_reader(videofle_path) + + metadata = dict(title='dannce_visualization', artist='Matplotlib') + writer = FFMpegWriter(fps=fps, metadata=metadata) + + fig, axes = plt.subplots(1, 1, figsize=(8, 8), dpi=300) + + if not os.path.exists(os.path.dirname(video_save_path)): + os.makedirs(os.path.dirname(video_save_path)) + + with writer.saving(fig, video_save_path, dpi=300): + + for i in range(start_sample, start_sample + max_samples): + + # frame should be taken from sync[0]["data_frame"] from an index where data_sampleID from sync[0] matches sampleID at i-th index from predictions + # using np.where for this gives a nested numpy array containing a single element(the index), so use squeeze + fr = sync[0]["data_frame"][(np.where(sync[0]["data_sampleID"] == predictions["sampleID"][0][i]))[0].squeeze()] + frame = movie_reader.get_data(fr[0]) + print("Sample: ", i) + + axes.imshow(frame) + + for ncam in range (len(params)): + + imagePoints = imagePoints_agg[ncam][i] + if com_2d_agg != None: + com = com_2d_agg[ncam][i] + axes.scatter(com[:,0], com[:,1], marker='.', color='red', linewidths=1) + + for mm in range(len(links)): + if 
links[mm][0] in goodmarks and links[mm][1] in goodmarks: + xx = [imagePoints[links[mm][0]-1,0], + imagePoints[links[mm][1]-1,0]] + yy = [imagePoints[links[mm][0]-1,1], + imagePoints[links[mm][1]-1,1]] + + axes.scatter(xx, yy, marker = '.', color='white', linewidths=0.5) + axes.plot(xx,yy, c=color_dict[mm], lw=2) + + axes.axis("off") + axes.set_title(str(i)) + + writer.grab_frame() + axes.clear() + +def driver(dannceMat_filepath : str, + preditcions_filepath : str, + videofle_path : str, + skeleton_path : str, + com3d_filepath : str, + exclude_joints: list, + video_save_path: str, + start_sample = 0, + max_samples = 1000, + fps=30, + ): + print("Com3d in drive = ", com3d_filepath) + cam_names, sync, params, skeleton, predictions, com_3d = get_data(dannceMat_filepath=dannceMat_filepath, + preditcions_filepath=preditcions_filepath, + skeleton_path=skeleton_path, + com3d_filepath=com3d_filepath) + + cameraParams, rot, trans, mirror, links, goodmarks = get_camParams(params = params, skeleton = skeleton, + exclude_joints = exclude_joints) + + imagePoints_agg, com_2d_agg = get_projected_points(predictions, params, cameraParams, rot, trans, mirror, com_3d) + + if com_2d_agg == [] : + com_2d_agg = None + + plot_projected_points(predictions, + sync, + params, + imagePoints_agg, + com_2d_agg, + goodmarks, + links, + COLOR_DICT, + videofle_path, + video_save_path, + start_sample = start_sample, + max_samples = max_samples, + fps=fps) + +driver(dannceMat_filepath, preditcions_filepath, videofle_path, skeleton_path, com3d_file, + exclude_joints = [7], + video_save_path = video_save_path, + start_sample = start_sample, + max_samples = max_samples, + fps=fps_setting) \ No newline at end of file diff --git a/demo/markerless_mouse_1/COM/predict_results/com3d.pickle b/demo/markerless_mouse_1/COM/predict_results/com3d.pickle old mode 100755 new mode 100644 diff --git a/demo/markerless_mouse_1/COM/train_results/weights.250-0.00036.hdf5 
b/demo/markerless_mouse_1/COM/train_results/weights.250-0.00036.hdf5 old mode 100755 new mode 100644 diff --git a/demo/markerless_mouse_1/DANNCE/weights/weights.rat.AVG.MONO.5cams/weights_multigpu-v9.11-11.99217.hdf5singleGPU.hdf5 b/demo/markerless_mouse_1/DANNCE/weights/weights.rat.AVG.MONO.5cams/weights_multigpu-v9.11-11.99217.hdf5singleGPU.hdf5 new file mode 100755 index 0000000..48acd1b Binary files /dev/null and b/demo/markerless_mouse_1/DANNCE/weights/weights.rat.AVG.MONO.5cams/weights_multigpu-v9.11-11.99217.hdf5singleGPU.hdf5 differ diff --git a/demo/markerless_mouse_1/io.yaml b/demo/markerless_mouse_1/io.yaml index 5890e1c..8a476ff 100755 --- a/demo/markerless_mouse_1/io.yaml +++ b/demo/markerless_mouse_1/io.yaml @@ -1,14 +1,14 @@ ### COM ### # path to folder where COM weights and logs will be saved -com_train_dir: ./COM/train_results/ -com_predict_dir: ./COM/predict_results/ +com_train_dir: ./COM/train_results/AS_SCR/APR04_22/LCL_DEMO5V_01/ +com_predict_dir: ./COM/predict_results/AS_SCR/APR04_22/LCL_DEMO5V_01/ # During prediction, will look for the last epoch weights saved to ./COM/train_results/. To load in a different weights file, add the path here -#com_predict_weights: ./COM/train_results/weights.250-0.00036.hdf5 +com_predict_weights: ./COM/train_results/weights.250-0.00036.hdf5 ### Dannce ### # path to folder where DANNCE weights and logs will be saved -dannce_train_dir: ./DANNCE/train_results/AVG/ +dannce_train_dir: ./DANNCE/train_results/AVG_demo/ dannce_predict_dir: ./DANNCE/predict_results/ # During prediction, will look for the last epoch weights saved to ./DANNCE/train_results/. 
To load in a different weights file, add the path here @@ -17,5 +17,9 @@ dannce_predict_model: ./DANNCE/train_results/AVG/weights.1200-12.77642.hdf5 exp: - label3d_file: './label3d_demo.mat' + viddir: ../../demo/markerless_mouse_1/videos/ - label3d_file: '../markerless_mouse_2/label3d_demo.mat' + viddir: ../../demo/markerless_mouse_2/videos/ + +epochs: 3 diff --git a/demo/markerless_mouse_1/run_tests.sh b/demo/markerless_mouse_1/run_tests.sh new file mode 100644 index 0000000..de3732f --- /dev/null +++ b/demo/markerless_mouse_1/run_tests.sh @@ -0,0 +1,19 @@ +#!/bin/bash +#SBATCH --job-name=jhwTest +#SBATCH --mem=60000 +#SBATCH -t 6-23:59 +#SBATCH -N 1 +#SBATCH -n 12 +#SBATCH -p tdunn +#SBATCH --gres=gpu:4 + +source activate dannce_test + +dannce-train dgptest_config.yaml --dannce-train-dir=./DANNCE/train_test_ln/ + +dannce-train dgptest_config.yaml --norm-method=instance --dannce-train-dir=./DANNCE/train_test_in/ + +# echo "Testing DANNCE training, dgp MAX net from scratch with layer norm and sigmoid cross entropy with gaussian targets" +# dannce-train dgptest_config.yaml --loss=gaussian_cross_entropy_loss --n-channels-out=22 --dannce-train-dir=./DANNCE/train_test_ln_dgp/ + +dannce-train dgptest_config.yaml --norm-method=instance --loss=gaussian_cross_entropy_loss \ No newline at end of file diff --git a/demo/markerless_mouse_1/videos/link_to_videos.txt b/demo/markerless_mouse_1/videos/link_to_videos.txt deleted file mode 100644 index b62a618..0000000 --- a/demo/markerless_mouse_1/videos/link_to_videos.txt +++ /dev/null @@ -1 +0,0 @@ -https://www.dropbox.com/sh/wn1x8erb5k3n9vr/AADE_Ca-2farKhd38ZvsNi84a?dl=0 diff --git a/demo/markerless_mouse_2/videos/link_to_videos.txt b/demo/markerless_mouse_2/videos/link_to_videos.txt deleted file mode 100644 index 4b1dff1..0000000 --- a/demo/markerless_mouse_2/videos/link_to_videos.txt +++ /dev/null @@ -1 +0,0 @@ -https://www.dropbox.com/sh/tspmwo36gbj6b4x/AAA_sWJA6K1ksX8f6hBoZf7Ia?dl=0 diff --git a/setup.py b/setup.py index 
ead6d77..4032232 100644 --- a/setup.py +++ b/setup.py @@ -3,38 +3,32 @@ setup( name="dannce", - version="1.2.0", + version="1.3.0r", packages=find_packages(), install_requires=[ "six", "pyyaml", "imageio==2.8.0", "imageio-ffmpeg", - "numpy<1.19.0", + "numpy>1.19.0", "scikit-image", "matplotlib", "attr", "attrs", "multiprocess", "opencv-python", - "tensorflow==2.3.1", - "torch", + "tensorflow==2.6", + "keras==2.6.*", # Required to resolve pip keras install bug for tf 2.6 + "psutil", + "mat73", ], - scripts=['cluster/holy_dannce_train.sh', - 'cluster/holy_dannce_train_grid.sh', - 'cluster/holy_com_train.sh', - 'cluster/holy_dannce_predict.sh', - 'cluster/holy_com_predict.sh', - 'cluster/holy_com_predict_multi_gpu.sh', - 'cluster/holy_dannce_predict_multi_gpu.sh', - 'cluster/com_and_dannce.sh', - 'cluster/dannce.sh', - 'cluster/com.sh', - 'cluster/com_and_dannce_multi_gpu.sh', - 'cluster/dannce_multi_gpu.sh', - 'cluster/com_multi_gpu.sh'], + # scripts=[], entry_points={ "console_scripts": [ + "dannce-predict-sbatch = dannce.cli:sbatch_dannce_predict_cli", + "dannce-train-sbatch = dannce.cli:sbatch_dannce_train_cli", + "com-predict-sbatch = dannce.cli:sbatch_com_predict_cli", + "com-train-sbatch = dannce.cli:sbatch_com_train_cli", "dannce-train = dannce.cli:dannce_train_cli", "dannce-train-grid = cluster.grid:dannce_train_grid", "dannce-predict = dannce.cli:dannce_predict_cli", @@ -47,6 +41,9 @@ "com-predict-single-batch = cluster.multi_gpu:com_predict_single_batch", "dannce-merge = cluster.multi_gpu:dannce_merge", "com-merge = cluster.multi_gpu:com_merge", + "dannce-inference-sbatch = cluster.multi_gpu:submit_inference", + "dannce-inference = cluster.multi_gpu:inference", + "dannce-multi-instance-inference = cluster.multi_gpu:multi_instance_inference", ] }, ) diff --git a/setup_symlinks.sh b/setup_symlinks.sh new file mode 100644 index 0000000..f893022 --- /dev/null +++ b/setup_symlinks.sh @@ -0,0 +1,19 @@ +#!/bin/bash + +# 1st arg - DANNCE HOME location from where 
the weights and videos actually are downloaded +# 2nd arg - DANNCE HOME location to where the links are videos need to be linked to + +cd $2/demo/markerless_mouse_1; + +rm -rf videos; +ln -s $1/demo/markerless_mouse_1/videos/ ./; + +rm -rf DANNCE/train_results/* +ln -s $1/demo/markerless_mouse_1/DANNCE/train_results/* DANNCE/train_results/ + +rm -rf DANNCE/weights/* +ln -s $1/demo/markerless_mouse_1/DANNCE/weights/* DANNCE/weights/ + +cd ../markerless_mouse_2; +rm -rf videos; +ln -s $1/demo/markerless_mouse_2/videos/ ./; \ No newline at end of file diff --git a/tests/cli_test.py b/tests/cli_test.py new file mode 100644 index 0000000..9a2dee9 --- /dev/null +++ b/tests/cli_test.py @@ -0,0 +1,429 @@ +from absl.testing import absltest +import tensorflow as tf +import dannce.cli as cli +import os +import numpy as np +import scipy.io as sio +import sys +import unittest +from unittest.mock import patch +from typing import Text + +# Initialize the gpu prior to testing +# tf.test.is_gpu_available() +tf.config.list_physical_devices('GPU') + +# Move to the testing project folder +os.chdir("configs") +N_TEST_IMAGES = 8 + +def compare_predictions(file_1: Text, file_2: Text, th: float = 0.05): + """Compares two prediction matfiles. + + Args: + file_1 (Text): Path to prediction file 1 + file_2 (Text): Path to prediction file 2 + th (float, optional): Testing tolerance in mm. Defaults to 0.05. + + Raises: + Exception: If file does not contain com or dannce predictions + """ + + m1 = sio.loadmat(file_1) + m2 = sio.loadmat(file_2) + + if "com" in m1.keys(): + error = np.mean( + np.abs(m1["com"][:N_TEST_IMAGES, ...] - m2["com"][:N_TEST_IMAGES, ...]) + ) + return error < th + elif "pred" in m2.keys(): + error = np.mean( + np.abs( + m1["pred"][:N_TEST_IMAGES, ...] - m2["pred"][:N_TEST_IMAGES, ...] 
+ ) + ) + return error < th + else: + raise Exception("Expected fields (pred, com) not found in inputs") + +def train_setup(): + setup = "cp ./label3d_temp_dannce.mat ./alabel3d_temp_dannce.mat" + os.system(setup) + + +class TestComTrain(absltest.TestCase): + def test_com_train(self): + train_setup() + args = [ + "com-train", + "config_com_mousetest.yaml", + "--com-finetune-weights=../../demo/markerless_mouse_1/COM/weights/", + "--downfac=8", + ] + with patch("sys.argv", args): + cli.com_train_cli() + + def test_com_train_mono(self): + train_setup() + args = ["com-train", "config_com_mousetest.yaml", "--mono=True", "--downfac=8"] + with patch("sys.argv", args): + cli.com_train_cli() + +class TestComPredict(absltest.TestCase): + def test_com_predict(self): + train_setup() + args = ["com-predict", "config_com_mousetest.yaml"] + with patch("sys.argv", args): + cli.com_predict_cli() + self.assertTrue(compare_predictions( + "../touchstones/COM3D_undistorted_masternn.mat", + "../../demo/markerless_mouse_1/COM/predict_test/com3d0.mat", + )) + + def test_com_predict_3_cams(self): + setup = "cp ./label3d_temp_dannce_3cam.mat ./alabel3d_temp_dannce.mat" + os.system(setup) + args = ["com-predict", "config_com_mousetest.yaml", "--downfac=4"] + with patch("sys.argv", args): + cli.com_predict_cli() + + def test_com_predict_5_cams(self): + setup = "cp ./label3d_temp_dannce_5cam.mat ./alabel3d_temp_dannce.mat" + os.system(setup) + args = ["com-predict", "config_com_mousetest.yaml", "--downfac=2"] + with patch("sys.argv", args): + cli.com_predict_cli() + +class TestDannceTrain(absltest.TestCase): + def test_dannce_train_finetune_max(self): + train_setup() + args = [ + "dannce-train", + "config_mousetest.yaml", + "--net-type=MAX", + "--dannce-finetune-weights=../../demo/markerless_mouse_1/DANNCE/weights/weights.rat.MAX/", + ] + with patch("sys.argv", args): + cli.dannce_train_cli() + + def test_dannce_train_finetune_avg(self): + train_setup() + args = [ + "dannce-train", + 
"config_mousetest.yaml", + "--net-type=AVG", + "--dannce-finetune-weights=../../demo/markerless_mouse_1/DANNCE/weights/", + ] + with patch("sys.argv", args): + cli.dannce_train_cli() + + def test_dannce_train_finetune_avg_heatmap_regularization(self): + train_setup() + args = [ + "dannce-train", + "config_mousetest.yaml", + "--net-type=AVG", + "--dannce-finetune-weights=../../demo/markerless_mouse_1/DANNCE/weights/", + ] + with patch("sys.argv", args): + cli.dannce_train_cli() + + def test_dannce_train_finetune_avg_from_finetune(self): + train_setup() + args = [ + "dannce-train", + "config_mousetest.yaml", + "--net-type=AVG", + "--dannce-finetune-weights=../../demo/markerless_mouse_1/DANNCE/train_results/AVG/", + ] + with patch("sys.argv", args): + cli.dannce_train_cli() + + def test_dannce_train_avg(self): + train_setup() + args = [ + "dannce-train", + "config_mousetest.yaml", + "--net=unet3d_big_expectedvalue", + "--train-mode=new", + "--n-channels-out=22", + ] + with patch("sys.argv", args): + cli.dannce_train_cli() + + def test_dannce_train_max(self): + train_setup() + args = [ + "dannce-train", + "config_mousetest.yaml", + "--net=unet3d_big", + "--train-mode=new", + "--n-channels-out=22", + ] + with patch("sys.argv", args): + cli.dannce_train_cli() + + def test_dannce_train_avg_continued(self): + train_setup() + args = [ + "dannce-train", + "config_mousetest.yaml", + "--net-type=AVG", + "--train-mode=continued", + "--dannce-finetune-weights=../../demo/markerless_mouse_1/DANNCE/train_results/AVG/", + ] + with patch("sys.argv", args): + cli.dannce_train_cli() + + def test_dannce_train_max_continued(self): + train_setup() + args = [ + "dannce-train", + "config_mousetest.yaml", + "--net=finetune_MAX", + "--train-mode=continued", + "--dannce-finetune-weights=../../demo/markerless_mouse_1/DANNCE/train_results/", + ] + with patch("sys.argv", args): + cli.dannce_train_cli() + + def test_dannce_train_avg_mono(self): + train_setup() + args = [ + "dannce-train", + 
"config_mousetest.yaml", + "--net-type=AVG", + "--train-mode=new", + "--net=unet3d_big_expectedvalue", + "--mono=True", + "--n-channels-out=22", + ] + with patch("sys.argv", args): + cli.dannce_train_cli() + + def test_dannce_train_avg_mono_finetune(self): + train_setup() + args = [ + "dannce-train", + "config_mousetest.yaml", + "--net-type=AVG", + "--mono=True", + "--dannce-finetune-weights=../../demo/markerless_mouse_1/DANNCE/weights/weights.rat.AVG.MONO/", + ] + with patch("sys.argv", args): + cli.dannce_train_cli() + + def test_dannce_train_avg_mono_finetune_drop_landmarks(self): + train_setup() + args = [ + "dannce-train", + "config_mousetest.yaml", + "--net-type=AVG", + "--mono=True", + "--dannce-finetune-weights=../../demo/markerless_mouse_1/DANNCE/weights/weights.rat.AVG.MONO/", + "--drop-landmark=[5,7]", + ] + with patch("sys.argv", args): + cli.dannce_train_cli() + + def test_npy_volume_with_validation(self): + os.chdir("../../demo/markerless_mouse_1/") + args = [ + "dannce-train", + "../../configs/dannce_mouse_config.yaml", + "--net-type=AVG", + "--use-npy=True", + "--dannce-train-dir=./DANNCE/npy_test/", + "--epochs=2", + "--valid-exp=[1]", + "--gpu=1", + ] + with patch("sys.argv", args): + cli.dannce_train_cli() + + def test_npy_volume_with_multi_gpu(self): + os.chdir("../../demo/markerless_mouse_1/") + args = [ + "dannce-train", + "../../configs/dannce_mouse_config.yaml", + "--net-type=AVG", + "--batch-size=4", + "--use-npy=True", + "--dannce-train-dir=./DANNCE/npy_test/", + "--epochs=2", + "--valid-exp=[1]", + "--multi-gpu-train=True", + "--gpu=1", + ] + with patch("sys.argv", args): + cli.dannce_train_cli() + + def test_npy_volume_with_num_train_exp(self): + os.chdir("../../demo/markerless_mouse_1/") + args = [ + "dannce-train", + "../../configs/dannce_mouse_config.yaml", + "--net-type=AVG", + "--use-npy=True", + "--dannce-train-dir=./DANNCE/npy_test/", + "--epochs=2", + "--num-train-per-exp=2", + "--batch-size=1", + "--gpu=1", + ] + with 
patch("sys.argv", args): + cli.dannce_train_cli() + + def test_npy_volume_with_validation_and_num_train_exp(self): + os.chdir("../../demo/markerless_mouse_1/") + args = [ + "dannce-train", + "../../configs/dannce_mouse_config.yaml", + "--net-type=AVG", + "--use-npy=True", + "--dannce-train-dir=./DANNCE/npy_test/", + "--epochs=2", + "--valid-exp=[1]", + "--num-train-per-exp=2", + "--batch-size=1", + "--gpu=1", + ] + with patch("sys.argv", args): + cli.dannce_train_cli() + + def test_dannce_train_MAX_layer_norm(self): + train_setup() + args = [ + "dannce-train", + "dgptest_config.yaml", + "--dannce-train-dir=../../demo/markerless_mouse_1/DANNCE/train_test_ln/", + "--n-channels-out=22", + ] + with patch("sys.argv", args): + cli.dannce_train_cli() + + def test_dannce_train_MAX_scratch_instance_norm(self): + train_setup() + args = [ + "dannce-train", + "dgptest_config.yaml", + "--norm-method=instance", + "--dannce-train-dir=../../demo/markerless_mouse_1/DANNCE/train_test_in/", + "--n-channels-out=22", + ] + with patch("sys.argv", args): + cli.dannce_train_cli() + + def test_dannce_train_DGP_MAX_scratch_layer_norm_sigmoid_cross_entropy_Gaussian( + self, + ): + train_setup() + args = [ + "dannce-train", + "dgptest_config.yaml", + "--loss=gaussian_cross_entropy_loss", + "--dannce-train-dir=../../demo/markerless_mouse_1/DANNCE/train_test_ln_dgp/", + "--n-channels-out=22", + ] + with patch("sys.argv", args): + cli.dannce_train_cli() + + def test_dannce_train_DGP_MAX_scratch_instance_norm_sigmoid_cross_entropy_Gaussian( + self, + ): + train_setup() + args = [ + "dannce-train", + "dgptest_config.yaml", + "--norm-method=instance", + "--loss=gaussian_cross_entropy_loss", + "--dannce-train-dir=../../demo/markerless_mouse_1/DANNCE/train_test_ln_dgp/", + "--n-channels-out=22", + ] + with patch("sys.argv", args): + cli.dannce_train_cli() + + +class TestDanncePredict(absltest.TestCase): + def test_dannce_predict_mono(self): + # TODO(refactor): This test depends on there being a mono 
model saved. + train_setup() + args = [ + "dannce-train", + "config_mousetest.yaml", + "--net-type=AVG", + "--mono=True", + "--dannce-finetune-weights=../../demo/markerless_mouse_1/DANNCE/weights/weights.rat.AVG.MONO/", + ] + with patch("sys.argv", args): + cli.dannce_train_cli() + args = [ + "dannce-predict", + "config_mousetest.yaml", + "--net-type=AVG", + "--dannce-predict-model=../../demo/markerless_mouse_1/DANNCE/train_test/fullmodel_weights/fullmodel_end.hdf5", + "--mono=True", + ] + with patch("sys.argv", args): + cli.dannce_predict_cli() + + def test_dannce_predict_avg(self): + train_setup() + args = [ + "dannce-predict", + "config_mousetest.yaml", + "--net-type=AVG", + ] + with patch("sys.argv", args): + cli.dannce_predict_cli() + self.assertTrue(compare_predictions( + "../touchstones/save_data_AVG_torch_nearest.mat", + "../../demo/markerless_mouse_1/DANNCE/predict_test/save_data_AVG0.mat", + )) + + def test_dannce_predict_max(self): + train_setup() + args = [ + "dannce-predict", + "config_mousetest.yaml", + "--net-type=MAX", + "--expval=False", + "--dannce-predict-model=../../demo/markerless_mouse_1/DANNCE/train_results/weights.12000-0.00014.hdf5", + ] + with patch("sys.argv", args): + cli.dannce_predict_cli() + self.assertTrue(compare_predictions( + "../touchstones/save_data_MAX_torchnearest_newtfroutine.mat", + "../../demo/markerless_mouse_1/DANNCE/predict_test/save_data_MAX0.mat", + )) + + def test_dannce_predict_numpy_volume_generation(self): + setup = "cp ./label3d_voltest_dannce_m1.mat ./alabel3d_temp_dannce.mat" + os.system(setup) + args = [ + "dannce-predict", + "config_mousetest.yaml", + "--net-type=AVG", + "--write-npy=../../demo/markerless_mouse_1/npy_volumes/", + "--batch-size=1", + ] + with patch("sys.argv", args): + cli.dannce_predict_cli() + setup2 = "cp ./label3d_voltest_dannce_m2.mat ./alabel3d_temp_dannce.mat" + os.system(setup2) + args = [ + "dannce-predict", + "config_mousetest.yaml", + "--net-type=AVG", + 
"--write-npy=../../demo/markerless_mouse_2/npy_volumes/", + "--batch-size=1", + ] + with patch("sys.argv", args): + cli.dannce_predict_cli() + +if __name__ == "__main__": + log_file = 'log_file.txt' + with open(log_file, "w") as f: + runner = unittest.TextTestRunner(f) + absltest.main(testRunner=runner) diff --git a/tests/cli_test_inherit.py b/tests/cli_test_inherit.py new file mode 100644 index 0000000..f7b3fe2 --- /dev/null +++ b/tests/cli_test_inherit.py @@ -0,0 +1,738 @@ +from absl.testing import absltest +import tensorflow as tf +import dannce.cli as cli +from cluster import grid, multi_gpu +import os +import numpy as np +import scipy.io as sio +import sys +import unittest +from unittest.mock import patch +from typing import Text + +# Initialize the gpu prior to testing +# tf.test.is_gpu_available() +tf.config.list_physical_devices('GPU') + +DANNCE_HOME = os.path.dirname(os.getcwd()) + +# Move to the testing project folder +os.chdir("configs") +N_TEST_IMAGES = 8 + + +def compare_predictions(file_1: Text, file_2: Text, th: float = 0.05): + """Compares two prediction matfiles. + + Args: + file_1 (Text): Path to prediction file 1 + file_2 (Text): Path to prediction file 2 + th (float, optional): Testing tolerance in mm. Defaults to 0.05. + + Raises: + Exception: If file does not contain com or dannce predictions + """ + + m1 = sio.loadmat(file_1) + m2 = sio.loadmat(file_2) + + if "com" in m1.keys(): + error = np.mean( + np.abs(m1["com"][:N_TEST_IMAGES, ...] - m2["com"][:N_TEST_IMAGES, ...]) + ) + return error < th + elif "pred" in m2.keys(): + error = np.mean( + np.abs( + m1["pred"][:N_TEST_IMAGES, ...] - m2["pred"][:N_TEST_IMAGES, ...] 
+ ) + ) + return error < th + else: + raise Exception("Expected fields (pred, com) not found in inputs") + +def train_setup(): + if os.getcwd() != os.path.join(DANNCE_HOME, "tests/configs"): + os.chdir(os.path.join(DANNCE_HOME, "tests/configs")) + setup = "cp ./label3d_temp_dannce.mat ./alabel3d_temp_dannce.mat" + os.system(setup) + +def train_setup_5cams(): + if os.getcwd() != os.path.join(DANNCE_HOME, "tests/configs"): + os.chdir(os.path.join(DANNCE_HOME, "tests/configs")) + setup = "mkdir ../../demo/markerless_mouse_1/videos5/;" + \ + "ln -s ../../../dannce/demo/markerless_mouse_1/videos/*[0-5] ../../demo/markerless_mouse_1/videos5;" + \ + "mkdir ../../demo/markerless_mouse_2/videos5/;" + \ + "ln -s ../../../dannce/demo/markerless_mouse_2/videos/*[0-5] ../../demo/markerless_mouse_2/videos5 " + os.system(setup) + + +class TestComTrain(absltest.TestCase): + def test_com_train(self): + train_setup() + args = [ + "com-train", + "config_com_mousetest.yaml", + "--com-finetune-weights=../../demo/markerless_mouse_1/COM/weights/", + "--downfac=8", + ] + with patch("sys.argv", args): + cli.com_train_cli() + + def test_com_train_mono(self): + train_setup() + args = ["com-train", "config_com_mousetest.yaml", "--mono=True", "--downfac=8"] + with patch("sys.argv", args): + cli.com_train_cli() + +class TestComPredict(absltest.TestCase): + def test_com_predict(self): + assert os.getcwd() == os.path.join(DANNCE_HOME, "tests/configs"), \ + "Test not being performed from the intended configs folder {}".format(os.path.join(DANNCE_HOME, "tests/configs")) + train_setup() + args = ["com-predict", os.path.join(os.path.join(DANNCE_HOME, "tests/configs"),"config_com_mousetest.yaml")] + with patch("sys.argv", args): + cli.com_predict_cli() + self.assertTrue(compare_predictions( + "../touchstones/COM3D_undistorted_masternn.mat", + "../../demo/markerless_mouse_1/COM/predict_test/com3d0.mat", + )) + + def test_com_predict_3_cams(self): + setup = "cp ./label3d_temp_dannce_3cam.mat 
./alabel3d_temp_dannce.mat" + os.system(setup) + assert os.getcwd() == os.path.join(DANNCE_HOME, "tests/configs"), \ + "Test not being performed from the intended configs folder {}".format(os.path.join(DANNCE_HOME, "tests/configs")) + args = ["com-predict", "config_com_mousetest.yaml", "--downfac=4"] + with patch("sys.argv", args): + cli.com_predict_cli() + + def test_com_predict_5_cams(self): + setup = "cp ./label3d_temp_dannce_5cam.mat ./alabel3d_temp_dannce.mat" + os.system(setup) + assert os.getcwd() == os.path.join(DANNCE_HOME, "tests/configs"), \ + "Test not being performed from the intended configs folder {}".format(os.path.join(DANNCE_HOME, "tests/configs")) + args = ["com-predict", "config_com_mousetest.yaml", "--downfac=2"] + with patch("sys.argv", args): + cli.com_predict_cli() + +class TestDannceTrain(absltest.TestCase): + com_args = [ + "dannce-train", + "config_mousetest.yaml", + ] + + def do_setup(self, caller_func): + if 'train' in caller_func: + train_setup() + return 0 + + return 1 + + def do_test(self): + from inspect import stack + import copy + caller_function = stack()[1].function + + setup=self.do_setup(caller_function) + if setup!=0 : + print("Setup Incomplete") + + args_=copy.deepcopy(TestDannceTrain.com_args) + args = self.get_args(args_, caller_function) + + with patch("sys.argv", args): + cli.dannce_train_cli() + + def get_args(self, args, caller_function): + + losses = ["mask_nan_keep_loss", "mask_nan_l1_loss", "gaussian_cross_entropy_loss"] + net_types = ["max", "avgmax", "avg"] + nets = ["unet3d_big_expectedvalue", "unet3d_big"] + train_modes = ["new","finetune","continued"] + + for loss in losses: + if loss in caller_function: + args.append("--loss={}".format(loss)) + + for net_type in net_types: + if net_type in caller_function and not "avgmax" in caller_function: + args.append("--net-type={}".format(net_type.upper())) + break + if "avgmax" in caller_function: + args.append("--net-type={}".format("AVG")) + 
args.append("--avg-max={}".format(10)) + + for train_mode in train_modes: + if train_mode in caller_function: + args.append("--train_mode={}".format(train_mode)) + + if train_mode!="new": + avgmax_weights_dir = "../../demo/markerless_mouse_1/DANNCE/train_results/AVG/" + avg_weights_dir = "../../demo/markerless_mouse_1/DANNCE/weights/" + max_weights_dir = "../../demo/markerless_mouse_1/DANNCE/weights/weights.rat.MAX/" + if caller_function.count("finetune") > 1: + args.append("--dannce-finetune-weights={}".format(avgmax_weights_dir)) + elif train_mode != "continued": + args.append("--dannce-finetune-weights={}".format(avg_weights_dir if net_type!="max" else max_weights_dir)) + else: + args.append("--dannce-finetune-weights={}".format(avgmax_weights_dir if net_type!="max" else avg_weights_dir)) + + else: + args.append("--n-channels-out=22") + args.append("--net={}".format("unet3d_big" if net_type == "max" else "unet3d_big_expectedvalue")) + + if "mono" in caller_function: + args.append("--mono=True") + + + return args + + def test_dannce_train_finetune_max(self): + train_setup() + assert os.getcwd() == os.path.join(DANNCE_HOME, "tests/configs"), \ + "Test not being performed from the intended configs folder {}".format(os.path.join(DANNCE_HOME, "tests/configs")) + args = [ + "dannce-train", + "config_mousetest.yaml", + "--net-type=MAX", + "--dannce-finetune-weights=../../demo/markerless_mouse_1/DANNCE/weights/weights.rat.MAX/", + ] + with patch("sys.argv", args): + cli.dannce_train_cli() + + def test_dannce_train_finetune_avg(self): + train_setup() + assert os.getcwd() == os.path.join(DANNCE_HOME, "tests/configs"), \ + "Test not being performed from the intended configs folder {}".format(os.path.join(DANNCE_HOME, "tests/configs")) + args = [ + "dannce-train", + "config_mousetest.yaml", + "--net-type=AVG", + "--dannce-finetune-weights=../../demo/markerless_mouse_1/DANNCE/weights/", + ] + with patch("sys.argv", args): + cli.dannce_train_cli() + + def 
test_dannce_train_finetune_avgmax(self): + train_setup() + assert os.getcwd() == os.path.join(DANNCE_HOME, "tests/configs"), \ + "Test not being performed from the intended configs folder {}".format(os.path.join(DANNCE_HOME, "tests/configs")) + args = [ + "dannce-train", + "config_mousetest.yaml", + "--net-type=AVG", + "--avg-max=10", + "--dannce-finetune-weights=../../demo/markerless_mouse_1/DANNCE/weights/", + ] + with patch("sys.argv", args): + cli.dannce_train_cli() + + + def test_dannce_train_finetune_avg_heatmap_regularization(self): + train_setup() + args = [ + "dannce-train", + "config_mousetest.yaml", + "--net-type=AVG", + "--dannce-finetune-weights=../../demo/markerless_mouse_1/DANNCE/weights/", + ] + with patch("sys.argv", args): + cli.dannce_train_cli() + + def test_dannce_train_finetune_avg_from_finetune(self): + train_setup() + args = [ + "dannce-train", + "config_mousetest.yaml", + "--net-type=AVG", + "--dannce-finetune-weights=../../demo/markerless_mouse_1/DANNCE/train_results/AVG/", + ] + with patch("sys.argv", args): + cli.dannce_train_cli() + + def test_dannce_train_finetune_avgmax_from_finetune(self): + train_setup() + assert os.getcwd() == os.path.join(DANNCE_HOME, "tests/configs"), \ + "Test not being performed from the intended configs folder {}".format(os.path.join(DANNCE_HOME, "tests/configs")) + args = [ + "dannce-train", + "config_mousetest.yaml", + "--net-type=AVG", + "--avg-max=10", + "--dannce-finetune-weights=../../demo/markerless_mouse_1/DANNCE/train_results/AVG/", + ] + with patch("sys.argv", args): + cli.dannce_train_cli() + + def test_dannce_train_avg(self): + train_setup() + args = [ + "dannce-train", + "config_mousetest.yaml", + "--net=unet3d_big_expectedvalue", + "--train-mode=new", + "--n-channels-out=22", + ] + with patch("sys.argv", args): + cli.dannce_train_cli() + + def test_dannce_train_max(self): + train_setup() + args = [ + "dannce-train", + "config_mousetest.yaml", + "--net=unet3d_big", + "--train-mode=new", + 
"--n-channels-out=22", + ] + with patch("sys.argv", args): + cli.dannce_train_cli() + + def test_dannce_train_avg_continued(self): + train_setup() + args = [ + "dannce-train", + "config_mousetest.yaml", + "--net-type=AVG", + "--train-mode=continued", + "--dannce-finetune-weights=../../demo/markerless_mouse_1/DANNCE/train_results/AVG/", + ] + with patch("sys.argv", args): + cli.dannce_train_cli() + + def test_dannce_train_max_continued(self): + train_setup() + args = [ + "dannce-train", + "config_mousetest.yaml", + "--net=finetune_MAX", + "--train-mode=continued", + "--dannce-finetune-weights=../../demo/markerless_mouse_1/DANNCE/train_results/", + ] + with patch("sys.argv", args): + cli.dannce_train_cli() + + def test_dannce_train_avg_mono(self): + train_setup() + args = [ + "dannce-train", + "config_mousetest.yaml", + "--net-type=AVG", + "--train-mode=new", + "--net=unet3d_big_expectedvalue", + "--mono=True", + "--n-channels-out=22", + ] + with patch("sys.argv", args): + cli.dannce_train_cli() + + def test_dannce_train_avg_mono_finetune(self): + train_setup() + args = [ + "dannce-train", + "config_mousetest.yaml", + "--net-type=AVG", + "--mono=True", + "--dannce-finetune-weights=../../demo/markerless_mouse_1/DANNCE/weights/weights.rat.AVG.MONO/", + ] + with patch("sys.argv", args): + cli.dannce_train_cli() + + def test_dannce_train_avg_mono_finetune_drop_landmarks(self): + train_setup() + args = [ + "dannce-train", + "config_mousetest.yaml", + "--net-type=AVG", + "--mono=True", + "--dannce-finetune-weights=../../demo/markerless_mouse_1/DANNCE/weights/weights.rat.AVG.MONO/", + "--drop-landmark=[5,7]", + ] + with patch("sys.argv", args): + cli.dannce_train_cli() + + def test_npy_volume_with_validation(self): + # train_setup() + os.chdir("../../demo/markerless_mouse_1/") + args = [ + "dannce-train", + "../../configs/dannce_mouse_config.yaml", + "--net-type=AVG", + "--use-npy=True", + "--dannce-train-dir=./DANNCE/npy_test/", + "--epochs=2", + "--valid-exp=[1]", + 
"--gpu=1", + ] + with patch("sys.argv", args): + cli.dannce_train_cli() + + def test_npy_volume_with_multi_gpu(self): + # train_setup() + os.chdir("../../demo/markerless_mouse_1/") + args = [ + "dannce-train", + "../../configs/dannce_mouse_config.yaml", + "--net-type=AVG", + "--batch-size=4", + "--use-npy=True", + "--dannce-train-dir=./DANNCE/npy_test/", + "--epochs=2", + "--valid-exp=[1]", + "--multi-gpu-train=True", + "--gpu=1", + ] + with patch("sys.argv", args): + cli.dannce_train_cli() + + def test_npy_volume_with_num_train_exp(self): + os.chdir("../../demo/markerless_mouse_1/") + args = [ + "dannce-train", + "../../configs/dannce_mouse_config.yaml", + "--net-type=AVG", + "--use-npy=True", + "--dannce-train-dir=./DANNCE/npy_test/", + "--epochs=2", + "--num-train-per-exp=2", + "--batch-size=1", + "--gpu=1", + ] + with patch("sys.argv", args): + cli.dannce_train_cli() + + def test_npy_volume_with_validation_and_num_train_exp(self): + os.chdir("../../demo/markerless_mouse_1/") + args = [ + "dannce-train", + "../../configs/dannce_mouse_config.yaml", + "--net-type=AVG", + "--use-npy=True", + "--dannce-train-dir=./DANNCE/npy_test/", + "--epochs=2", + "--valid-exp=[1]", + "--num-train-per-exp=2", + "--batch-size=1", + "--gpu=1", + ] + with patch("sys.argv", args): + cli.dannce_train_cli() + + def test_dannce_train_MAX_layer_norm(self): + train_setup() + args = [ + "dannce-train", + "dgptest_config.yaml", + "--dannce-train-dir=../../demo/markerless_mouse_1/DANNCE/train_test_ln/", + "--n-channels-out=22", + ] + with patch("sys.argv", args): + cli.dannce_train_cli() + + def test_dannce_train_MAX_scratch_instance_norm(self): + train_setup() + args = [ + "dannce-train", + "dgptest_config.yaml", + "--norm-method=instance", + "--dannce-train-dir=../../demo/markerless_mouse_1/DANNCE/train_test_in/", + "--n-channels-out=22", + ] + with patch("sys.argv", args): + cli.dannce_train_cli() + + def test_dannce_train_DGP_MAX_scratch_layer_norm_sigmoid_cross_entropy_Gaussian( + self, 
+ ): + train_setup() + args = [ + "dannce-train", + "dgptest_config.yaml", + "--loss=gaussian_cross_entropy_loss", + "--dannce-train-dir=../../demo/markerless_mouse_1/DANNCE/train_test_ln_dgp/", + "--n-channels-out=22", + ] + with patch("sys.argv", args): + cli.dannce_train_cli() + + def test_dannce_train_DGP_MAX_scratch_instance_norm_sigmoid_cross_entropy_Gaussian( + self, + ): + train_setup() + args = [ + "dannce-train", + "dgptest_config.yaml", + "--norm-method=instance", + "--loss=gaussian_cross_entropy_loss", + "--dannce-train-dir=../../demo/markerless_mouse_1/DANNCE/train_test_ln_dgp/", + "--n-channels-out=22", + ] + with patch("sys.argv", args): + cli.dannce_train_cli() + + def test_dannce_train_avgmax(self): + train_setup() + args = [ + "dannce-train", + "config_mousetest.yaml", + "--net-type=AVG", + "--avg-max=10", + "--dannce-train-dir=../../demo/markerless_mouse_1/DANNCE/train_test/AVG_MAX/", + "--dannce-finetune-weights=../../demo/markerless_mouse_1/DANNCE/train_results/AVG/", + ] + with patch("sys.argv", args): + cli.dannce_train_cli() + + def test_dannce_train_finetune_avg_grid(self): + train_setup() + args = [ + "dannce-train-grid", + "config_mousetest.yaml", + "grid_config.yaml", + + ] + with patch("sys.argv", args): + grid.dannce_train_grid() + + def test_dannce_train_finetune_avg_6cams_with5camswts(self): + # This will probably fail when using save_pred_targets = True + # save_pred_targets based testing need to be done for both AVG and MAX models + train_setup() + args = [ + "dannce-train", + "config_mousetest.yaml", + "--n-rand-views=5", + "--net-type=AVG", + "--mono=True", + "--dannce-finetune-weights=../../demo/markerless_mouse_1/DANNCE/weights/weights.rat.AVG.MONO.5cams/", + + ] + with patch("sys.argv", args): + cli.dannce_train_cli() + + def test_dannce_train_finetune_avg_5cams_with6camswts(self): + train_setup_5cams() + args = [ + "dannce-train", + "config_mousetest.yaml", + "--io-config=io_5cams.yaml", + "--n-views=6", + "--net-type=AVG", + 
+ ] + with patch("sys.argv", args): + cli.dannce_train_cli() + + + +class TestDannceTrainLosses(absltest.TestCase): + com_args = [ + "dannce-train", + "config_mousetest.yaml", + ] + + def do_setup(self, caller_func): + if 'train' in caller_func: + train_setup() + return 0 + + return 1 + + def do_test(self, spec_args=[]): + # import pdb; pdb.set_trace() + from inspect import stack + import copy + caller_function = stack()[1].function + + setup=self.do_setup(caller_function) + if setup!=0 : + print("Setup Incomplete") + + assert os.getcwd() == os.path.join(DANNCE_HOME, "tests/configs"), \ + "Test not being performed from the intended configs folder {} and instead being performed from {}".format(os.path.join(DANNCE_HOME, "tests/configs"), + os.getcwd()) + + args_=copy.deepcopy(TestDannceTrainLosses.com_args) + args = self.get_args(args_, caller_function) + + if len(spec_args) > 0: + args.extend(spec_args) + + with patch("sys.argv", args): + cli.dannce_train_cli() + + def get_args(self, args, caller_function): + + losses = ["mask_nan_keep_loss", "mask_nan_l1_loss", "gaussian_cross_entropy_loss" ] + distances = ["euclidean_distance_3D", "centered_euclidean_distance_3D"] + net_types = ["max", "avgmax", "avg"] + nets = ["unet3d_big_expectedvalue", "unet3d_big"] + train_modes = ["new","finetune","continued"] + + + + for net_type in net_types: + if net_type in caller_function and not "avgmax" in caller_function: + args.append("--net-type={}".format(net_type.upper())) + break + if "avgmax" in caller_function: + args.append("--net-type={}".format("AVG")) + args.append("--avg-max={}".format(10)) + + for train_mode in train_modes: + if train_mode in caller_function: + args.append("--train-mode={}".format(train_mode)) + + if train_mode!="new": + avgmax_weights_dir = "../../demo/markerless_mouse_1/DANNCE/train_results/AVG/" + avg_weights_dir = "../../demo/markerless_mouse_1/DANNCE/weights/" + max_weights_dir = "../../demo/markerless_mouse_1/DANNCE/weights/weights.rat.MAX/" + if 
caller_function.count("finetune") > 1: + args.append("--dannce-finetune-weights={}".format(avgmax_weights_dir)) + elif train_mode != "continued": + args.append("--dannce-finetune-weights={}".format(avg_weights_dir if net_type!="max" else max_weights_dir)) + else: + args.append("--dannce-finetune-weights={}".format(avgmax_weights_dir if net_type!="max" else avg_weights_dir)) + + else: + args.append("--n-channels-out=22") + args.append("--net={}".format("unet3d_big" if net_type == "max" else "unet3d_big_expectedvalue")) + + if "mono" in caller_function: + args.append("--mono=True") + + for loss in losses: + if loss in caller_function: + args.append("--loss={}".format(loss)) + + for distance in distances: + if distance in caller_function: + args.append("--metric={}".format(distance)) + + return args + + + def test_dannce_train_finetune_avg_mask_nan_keep_loss(self): + self.do_test() + + def test_dannce_train_finetune_avg_mask_nan_l1_loss(self): + self.do_test() + + def test_dannce_train_finetune_avg_gaussian_cross_entropy_loss(self): + self.do_test() + + def test_dannce_train_finetune_max_mask_nan_keep_loss(self): + self.do_test() + + def test_dannce_train_finetune_max_mask_nan_l1_loss(self): + self.do_test() + + def test_dannce_train_finetune_max_gaussian_cross_entropy_loss(self): + self.do_test() + + + + + + + +class TestDanncePredict(absltest.TestCase): + def test_dannce_predict_mono(self): + # TODO(refactor): This test depends on there being a mono model saved. 
+ train_setup() + args = [ + "dannce-train", + "config_mousetest.yaml", + "--net-type=AVG", + "--mono=True", + "--dannce-finetune-weights=../../demo/markerless_mouse_1/DANNCE/weights/weights.rat.AVG.MONO/", + ] + with patch("sys.argv", args): + cli.dannce_train_cli() + args = [ + "dannce-predict", + "config_mousetest.yaml", + "--net-type=AVG", + "--dannce-predict-model=../../demo/markerless_mouse_1/DANNCE/train_test/fullmodel_weights/fullmodel_end.hdf5", + "--mono=True", + ] + with patch("sys.argv", args): + cli.dannce_predict_cli() + + def test_dannce_predict_avg(self): + train_setup() + args = [ + "dannce-predict", + "config_mousetest.yaml", + "--net-type=AVG", + ] + with patch("sys.argv", args): + cli.dannce_predict_cli() + self.assertTrue(compare_predictions( + "../touchstones/save_data_AVG_torch_nearest.mat", + "../../demo/markerless_mouse_1/DANNCE/predict_test/save_data_AVG0.mat", + )) + + def test_dannce_predict_max(self): + train_setup() + args = [ + "dannce-predict", + "config_mousetest.yaml", + "--net-type=MAX", + "--expval=False", + "--dannce-predict-model=../../demo/markerless_mouse_1/DANNCE/train_results/weights.12000-0.00014.hdf5", + ] + with patch("sys.argv", args): + cli.dannce_predict_cli() + self.assertTrue(compare_predictions( + "../touchstones/save_data_MAX_torchnearest_newtfroutine.mat", + "../../demo/markerless_mouse_1/DANNCE/predict_test/save_data_MAX0.mat", + )) + + def test_dannce_predict_avgmax(self): + train_setup() + args = [ + "dannce-predict", + "config_mousetest.yaml", + "--net-type=AVG", + "--dannce-predict-dir=../../demo/markerless_mouse_1/DANNCE/predict_test/AVG_MAX/", + "--dannce-finetune-weights=../../demo/markerless_mouse_1/DANNCE/train_results/AVG_MAX/" + ] + + with patch("sys.argv", args): + cli.dannce_predict_cli() + self.assertTrue(compare_predictions( + "../touchstones/save_data_AVG_MAX.mat", + "../../demo/markerless_mouse_1/DANNCE/predict_test/AVG_MAX/save_data_AVG0.mat", + )) + + def 
test_dannce_predict_numpy_volume_generation(self): + setup = "cp ./label3d_voltest_dannce_m1.mat ./alabel3d_temp_dannce.mat" + os.system(setup) + args = [ + "dannce-predict", + "config_mousetest.yaml", + "--net-type=AVG", + "--write-npy=../../demo/markerless_mouse_1/npy_volumes/", + "--batch-size=1", + ] + with patch("sys.argv", args): + cli.dannce_predict_cli() + setup2 = "cp ./label3d_voltest_dannce_m2.mat ./alabel3d_temp_dannce.mat" + os.system(setup2) + args = [ + "dannce-predict", + "config_mousetest.yaml", + "--net-type=AVG", + "--write-npy=../../demo/markerless_mouse_2/npy_volumes/", + "--batch-size=1", + ] + with patch("sys.argv", args): + cli.dannce_predict_cli() + +if __name__ == "__main__": + log_file = 'log_file_inherit.txt' + with open(log_file, "w") as f: + runner = unittest.TextTestRunner(f) + absltest.main(testRunner=runner) diff --git a/tests/compare_predictions.py b/tests/compare_predictions.py deleted file mode 100755 index 19269b8..0000000 --- a/tests/compare_predictions.py +++ /dev/null @@ -1,25 +0,0 @@ -""" -Compares two prediction matfiles -""" -import numpy as np -import scipy.io as sio -import sys - -m1 = sio.loadmat(sys.argv[1]) -m2 = sio.loadmat(sys.argv[2]) -th = float(sys.argv[3]) - -if "com" in m1.keys(): - print("Checking for parity between predictions...") - error = np.mean(np.abs(m1["com"] - m2["com"])) - print("Mean error is: ", error, "\nThreshold is: ", th) - assert error < th - print("Good!") -elif "pred" in m2.keys(): - print("Checking for parity between predictions...") - error = np.mean(np.abs(m1["pred"] - m2["pred"])) - print("Mean error is: ", error, "\nThreshold is: ", th) - assert error < th - print("Good!") -else: - raise Exception("Expected fields (pred, com) not found in inputs") diff --git a/tests/configs/_grid_params.p b/tests/configs/_grid_params.p new file mode 100644 index 0000000..fc1ac3d Binary files /dev/null and b/tests/configs/_grid_params.p differ diff --git a/tests/configs/alabel3d_temp_dannce.mat 
b/tests/configs/alabel3d_temp_dannce.mat deleted file mode 100755 index b2d1687..0000000 Binary files a/tests/configs/alabel3d_temp_dannce.mat and /dev/null differ diff --git a/tests/configs/base_config_temp.yaml b/tests/configs/base_config_temp.yaml index c514a01..8a65c63 100755 --- a/tests/configs/base_config_temp.yaml +++ b/tests/configs/base_config_temp.yaml @@ -35,7 +35,7 @@ max_queue_size: 20 sigma: 10 # DANNCE training option. Sets the number of epochs during training (default 1200) -epochs: 3 +epochs: 1 # DANNCE training option. Sets the verbosity of training output verbose: 1 @@ -121,7 +121,7 @@ cthresh: 350 channel_combo: 'None' # max. number of batches to evaluate during prediction. set to 'max' to evaluate over all data/video frames -maxbatch: 250 +max_num_samples: 10 start_batch: 0 diff --git a/tests/configs/config_com_mousetest.yaml b/tests/configs/config_com_mousetest.yaml index 3b2ee5c..7b42972 100755 --- a/tests/configs/config_com_mousetest.yaml +++ b/tests/configs/config_com_mousetest.yaml @@ -13,7 +13,7 @@ batch_size: 2 sigma: 30 # COM training option. Sets the number of epochs during training -epochs: 3 +epochs: 1 # DANNCE training option. Sets the verbosity of training output verbose: 1 @@ -46,7 +46,7 @@ gpu_id: "0" #COMdebug: Camera5 # How many frames to you want to predict over? Set to 'max' for all video frames. -max_num_samples: 1000 +max_num_samples: 10 com_finetune_weights: diff --git a/tests/configs/config_mousetest.yaml b/tests/configs/config_mousetest.yaml index 7dfe6af..9337600 100755 --- a/tests/configs/config_mousetest.yaml +++ b/tests/configs/config_mousetest.yaml @@ -7,7 +7,7 @@ new_n_channels_out: 22 batch_size: 4 # DANNCE training option. 
Sets the number of epochs during training (default 1200) -epochs: 3 +epochs: 1 # Options: # 'new': initializes and trains a network from scratch @@ -15,6 +15,8 @@ epochs: 3 # 'continued': initializes a full model, including optimizer state, and continuous training from the last full model checkpoint train_mode: finetune +slurm_config: "../../cluster/duke.yaml" + # How many samples from each animal do you want to (randomly) set aside for a validation metric? num_validation_per_exp: 4 @@ -25,10 +27,13 @@ vol_size: 120 nvox: 64 # max. number of batches to evaluate during prediction. set to 'max' to evaluate over all data/video frames -max_num_samples: 1000 +max_num_samples: 10 start_batch: 0 dannce_finetune_weights: ../../demo/markerless_mouse_1/DANNCE/weights/ -metric: [euclidean_distance_3D, centered_euclidean_distance_3D] \ No newline at end of file +metric: [euclidean_distance_3D, centered_euclidean_distance_3D] + +log_dest: ../logs_/mylog.log +log_level: 'INFO' \ No newline at end of file diff --git a/tests/configs/config_mousetest_multi_instance.yaml b/tests/configs/config_mousetest_multi_instance.yaml new file mode 100755 index 0000000..8c3dc15 --- /dev/null +++ b/tests/configs/config_mousetest_multi_instance.yaml @@ -0,0 +1,41 @@ +io_config: io.yaml + +# New number of network output channels. +new_n_channels_out: 22 + +# batch_size +batch_size: 4 + +# DANNCE training option. Sets the number of epochs during training (default 1200) +epochs: 1 + +# Options: +# 'new': initializes and trains a network from scratch +# 'finetune': loads in pre-trained weights and fine-tuned from there +# 'continued': initializes a full model, including optimizer state, and continuous training from the last full model checkpoint +train_mode: finetune + +n_instances: 2 + +slurm_config: "../../cluster/duke.yaml" + +# How many samples from each animal do you want to (randomly) set aside for a validation metric? 
+num_validation_per_exp: 4 + +# Size of 3D volume (in mm) anchored on animal +vol_size: 120 + +# Number of voxels along each spatial dimension +nvox: 64 + +# max. number of batches to evaluate during prediction. set to 'max' to evaluate over all data/video frames +max_num_samples: 10 + +start_batch: 0 + +dannce_finetune_weights: ../../demo/markerless_mouse_1/DANNCE/weights/ + +metric: [euclidean_distance_3D, centered_euclidean_distance_3D] + +log_dest: '../logs_/mylog.log' +log_level: 'DEBUG' \ No newline at end of file diff --git a/tests/configs/dgptest_config.yaml b/tests/configs/dgptest_config.yaml new file mode 100644 index 0000000..7e6960e --- /dev/null +++ b/tests/configs/dgptest_config.yaml @@ -0,0 +1,43 @@ +io_config: io.yaml + +# New number of network output channels. +new_n_channels_out: 22 + +n_channels_out: 22 + +# batch_size +batch_size: 4 + +# DANNCE training option. Sets the number of epochs during training (default 1200) +epochs: 1 + +net_type: MAX + +net: unet3d_big + +# so that you look at the same exact frames for validation between training conditions +data_split_seed: 2 + +# Options: +# 'new': initializes and trains a network from scratch +# 'finetune': loads in pre-trained weights and fine-tunes from there +# 'continued': initializes a full model, including optimizer state, and continues training from the last full model checkpoint +train_mode: new + +loss: mask_nan_keep_loss + +# How many samples from each animal do you want to (randomly) set aside for a validation metric? +num_validation_per_exp: 4 + +# Size of 3D volume (in mm) anchored on animal +vol_size: 120 + +# Number of voxels along each spatial dimension +nvox: 64 + +# max. number of batches to evaluate during prediction.
set to 'max' to evaluate over all data/video frames +max_num_samples: max + +start_batch: 0 + +metric: [euclidean_distance_3D, centered_euclidean_distance_3D] \ No newline at end of file diff --git a/tests/configs/grid_config.yaml b/tests/configs/grid_config.yaml new file mode 100644 index 0000000..0738758 --- /dev/null +++ b/tests/configs/grid_config.yaml @@ -0,0 +1,6 @@ +batch_params: + - avg+max: 10 + dannce_train_dir: ../../demo/markerless_mouse_1/DANNCE/grid_1/ + + - avg+max: 5 + dannce_train_dir: ../../demo/markerless_mouse_1/DANNCE/grid_2/ \ No newline at end of file diff --git a/tests/configs/io.yaml b/tests/configs/io.yaml index f0f2d3b..cdb1cf5 100755 --- a/tests/configs/io.yaml +++ b/tests/configs/io.yaml @@ -29,7 +29,7 @@ exp: # path to folder contraining video sub directories. Used only for com and dannce PREDICTION viddir: ../../demo/markerless_mouse_1/videos/ -gpu_id: "1" +gpu_id: "0" diff --git a/tests/configs/io_5cams.yaml b/tests/configs/io_5cams.yaml new file mode 100755 index 0000000..cbb49d9 --- /dev/null +++ b/tests/configs/io_5cams.yaml @@ -0,0 +1,35 @@ +### COM ### +# path to folder where COM weights and logs will be saved +com_train_dir: ../../demo/markerless_mouse_1/COM/train_test/ +com_predict_dir: ../../demo/markerless_mouse_1/COM/predict_test/ + +# During prediction, will look for the last epoch weights saved to ./COM/train_results/. To load in a different weights file, add the path here +com_predict_weights: ../../demo/markerless_mouse_1/COM/train_results/weights.250-0.00036.hdf5 + +### Dannce ### +# path to folder where DANNCE weights and logs will be saved +dannce_train_dir: ../../demo/markerless_mouse_1/DANNCE/train_test/ +dannce_predict_dir: ../../demo/markerless_mouse_1/DANNCE/predict_test/ + +# During prediction, will look for the last epoch weights saved to ./DANNCE/train_results/. To load in a different weights file, add the path here +# Note that this must be a FULL MODEL file, not just weights. 
+dannce_predict_model: ../../demo/markerless_mouse_1/DANNCE/train_results/AVG/weights.1200-12.77642.hdf5 + +exp: + - label3d_file: ./label3d_temp_dannce.mat + # camnames: ['Camera1','Camera2','Camera3','Camera4','Camera5','Camera6'] + # com_file: ../../demo/markerless_mouse_1/COM/predict_results/com3d.mat + viddir: ../../demo/markerless_mouse_1/videos5/ + - label3d_file: ./label3d_temp_dannce.mat + # label3d_file: ../../demo/markerless_mouse_2/label3d_demo.mat # The temp file is only for rapid testing, but does not actually train over real mouse_2 labels + # camnames: ['Camera1','Camera2','Camera3','Camera4','Camera5','Camera6'] + # com_file: ../../demo/markerless_mouse_2/COM/predict_results/com3d.pickle + viddir: ../../demo/markerless_mouse_2/videos5/ + +# path to folder containing video sub directories. Used only for com and dannce PREDICTION +viddir: ../../demo/markerless_mouse_1/videos5/ + +gpu_id: "0" + + + diff --git a/tests/configs/io_dgp.yaml b/tests/configs/io_dgp.yaml new file mode 100644 index 0000000..bc7d4cb --- /dev/null +++ b/tests/configs/io_dgp.yaml @@ -0,0 +1,35 @@ +### COM ### +# path to folder where COM weights and logs will be saved +com_train_dir: ../../demo/markerless_mouse_1/COM/train_test/ +com_predict_dir: ../../demo/markerless_mouse_1/COM/predict_test/ + +# During prediction, will look for the last epoch weights saved to ./COM/train_results/. To load in a different weights file, add the path here +com_predict_weights: ../../demo/markerless_mouse_1/COM/train_results/weights.250-0.00036.hdf5 + +### Dannce ### +# path to folder where DANNCE weights and logs will be saved +dannce_train_dir: ../../demo/markerless_mouse_1/DANNCE/train_test_in_dgp/ +dannce_predict_dir: ../../demo/markerless_mouse_1/DANNCE/predict_test_in_dgp/ + +# During prediction, will look for the last epoch weights saved to ./DANNCE/train_results/. To load in a different weights file, add the path here +# Note that this must be a FULL MODEL file, not just weights.
+dannce_predict_model: ../../demo/markerless_mouse_1/DANNCE/train_results_dgp/AVG/weights.1200-12.77642.hdf5 + +exp: + - label3d_file: ./label3d_temp_dannce.mat + # camnames: ['Camera1','Camera2','Camera3','Camera4','Camera5','Camera6'] + # com_file: ../../demo/markerless_mouse_1/COM/predict_results/com3d.mat + viddir: ../../demo/markerless_mouse_1/videos/ + - label3d_file: ./label3d_temp_dannce.mat + # label3d_file: ../../demo/markerless_mouse_2/label3d_demo.mat # The temp file is only for rapid testing, but does not actually train over real mouse_2 labels + # camnames: ['Camera1','Camera2','Camera3','Camera4','Camera5','Camera6'] + # com_file: ../../demo/markerless_mouse_2/COM/predict_results/com3d.pickle + viddir: ../../demo/markerless_mouse_2/videos/ + +# path to folder contraining video sub directories. Used only for com and dannce PREDICTION +viddir: ../../demo/markerless_mouse_1/videos/ + +gpu_id: "0" + + + diff --git a/tests/configs/log_file_inherit.txt b/tests/configs/log_file_inherit.txt new file mode 100644 index 0000000..360f42a --- /dev/null +++ b/tests/configs/log_file_inherit.txt @@ -0,0 +1,38 @@ +.........E............................... 
+====================================================================== +ERROR: test_dannce_predict_numpy_volume_generation (__main__.TestDanncePredict) +TestDanncePredict.test_dannce_predict_numpy_volume_generation +---------------------------------------------------------------------- +Traceback (most recent call last): + File "cli_test_inherit.py", line 732, in test_dannce_predict_numpy_volume_generation + cli.dannce_predict_cli() + File "/hpc/group/tdunn/asabath/dannce_release_proc/dannce/cli.py", line 135, in dannce_predict_cli + dannce_predict(params) + File "/hpc/group/tdunn/asabath/dannce_release_proc/dannce/interface.py", line 1564, in dannce_predict + processing.write_npy(params["write_npy"], predict_generator) + File "/hpc/group/tdunn/asabath/dannce_release_proc/dannce/engine/processing.py", line 1545, in write_npy + bch = gen.__getitem__(i) + File "/hpc/group/tdunn/asabath/dannce_release_proc/dannce/engine/generator.py", line 979, in __getitem__ + X, y = self.__data_generation(list_IDs_temp) + File "/hpc/group/tdunn/asabath/dannce_release_proc/dannce/engine/generator.py", line 1279, in __data_generation + result = self.threadpool.starmap(self.project_grid, arglist) + File "/hpc/group/tdunn/as1296/miniconda3/envs/dannce_aux/lib/python3.7/multiprocessing/pool.py", line 276, in starmap + return self._map_async(func, iterable, starmapstar, chunksize).get() + File "/hpc/group/tdunn/as1296/miniconda3/envs/dannce_aux/lib/python3.7/multiprocessing/pool.py", line 657, in get + raise self._value + File "/hpc/group/tdunn/as1296/miniconda3/envs/dannce_aux/lib/python3.7/multiprocessing/pool.py", line 121, in worker + result = (True, func(*args, **kwds)) + File "/hpc/group/tdunn/as1296/miniconda3/envs/dannce_aux/lib/python3.7/multiprocessing/pool.py", line 47, in starmapstar + return list(itertools.starmap(args[0], args[1])) + File "/hpc/group/tdunn/asabath/dannce_release_proc/dannce/engine/generator.py", line 1047, in project_grid + X_grid, camname, ID, 
experimentID, com, com_precrop, thisim + File "/hpc/group/tdunn/asabath/dannce_release_proc/dannce/engine/generator.py", line 1099, in pj_grid_post + X_grid, self.camera_params[experimentID][camname]["M"], self.device + File "/hpc/group/tdunn/asabath/dannce_release_proc/dannce/engine/ops.py", line 64, in project_to2d_torch + projPts = torch.matmul(torch.cat((pts, pts1), 1), M) +RuntimeError: CUDA error: CUBLAS_STATUS_ALLOC_FAILED when calling `cublasCreate(handle)` + +---------------------------------------------------------------------- +Ran 41 tests in 3725.588s + +FAILED (errors=1) diff --git a/tests/dgp_tests.sh b/tests/dgp_tests.sh new file mode 100644 index 0000000..fe5b05c --- /dev/null +++ b/tests/dgp_tests.sh @@ -0,0 +1,24 @@ +#!/bin/bash +set -e +# Set of tests to run. +# +# List of tests: +# 1) Comparing layer norm on max net with instance norm on + +python setup.py install + +cd tests/configs +cp ./label3d_temp_dannce.mat ./alabel3d_temp_dannce.mat +echo "Testing DANNCE training, MAX net from scratch with layer norm" +dannce-train dgptest_config.yaml --dannce-train-dir=../../demo/markerless_mouse_1/DANNCE/train_test_ln/ --n-channels-out=22 + +echo "Testing DANNCE training, MAX net from scratch with instance norm" +dannce-train dgptest_config.yaml --norm-method=instance --n-channels-out=22 --dannce-train-dir=../../demo/markerless_mouse_1/DANNCE/train_test_in/ + +# echo "Testing DANNCE training, dgp MAX net from scratch with layer norm and sigmoid cross entropy with gaussian targets" +# dannce-train dgptest_config.yaml --loss=gaussian_cross_entropy_loss --n-channels-out=22 --dannce-train-dir=../../demo/markerless_mouse_1/DANNCE/train_test_ln_dgp/ + +echo "Testing DANNCE training, dgp MAX net from scratch with instance norm and sigmoid cross entropy with gaussian targets" +dannce-train dgptest_config.yaml --norm-method=instance --loss=gaussian_cross_entropy_loss --n-channels-out=22 + +echo "Finished" diff --git a/tests/logs b/tests/logs new file mode 
100644 index 0000000..cf43c12 --- /dev/null +++ b/tests/logs @@ -0,0 +1,1478 @@ +01/30/2023 01:13:08 AM INFO:dance.cluster.grid.GridHandler.submit_jobs dict_values([10, '../../demo/markerless_mouse_1/DANNCE/train_test/grid_1/']) +01/30/2023 01:13:08 AM INFO:dance.cluster.grid.GridHandler.submit_jobs dict_values([5, '../../demo/markerless_mouse_1/DANNCE/train_test/grid_2/']) +01/30/2023 01:16:13 AM INFO:dance.cluster.grid.GridHandler.submit_jobs dict_values([10, '../../demo/markerless_mouse_1/DANNCE/train_test/grid_1/']) +01/30/2023 01:16:13 AM INFO:dance.cluster.grid.GridHandler.submit_jobs dict_values([5, '../../demo/markerless_mouse_1/DANNCE/train_test/grid_2/']) +01/30/2023 01:17:03 AM INFO:dance.cluster.grid.GridHandler.submit_jobs dict_values([10, '../../demo/markerless_mouse_1/DANNCE/train_test/grid_1/']) +01/30/2023 01:17:03 AM INFO:dance.cluster.grid.GridHandler.submit_jobs dict_values([5, '../../demo/markerless_mouse_1/DANNCE/train_test/grid_2/']) +01/30/2023 01:17:03 AM INFO:dance.cluster.grid.GridHandler.submit_jobs Command issued: sbatch --wait --array=0-1 --job-name=trainDannce -p gpu-common,dsplus-gpu --mem=80000 -t 3-00:00 --gres=gpu:1 -N 1 -c 16 --account=plusds --wrap=". ~/.bashrc; conda activate dannce_aux; dannce-train-single-batch config_mousetest.yaml grid_config.yaml" +01/30/2023 01:18:53 AM INFO:dance.cluster.grid.GridHandler.submit_jobs dict_values([10, '../../demo/markerless_mouse_1/DANNCE/train_test/grid_1/']) +01/30/2023 01:18:53 AM INFO:dance.cluster.grid.GridHandler.submit_jobs dict_values([5, '../../demo/markerless_mouse_1/DANNCE/train_test/grid_2/']) +01/30/2023 01:18:53 AM INFO:dance.cluster.grid.GridHandler.submit_jobs Command issued: sbatch --wait --array=0-1 --job-name=trainDannce -p gpu-common,dsplus-gpu --mem=80000 -t 3-00:00 --gres=gpu:1 -N 1 -c 16 --account=plusds --wrap=". 
~/.bashrc; conda activate dannce_aux; dannce-train-single-batch config_mousetest.yaml grid_config.yaml" +01/30/2023 01:20:39 AM INFO:dance.cluster.grid.GridHandler.submit_jobs dict_values([10, '../../demo/markerless_mouse_1/DANNCE/train_test/grid_1/']) +01/30/2023 01:20:39 AM INFO:dance.cluster.grid.GridHandler.submit_jobs dict_values([5, '../../demo/markerless_mouse_1/DANNCE/train_test/grid_2/']) +01/30/2023 01:20:39 AM INFO:dance.cluster.grid.GridHandler.submit_jobs Command issued: sbatch --wait --array=0-1 --job-name=trainDannce -p gpu-common,dsplus-gpu --mem=80000 -t 3-00:00 --gres=gpu:1 -N 1 -c 16 --account=plusds --wrap=". ~/.bashrc; conda activate dannce_aux; dannce-train-single-batch config_mousetest.yaml grid_config.yaml" +02/07/2023 11:57:37 AM INFO:dance.cluster.grid.GridHandler.submit_jobs dict_values([10, '../../demo/markerless_mouse_1/DANNCE/grid_1/']) +02/07/2023 11:57:37 AM INFO:dance.cluster.grid.GridHandler.submit_jobs dict_values([5, '../../demo/markerless_mouse_1/DANNCE/grid_2/']) +02/07/2023 11:57:37 AM INFO:dance.cluster.grid.GridHandler.submit_jobs Command issued: sbatch --wait --array=0-1 --job-name=trainDannce -p gpu-common,dsplus-gpu --mem=80000 -t 3-00:00 --gres=gpu:1 -N 1 -c 16 --account=plusds --wrap=". ~/.bashrc; conda activate dannce_aux; dannce-train-single-batch config_mousetest.yaml grid_config.yaml" +02/07/2023 12:00:01 PM INFO:dance.cluster.grid.GridHandler.submit_jobs dict_values([10, '../../demo/markerless_mouse_1/DANNCE/grid_1/']) +02/07/2023 12:00:01 PM INFO:dance.cluster.grid.GridHandler.submit_jobs dict_values([5, '../../demo/markerless_mouse_1/DANNCE/grid_2/']) +02/07/2023 12:00:01 PM INFO:dance.cluster.grid.GridHandler.submit_jobs Command issued: sbatch --wait --array=0-1 --job-name=trainDannce -p gpu-common,dsplus-gpu --mem=80000 -t 3-00:00 --gres=gpu:1 -N 1 -c 16 --account=plusds --wrap=". 
~/.bashrc; conda activate dannce_aux; dannce-train-single-batch config_mousetest.yaml grid_config.yaml" +02/07/2023 12:01:29 PM INFO:dance.cluster.grid.GridHandler.submit_jobs dict_values([10, '../../demo/markerless_mouse_1/DANNCE/grid_1/']) +02/07/2023 12:01:29 PM INFO:dance.cluster.grid.GridHandler.submit_jobs dict_values([5, '../../demo/markerless_mouse_1/DANNCE/grid_2/']) +02/07/2023 12:01:29 PM INFO:dance.cluster.grid.GridHandler.submit_jobs Command issued: sbatch --wait --array=0-1 --job-name=trainDannce -p gpu-common,tdunn --mem=80000 -t 3-00:00 --gres=gpu:1 -N 1 -c 16 --wrap=". ~/.bashrc; conda activate dannce_aux; dannce-train-single-batch config_mousetest.yaml grid_config.yaml" +02/07/2023 12:04:14 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting vid_dir_flag to True. +02/07/2023 12:04:14 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting vid_dir_flag to True. +02/07/2023 12:04:14 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting extension to .mp4. +02/07/2023 12:04:14 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting extension to .mp4. +02/07/2023 12:04:14 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting chunks to {'Camera1': array([ 0, 3000, 6000, 9000, 12000, 15000]), 'Camera2': array([ 0, 3000, 6000, 9000, 12000, 15000]), 'Camera3': array([ 0, 3000, 6000, 9000, 12000, 15000]), 'Camera4': array([ 0, 3000, 6000, 9000, 12000, 15000]), 'Camera5': array([ 0, 3000, 6000, 9000, 12000, 15000]), 'Camera6': array([ 0, 3000, 6000, 9000, 12000, 15000])}. +02/07/2023 12:04:14 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting chunks to {'Camera1': array([ 0, 3000, 6000, 9000, 12000, 15000]), 'Camera2': array([ 0, 3000, 6000, 9000, 12000, 15000]), 'Camera3': array([ 0, 3000, 6000, 9000, 12000, 15000]), 'Camera4': array([ 0, 3000, 6000, 9000, 12000, 15000]), 'Camera5': array([ 0, 3000, 6000, 9000, 12000, 15000]), 'Camera6': array([ 0, 3000, 6000, 9000, 12000, 15000])}. 
+02/07/2023 12:04:18 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting n_channels_in to 3. +02/07/2023 12:04:18 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting raw_im_h to 1024. +02/07/2023 12:04:18 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting raw_im_w to 1152. +02/07/2023 12:04:18 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting expval to True. +02/07/2023 12:04:18 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting net_type to AVG. +02/07/2023 12:04:18 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting expval to True. +02/07/2023 12:04:18 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting net to finetune_AVG. +02/07/2023 12:04:18 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting n_channels_in to 3. +02/07/2023 12:04:18 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting raw_im_h to 1024. +02/07/2023 12:04:18 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting raw_im_w to 1152. +02/07/2023 12:04:18 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting expval to True. +02/07/2023 12:04:18 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting net_type to AVG. +02/07/2023 12:04:18 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting expval to True. +02/07/2023 12:04:18 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting net to finetune_AVG. +02/07/2023 12:04:20 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting crop_height to [0, 1024]. +02/07/2023 12:04:20 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting crop_width to [0, 1152]. +02/07/2023 12:04:20 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting maxbatch to 2. +02/07/2023 12:04:20 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting start_batch to 0. +02/07/2023 12:04:20 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting vmin to -60.0. 
+02/07/2023 12:04:20 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting vmax to 60.0. +02/07/2023 12:04:20 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting crop_height to [0, 1024]. +02/07/2023 12:04:20 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting crop_width to [0, 1152]. +02/07/2023 12:04:20 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting maxbatch to 2. +02/07/2023 12:04:20 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting start_batch to 0. +02/07/2023 12:04:20 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting vmin to -60.0. +02/07/2023 12:04:20 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting vmax to 60.0. +02/07/2023 12:20:28 PM INFO:dance.cluster.grid.GridHandler.submit_jobs dict_values([10, '../../demo/markerless_mouse_1/DANNCE/grid_1/']) +02/07/2023 12:20:28 PM INFO:dance.cluster.grid.GridHandler.submit_jobs dict_values([5, '../../demo/markerless_mouse_1/DANNCE/grid_2/']) +02/07/2023 12:20:28 PM INFO:dance.cluster.grid.GridHandler.submit_jobs Command issued: sbatch --wait --array=0-1 --job-name=trainDannce -p gpu-common,tdunn --mem=80000 -t 3-00:00 --gres=gpu:1 -N 1 -c 16 --wrap=". ~/.bashrc; conda activate dannce_aux; dannce-train-single-batch config_mousetest.yaml grid_config.yaml" +02/07/2023 12:23:32 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting vid_dir_flag to True. +02/07/2023 12:23:32 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting extension to .mp4. +02/07/2023 12:23:32 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting vid_dir_flag to True. +02/07/2023 12:23:32 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting extension to .mp4. 
+02/07/2023 12:23:32 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting chunks to {'Camera1': array([ 0, 3000, 6000, 9000, 12000, 15000]), 'Camera2': array([ 0, 3000, 6000, 9000, 12000, 15000]), 'Camera3': array([ 0, 3000, 6000, 9000, 12000, 15000]), 'Camera4': array([ 0, 3000, 6000, 9000, 12000, 15000]), 'Camera5': array([ 0, 3000, 6000, 9000, 12000, 15000]), 'Camera6': array([ 0, 3000, 6000, 9000, 12000, 15000])}. +02/07/2023 12:23:32 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting chunks to {'Camera1': array([ 0, 3000, 6000, 9000, 12000, 15000]), 'Camera2': array([ 0, 3000, 6000, 9000, 12000, 15000]), 'Camera3': array([ 0, 3000, 6000, 9000, 12000, 15000]), 'Camera4': array([ 0, 3000, 6000, 9000, 12000, 15000]), 'Camera5': array([ 0, 3000, 6000, 9000, 12000, 15000]), 'Camera6': array([ 0, 3000, 6000, 9000, 12000, 15000])}. +02/07/2023 12:23:36 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting n_channels_in to 3. +02/07/2023 12:23:36 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting raw_im_h to 1024. +02/07/2023 12:23:36 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting raw_im_w to 1152. +02/07/2023 12:23:36 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting expval to True. +02/07/2023 12:23:36 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting net_type to AVG. +02/07/2023 12:23:36 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting expval to True. +02/07/2023 12:23:36 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting net to finetune_AVG. +02/07/2023 12:23:36 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting n_channels_in to 3. +02/07/2023 12:23:36 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting raw_im_h to 1024. +02/07/2023 12:23:36 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting raw_im_w to 1152. +02/07/2023 12:23:36 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting expval to True. 
+02/07/2023 12:23:36 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting net_type to AVG. +02/07/2023 12:23:36 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting expval to True. +02/07/2023 12:23:36 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting net to finetune_AVG. +02/07/2023 12:23:38 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting crop_height to [0, 1024]. +02/07/2023 12:23:38 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting crop_width to [0, 1152]. +02/07/2023 12:23:38 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting maxbatch to 2. +02/07/2023 12:23:38 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting start_batch to 0. +02/07/2023 12:23:38 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting vmin to -60.0. +02/07/2023 12:23:38 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting vmax to 60.0. +02/07/2023 12:23:38 PM INFO:dance.cluster.grid.dannce_train_single_batch Task ID = 0 +02/07/2023 12:23:38 PM INFO:dance.cluster.grid.dannce_train_single_batch {'avg+max': 10, 'dannce_train_dir': '../../demo/markerless_mouse_1/DANNCE/grid_1/'} +02/07/2023 12:23:38 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting crop_height to [0, 1024]. +02/07/2023 12:23:38 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting crop_width to [0, 1152]. +02/07/2023 12:23:38 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting maxbatch to 2. +02/07/2023 12:23:38 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting start_batch to 0. +02/07/2023 12:23:38 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting vmin to -60.0. +02/07/2023 12:23:38 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting vmax to 60.0. 
+02/07/2023 12:23:38 PM INFO:dance.cluster.grid.dannce_train_single_batch Task ID = 1 +02/07/2023 12:23:38 PM INFO:dance.cluster.grid.dannce_train_single_batch {'avg+max': 5, 'dannce_train_dir': '../../demo/markerless_mouse_1/DANNCE/grid_2/'} +02/07/2023 12:23:38 PM INFO:dannce.interface.dannce_train Fine-tuning from ../../demo/markerless_mouse_1/DANNCE/weights/weights.rat.AVG.6cam.hdf5 +02/07/2023 12:23:38 PM INFO:dannce.interface.dannce_train Fine-tuning from ../../demo/markerless_mouse_1/DANNCE/weights/weights.rat.AVG.6cam.hdf5 +02/07/2023 12:23:38 PM INFO:dannce.engine.serve_data_DANNCE.prepare_data ./label3d_temp_dannce.mat +02/07/2023 12:23:38 PM INFO:dannce.engine.serve_data_DANNCE.prepare_data ./label3d_temp_dannce.mat +02/07/2023 12:23:38 PM INFO:dannce.interface.do_COM_load Experiment 0 using com3d: ./label3d_temp_dannce.mat +02/07/2023 12:23:38 PM INFO:dannce.interface.do_COM_load Removed 0 samples from the dataset because they either had COM positions over cthresh, or did not have matching sampleIDs in the COM file +02/07/2023 12:23:38 PM INFO:dannce.interface.do_COM_load Experiment 0 using com3d: ./label3d_temp_dannce.mat +02/07/2023 12:23:38 PM INFO:dannce.interface.do_COM_load Removed 0 samples from the dataset because they either had COM positions over cthresh, or did not have matching sampleIDs in the COM file +02/07/2023 12:23:38 PM INFO:dannce.engine.serve_data_DANNCE.prepare_data ./label3d_temp_dannce.mat +02/07/2023 12:23:38 PM INFO:dannce.engine.serve_data_DANNCE.prepare_data ./label3d_temp_dannce.mat +02/07/2023 12:23:38 PM INFO:dannce.interface.do_COM_load Experiment 1 using com3d: ./label3d_temp_dannce.mat +02/07/2023 12:23:38 PM INFO:dannce.interface.do_COM_load Removed 0 samples from the dataset because they either had COM positions over cthresh, or did not have matching sampleIDs in the COM file +02/07/2023 12:23:38 PM INFO:dannce.interface.do_COM_load Experiment 1 using com3d: ./label3d_temp_dannce.mat +02/07/2023 12:23:38 PM 
INFO:dannce.interface.do_COM_load Removed 0 samples from the dataset because they either had COM positions over cthresh, or did not have matching sampleIDs in the COM file +02/07/2023 12:23:38 PM INFO:dannce.engine.preprocessing.py.make_data_split TRAIN EXPTS: [0 1] +02/07/2023 12:23:38 PM INFO:None +02/07/2023 12:23:38 PM INFO:None +02/07/2023 12:23:38 PM INFO:dannce.interface.dannce_train Loading training data into memory. This can take a while to seek throughlarge sets of video. This process is much faster if the frame indicesare sorted in ascending order in your label data file. +02/07/2023 12:23:38 PM INFO:dannce.engine.preprocessing.py.make_data_split TRAIN EXPTS: [0 1] +02/07/2023 12:23:38 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 0_Camera1 +02/07/2023 12:23:38 PM INFO:None +02/07/2023 12:23:38 PM INFO:None +02/07/2023 12:23:38 PM INFO:dannce.interface.dannce_train Loading training data into memory. This can take a while to seek throughlarge sets of video. This process is much faster if the frame indicesare sorted in ascending order in your label data file. 
+02/07/2023 12:23:38 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 0_Camera1 +02/07/2023 12:23:40 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 0_Camera2 +02/07/2023 12:23:41 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 0_Camera2 +02/07/2023 12:23:41 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 0_Camera3 +02/07/2023 12:23:42 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 0_Camera3 +02/07/2023 12:23:42 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 0_Camera4 +02/07/2023 12:23:43 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 0_Camera4 +02/07/2023 12:23:43 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 0_Camera5 +02/07/2023 12:23:44 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 0_Camera6 +02/07/2023 12:23:44 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 0_Camera5 +02/07/2023 12:23:45 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 0_Camera6 +02/07/2023 12:24:03 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 1_Camera1 +02/07/2023 12:24:04 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 1_Camera1 +02/07/2023 12:24:04 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 1_Camera2 +02/07/2023 12:24:04 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 1_Camera2 +02/07/2023 12:24:05 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 1_Camera3 +02/07/2023 12:24:05 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 
1_Camera3 +02/07/2023 12:24:06 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 1_Camera4 +02/07/2023 12:24:07 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 1_Camera4 +02/07/2023 12:24:07 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 1_Camera5 +02/07/2023 12:24:07 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 1_Camera5 +02/07/2023 12:24:08 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 1_Camera6 +02/07/2023 12:24:09 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 1_Camera6 +02/07/2023 12:24:23 PM INFO:dannce.interface.dannce_train Loading validation data into memory +02/07/2023 12:24:23 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 0_Camera1 +02/07/2023 12:24:24 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 0_Camera2 +02/07/2023 12:24:24 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 0_Camera3 +02/07/2023 12:24:25 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 0_Camera4 +02/07/2023 12:24:25 PM INFO:dannce.interface.dannce_train Loading validation data into memory +02/07/2023 12:24:25 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 0_Camera1 +02/07/2023 12:24:25 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 0_Camera5 +02/07/2023 12:24:26 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 0_Camera6 +02/07/2023 12:24:26 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 0_Camera2 +02/07/2023 12:24:26 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 0_Camera3 +02/07/2023 12:24:27 PM 
INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 0_Camera4 +02/07/2023 12:24:28 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 0_Camera5 +02/07/2023 12:24:28 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 0_Camera6 +02/07/2023 12:24:33 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 1_Camera1 +02/07/2023 12:24:34 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 1_Camera2 +02/07/2023 12:24:34 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 1_Camera3 +02/07/2023 12:24:35 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 1_Camera4 +02/07/2023 12:24:35 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 1_Camera5 +02/07/2023 12:24:36 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 1_Camera6 +02/07/2023 12:24:36 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 1_Camera1 +02/07/2023 12:24:37 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 1_Camera2 +02/07/2023 12:24:37 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 1_Camera3 +02/07/2023 12:24:38 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 1_Camera4 +02/07/2023 12:24:38 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 1_Camera5 +02/07/2023 12:24:39 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 1_Camera6 +02/07/2023 12:24:46 PM INFO:dannce.interface.dannce_train Using default n_rand_views augmentation with 6 views and with replacement +02/07/2023 12:24:46 PM INFO:dannce.interface.dannce_train To disable n_rand_views augmentation, set it to None in the 
config. +02/07/2023 12:24:46 PM INFO:dannce.interface.dannce_train Initializing Network... +02/07/2023 12:24:49 PM INFO:dannce.interface.dannce_train Using default n_rand_views augmentation with 6 views and with replacement +02/07/2023 12:24:49 PM INFO:dannce.interface.dannce_train To disable n_rand_views augmentation, set it to None in the config. +02/07/2023 12:24:49 PM INFO:dannce.interface.dannce_train Initializing Network... +02/07/2023 12:26:00 PM INFO:Using MirroredStrategy with devices ('/job:localhost/replica:0/task:0/device:GPU:0',) +02/07/2023 12:26:00 PM INFO:dannce.interface.dannce_train Number of devices: 1 +02/07/2023 12:26:00 PM INFO:dannce.interface.dannce_train NUM CAMERAS: 6 +02/07/2023 12:26:00 PM INFO:dannce.engine.nets.norm_fun using layer normalization +02/07/2023 12:26:00 PM INFO:Using MirroredStrategy with devices ('/job:localhost/replica:0/task:0/device:GPU:0',) +02/07/2023 12:26:00 PM INFO:dannce.interface.dannce_train Number of devices: 1 +02/07/2023 12:26:00 PM INFO:dannce.interface.dannce_train NUM CAMERAS: 6 +02/07/2023 12:26:00 PM INFO:dannce.engine.nets.norm_fun using layer normalization +02/07/2023 12:26:00 PM INFO:dannce.engine.nets.norm_fun calling layer norm fun +02/07/2023 12:26:00 PM INFO:dannce.engine.nets.norm_fun calling layer norm fun +02/07/2023 12:26:00 PM INFO:dannce.engine.nets.norm_fun calling layer norm fun +02/07/2023 12:26:01 PM INFO:dannce.engine.nets.norm_fun calling layer norm fun +02/07/2023 12:26:01 PM INFO:dannce.engine.nets.norm_fun calling layer norm fun +02/07/2023 12:26:01 PM INFO:dannce.engine.nets.norm_fun calling layer norm fun +02/07/2023 12:26:01 PM INFO:dannce.engine.nets.norm_fun calling layer norm fun +02/07/2023 12:26:01 PM INFO:dannce.engine.nets.norm_fun calling layer norm fun +02/07/2023 12:26:01 PM INFO:dannce.engine.nets.norm_fun calling layer norm fun +02/07/2023 12:26:01 PM INFO:dannce.engine.nets.norm_fun calling layer norm fun +02/07/2023 12:26:01 PM INFO:dannce.engine.nets.norm_fun 
calling layer norm fun +02/07/2023 12:26:01 PM INFO:dannce.engine.nets.norm_fun calling layer norm fun +02/07/2023 12:26:01 PM INFO:dannce.engine.nets.norm_fun calling layer norm fun +02/07/2023 12:26:01 PM INFO:dannce.engine.nets.norm_fun calling layer norm fun +02/07/2023 12:26:01 PM INFO:dannce.engine.nets.norm_fun calling layer norm fun +02/07/2023 12:26:01 PM INFO:dannce.engine.nets.norm_fun calling layer norm fun +02/07/2023 12:26:01 PM INFO:dannce.engine.nets.norm_fun calling layer norm fun +02/07/2023 12:26:01 PM INFO:dannce.engine.nets.norm_fun calling layer norm fun +02/07/2023 12:26:01 PM INFO:dannce.engine.nets.norm_fun calling layer norm fun +02/07/2023 12:26:01 PM INFO:dannce.engine.nets.norm_fun calling layer norm fun +02/07/2023 12:26:01 PM INFO:dannce.engine.nets.norm_fun calling layer norm fun +02/07/2023 12:26:01 PM INFO:dannce.engine.nets.norm_fun calling layer norm fun +02/07/2023 12:26:01 PM INFO:dannce.engine.nets.norm_fun calling layer norm fun +02/07/2023 12:26:01 PM INFO:dannce.engine.nets.norm_fun calling layer norm fun +02/07/2023 12:26:01 PM INFO:dannce.engine.nets.norm_fun calling layer norm fun +02/07/2023 12:26:01 PM INFO:dannce.engine.nets.norm_fun calling layer norm fun +02/07/2023 12:26:01 PM INFO:dannce.engine.nets.norm_fun calling layer norm fun +02/07/2023 12:26:01 PM INFO:dannce.engine.nets.norm_fun calling layer norm fun +02/07/2023 12:26:01 PM INFO:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',). +02/07/2023 12:26:01 PM INFO:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',). +02/07/2023 12:26:01 PM INFO:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',). +02/07/2023 12:26:01 PM INFO:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',). 
+02/07/2023 12:26:01 PM INFO:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',). +02/07/2023 12:26:01 PM INFO:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',). +02/07/2023 12:26:01 PM INFO:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',). +02/07/2023 12:26:01 PM INFO:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',). +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: image_input, weights: input_1 +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: conv3d, weights: conv3d_1 +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: instance_normalization, weights: instance_normalization_1 +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: activation, weights: activation_1 +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: conv3d_1, weights: conv3d_2 +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: instance_normalization_1, weights: instance_normalization_2 +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: activation_1, weights: activation_2 +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: max_pooling3d, weights: max_pooling3d_1 +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: conv3d_2, weights: conv3d_3 +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: 
instance_normalization_2, weights: instance_normalization_3 +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: activation_2, weights: activation_3 +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: conv3d_3, weights: conv3d_4 +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: instance_normalization_3, weights: instance_normalization_4 +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: activation_3, weights: activation_4 +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: max_pooling3d_1, weights: max_pooling3d_2 +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: conv3d_4, weights: conv3d_5 +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: instance_normalization_4, weights: instance_normalization_5 +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: activation_4, weights: activation_5 +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: conv3d_5, weights: conv3d_6 +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: instance_normalization_5, weights: instance_normalization_6 +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: activation_5, weights: activation_6 +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: max_pooling3d_2, weights: max_pooling3d_3 +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: conv3d_6, weights: conv3d_7 +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch 
in layer name, model: instance_normalization_6, weights: instance_normalization_7 +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: activation_6, weights: activation_7 +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: conv3d_7, weights: conv3d_8 +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: instance_normalization_7, weights: instance_normalization_8 +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: activation_7, weights: activation_8 +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: conv3d_transpose, weights: conv3d_transpose_1 +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: concatenate, weights: concatenate_1 +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: conv3d_8, weights: conv3d_9 +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: instance_normalization_8, weights: instance_normalization_9 +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: activation_8, weights: activation_9 +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: conv3d_9, weights: conv3d_10 +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: instance_normalization_9, weights: instance_normalization_10 +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: activation_9, weights: activation_10 +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: conv3d_transpose_1, weights: conv3d_transpose_2 +02/07/2023 12:26:02 PM 
INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: concatenate_1, weights: concatenate_2 +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: conv3d_10, weights: conv3d_11 +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: instance_normalization_10, weights: instance_normalization_11 +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: activation_10, weights: activation_11 +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: conv3d_11, weights: conv3d_12 +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: instance_normalization_11, weights: instance_normalization_12 +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: activation_11, weights: activation_12 +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: conv3d_transpose_2, weights: conv3d_transpose_3 +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: concatenate_2, weights: concatenate_3 +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: conv3d_12, weights: conv3d_13 +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: instance_normalization_12, weights: instance_normalization_13 +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: activation_12, weights: activation_13 +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: conv3d_13, weights: conv3d_14 +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: instance_normalization_13, weights: 
instance_normalization_14 +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: activation_13, weights: activation_14 +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: image_input, weights: input_1 +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: conv3d, weights: conv3d_1 +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: instance_normalization, weights: instance_normalization_1 +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: activation, weights: activation_1 +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: conv3d_1, weights: conv3d_2 +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: instance_normalization_1, weights: instance_normalization_2 +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: activation_1, weights: activation_2 +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: max_pooling3d, weights: max_pooling3d_1 +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: conv3d_2, weights: conv3d_3 +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: instance_normalization_2, weights: instance_normalization_3 +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: activation_2, weights: activation_3 +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: conv3d_3, weights: conv3d_4 +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: instance_normalization_3, 
weights: instance_normalization_4 +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: activation_3, weights: activation_4 +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: max_pooling3d_1, weights: max_pooling3d_2 +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: conv3d_4, weights: conv3d_5 +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: instance_normalization_4, weights: instance_normalization_5 +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: activation_4, weights: activation_5 +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: conv3d_5, weights: conv3d_6 +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: instance_normalization_5, weights: instance_normalization_6 +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: activation_5, weights: activation_6 +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: max_pooling3d_2, weights: max_pooling3d_3 +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: conv3d_6, weights: conv3d_7 +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: instance_normalization_6, weights: instance_normalization_7 +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: activation_6, weights: activation_7 +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: conv3d_7, weights: conv3d_8 +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: 
instance_normalization_7, weights: instance_normalization_8 +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: activation_7, weights: activation_8 +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: conv3d_transpose, weights: conv3d_transpose_1 +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: concatenate, weights: concatenate_1 +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: conv3d_8, weights: conv3d_9 +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: instance_normalization_8, weights: instance_normalization_9 +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: activation_8, weights: activation_9 +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: conv3d_9, weights: conv3d_10 +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: instance_normalization_9, weights: instance_normalization_10 +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: activation_9, weights: activation_10 +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: conv3d_transpose_1, weights: conv3d_transpose_2 +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: concatenate_1, weights: concatenate_2 +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: conv3d_10, weights: conv3d_11 +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: instance_normalization_10, weights: instance_normalization_11 +02/07/2023 12:26:02 PM 
INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: activation_10, weights: activation_11 +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: conv3d_11, weights: conv3d_12 +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: instance_normalization_11, weights: instance_normalization_12 +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: activation_11, weights: activation_12 +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: conv3d_transpose_2, weights: conv3d_transpose_3 +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: concatenate_2, weights: concatenate_3 +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: conv3d_12, weights: conv3d_13 +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: instance_normalization_12, weights: instance_normalization_13 +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: activation_12, weights: activation_13 +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: conv3d_13, weights: conv3d_14 +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: instance_normalization_13, weights: instance_normalization_14 +02/07/2023 12:26:02 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: activation_13, weights: activation_14 +02/07/2023 12:26:04 PM INFO:.finetune_AVG evaluating weight deltas in the first conv layer +02/07/2023 12:26:04 PM INFO:.finetune_AVG pre-weights +02/07/2023 12:26:04 PM INFO:.finetune_AVG 0.0 +02/07/2023 12:26:04 PM INFO:.finetune_AVG post-weights +02/07/2023 12:26:04 PM 
INFO:.finetune_AVG -1.5876454 +02/07/2023 12:26:04 PM INFO:.finetune_AVG delta: +02/07/2023 12:26:04 PM INFO:.finetune_AVG 1.5876454 +02/07/2023 12:26:04 PM INFO:.finetune_AVG evaluating weight deltas in the first conv layer +02/07/2023 12:26:04 PM INFO:.finetune_AVG pre-weights +02/07/2023 12:26:04 PM INFO:.finetune_AVG 0.0 +02/07/2023 12:26:04 PM INFO:.finetune_AVG post-weights +02/07/2023 12:26:04 PM INFO:.finetune_AVG -1.5876454 +02/07/2023 12:26:04 PM INFO:.finetune_AVG delta: +02/07/2023 12:26:04 PM INFO:.finetune_AVG 1.5876454 +02/07/2023 12:26:04 PM INFO:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',). +02/07/2023 12:26:04 PM INFO:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',). +02/07/2023 12:26:04 PM INFO:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',). +02/07/2023 12:26:04 PM INFO:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',). +02/07/2023 12:26:04 PM INFO:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',). +02/07/2023 12:26:04 PM INFO:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',). +02/07/2023 12:26:04 PM INFO:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',). +02/07/2023 12:26:04 PM INFO:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',). 
+02/07/2023 12:26:04 PM INFO:dannce.interface.dannce_train COMPLETE + +02/07/2023 12:26:04 PM INFO:dannce.interface.dannce_train COMPLETE + +02/07/2023 12:26:09 PM INFO:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',). +02/07/2023 12:26:09 PM INFO:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',). +02/07/2023 12:26:09 PM INFO:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',). +02/07/2023 12:26:09 PM INFO:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',). +02/07/2023 12:56:41 PM INFO:dance.cluster.grid.GridHandler.submit_jobs dict_values([10, '../../demo/markerless_mouse_1/DANNCE/grid_1/']) +02/07/2023 12:56:41 PM INFO:dance.cluster.grid.GridHandler.submit_jobs dict_values([5, '../../demo/markerless_mouse_1/DANNCE/grid_2/']) +02/07/2023 12:56:41 PM INFO:dance.cluster.grid.GridHandler.submit_jobs Command issued: sbatch --wait --array=0-1 --job-name=trainDannce -p gpu-common,dsplus-gpu --mem=80000 -t 3-00:00 --gres=gpu:1 -N 1 -c 16 --account=plusds --wrap=". 
~/.bashrc; conda activate dannce_aux; dannce-train-single-batch config_mousetest.yaml grid_config.yaml" +02/07/2023 12:59:29 PM INFO:com_train_dir set to: ../../demo/markerless_mouse_1/COM/train_test/ +02/07/2023 12:59:29 PM INFO:com_predict_dir set to: ../../demo/markerless_mouse_1/COM/predict_test/ +02/07/2023 12:59:29 PM INFO:com_predict_weights set to: ../../demo/markerless_mouse_1/COM/train_results/weights.250-0.00036.hdf5 +02/07/2023 12:59:29 PM INFO:dannce_train_dir set to: ../../demo/markerless_mouse_1/DANNCE/train_results/AVG_MAX/ +02/07/2023 12:59:29 PM INFO:dannce_predict_dir set to: ../../demo/markerless_mouse_1/DANNCE/predict_test/ +02/07/2023 12:59:29 PM INFO:dannce_predict_model set to: ../../demo/markerless_mouse_1/DANNCE/train_results/AVG/weights.1200-12.77642.hdf5 +02/07/2023 12:59:29 PM INFO:exp set to: [{'label3d_file': './label3d_temp_dannce.mat', 'viddir': '../../demo/markerless_mouse_1/videos/'}, {'label3d_file': './label3d_temp_dannce.mat', 'viddir': '../../demo/markerless_mouse_2/videos/'}] +02/07/2023 12:59:29 PM INFO:viddir set to: ../../demo/markerless_mouse_1/videos/ +02/07/2023 12:59:29 PM INFO:gpu_id set to: 0 +02/07/2023 12:59:29 PM INFO:io_config set to: io.yaml +02/07/2023 12:59:29 PM INFO:new_n_channels_out set to: 22 +02/07/2023 12:59:29 PM INFO:batch_size set to: 4 +02/07/2023 12:59:29 PM INFO:epochs set to: 1 +02/07/2023 12:59:29 PM INFO:train_mode set to: finetune +02/07/2023 12:59:29 PM INFO:slurm_config set to: ../../cluster/duke.yaml +02/07/2023 12:59:29 PM INFO:num_validation_per_exp set to: 4 +02/07/2023 12:59:29 PM INFO:vol_size set to: 120 +02/07/2023 12:59:29 PM INFO:nvox set to: 64 +02/07/2023 12:59:29 PM INFO:max_num_samples set to: 10 +02/07/2023 12:59:29 PM INFO:start_batch set to: 0 +02/07/2023 12:59:29 PM INFO:dannce_finetune_weights set to: ../../demo/markerless_mouse_1/DANNCE/weights/ +02/07/2023 12:59:29 PM INFO:metric set to: ['euclidean_distance_3D', 'centered_euclidean_distance_3D'] +02/07/2023 12:59:29 PM 
INFO:log_dest set to: ../logs/ +02/07/2023 12:59:29 PM INFO:log_level set to: INFO +02/07/2023 12:59:29 PM INFO:base_config set to: config_mousetest.yaml +02/07/2023 12:59:29 PM INFO:crop_height set to: None +02/07/2023 12:59:29 PM INFO:crop_width set to: None +02/07/2023 12:59:29 PM INFO:camnames set to: None +02/07/2023 12:59:29 PM INFO:n_channels_out set to: 20 +02/07/2023 12:59:29 PM INFO:sigma set to: 10 +02/07/2023 12:59:29 PM INFO:verbose set to: 1 +02/07/2023 12:59:29 PM INFO:net set to: None +02/07/2023 12:59:29 PM INFO:immode set to: vid +02/07/2023 12:59:29 PM INFO:mono set to: False +02/07/2023 12:59:29 PM INFO:mirror set to: False +02/07/2023 12:59:29 PM INFO:norm_method set to: layer +02/07/2023 12:59:29 PM INFO:loss set to: mask_nan_keep_loss +02/07/2023 12:59:29 PM INFO:huber-delta set to: 1.35 +02/07/2023 12:59:29 PM INFO:num_train_per_exp set to: None +02/07/2023 12:59:29 PM INFO:lr set to: 0.001 +02/07/2023 12:59:29 PM INFO:augment_hue set to: False +02/07/2023 12:59:29 PM INFO:augment_brightness set to: False +02/07/2023 12:59:29 PM INFO:augment_hue_val set to: 0.05 +02/07/2023 12:59:29 PM INFO:augment_bright_val set to: 0.05 +02/07/2023 12:59:29 PM INFO:augment_rotation_val set to: 5 +02/07/2023 12:59:29 PM INFO:data_split_seed set to: None +02/07/2023 12:59:29 PM INFO:valid_exp set to: None +02/07/2023 12:59:29 PM INFO:net_type set to: AVG +02/07/2023 12:59:29 PM INFO:com_fromlabels set to: False +02/07/2023 12:59:29 PM INFO:medfilt_window set to: None +02/07/2023 12:59:29 PM INFO:com_file set to: None +02/07/2023 12:59:29 PM INFO:new_last_kernel_size set to: [3, 3, 3] +02/07/2023 12:59:29 PM INFO:n_layers_locked set to: 2 +02/07/2023 12:59:29 PM INFO:vmin set to: None +02/07/2023 12:59:29 PM INFO:vmax set to: None +02/07/2023 12:59:29 PM INFO:interp set to: nearest +02/07/2023 12:59:29 PM INFO:depth set to: False +02/07/2023 12:59:29 PM INFO:comthresh set to: 0 +02/07/2023 12:59:29 PM INFO:weighted set to: False +02/07/2023 12:59:29 PM 
INFO:com_method set to: median +02/07/2023 12:59:29 PM INFO:cthresh set to: None +02/07/2023 12:59:29 PM INFO:channel_combo set to: None +02/07/2023 12:59:29 PM INFO:predict_mode set to: torch +02/07/2023 12:59:29 PM INFO:n_views set to: 6 +02/07/2023 12:59:29 PM INFO:rotate set to: True +02/07/2023 12:59:29 PM INFO:augment_continuous_rotation set to: False +02/07/2023 12:59:29 PM INFO:mirror_augmentation set to: False +02/07/2023 12:59:29 PM INFO:drop_landmark set to: None +02/07/2023 12:59:29 PM INFO:use_npy set to: False +02/07/2023 12:59:29 PM INFO:rand_view_replace set to: True +02/07/2023 12:59:29 PM INFO:n_rand_views set to: 0 +02/07/2023 12:59:29 PM INFO:multi_gpu_train set to: False +02/07/2023 12:59:29 PM INFO:heatmap_reg set to: False +02/07/2023 12:59:29 PM INFO:heatmap_reg_coeff set to: 0.01 +02/07/2023 12:59:29 PM INFO:save_pred_targets set to: False +02/07/2023 12:59:29 PM INFO:avg+max set to: 10.0 +02/07/2023 12:59:29 PM INFO:n_channels_in set to: None +02/07/2023 12:59:29 PM INFO:extension set to: None +02/07/2023 12:59:29 PM INFO:vid_dir_flag set to: None +02/07/2023 12:59:29 PM INFO:chunks set to: None +02/07/2023 12:59:29 PM INFO:lockfirst set to: None +02/07/2023 12:59:29 PM INFO:load_valid set to: None +02/07/2023 12:59:29 PM INFO:right_keypoints set to: None +02/07/2023 12:59:29 PM INFO:left_keypoints set to: None +02/07/2023 12:59:29 PM INFO:raw_im_h set to: None +02/07/2023 12:59:29 PM INFO:raw_im_w set to: None +02/07/2023 12:59:29 PM INFO:n_instances set to: 1 +02/07/2023 12:59:29 PM INFO:start_sample set to: None +02/07/2023 12:59:29 PM INFO:write_npy set to: None +02/07/2023 12:59:29 PM INFO:expval set to: None +02/07/2023 12:59:29 PM INFO:com_thresh set to: None +02/07/2023 12:59:29 PM INFO:cam3_train set to: None +02/07/2023 12:59:29 PM INFO:debug_volume_tifdir set to: None +02/07/2023 12:59:29 PM INFO:downfac set to: None +02/07/2023 12:59:29 PM INFO:from_weights set to: None +02/07/2023 12:59:29 PM INFO:dannce_predict_vol_tifdir set 
to: None +02/07/2023 12:59:29 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting vid_dir_flag to True. +02/07/2023 12:59:29 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting extension to .mp4. +02/07/2023 12:59:29 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting chunks to {'Camera1': array([ 0, 3000, 6000, 9000, 12000, 15000]), 'Camera2': array([ 0, 3000, 6000, 9000, 12000, 15000]), 'Camera3': array([ 0, 3000, 6000, 9000, 12000, 15000]), 'Camera4': array([ 0, 3000, 6000, 9000, 12000, 15000]), 'Camera5': array([ 0, 3000, 6000, 9000, 12000, 15000]), 'Camera6': array([ 0, 3000, 6000, 9000, 12000, 15000])}. +02/07/2023 12:59:29 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting n_channels_in to 3. +02/07/2023 12:59:29 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting raw_im_h to 1024. +02/07/2023 12:59:29 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting raw_im_w to 1152. +02/07/2023 12:59:29 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting expval to True. +02/07/2023 12:59:29 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting net_type to AVG. +02/07/2023 12:59:29 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting expval to True. +02/07/2023 12:59:29 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting net to finetune_AVG. +02/07/2023 12:59:31 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting crop_height to [0, 1024]. +02/07/2023 12:59:31 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting crop_width to [0, 1152]. +02/07/2023 12:59:31 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting maxbatch to 2. +02/07/2023 12:59:31 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting start_batch to 0. +02/07/2023 12:59:31 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting vmin to -60.0. +02/07/2023 12:59:31 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting vmax to 60.0. 
+02/07/2023 12:59:31 PM INFO:dannce.interface.dannce_train Fine-tuning from ../../demo/markerless_mouse_1/DANNCE/weights/weights.rat.AVG.6cam.hdf5 +02/07/2023 12:59:31 PM INFO:dannce.engine.serve_data_DANNCE.prepare_data ./label3d_temp_dannce.mat +02/07/2023 12:59:31 PM INFO:dannce.interface.do_COM_load Experiment 0 using com3d: ./label3d_temp_dannce.mat +02/07/2023 12:59:31 PM INFO:dannce.interface.do_COM_load Removed 0 samples from the dataset because they either had COM positions over cthresh, or did not have matching sampleIDs in the COM file +02/07/2023 12:59:31 PM INFO:dannce.engine.serve_data_DANNCE.prepare_data ./label3d_temp_dannce.mat +02/07/2023 12:59:32 PM INFO:dannce.interface.do_COM_load Experiment 1 using com3d: ./label3d_temp_dannce.mat +02/07/2023 12:59:32 PM INFO:dannce.interface.do_COM_load Removed 0 samples from the dataset because they either had COM positions over cthresh, or did not have matching sampleIDs in the COM file +02/07/2023 12:59:32 PM INFO:dannce.engine.preprocessing.py.make_data_split TRAIN EXPTS: [0 1] +02/07/2023 12:59:32 PM INFO:None +02/07/2023 12:59:32 PM INFO:None +02/07/2023 12:59:32 PM INFO:dannce.interface.dannce_train Loading training data into memory. This can take a while to seek throughlarge sets of video. This process is much faster if the frame indicesare sorted in ascending order in your label data file. 
+02/07/2023 12:59:32 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 0_Camera1 +02/07/2023 12:59:32 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 0_Camera2 +02/07/2023 12:59:33 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 0_Camera3 +02/07/2023 12:59:34 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 0_Camera4 +02/07/2023 12:59:34 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 0_Camera5 +02/07/2023 12:59:35 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 0_Camera6 +02/07/2023 12:59:55 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 1_Camera1 +02/07/2023 12:59:56 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 1_Camera2 +02/07/2023 12:59:57 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 1_Camera3 +02/07/2023 12:59:58 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 1_Camera4 +02/07/2023 12:59:59 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 1_Camera5 +02/07/2023 01:00:00 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 1_Camera6 +02/07/2023 01:00:16 PM INFO:dannce.interface.dannce_train Loading validation data into memory +02/07/2023 01:00:16 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 0_Camera1 +02/07/2023 01:00:17 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 0_Camera2 +02/07/2023 01:00:17 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 0_Camera3 +02/07/2023 01:00:18 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 0_Camera4 +02/07/2023 
01:00:19 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 0_Camera5 +02/07/2023 01:00:19 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 0_Camera6 +02/07/2023 01:00:29 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 1_Camera1 +02/07/2023 01:00:30 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 1_Camera2 +02/07/2023 01:00:30 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 1_Camera3 +02/07/2023 01:00:31 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 1_Camera4 +02/07/2023 01:00:31 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 1_Camera5 +02/07/2023 01:00:32 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 1_Camera6 +02/07/2023 01:00:43 PM INFO:dannce.interface.dannce_train Using default n_rand_views augmentation with 6 views and with replacement +02/07/2023 01:00:43 PM INFO:dannce.interface.dannce_train To disable n_rand_views augmentation, set it to None in the config. +02/07/2023 01:00:43 PM INFO:dannce.interface.dannce_train Initializing Network... 
+02/07/2023 01:00:46 PM INFO:Using MirroredStrategy with devices ('/job:localhost/replica:0/task:0/device:GPU:0',) +02/07/2023 01:00:46 PM INFO:dannce.interface.dannce_train Number of devices: 1 +02/07/2023 01:00:46 PM INFO:dannce.interface.dannce_train NUM CAMERAS: 6 +02/07/2023 01:00:46 PM INFO:dannce.engine.nets.norm_fun using layer normalization +02/07/2023 01:00:46 PM INFO:dannce.engine.nets.norm_fun calling layer norm fun +02/07/2023 01:00:46 PM INFO:dannce.engine.nets.norm_fun calling layer norm fun +02/07/2023 01:00:46 PM INFO:dannce.engine.nets.norm_fun calling layer norm fun +02/07/2023 01:00:46 PM INFO:dannce.engine.nets.norm_fun calling layer norm fun +02/07/2023 01:00:46 PM INFO:dannce.engine.nets.norm_fun calling layer norm fun +02/07/2023 01:00:46 PM INFO:dannce.engine.nets.norm_fun calling layer norm fun +02/07/2023 01:00:46 PM INFO:dannce.engine.nets.norm_fun calling layer norm fun +02/07/2023 01:00:46 PM INFO:dannce.engine.nets.norm_fun calling layer norm fun +02/07/2023 01:00:46 PM INFO:dannce.engine.nets.norm_fun calling layer norm fun +02/07/2023 01:00:46 PM INFO:dannce.engine.nets.norm_fun calling layer norm fun +02/07/2023 01:00:46 PM INFO:dannce.engine.nets.norm_fun calling layer norm fun +02/07/2023 01:00:46 PM INFO:dannce.engine.nets.norm_fun calling layer norm fun +02/07/2023 01:00:46 PM INFO:dannce.engine.nets.norm_fun calling layer norm fun +02/07/2023 01:00:46 PM INFO:dannce.engine.nets.norm_fun calling layer norm fun +02/07/2023 01:00:47 PM INFO:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',). +02/07/2023 01:00:47 PM INFO:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',). +02/07/2023 01:00:47 PM INFO:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',). 
+02/07/2023 01:00:47 PM INFO:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',). +02/07/2023 01:00:47 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: image_input, weights: input_1 +02/07/2023 01:00:47 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: conv3d, weights: conv3d_1 +02/07/2023 01:00:47 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: instance_normalization, weights: instance_normalization_1 +02/07/2023 01:00:47 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: activation, weights: activation_1 +02/07/2023 01:00:47 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: conv3d_1, weights: conv3d_2 +02/07/2023 01:00:47 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: instance_normalization_1, weights: instance_normalization_2 +02/07/2023 01:00:47 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: activation_1, weights: activation_2 +02/07/2023 01:00:47 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: max_pooling3d, weights: max_pooling3d_1 +02/07/2023 01:00:47 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: conv3d_2, weights: conv3d_3 +02/07/2023 01:00:47 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: instance_normalization_2, weights: instance_normalization_3 +02/07/2023 01:00:47 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: activation_2, weights: activation_3 +02/07/2023 01:00:47 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: conv3d_3, weights: conv3d_4 +02/07/2023 01:00:47 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: instance_normalization_3, weights: 
instance_normalization_4 +02/07/2023 01:00:47 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: activation_3, weights: activation_4 +02/07/2023 01:00:47 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: max_pooling3d_1, weights: max_pooling3d_2 +02/07/2023 01:00:47 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: conv3d_4, weights: conv3d_5 +02/07/2023 01:00:47 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: instance_normalization_4, weights: instance_normalization_5 +02/07/2023 01:00:47 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: activation_4, weights: activation_5 +02/07/2023 01:00:47 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: conv3d_5, weights: conv3d_6 +02/07/2023 01:00:47 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: instance_normalization_5, weights: instance_normalization_6 +02/07/2023 01:00:47 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: activation_5, weights: activation_6 +02/07/2023 01:00:47 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: max_pooling3d_2, weights: max_pooling3d_3 +02/07/2023 01:00:47 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: conv3d_6, weights: conv3d_7 +02/07/2023 01:00:47 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: instance_normalization_6, weights: instance_normalization_7 +02/07/2023 01:00:47 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: activation_6, weights: activation_7 +02/07/2023 01:00:47 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: conv3d_7, weights: conv3d_8 +02/07/2023 01:00:47 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: 
instance_normalization_7, weights: instance_normalization_8 +02/07/2023 01:00:47 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: activation_7, weights: activation_8 +02/07/2023 01:00:47 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: conv3d_transpose, weights: conv3d_transpose_1 +02/07/2023 01:00:47 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: concatenate, weights: concatenate_1 +02/07/2023 01:00:47 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: conv3d_8, weights: conv3d_9 +02/07/2023 01:00:47 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: instance_normalization_8, weights: instance_normalization_9 +02/07/2023 01:00:47 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: activation_8, weights: activation_9 +02/07/2023 01:00:47 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: conv3d_9, weights: conv3d_10 +02/07/2023 01:00:47 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: instance_normalization_9, weights: instance_normalization_10 +02/07/2023 01:00:47 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: activation_9, weights: activation_10 +02/07/2023 01:00:47 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: conv3d_transpose_1, weights: conv3d_transpose_2 +02/07/2023 01:00:47 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: concatenate_1, weights: concatenate_2 +02/07/2023 01:00:47 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: conv3d_10, weights: conv3d_11 +02/07/2023 01:00:47 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: instance_normalization_10, weights: instance_normalization_11 +02/07/2023 01:00:47 PM 
INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: activation_10, weights: activation_11 +02/07/2023 01:00:47 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: conv3d_11, weights: conv3d_12 +02/07/2023 01:00:47 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: instance_normalization_11, weights: instance_normalization_12 +02/07/2023 01:00:47 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: activation_11, weights: activation_12 +02/07/2023 01:00:47 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: conv3d_transpose_2, weights: conv3d_transpose_3 +02/07/2023 01:00:47 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: concatenate_2, weights: concatenate_3 +02/07/2023 01:00:47 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: conv3d_12, weights: conv3d_13 +02/07/2023 01:00:47 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: instance_normalization_12, weights: instance_normalization_13 +02/07/2023 01:00:47 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: activation_12, weights: activation_13 +02/07/2023 01:00:47 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: conv3d_13, weights: conv3d_14 +02/07/2023 01:00:47 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: instance_normalization_13, weights: instance_normalization_14 +02/07/2023 01:00:47 PM INFO:dannce.engine.nets.renameLayers Correcting mismatch in layer name, model: activation_13, weights: activation_14 +02/07/2023 01:00:49 PM INFO:.finetune_AVG evaluating weight deltas in the first conv layer +02/07/2023 01:00:49 PM INFO:.finetune_AVG pre-weights +02/07/2023 01:00:49 PM INFO:.finetune_AVG 0.0 +02/07/2023 01:00:49 PM INFO:.finetune_AVG post-weights +02/07/2023 01:00:49 PM 
INFO:.finetune_AVG -1.5876454 +02/07/2023 01:00:49 PM INFO:.finetune_AVG delta: +02/07/2023 01:00:49 PM INFO:.finetune_AVG 1.5876454 +02/07/2023 01:00:49 PM INFO:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',). +02/07/2023 01:00:49 PM INFO:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',). +02/07/2023 01:00:49 PM INFO:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',). +02/07/2023 01:00:49 PM INFO:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',). +02/07/2023 01:00:50 PM INFO:dannce.interface.dannce_train COMPLETE + +02/07/2023 01:00:53 PM INFO:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',). +02/07/2023 01:00:53 PM INFO:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',). +02/07/2023 01:01:24 PM INFO:dannce.interface.dannce_train Renaming weights file with best epoch description +02/07/2023 01:01:24 PM INFO:dannce.interface.dannce_train Saving full model at end of training +02/07/2023 01:01:27 PM INFO:dannce.interface.dannce_train Saving predictions for ../../demo/markerless_mouse_1/DANNCE/train_results/AVG_MAX/weights.0-456.55078.hdf5 and ../../demo/markerless_mouse_1/DANNCE/train_results/AVG_MAX/fullmodel_weights/fullmodel_end.hdf5 +02/07/2023 01:01:27 PM INFO:dannce.interface.dannce_train done! 
+02/08/2023 12:35:24 PM INFO:com_train_dir set to: ../../demo/markerless_mouse_1/COM/train_test/ +02/08/2023 12:35:24 PM INFO:com_predict_dir set to: ../../demo/markerless_mouse_1/COM/predict_test/ +02/08/2023 12:35:24 PM INFO:com_predict_weights set to: ../../demo/markerless_mouse_1/COM/train_results/weights.250-0.00036.hdf5 +02/08/2023 12:35:24 PM INFO:dannce_train_dir set to: ../../demo/markerless_mouse_1/DANNCE/train_test/ +02/08/2023 12:35:24 PM INFO:dannce_predict_dir set to: ../../demo/markerless_mouse_1/DANNCE/predict_test/ +02/08/2023 12:35:24 PM INFO:dannce_predict_model set to: ../../demo/markerless_mouse_1/DANNCE/train_results/weights.12000-0.00014.hdf5 +02/08/2023 12:35:24 PM INFO:exp set to: [{'label3d_file': './label3d_temp_dannce.mat', 'viddir': '../../demo/markerless_mouse_1/videos/'}, {'label3d_file': './label3d_temp_dannce.mat', 'viddir': '../../demo/markerless_mouse_2/videos/'}] +02/08/2023 12:35:24 PM INFO:viddir set to: ../../demo/markerless_mouse_1/videos/ +02/08/2023 12:35:24 PM INFO:gpu_id set to: 0 +02/08/2023 12:35:24 PM INFO:io_config set to: io.yaml +02/08/2023 12:35:24 PM INFO:new_n_channels_out set to: 22 +02/08/2023 12:35:24 PM INFO:batch_size set to: 4 +02/08/2023 12:35:24 PM INFO:epochs set to: 1 +02/08/2023 12:35:24 PM INFO:train_mode set to: finetune +02/08/2023 12:35:24 PM INFO:slurm_config set to: ../../cluster/duke.yaml +02/08/2023 12:35:24 PM INFO:num_validation_per_exp set to: 4 +02/08/2023 12:35:24 PM INFO:vol_size set to: 120 +02/08/2023 12:35:24 PM INFO:nvox set to: 64 +02/08/2023 12:35:24 PM INFO:max_num_samples set to: 10 +02/08/2023 12:35:24 PM INFO:start_batch set to: 0 +02/08/2023 12:35:24 PM INFO:dannce_finetune_weights set to: ../../demo/markerless_mouse_1/DANNCE/weights/ +02/08/2023 12:35:24 PM INFO:metric set to: ['euclidean_distance_3D', 'centered_euclidean_distance_3D'] +02/08/2023 12:35:24 PM INFO:log_dest set to: ../logs/ +02/08/2023 12:35:24 PM INFO:log_level set to: INFO +02/08/2023 12:35:24 PM 
INFO:base_config set to: config_mousetest.yaml +02/08/2023 12:35:24 PM INFO:crop_height set to: None +02/08/2023 12:35:24 PM INFO:crop_width set to: None +02/08/2023 12:35:24 PM INFO:camnames set to: None +02/08/2023 12:35:24 PM INFO:n_channels_out set to: 20 +02/08/2023 12:35:24 PM INFO:sigma set to: 10 +02/08/2023 12:35:24 PM INFO:verbose set to: 1 +02/08/2023 12:35:24 PM INFO:net set to: None +02/08/2023 12:35:24 PM INFO:immode set to: vid +02/08/2023 12:35:24 PM INFO:mono set to: False +02/08/2023 12:35:24 PM INFO:mirror set to: False +02/08/2023 12:35:24 PM INFO:norm_method set to: layer +02/08/2023 12:35:24 PM INFO:start_sample set to: None +02/08/2023 12:35:24 PM INFO:net_type set to: MAX +02/08/2023 12:35:24 PM INFO:com_fromlabels set to: False +02/08/2023 12:35:24 PM INFO:medfilt_window set to: None +02/08/2023 12:35:24 PM INFO:com_file set to: None +02/08/2023 12:35:24 PM INFO:new_last_kernel_size set to: [3, 3, 3] +02/08/2023 12:35:24 PM INFO:n_layers_locked set to: 2 +02/08/2023 12:35:24 PM INFO:vmin set to: None +02/08/2023 12:35:24 PM INFO:vmax set to: None +02/08/2023 12:35:24 PM INFO:interp set to: nearest +02/08/2023 12:35:24 PM INFO:depth set to: False +02/08/2023 12:35:24 PM INFO:comthresh set to: 0 +02/08/2023 12:35:24 PM INFO:weighted set to: False +02/08/2023 12:35:24 PM INFO:com_method set to: median +02/08/2023 12:35:24 PM INFO:cthresh set to: None +02/08/2023 12:35:24 PM INFO:channel_combo set to: None +02/08/2023 12:35:24 PM INFO:predict_mode set to: torch +02/08/2023 12:35:24 PM INFO:n_views set to: 6 +02/08/2023 12:35:24 PM INFO:expval set to: False +02/08/2023 12:35:24 PM INFO:from_weights set to: None +02/08/2023 12:35:24 PM INFO:write_npy set to: None +02/08/2023 12:35:24 PM INFO:loss set to: mask_nan_keep_loss +02/08/2023 12:35:24 PM INFO:n_channels_in set to: None +02/08/2023 12:35:24 PM INFO:extension set to: None +02/08/2023 12:35:24 PM INFO:vid_dir_flag set to: None +02/08/2023 12:35:24 PM INFO:num_train_per_exp set to: None 
+02/08/2023 12:35:24 PM INFO:chunks set to: None +02/08/2023 12:35:24 PM INFO:lockfirst set to: None +02/08/2023 12:35:24 PM INFO:load_valid set to: None +02/08/2023 12:35:24 PM INFO:augment_hue set to: False +02/08/2023 12:35:24 PM INFO:augment_brightness set to: False +02/08/2023 12:35:24 PM INFO:augment_hue_val set to: 0.05 +02/08/2023 12:35:24 PM INFO:augment_bright_val set to: 0.05 +02/08/2023 12:35:24 PM INFO:augment_rotation_val set to: 5 +02/08/2023 12:35:24 PM INFO:mirror_augmentation set to: False +02/08/2023 12:35:24 PM INFO:right_keypoints set to: None +02/08/2023 12:35:24 PM INFO:left_keypoints set to: None +02/08/2023 12:35:24 PM INFO:drop_landmark set to: None +02/08/2023 12:35:24 PM INFO:raw_im_h set to: None +02/08/2023 12:35:24 PM INFO:raw_im_w set to: None +02/08/2023 12:35:24 PM INFO:n_instances set to: 1 +02/08/2023 12:35:24 PM INFO:use_npy set to: False +02/08/2023 12:35:24 PM INFO:data_split_seed set to: None +02/08/2023 12:35:24 PM INFO:valid_exp set to: None +02/08/2023 12:35:24 PM INFO:lr set to: 0.001 +02/08/2023 12:35:24 PM INFO:rotate set to: True +02/08/2023 12:35:24 PM INFO:augment_continuous_rotation set to: False +02/08/2023 12:35:24 PM INFO:com_thresh set to: None +02/08/2023 12:35:24 PM INFO:cam3_train set to: None +02/08/2023 12:35:24 PM INFO:debug_volume_tifdir set to: None +02/08/2023 12:35:24 PM INFO:downfac set to: None +02/08/2023 12:35:24 PM INFO:dannce_predict_vol_tifdir set to: None +02/08/2023 12:35:24 PM INFO:n_rand_views set to: 0 +02/08/2023 12:35:24 PM INFO:rand_view_replace set to: True +02/08/2023 12:35:24 PM INFO:multi_gpu_train set to: False +02/08/2023 12:35:24 PM INFO:heatmap_reg set to: False +02/08/2023 12:35:24 PM INFO:heatmap_reg_coeff set to: 0.01 +02/08/2023 12:35:24 PM INFO:save_pred_targets set to: False +02/08/2023 12:35:24 PM INFO:huber-delta set to: 1.35 +02/08/2023 12:35:24 PM INFO:avg+max set to: None +02/08/2023 12:35:24 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting vid_dir_flag to 
True. +02/08/2023 12:35:24 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting extension to .mp4. +02/08/2023 12:35:24 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting chunks to {'Camera1': array([ 0, 3000, 6000, 9000, 12000, 15000]), 'Camera2': array([ 0, 3000, 6000, 9000, 12000, 15000]), 'Camera3': array([ 0, 3000, 6000, 9000, 12000, 15000]), 'Camera4': array([ 0, 3000, 6000, 9000, 12000, 15000]), 'Camera5': array([ 0, 3000, 6000, 9000, 12000, 15000]), 'Camera6': array([ 0, 3000, 6000, 9000, 12000, 15000])}. +02/08/2023 12:35:24 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting n_channels_in to 3. +02/08/2023 12:35:24 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting raw_im_h to 1024. +02/08/2023 12:35:24 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting raw_im_w to 1152. +02/08/2023 12:35:24 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting expval to False. +02/08/2023 12:35:24 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting net to finetune_MAX. +02/08/2023 12:35:25 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting crop_height to [0, 1024]. +02/08/2023 12:35:25 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting crop_width to [0, 1152]. +02/08/2023 12:35:25 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting maxbatch to 2. +02/08/2023 12:35:25 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting start_batch to 0. +02/08/2023 12:35:25 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting vmin to -60.0. +02/08/2023 12:35:25 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting vmax to 60.0. 
+02/08/2023 12:35:25 PM INFO:dannce.interface.setup_dannce_predict Using torch predict mode +02/08/2023 12:35:25 PM INFO:dannce.interface.setup_dannce_predict Using camnames: ['Camera1', 'Camera2', 'Camera3', 'Camera4', 'Camera5', 'Camera6'] +02/08/2023 12:35:26 PM INFO:dannce.interface.do_COM_load Experiment 0 using com3d: ./alabel3d_temp_dannce.mat +02/08/2023 12:35:26 PM INFO:dannce.interface.do_COM_load Removed 0 samples from the dataset because they either had COM positions over cthresh, or did not have matching sampleIDs in the COM file +02/08/2023 12:35:26 PM INFO:dannce.interface.write_com_file Saving 3D COM to ../../demo/markerless_mouse_1/DANNCE/predict_test/com3d_used.mat +02/08/2023 12:35:26 PM INFO:None +02/08/2023 12:35:29 PM INFO:dannce.engine.generator.DataGenerator_3Dconv_torch.__init__Executing eagerly: True +02/08/2023 12:35:30 PM INFO:dannce.engine.generator.DataGenerator_3Dconv_torch.__init__Init took 3.8584048748016357 sec. +02/08/2023 12:35:30 PM INFO:dannce.interface.build_model Initializing Network... 
+02/08/2023 12:35:30 PM INFO:dannce.interface.build_model Loading model from ../../demo/markerless_mouse_1/DANNCE/train_results/weights.12000-0.00014.hdf5 +02/08/2023 12:35:31 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 0_Camera1 +02/08/2023 12:35:31 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 0_Camera5 +02/08/2023 12:35:31 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 0_Camera4 +02/08/2023 12:35:31 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 0_Camera6 +02/08/2023 12:35:31 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 0_Camera2 +02/08/2023 12:35:31 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 0_Camera3 +02/08/2023 12:43:10 PM INFO:com_train_dir set to: ../../demo/markerless_mouse_1/COM/train_test/ +02/08/2023 12:43:10 PM INFO:com_predict_dir set to: ../../demo/markerless_mouse_1/COM/predict_test/ +02/08/2023 12:43:10 PM INFO:com_predict_weights set to: ../../demo/markerless_mouse_1/COM/train_results/weights.250-0.00036.hdf5 +02/08/2023 12:43:10 PM INFO:dannce_train_dir set to: ../../demo/markerless_mouse_1/DANNCE/train_test/ +02/08/2023 12:43:10 PM INFO:dannce_predict_dir set to: ../../demo/markerless_mouse_1/DANNCE/predict_test/ +02/08/2023 12:43:10 PM INFO:dannce_predict_model set to: ../../demo/markerless_mouse_1/DANNCE/train_results/weights.12000-0.00014.hdf5 +02/08/2023 12:43:10 PM INFO:exp set to: [{'label3d_file': './label3d_temp_dannce.mat', 'viddir': '../../demo/markerless_mouse_1/videos/'}, {'label3d_file': './label3d_temp_dannce.mat', 'viddir': '../../demo/markerless_mouse_2/videos/'}] +02/08/2023 12:43:10 PM INFO:viddir set to: ../../demo/markerless_mouse_1/videos/ +02/08/2023 12:43:10 PM INFO:gpu_id set to: 0 +02/08/2023 12:43:10 PM INFO:io_config set to: io.yaml +02/08/2023 12:43:10 PM 
INFO:new_n_channels_out set to: 22 +02/08/2023 12:43:10 PM INFO:batch_size set to: 4 +02/08/2023 12:43:10 PM INFO:epochs set to: 1 +02/08/2023 12:43:10 PM INFO:train_mode set to: finetune +02/08/2023 12:43:10 PM INFO:slurm_config set to: ../../cluster/duke.yaml +02/08/2023 12:43:10 PM INFO:num_validation_per_exp set to: 4 +02/08/2023 12:43:10 PM INFO:vol_size set to: 120 +02/08/2023 12:43:10 PM INFO:nvox set to: 64 +02/08/2023 12:43:10 PM INFO:max_num_samples set to: 10 +02/08/2023 12:43:10 PM INFO:start_batch set to: 0 +02/08/2023 12:43:10 PM INFO:dannce_finetune_weights set to: ../../demo/markerless_mouse_1/DANNCE/weights/ +02/08/2023 12:43:10 PM INFO:metric set to: ['euclidean_distance_3D', 'centered_euclidean_distance_3D'] +02/08/2023 12:43:10 PM INFO:log_dest set to: ../logs/ +02/08/2023 12:43:10 PM INFO:log_level set to: INFO +02/08/2023 12:43:10 PM INFO:base_config set to: config_mousetest.yaml +02/08/2023 12:43:10 PM INFO:crop_height set to: None +02/08/2023 12:43:10 PM INFO:crop_width set to: None +02/08/2023 12:43:10 PM INFO:camnames set to: None +02/08/2023 12:43:10 PM INFO:n_channels_out set to: 20 +02/08/2023 12:43:10 PM INFO:sigma set to: 10 +02/08/2023 12:43:10 PM INFO:verbose set to: 1 +02/08/2023 12:43:10 PM INFO:net set to: None +02/08/2023 12:43:10 PM INFO:immode set to: vid +02/08/2023 12:43:10 PM INFO:mono set to: False +02/08/2023 12:43:10 PM INFO:mirror set to: False +02/08/2023 12:43:10 PM INFO:norm_method set to: layer +02/08/2023 12:43:10 PM INFO:start_sample set to: None +02/08/2023 12:43:10 PM INFO:net_type set to: MAX +02/08/2023 12:43:10 PM INFO:com_fromlabels set to: False +02/08/2023 12:43:10 PM INFO:medfilt_window set to: None +02/08/2023 12:43:10 PM INFO:com_file set to: None +02/08/2023 12:43:10 PM INFO:new_last_kernel_size set to: [3, 3, 3] +02/08/2023 12:43:10 PM INFO:n_layers_locked set to: 2 +02/08/2023 12:43:10 PM INFO:vmin set to: None +02/08/2023 12:43:10 PM INFO:vmax set to: None +02/08/2023 12:43:10 PM INFO:interp set to: 
nearest +02/08/2023 12:43:10 PM INFO:depth set to: False +02/08/2023 12:43:10 PM INFO:comthresh set to: 0 +02/08/2023 12:43:10 PM INFO:weighted set to: False +02/08/2023 12:43:10 PM INFO:com_method set to: median +02/08/2023 12:43:10 PM INFO:cthresh set to: None +02/08/2023 12:43:10 PM INFO:channel_combo set to: None +02/08/2023 12:43:10 PM INFO:predict_mode set to: torch +02/08/2023 12:43:10 PM INFO:n_views set to: 6 +02/08/2023 12:43:10 PM INFO:expval set to: False +02/08/2023 12:43:10 PM INFO:from_weights set to: None +02/08/2023 12:43:10 PM INFO:write_npy set to: None +02/08/2023 12:43:10 PM INFO:loss set to: mask_nan_keep_loss +02/08/2023 12:43:10 PM INFO:n_channels_in set to: None +02/08/2023 12:43:10 PM INFO:extension set to: None +02/08/2023 12:43:10 PM INFO:vid_dir_flag set to: None +02/08/2023 12:43:10 PM INFO:num_train_per_exp set to: None +02/08/2023 12:43:10 PM INFO:chunks set to: None +02/08/2023 12:43:10 PM INFO:lockfirst set to: None +02/08/2023 12:43:10 PM INFO:load_valid set to: None +02/08/2023 12:43:10 PM INFO:augment_hue set to: False +02/08/2023 12:43:10 PM INFO:augment_brightness set to: False +02/08/2023 12:43:10 PM INFO:augment_hue_val set to: 0.05 +02/08/2023 12:43:10 PM INFO:augment_bright_val set to: 0.05 +02/08/2023 12:43:10 PM INFO:augment_rotation_val set to: 5 +02/08/2023 12:43:10 PM INFO:mirror_augmentation set to: False +02/08/2023 12:43:10 PM INFO:right_keypoints set to: None +02/08/2023 12:43:10 PM INFO:left_keypoints set to: None +02/08/2023 12:43:10 PM INFO:drop_landmark set to: None +02/08/2023 12:43:10 PM INFO:raw_im_h set to: None +02/08/2023 12:43:10 PM INFO:raw_im_w set to: None +02/08/2023 12:43:10 PM INFO:n_instances set to: 1 +02/08/2023 12:43:10 PM INFO:use_npy set to: False +02/08/2023 12:43:10 PM INFO:data_split_seed set to: None +02/08/2023 12:43:10 PM INFO:valid_exp set to: None +02/08/2023 12:43:10 PM INFO:lr set to: 0.001 +02/08/2023 12:43:10 PM INFO:rotate set to: True +02/08/2023 12:43:10 PM 
INFO:augment_continuous_rotation set to: False +02/08/2023 12:43:10 PM INFO:com_thresh set to: None +02/08/2023 12:43:10 PM INFO:cam3_train set to: None +02/08/2023 12:43:10 PM INFO:debug_volume_tifdir set to: None +02/08/2023 12:43:10 PM INFO:downfac set to: None +02/08/2023 12:43:10 PM INFO:dannce_predict_vol_tifdir set to: None +02/08/2023 12:43:10 PM INFO:n_rand_views set to: 0 +02/08/2023 12:43:10 PM INFO:rand_view_replace set to: True +02/08/2023 12:43:10 PM INFO:multi_gpu_train set to: False +02/08/2023 12:43:10 PM INFO:heatmap_reg set to: False +02/08/2023 12:43:10 PM INFO:heatmap_reg_coeff set to: 0.01 +02/08/2023 12:43:10 PM INFO:save_pred_targets set to: False +02/08/2023 12:43:10 PM INFO:huber-delta set to: 1.35 +02/08/2023 12:43:10 PM INFO:avg+max set to: None +02/08/2023 12:43:10 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting vid_dir_flag to True. +02/08/2023 12:43:10 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting extension to .mp4. +02/08/2023 12:43:10 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting chunks to {'Camera1': array([ 0, 3000, 6000, 9000, 12000, 15000]), 'Camera2': array([ 0, 3000, 6000, 9000, 12000, 15000]), 'Camera3': array([ 0, 3000, 6000, 9000, 12000, 15000]), 'Camera4': array([ 0, 3000, 6000, 9000, 12000, 15000]), 'Camera5': array([ 0, 3000, 6000, 9000, 12000, 15000]), 'Camera6': array([ 0, 3000, 6000, 9000, 12000, 15000])}. +02/08/2023 12:43:10 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting n_channels_in to 3. +02/08/2023 12:43:10 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting raw_im_h to 1024. +02/08/2023 12:43:10 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting raw_im_w to 1152. +02/08/2023 12:43:10 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting expval to False. +02/08/2023 12:43:10 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting net to finetune_MAX. 
+02/08/2023 12:43:11 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting crop_height to [0, 1024]. +02/08/2023 12:43:11 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting crop_width to [0, 1152]. +02/08/2023 12:43:11 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting maxbatch to 2. +02/08/2023 12:43:11 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting start_batch to 0. +02/08/2023 12:43:11 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting vmin to -60.0. +02/08/2023 12:43:11 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting vmax to 60.0. +02/08/2023 12:43:11 PM INFO:dannce.interface.setup_dannce_predict Using torch predict mode +02/08/2023 12:43:11 PM INFO:dannce.interface.setup_dannce_predict Using camnames: ['Camera1', 'Camera2', 'Camera3', 'Camera4', 'Camera5', 'Camera6'] +02/08/2023 12:43:11 PM INFO:dannce.interface.do_COM_load Experiment 0 using com3d: ./alabel3d_temp_dannce.mat +02/08/2023 12:43:11 PM INFO:dannce.interface.do_COM_load Removed 0 samples from the dataset because they either had COM positions over cthresh, or did not have matching sampleIDs in the COM file +02/08/2023 12:43:11 PM INFO:dannce.interface.write_com_file Saving 3D COM to ../../demo/markerless_mouse_1/DANNCE/predict_test/com3d_used.mat +02/08/2023 12:43:11 PM INFO:None +02/08/2023 12:43:14 PM INFO:dannce.engine.generator.DataGenerator_3Dconv_torch.__init__Executing eagerly: True +02/08/2023 12:43:15 PM INFO:dannce.engine.generator.DataGenerator_3Dconv_torch.__init__Init took 3.8577842712402344 sec. +02/08/2023 12:43:15 PM INFO:dannce.interface.build_model Initializing Network... 
+02/08/2023 12:43:15 PM INFO:dannce.interface.build_model Loading model from ../../demo/markerless_mouse_1/DANNCE/train_results/weights.12000-0.00014.hdf5 +02/08/2023 12:43:17 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 0_Camera3 +02/08/2023 12:43:17 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 0_Camera6 +02/08/2023 12:43:17 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 0_Camera5 +02/08/2023 12:43:17 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 0_Camera2 +02/08/2023 12:43:17 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 0_Camera1 +02/08/2023 12:43:17 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 0_Camera4 +02/09/2023 03:51:17 PM INFO:com_train_dir set to: ../../demo/markerless_mouse_1/COM/train_test/ +02/09/2023 03:51:17 PM INFO:com_predict_dir set to: ../../demo/markerless_mouse_1/COM/predict_test/ +02/09/2023 03:51:17 PM INFO:com_predict_weights set to: ../../demo/markerless_mouse_1/COM/train_results/weights.250-0.00036.hdf5 +02/09/2023 03:51:17 PM INFO:dannce_train_dir set to: ../../demo/markerless_mouse_1/DANNCE/train_test/ +02/09/2023 03:51:17 PM INFO:dannce_predict_dir set to: ../../demo/markerless_mouse_1/DANNCE/predict_test/ +02/09/2023 03:51:17 PM INFO:dannce_predict_model set to: ../../demo/markerless_mouse_1/DANNCE/train_results/weights.12000-0.00014.hdf5 +02/09/2023 03:51:17 PM INFO:exp set to: [{'label3d_file': './label3d_temp_dannce.mat', 'viddir': '../../demo/markerless_mouse_1/videos/'}, {'label3d_file': './label3d_temp_dannce.mat', 'viddir': '../../demo/markerless_mouse_2/videos/'}] +02/09/2023 03:51:17 PM INFO:viddir set to: ../../demo/markerless_mouse_1/videos/ +02/09/2023 03:51:17 PM INFO:gpu_id set to: 0 +02/09/2023 03:51:17 PM INFO:io_config set to: io.yaml +02/09/2023 03:51:17 PM 
INFO:new_n_channels_out set to: 22 +02/09/2023 03:51:17 PM INFO:batch_size set to: 4 +02/09/2023 03:51:17 PM INFO:epochs set to: 1 +02/09/2023 03:51:17 PM INFO:train_mode set to: finetune +02/09/2023 03:51:17 PM INFO:slurm_config set to: ../../cluster/duke.yaml +02/09/2023 03:51:17 PM INFO:num_validation_per_exp set to: 4 +02/09/2023 03:51:17 PM INFO:vol_size set to: 120 +02/09/2023 03:51:17 PM INFO:nvox set to: 64 +02/09/2023 03:51:17 PM INFO:max_num_samples set to: 10 +02/09/2023 03:51:17 PM INFO:start_batch set to: 0 +02/09/2023 03:51:17 PM INFO:dannce_finetune_weights set to: ../../demo/markerless_mouse_1/DANNCE/weights/ +02/09/2023 03:51:17 PM INFO:metric set to: ['euclidean_distance_3D', 'centered_euclidean_distance_3D'] +02/09/2023 03:51:17 PM INFO:log_dest set to: ../logs/ +02/09/2023 03:51:17 PM INFO:log_level set to: INFO +02/09/2023 03:51:17 PM INFO:base_config set to: config_mousetest.yaml +02/09/2023 03:51:17 PM INFO:crop_height set to: None +02/09/2023 03:51:17 PM INFO:crop_width set to: None +02/09/2023 03:51:17 PM INFO:camnames set to: None +02/09/2023 03:51:17 PM INFO:n_channels_out set to: 20 +02/09/2023 03:51:17 PM INFO:sigma set to: 10 +02/09/2023 03:51:17 PM INFO:verbose set to: 1 +02/09/2023 03:51:17 PM INFO:net set to: None +02/09/2023 03:51:17 PM INFO:immode set to: vid +02/09/2023 03:51:17 PM INFO:mono set to: False +02/09/2023 03:51:17 PM INFO:mirror set to: False +02/09/2023 03:51:17 PM INFO:norm_method set to: layer +02/09/2023 03:51:17 PM INFO:start_sample set to: None +02/09/2023 03:51:17 PM INFO:net_type set to: MAX +02/09/2023 03:51:17 PM INFO:com_fromlabels set to: False +02/09/2023 03:51:17 PM INFO:medfilt_window set to: None +02/09/2023 03:51:17 PM INFO:com_file set to: None +02/09/2023 03:51:17 PM INFO:new_last_kernel_size set to: [3, 3, 3] +02/09/2023 03:51:17 PM INFO:n_layers_locked set to: 2 +02/09/2023 03:51:17 PM INFO:vmin set to: None +02/09/2023 03:51:17 PM INFO:vmax set to: None +02/09/2023 03:51:17 PM INFO:interp set to: 
nearest +02/09/2023 03:51:17 PM INFO:depth set to: False +02/09/2023 03:51:17 PM INFO:comthresh set to: 0 +02/09/2023 03:51:17 PM INFO:weighted set to: False +02/09/2023 03:51:17 PM INFO:com_method set to: median +02/09/2023 03:51:17 PM INFO:cthresh set to: None +02/09/2023 03:51:17 PM INFO:channel_combo set to: None +02/09/2023 03:51:17 PM INFO:predict_mode set to: torch +02/09/2023 03:51:17 PM INFO:n_views set to: 6 +02/09/2023 03:51:17 PM INFO:expval set to: False +02/09/2023 03:51:17 PM INFO:from_weights set to: None +02/09/2023 03:51:17 PM INFO:write_npy set to: None +02/09/2023 03:51:17 PM INFO:loss set to: mask_nan_keep_loss +02/09/2023 03:51:17 PM INFO:n_channels_in set to: None +02/09/2023 03:51:17 PM INFO:extension set to: None +02/09/2023 03:51:17 PM INFO:vid_dir_flag set to: None +02/09/2023 03:51:17 PM INFO:num_train_per_exp set to: None +02/09/2023 03:51:17 PM INFO:chunks set to: None +02/09/2023 03:51:17 PM INFO:lockfirst set to: None +02/09/2023 03:51:17 PM INFO:load_valid set to: None +02/09/2023 03:51:17 PM INFO:augment_hue set to: False +02/09/2023 03:51:17 PM INFO:augment_brightness set to: False +02/09/2023 03:51:17 PM INFO:augment_hue_val set to: 0.05 +02/09/2023 03:51:17 PM INFO:augment_bright_val set to: 0.05 +02/09/2023 03:51:17 PM INFO:augment_rotation_val set to: 5 +02/09/2023 03:51:17 PM INFO:mirror_augmentation set to: False +02/09/2023 03:51:17 PM INFO:right_keypoints set to: None +02/09/2023 03:51:17 PM INFO:left_keypoints set to: None +02/09/2023 03:51:17 PM INFO:drop_landmark set to: None +02/09/2023 03:51:17 PM INFO:raw_im_h set to: None +02/09/2023 03:51:17 PM INFO:raw_im_w set to: None +02/09/2023 03:51:17 PM INFO:n_instances set to: 1 +02/09/2023 03:51:17 PM INFO:use_npy set to: False +02/09/2023 03:51:17 PM INFO:data_split_seed set to: None +02/09/2023 03:51:17 PM INFO:valid_exp set to: None +02/09/2023 03:51:17 PM INFO:lr set to: 0.001 +02/09/2023 03:51:17 PM INFO:rotate set to: True +02/09/2023 03:51:17 PM 
INFO:augment_continuous_rotation set to: False +02/09/2023 03:51:17 PM INFO:com_thresh set to: None +02/09/2023 03:51:17 PM INFO:cam3_train set to: None +02/09/2023 03:51:17 PM INFO:debug_volume_tifdir set to: None +02/09/2023 03:51:17 PM INFO:downfac set to: None +02/09/2023 03:51:17 PM INFO:dannce_predict_vol_tifdir set to: None +02/09/2023 03:51:17 PM INFO:n_rand_views set to: 0 +02/09/2023 03:51:17 PM INFO:rand_view_replace set to: True +02/09/2023 03:51:17 PM INFO:multi_gpu_train set to: False +02/09/2023 03:51:17 PM INFO:heatmap_reg set to: False +02/09/2023 03:51:17 PM INFO:heatmap_reg_coeff set to: 0.01 +02/09/2023 03:51:17 PM INFO:save_pred_targets set to: False +02/09/2023 03:51:17 PM INFO:huber-delta set to: 1.35 +02/09/2023 03:51:17 PM INFO:avg+max set to: None +02/09/2023 03:51:17 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting vid_dir_flag to True. +02/09/2023 03:51:17 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting extension to .mp4. +02/09/2023 03:51:17 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting chunks to {'Camera1': array([ 0, 3000, 6000, 9000, 12000, 15000]), 'Camera2': array([ 0, 3000, 6000, 9000, 12000, 15000]), 'Camera3': array([ 0, 3000, 6000, 9000, 12000, 15000]), 'Camera4': array([ 0, 3000, 6000, 9000, 12000, 15000]), 'Camera5': array([ 0, 3000, 6000, 9000, 12000, 15000]), 'Camera6': array([ 0, 3000, 6000, 9000, 12000, 15000])}. +02/09/2023 03:51:17 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting n_channels_in to 3. +02/09/2023 03:51:17 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting raw_im_h to 1024. +02/09/2023 03:51:17 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting raw_im_w to 1152. +02/09/2023 03:51:17 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting expval to False. +02/09/2023 03:51:17 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting net to finetune_MAX. 
+02/09/2023 03:51:18 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting crop_height to [0, 1024]. +02/09/2023 03:51:18 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting crop_width to [0, 1152]. +02/09/2023 03:51:18 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting maxbatch to 2. +02/09/2023 03:51:18 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting start_batch to 0. +02/09/2023 03:51:18 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting vmin to -60.0. +02/09/2023 03:51:18 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting vmax to 60.0. +02/09/2023 03:51:18 PM INFO:dannce.interface.setup_dannce_predict Using torch predict mode +02/09/2023 03:51:18 PM INFO:dannce.interface.setup_dannce_predict Using camnames: ['Camera1', 'Camera2', 'Camera3', 'Camera4', 'Camera5', 'Camera6'] +02/09/2023 03:51:19 PM INFO:dannce.interface.do_COM_load Experiment 0 using com3d: ./alabel3d_temp_dannce.mat +02/09/2023 03:51:19 PM INFO:dannce.interface.do_COM_load Removed 0 samples from the dataset because they either had COM positions over cthresh, or did not have matching sampleIDs in the COM file +02/09/2023 03:51:19 PM INFO:dannce.interface.write_com_file Saving 3D COM to ../../demo/markerless_mouse_1/DANNCE/predict_test/com3d_used.mat +02/09/2023 03:51:19 PM INFO:None +02/09/2023 03:51:31 PM INFO:dannce.engine.generator.DataGenerator_3Dconv_torch.__init__Executing eagerly: True +02/09/2023 03:51:32 PM INFO:dannce.engine.generator.DataGenerator_3Dconv_torch.__init__Init took 13.033998727798462 sec. +02/09/2023 03:51:32 PM INFO:dannce.interface.build_model Initializing Network... 
+02/09/2023 03:51:32 PM INFO:dannce.interface.build_model Loading model from ../../demo/markerless_mouse_1/DANNCE/train_results/weights.12000-0.00014.hdf5 +02/09/2023 03:51:35 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 0_Camera3 +02/09/2023 03:51:35 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 0_Camera2 +02/09/2023 03:51:35 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 0_Camera1 +02/09/2023 03:51:35 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 0_Camera4 +02/09/2023 03:51:35 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 0_Camera6 +02/09/2023 03:51:35 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 0_Camera5 +02/09/2023 03:56:56 PM INFO:com_train_dir set to: ../../demo/markerless_mouse_1/COM/train_test/ +02/09/2023 03:56:56 PM INFO:com_predict_dir set to: ../../demo/markerless_mouse_1/COM/predict_test/ +02/09/2023 03:56:56 PM INFO:com_predict_weights set to: ../../demo/markerless_mouse_1/COM/train_results/weights.250-0.00036.hdf5 +02/09/2023 03:56:56 PM INFO:dannce_train_dir set to: ../../demo/markerless_mouse_1/DANNCE/train_test/ +02/09/2023 03:56:56 PM INFO:dannce_predict_dir set to: ../../demo/markerless_mouse_1/DANNCE/predict_test/ +02/09/2023 03:56:56 PM INFO:dannce_predict_model set to: ../../demo/markerless_mouse_1/DANNCE/train_results/weights.12000-0.00014.hdf5 +02/09/2023 03:56:56 PM INFO:exp set to: [{'label3d_file': './label3d_temp_dannce.mat', 'viddir': '../../demo/markerless_mouse_1/videos/'}, {'label3d_file': './label3d_temp_dannce.mat', 'viddir': '../../demo/markerless_mouse_2/videos/'}] +02/09/2023 03:56:56 PM INFO:viddir set to: ../../demo/markerless_mouse_1/videos/ +02/09/2023 03:56:56 PM INFO:gpu_id set to: 0 +02/09/2023 03:56:56 PM INFO:io_config set to: io.yaml +02/09/2023 03:56:56 PM 
INFO:new_n_channels_out set to: 22 +02/09/2023 03:56:56 PM INFO:batch_size set to: 4 +02/09/2023 03:56:56 PM INFO:epochs set to: 1 +02/09/2023 03:56:56 PM INFO:train_mode set to: finetune +02/09/2023 03:56:56 PM INFO:slurm_config set to: ../../cluster/duke.yaml +02/09/2023 03:56:56 PM INFO:num_validation_per_exp set to: 4 +02/09/2023 03:56:56 PM INFO:vol_size set to: 120 +02/09/2023 03:56:56 PM INFO:nvox set to: 64 +02/09/2023 03:56:56 PM INFO:max_num_samples set to: 10 +02/09/2023 03:56:56 PM INFO:start_batch set to: 0 +02/09/2023 03:56:56 PM INFO:dannce_finetune_weights set to: ../../demo/markerless_mouse_1/DANNCE/weights/ +02/09/2023 03:56:56 PM INFO:metric set to: ['euclidean_distance_3D', 'centered_euclidean_distance_3D'] +02/09/2023 03:56:56 PM INFO:log_dest set to: ../logs/ +02/09/2023 03:56:56 PM INFO:log_level set to: INFO +02/09/2023 03:56:56 PM INFO:base_config set to: config_mousetest.yaml +02/09/2023 03:56:56 PM INFO:crop_height set to: None +02/09/2023 03:56:56 PM INFO:crop_width set to: None +02/09/2023 03:56:56 PM INFO:camnames set to: None +02/09/2023 03:56:56 PM INFO:n_channels_out set to: 20 +02/09/2023 03:56:56 PM INFO:sigma set to: 10 +02/09/2023 03:56:56 PM INFO:verbose set to: 1 +02/09/2023 03:56:56 PM INFO:net set to: None +02/09/2023 03:56:56 PM INFO:immode set to: vid +02/09/2023 03:56:56 PM INFO:mono set to: False +02/09/2023 03:56:56 PM INFO:mirror set to: False +02/09/2023 03:56:56 PM INFO:norm_method set to: layer +02/09/2023 03:56:56 PM INFO:start_sample set to: None +02/09/2023 03:56:56 PM INFO:net_type set to: MAX +02/09/2023 03:56:56 PM INFO:com_fromlabels set to: False +02/09/2023 03:56:56 PM INFO:medfilt_window set to: None +02/09/2023 03:56:56 PM INFO:com_file set to: None +02/09/2023 03:56:56 PM INFO:new_last_kernel_size set to: [3, 3, 3] +02/09/2023 03:56:56 PM INFO:n_layers_locked set to: 2 +02/09/2023 03:56:56 PM INFO:vmin set to: None +02/09/2023 03:56:56 PM INFO:vmax set to: None +02/09/2023 03:56:56 PM INFO:interp set to: 
nearest +02/09/2023 03:56:56 PM INFO:depth set to: False +02/09/2023 03:56:56 PM INFO:comthresh set to: 0 +02/09/2023 03:56:56 PM INFO:weighted set to: False +02/09/2023 03:56:56 PM INFO:com_method set to: median +02/09/2023 03:56:56 PM INFO:cthresh set to: None +02/09/2023 03:56:56 PM INFO:channel_combo set to: None +02/09/2023 03:56:56 PM INFO:predict_mode set to: torch +02/09/2023 03:56:56 PM INFO:n_views set to: 6 +02/09/2023 03:56:56 PM INFO:expval set to: False +02/09/2023 03:56:56 PM INFO:from_weights set to: None +02/09/2023 03:56:56 PM INFO:write_npy set to: None +02/09/2023 03:56:56 PM INFO:loss set to: mask_nan_keep_loss +02/09/2023 03:56:56 PM INFO:n_channels_in set to: None +02/09/2023 03:56:56 PM INFO:extension set to: None +02/09/2023 03:56:56 PM INFO:vid_dir_flag set to: None +02/09/2023 03:56:56 PM INFO:num_train_per_exp set to: None +02/09/2023 03:56:56 PM INFO:chunks set to: None +02/09/2023 03:56:56 PM INFO:lockfirst set to: None +02/09/2023 03:56:56 PM INFO:load_valid set to: None +02/09/2023 03:56:56 PM INFO:augment_hue set to: False +02/09/2023 03:56:56 PM INFO:augment_brightness set to: False +02/09/2023 03:56:56 PM INFO:augment_hue_val set to: 0.05 +02/09/2023 03:56:56 PM INFO:augment_bright_val set to: 0.05 +02/09/2023 03:56:56 PM INFO:augment_rotation_val set to: 5 +02/09/2023 03:56:56 PM INFO:mirror_augmentation set to: False +02/09/2023 03:56:56 PM INFO:right_keypoints set to: None +02/09/2023 03:56:56 PM INFO:left_keypoints set to: None +02/09/2023 03:56:56 PM INFO:drop_landmark set to: None +02/09/2023 03:56:56 PM INFO:raw_im_h set to: None +02/09/2023 03:56:56 PM INFO:raw_im_w set to: None +02/09/2023 03:56:56 PM INFO:n_instances set to: 1 +02/09/2023 03:56:56 PM INFO:use_npy set to: False +02/09/2023 03:56:56 PM INFO:data_split_seed set to: None +02/09/2023 03:56:56 PM INFO:valid_exp set to: None +02/09/2023 03:56:56 PM INFO:lr set to: 0.001 +02/09/2023 03:56:56 PM INFO:rotate set to: True +02/09/2023 03:56:56 PM 
INFO:augment_continuous_rotation set to: False +02/09/2023 03:56:56 PM INFO:com_thresh set to: None +02/09/2023 03:56:56 PM INFO:cam3_train set to: None +02/09/2023 03:56:56 PM INFO:debug_volume_tifdir set to: None +02/09/2023 03:56:56 PM INFO:downfac set to: None +02/09/2023 03:56:56 PM INFO:dannce_predict_vol_tifdir set to: None +02/09/2023 03:56:56 PM INFO:n_rand_views set to: 0 +02/09/2023 03:56:56 PM INFO:rand_view_replace set to: True +02/09/2023 03:56:56 PM INFO:multi_gpu_train set to: False +02/09/2023 03:56:56 PM INFO:heatmap_reg set to: False +02/09/2023 03:56:56 PM INFO:heatmap_reg_coeff set to: 0.01 +02/09/2023 03:56:56 PM INFO:save_pred_targets set to: False +02/09/2023 03:56:56 PM INFO:huber-delta set to: 1.35 +02/09/2023 03:56:56 PM INFO:avg+max set to: None +02/09/2023 03:56:56 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting vid_dir_flag to True. +02/09/2023 03:56:56 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting extension to .mp4. +02/09/2023 03:56:56 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting chunks to {'Camera1': array([ 0, 3000, 6000, 9000, 12000, 15000]), 'Camera2': array([ 0, 3000, 6000, 9000, 12000, 15000]), 'Camera3': array([ 0, 3000, 6000, 9000, 12000, 15000]), 'Camera4': array([ 0, 3000, 6000, 9000, 12000, 15000]), 'Camera5': array([ 0, 3000, 6000, 9000, 12000, 15000]), 'Camera6': array([ 0, 3000, 6000, 9000, 12000, 15000])}. +02/09/2023 03:56:56 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting n_channels_in to 3. +02/09/2023 03:56:56 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting raw_im_h to 1024. +02/09/2023 03:56:56 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting raw_im_w to 1152. +02/09/2023 03:56:56 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting expval to False. +02/09/2023 03:56:56 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting net to finetune_MAX. 
+02/09/2023 03:56:57 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting crop_height to [0, 1024]. +02/09/2023 03:56:57 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting crop_width to [0, 1152]. +02/09/2023 03:56:57 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting maxbatch to 2. +02/09/2023 03:56:57 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting start_batch to 0. +02/09/2023 03:56:57 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting vmin to -60.0. +02/09/2023 03:56:57 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting vmax to 60.0. +02/09/2023 03:56:57 PM INFO:dannce.interface.setup_dannce_predict Using torch predict mode +02/09/2023 03:56:57 PM INFO:dannce.interface.setup_dannce_predict Using camnames: ['Camera1', 'Camera2', 'Camera3', 'Camera4', 'Camera5', 'Camera6'] +02/09/2023 03:56:58 PM INFO:dannce.interface.do_COM_load Experiment 0 using com3d: ./alabel3d_temp_dannce.mat +02/09/2023 03:56:58 PM INFO:dannce.interface.do_COM_load Removed 0 samples from the dataset because they either had COM positions over cthresh, or did not have matching sampleIDs in the COM file +02/09/2023 03:56:58 PM INFO:dannce.interface.write_com_file Saving 3D COM to ../../demo/markerless_mouse_1/DANNCE/predict_test/com3d_used.mat +02/09/2023 03:56:58 PM INFO:None +02/09/2023 03:57:01 PM INFO:dannce.engine.generator.DataGenerator_3Dconv_torch.__init__Executing eagerly: True +02/09/2023 03:57:01 PM INFO:dannce.engine.generator.DataGenerator_3Dconv_torch.__init__Init took 3.561072587966919 sec. +02/09/2023 03:57:01 PM INFO:dannce.interface.build_model Initializing Network... 
+02/09/2023 03:57:01 PM INFO:dannce.interface.build_model Loading model from ../../demo/markerless_mouse_1/DANNCE/train_results/weights.12000-0.00014.hdf5 +02/09/2023 03:57:03 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 0_Camera5 +02/09/2023 03:57:03 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 0_Camera6 +02/09/2023 03:57:03 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 0_Camera1 +02/09/2023 03:57:03 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 0_Camera4 +02/09/2023 03:57:03 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 0_Camera3 +02/09/2023 03:57:03 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 0_Camera2 +02/09/2023 03:59:20 PM INFO:com_train_dir set to: ../../demo/markerless_mouse_1/COM/train_test/ +02/09/2023 03:59:20 PM INFO:com_predict_dir set to: ../../demo/markerless_mouse_1/COM/predict_test/ +02/09/2023 03:59:20 PM INFO:com_predict_weights set to: ../../demo/markerless_mouse_1/COM/train_results/weights.250-0.00036.hdf5 +02/09/2023 03:59:20 PM INFO:dannce_train_dir set to: ../../demo/markerless_mouse_1/DANNCE/train_test/ +02/09/2023 03:59:20 PM INFO:dannce_predict_dir set to: ../../demo/markerless_mouse_1/DANNCE/predict_test/ +02/09/2023 03:59:20 PM INFO:dannce_predict_model set to: ../../demo/markerless_mouse_1/DANNCE/train_results/weights.12000-0.00014.hdf5 +02/09/2023 03:59:20 PM INFO:exp set to: [{'label3d_file': './label3d_temp_dannce.mat', 'viddir': '../../demo/markerless_mouse_1/videos/'}, {'label3d_file': './label3d_temp_dannce.mat', 'viddir': '../../demo/markerless_mouse_2/videos/'}] +02/09/2023 03:59:20 PM INFO:viddir set to: ../../demo/markerless_mouse_1/videos/ +02/09/2023 03:59:20 PM INFO:gpu_id set to: 0 +02/09/2023 03:59:20 PM INFO:io_config set to: io.yaml +02/09/2023 03:59:20 PM 
INFO:new_n_channels_out set to: 22 +02/09/2023 03:59:20 PM INFO:batch_size set to: 4 +02/09/2023 03:59:20 PM INFO:epochs set to: 1 +02/09/2023 03:59:20 PM INFO:train_mode set to: finetune +02/09/2023 03:59:20 PM INFO:slurm_config set to: ../../cluster/duke.yaml +02/09/2023 03:59:20 PM INFO:num_validation_per_exp set to: 4 +02/09/2023 03:59:20 PM INFO:vol_size set to: 120 +02/09/2023 03:59:20 PM INFO:nvox set to: 64 +02/09/2023 03:59:20 PM INFO:max_num_samples set to: 10 +02/09/2023 03:59:20 PM INFO:start_batch set to: 0 +02/09/2023 03:59:20 PM INFO:dannce_finetune_weights set to: ../../demo/markerless_mouse_1/DANNCE/weights/ +02/09/2023 03:59:20 PM INFO:metric set to: ['euclidean_distance_3D', 'centered_euclidean_distance_3D'] +02/09/2023 03:59:20 PM INFO:log_dest set to: ../logs/ +02/09/2023 03:59:20 PM INFO:log_level set to: INFO +02/09/2023 03:59:20 PM INFO:base_config set to: config_mousetest.yaml +02/09/2023 03:59:20 PM INFO:crop_height set to: None +02/09/2023 03:59:20 PM INFO:crop_width set to: None +02/09/2023 03:59:20 PM INFO:camnames set to: None +02/09/2023 03:59:20 PM INFO:n_channels_out set to: 20 +02/09/2023 03:59:20 PM INFO:sigma set to: 10 +02/09/2023 03:59:20 PM INFO:verbose set to: 1 +02/09/2023 03:59:20 PM INFO:net set to: None +02/09/2023 03:59:20 PM INFO:immode set to: vid +02/09/2023 03:59:20 PM INFO:mono set to: False +02/09/2023 03:59:20 PM INFO:mirror set to: False +02/09/2023 03:59:20 PM INFO:norm_method set to: layer +02/09/2023 03:59:20 PM INFO:start_sample set to: None +02/09/2023 03:59:20 PM INFO:net_type set to: MAX +02/09/2023 03:59:20 PM INFO:com_fromlabels set to: False +02/09/2023 03:59:20 PM INFO:medfilt_window set to: None +02/09/2023 03:59:20 PM INFO:com_file set to: None +02/09/2023 03:59:20 PM INFO:new_last_kernel_size set to: [3, 3, 3] +02/09/2023 03:59:20 PM INFO:n_layers_locked set to: 2 +02/09/2023 03:59:20 PM INFO:vmin set to: None +02/09/2023 03:59:20 PM INFO:vmax set to: None +02/09/2023 03:59:20 PM INFO:interp set to: 
nearest +02/09/2023 03:59:20 PM INFO:depth set to: False +02/09/2023 03:59:20 PM INFO:comthresh set to: 0 +02/09/2023 03:59:20 PM INFO:weighted set to: False +02/09/2023 03:59:20 PM INFO:com_method set to: median +02/09/2023 03:59:20 PM INFO:cthresh set to: None +02/09/2023 03:59:20 PM INFO:channel_combo set to: None +02/09/2023 03:59:20 PM INFO:predict_mode set to: torch +02/09/2023 03:59:20 PM INFO:n_views set to: 6 +02/09/2023 03:59:20 PM INFO:expval set to: False +02/09/2023 03:59:20 PM INFO:from_weights set to: None +02/09/2023 03:59:20 PM INFO:write_npy set to: None +02/09/2023 03:59:20 PM INFO:loss set to: mask_nan_keep_loss +02/09/2023 03:59:20 PM INFO:n_channels_in set to: None +02/09/2023 03:59:20 PM INFO:extension set to: None +02/09/2023 03:59:20 PM INFO:vid_dir_flag set to: None +02/09/2023 03:59:20 PM INFO:num_train_per_exp set to: None +02/09/2023 03:59:20 PM INFO:chunks set to: None +02/09/2023 03:59:20 PM INFO:lockfirst set to: None +02/09/2023 03:59:20 PM INFO:load_valid set to: None +02/09/2023 03:59:20 PM INFO:augment_hue set to: False +02/09/2023 03:59:20 PM INFO:augment_brightness set to: False +02/09/2023 03:59:20 PM INFO:augment_hue_val set to: 0.05 +02/09/2023 03:59:20 PM INFO:augment_bright_val set to: 0.05 +02/09/2023 03:59:20 PM INFO:augment_rotation_val set to: 5 +02/09/2023 03:59:20 PM INFO:mirror_augmentation set to: False +02/09/2023 03:59:20 PM INFO:right_keypoints set to: None +02/09/2023 03:59:20 PM INFO:left_keypoints set to: None +02/09/2023 03:59:20 PM INFO:drop_landmark set to: None +02/09/2023 03:59:20 PM INFO:raw_im_h set to: None +02/09/2023 03:59:20 PM INFO:raw_im_w set to: None +02/09/2023 03:59:20 PM INFO:n_instances set to: 1 +02/09/2023 03:59:20 PM INFO:use_npy set to: False +02/09/2023 03:59:20 PM INFO:data_split_seed set to: None +02/09/2023 03:59:20 PM INFO:valid_exp set to: None +02/09/2023 03:59:20 PM INFO:lr set to: 0.001 +02/09/2023 03:59:20 PM INFO:rotate set to: True +02/09/2023 03:59:20 PM 
INFO:augment_continuous_rotation set to: False +02/09/2023 03:59:20 PM INFO:com_thresh set to: None +02/09/2023 03:59:20 PM INFO:cam3_train set to: None +02/09/2023 03:59:20 PM INFO:debug_volume_tifdir set to: None +02/09/2023 03:59:20 PM INFO:downfac set to: None +02/09/2023 03:59:20 PM INFO:dannce_predict_vol_tifdir set to: None +02/09/2023 03:59:20 PM INFO:n_rand_views set to: 0 +02/09/2023 03:59:20 PM INFO:rand_view_replace set to: True +02/09/2023 03:59:20 PM INFO:multi_gpu_train set to: False +02/09/2023 03:59:20 PM INFO:heatmap_reg set to: False +02/09/2023 03:59:20 PM INFO:heatmap_reg_coeff set to: 0.01 +02/09/2023 03:59:20 PM INFO:save_pred_targets set to: False +02/09/2023 03:59:20 PM INFO:huber-delta set to: 1.35 +02/09/2023 03:59:20 PM INFO:avg+max set to: None +02/09/2023 03:59:20 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting vid_dir_flag to True. +02/09/2023 03:59:20 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting extension to .mp4. +02/09/2023 03:59:20 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting chunks to {'Camera1': array([ 0, 3000, 6000, 9000, 12000, 15000]), 'Camera2': array([ 0, 3000, 6000, 9000, 12000, 15000]), 'Camera3': array([ 0, 3000, 6000, 9000, 12000, 15000]), 'Camera4': array([ 0, 3000, 6000, 9000, 12000, 15000]), 'Camera5': array([ 0, 3000, 6000, 9000, 12000, 15000]), 'Camera6': array([ 0, 3000, 6000, 9000, 12000, 15000])}. +02/09/2023 03:59:20 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting n_channels_in to 3. +02/09/2023 03:59:20 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting raw_im_h to 1024. +02/09/2023 03:59:20 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting raw_im_w to 1152. +02/09/2023 03:59:20 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting expval to False. +02/09/2023 03:59:20 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting net to finetune_MAX. 
+02/09/2023 03:59:21 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting crop_height to [0, 1024]. +02/09/2023 03:59:21 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting crop_width to [0, 1152]. +02/09/2023 03:59:21 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting maxbatch to 2. +02/09/2023 03:59:21 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting start_batch to 0. +02/09/2023 03:59:21 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting vmin to -60.0. +02/09/2023 03:59:21 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting vmax to 60.0. +02/09/2023 03:59:21 PM INFO:dannce.interface.setup_dannce_predict Using torch predict mode +02/09/2023 03:59:21 PM INFO:dannce.interface.setup_dannce_predict Using camnames: ['Camera1', 'Camera2', 'Camera3', 'Camera4', 'Camera5', 'Camera6'] +02/09/2023 03:59:21 PM INFO:dannce.interface.do_COM_load Experiment 0 using com3d: ./alabel3d_temp_dannce.mat +02/09/2023 03:59:21 PM INFO:dannce.interface.do_COM_load Removed 0 samples from the dataset because they either had COM positions over cthresh, or did not have matching sampleIDs in the COM file +02/09/2023 03:59:21 PM INFO:dannce.interface.write_com_file Saving 3D COM to ../../demo/markerless_mouse_1/DANNCE/predict_test/com3d_used.mat +02/09/2023 03:59:21 PM INFO:None +02/09/2023 03:59:24 PM INFO:dannce.engine.generator.DataGenerator_3Dconv_torch.__init__Executing eagerly: True +02/09/2023 03:59:25 PM INFO:dannce.engine.generator.DataGenerator_3Dconv_torch.__init__Init took 3.6019911766052246 sec. +02/09/2023 03:59:25 PM INFO:dannce.interface.build_model Initializing Network... 
+02/09/2023 03:59:25 PM INFO:dannce.interface.build_model Loading model from ../../demo/markerless_mouse_1/DANNCE/train_results/weights.12000-0.00014.hdf5 +02/09/2023 03:59:26 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 0_Camera5 +02/09/2023 03:59:26 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 0_Camera6 +02/09/2023 03:59:26 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 0_Camera1 +02/09/2023 03:59:26 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 0_Camera2 +02/09/2023 03:59:26 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 0_Camera3 +02/09/2023 03:59:26 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 0_Camera4 +02/09/2023 04:24:26 PM INFO:com_train_dir set to: ../../demo/markerless_mouse_1/COM/train_test/ +02/09/2023 04:24:26 PM INFO:com_predict_dir set to: ../../demo/markerless_mouse_1/COM/predict_test/ +02/09/2023 04:24:26 PM INFO:com_predict_weights set to: ../../demo/markerless_mouse_1/COM/train_results/weights.250-0.00036.hdf5 +02/09/2023 04:24:26 PM INFO:dannce_train_dir set to: ../../demo/markerless_mouse_1/DANNCE/train_test/ +02/09/2023 04:24:26 PM INFO:dannce_predict_dir set to: ../../demo/markerless_mouse_1/DANNCE/predict_test/ +02/09/2023 04:24:26 PM INFO:dannce_predict_model set to: ../../demo/markerless_mouse_1/DANNCE/train_results/AVG/weights.1200-12.77642.hdf5 +02/09/2023 04:24:26 PM INFO:exp set to: [{'label3d_file': './label3d_temp_dannce.mat', 'viddir': '../../demo/markerless_mouse_1/videos/'}, {'label3d_file': './label3d_temp_dannce.mat', 'viddir': '../../demo/markerless_mouse_2/videos/'}] +02/09/2023 04:24:26 PM INFO:viddir set to: ../../demo/markerless_mouse_1/videos/ +02/09/2023 04:24:26 PM INFO:gpu_id set to: 0 +02/09/2023 04:24:26 PM INFO:io_config set to: io.yaml +02/09/2023 04:24:26 PM 
INFO:new_n_channels_out set to: 22 +02/09/2023 04:24:26 PM INFO:batch_size set to: 4 +02/09/2023 04:24:26 PM INFO:epochs set to: 1 +02/09/2023 04:24:26 PM INFO:train_mode set to: finetune +02/09/2023 04:24:26 PM INFO:slurm_config set to: ../../cluster/duke.yaml +02/09/2023 04:24:26 PM INFO:num_validation_per_exp set to: 4 +02/09/2023 04:24:26 PM INFO:vol_size set to: 120 +02/09/2023 04:24:26 PM INFO:nvox set to: 64 +02/09/2023 04:24:26 PM INFO:max_num_samples set to: 10 +02/09/2023 04:24:26 PM INFO:start_batch set to: 0 +02/09/2023 04:24:26 PM INFO:dannce_finetune_weights set to: ../../demo/markerless_mouse_1/DANNCE/weights/ +02/09/2023 04:24:26 PM INFO:metric set to: ['euclidean_distance_3D', 'centered_euclidean_distance_3D'] +02/09/2023 04:24:26 PM INFO:log_dest set to: ../logs/ +02/09/2023 04:24:26 PM INFO:log_level set to: INFO +02/09/2023 04:24:26 PM INFO:base_config set to: config_mousetest.yaml +02/09/2023 04:24:26 PM INFO:crop_height set to: None +02/09/2023 04:24:26 PM INFO:crop_width set to: None +02/09/2023 04:24:26 PM INFO:camnames set to: None +02/09/2023 04:24:26 PM INFO:n_channels_out set to: 20 +02/09/2023 04:24:26 PM INFO:sigma set to: 10 +02/09/2023 04:24:26 PM INFO:verbose set to: 1 +02/09/2023 04:24:26 PM INFO:net set to: None +02/09/2023 04:24:26 PM INFO:immode set to: vid +02/09/2023 04:24:26 PM INFO:mono set to: False +02/09/2023 04:24:26 PM INFO:mirror set to: False +02/09/2023 04:24:26 PM INFO:norm_method set to: layer +02/09/2023 04:24:26 PM INFO:start_sample set to: None +02/09/2023 04:24:26 PM INFO:net_type set to: AVG +02/09/2023 04:24:26 PM INFO:com_fromlabels set to: False +02/09/2023 04:24:26 PM INFO:medfilt_window set to: None +02/09/2023 04:24:26 PM INFO:com_file set to: None +02/09/2023 04:24:26 PM INFO:new_last_kernel_size set to: [3, 3, 3] +02/09/2023 04:24:26 PM INFO:n_layers_locked set to: 2 +02/09/2023 04:24:26 PM INFO:vmin set to: None +02/09/2023 04:24:26 PM INFO:vmax set to: None +02/09/2023 04:24:26 PM INFO:interp set to: 
nearest +02/09/2023 04:24:26 PM INFO:depth set to: False +02/09/2023 04:24:26 PM INFO:comthresh set to: 0 +02/09/2023 04:24:26 PM INFO:weighted set to: False +02/09/2023 04:24:26 PM INFO:com_method set to: median +02/09/2023 04:24:26 PM INFO:cthresh set to: None +02/09/2023 04:24:26 PM INFO:channel_combo set to: None +02/09/2023 04:24:26 PM INFO:predict_mode set to: torch +02/09/2023 04:24:26 PM INFO:n_views set to: 6 +02/09/2023 04:24:26 PM INFO:expval set to: None +02/09/2023 04:24:26 PM INFO:from_weights set to: None +02/09/2023 04:24:26 PM INFO:write_npy set to: None +02/09/2023 04:24:26 PM INFO:loss set to: mask_nan_keep_loss +02/09/2023 04:24:26 PM INFO:n_channels_in set to: None +02/09/2023 04:24:26 PM INFO:extension set to: None +02/09/2023 04:24:26 PM INFO:vid_dir_flag set to: None +02/09/2023 04:24:26 PM INFO:num_train_per_exp set to: None +02/09/2023 04:24:26 PM INFO:chunks set to: None +02/09/2023 04:24:26 PM INFO:lockfirst set to: None +02/09/2023 04:24:26 PM INFO:load_valid set to: None +02/09/2023 04:24:26 PM INFO:augment_hue set to: False +02/09/2023 04:24:26 PM INFO:augment_brightness set to: False +02/09/2023 04:24:26 PM INFO:augment_hue_val set to: 0.05 +02/09/2023 04:24:26 PM INFO:augment_bright_val set to: 0.05 +02/09/2023 04:24:26 PM INFO:augment_rotation_val set to: 5 +02/09/2023 04:24:26 PM INFO:mirror_augmentation set to: False +02/09/2023 04:24:26 PM INFO:right_keypoints set to: None +02/09/2023 04:24:26 PM INFO:left_keypoints set to: None +02/09/2023 04:24:26 PM INFO:drop_landmark set to: None +02/09/2023 04:24:26 PM INFO:raw_im_h set to: None +02/09/2023 04:24:26 PM INFO:raw_im_w set to: None +02/09/2023 04:24:26 PM INFO:n_instances set to: 1 +02/09/2023 04:24:26 PM INFO:use_npy set to: False +02/09/2023 04:24:26 PM INFO:data_split_seed set to: None +02/09/2023 04:24:26 PM INFO:valid_exp set to: None +02/09/2023 04:24:26 PM INFO:lr set to: 0.001 +02/09/2023 04:24:26 PM INFO:rotate set to: True +02/09/2023 04:24:26 PM 
INFO:augment_continuous_rotation set to: False +02/09/2023 04:24:26 PM INFO:com_thresh set to: None +02/09/2023 04:24:26 PM INFO:cam3_train set to: None +02/09/2023 04:24:26 PM INFO:debug_volume_tifdir set to: None +02/09/2023 04:24:26 PM INFO:downfac set to: None +02/09/2023 04:24:26 PM INFO:dannce_predict_vol_tifdir set to: None +02/09/2023 04:24:26 PM INFO:n_rand_views set to: 0 +02/09/2023 04:24:26 PM INFO:rand_view_replace set to: True +02/09/2023 04:24:26 PM INFO:multi_gpu_train set to: False +02/09/2023 04:24:26 PM INFO:heatmap_reg set to: False +02/09/2023 04:24:26 PM INFO:heatmap_reg_coeff set to: 0.01 +02/09/2023 04:24:26 PM INFO:save_pred_targets set to: False +02/09/2023 04:24:26 PM INFO:huber-delta set to: 1.35 +02/09/2023 04:24:26 PM INFO:avg+max set to: None +02/09/2023 04:24:26 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting vid_dir_flag to True. +02/09/2023 04:24:26 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting extension to .mp4. +02/09/2023 04:24:26 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting chunks to {'Camera1': array([ 0, 3000, 6000, 9000, 12000, 15000]), 'Camera2': array([ 0, 3000, 6000, 9000, 12000, 15000]), 'Camera3': array([ 0, 3000, 6000, 9000, 12000, 15000]), 'Camera4': array([ 0, 3000, 6000, 9000, 12000, 15000]), 'Camera5': array([ 0, 3000, 6000, 9000, 12000, 15000]), 'Camera6': array([ 0, 3000, 6000, 9000, 12000, 15000])}. +02/09/2023 04:24:26 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting n_channels_in to 3. +02/09/2023 04:24:26 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting raw_im_h to 1024. +02/09/2023 04:24:26 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting raw_im_w to 1152. +02/09/2023 04:24:26 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting expval to True. +02/09/2023 04:24:26 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting net to finetune_AVG. 
+02/09/2023 04:24:27 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting crop_height to [0, 1024]. +02/09/2023 04:24:27 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting crop_width to [0, 1152]. +02/09/2023 04:24:27 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting maxbatch to 2. +02/09/2023 04:24:27 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting start_batch to 0. +02/09/2023 04:24:27 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting vmin to -60.0. +02/09/2023 04:24:27 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting vmax to 60.0. +02/09/2023 04:24:27 PM INFO:dannce.interface.setup_dannce_predict Using torch predict mode +02/09/2023 04:24:27 PM INFO:dannce.interface.setup_dannce_predict Using camnames: ['Camera1', 'Camera2', 'Camera3', 'Camera4', 'Camera5', 'Camera6'] +02/09/2023 04:24:27 PM INFO:dannce.interface.do_COM_load Experiment 0 using com3d: ./alabel3d_temp_dannce.mat +02/09/2023 04:24:27 PM INFO:dannce.interface.do_COM_load Removed 0 samples from the dataset because they either had COM positions over cthresh, or did not have matching sampleIDs in the COM file +02/09/2023 04:24:27 PM INFO:dannce.interface.write_com_file Saving 3D COM to ../../demo/markerless_mouse_1/DANNCE/predict_test/com3d_used.mat +02/09/2023 04:24:27 PM INFO:None +02/09/2023 04:24:30 PM INFO:dannce.engine.generator.DataGenerator_3Dconv_torch.__init__Executing eagerly: True +02/09/2023 04:24:31 PM INFO:dannce.engine.generator.DataGenerator_3Dconv_torch.__init__Init took 3.5297820568084717 sec. +02/09/2023 04:24:31 PM INFO:dannce.interface.build_model Initializing Network... 
+02/09/2023 04:24:31 PM INFO:dannce.interface.build_model Loading model from ../../demo/markerless_mouse_1/DANNCE/train_results/AVG/weights.1200-12.77642.hdf5 +02/09/2023 04:24:35 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 0_Camera2 +02/09/2023 04:24:35 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 0_Camera1 +02/09/2023 04:24:35 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 0_Camera4 +02/09/2023 04:24:35 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 0_Camera3 +02/09/2023 04:24:35 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 0_Camera5 +02/09/2023 04:24:35 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 0_Camera6 +02/16/2023 05:34:23 PM INFO:com_train_dir set to: ../../demo/markerless_mouse_1/COM/train_test/ +02/16/2023 05:34:23 PM INFO:com_predict_dir set to: ../../demo/markerless_mouse_1/COM/predict_test/ +02/16/2023 05:34:23 PM INFO:com_predict_weights set to: ../../demo/markerless_mouse_1/COM/train_results/weights.250-0.00036.hdf5 +02/16/2023 05:34:23 PM INFO:dannce_train_dir set to: ../../demo/markerless_mouse_1/DANNCE/train_test/ +02/16/2023 05:34:23 PM INFO:dannce_predict_dir set to: ../../demo/markerless_mouse_1/DANNCE/predict_test/AVG_MAX/ +02/16/2023 05:34:23 PM INFO:dannce_predict_model set to: ../../demo/markerless_mouse_1/DANNCE/train_results/AVG/weights.1200-12.77642.hdf5 +02/16/2023 05:34:23 PM INFO:exp set to: [{'label3d_file': './label3d_temp_dannce.mat', 'viddir': '../../demo/markerless_mouse_1/videos/'}, {'label3d_file': './label3d_temp_dannce.mat', 'viddir': '../../demo/markerless_mouse_2/videos/'}] +02/16/2023 05:34:23 PM INFO:viddir set to: ../../demo/markerless_mouse_1/videos/ +02/16/2023 05:34:23 PM INFO:gpu_id set to: 0 +02/16/2023 05:34:23 PM INFO:io_config set to: io.yaml +02/16/2023 05:34:23 PM 
INFO:new_n_channels_out set to: 22 +02/16/2023 05:34:23 PM INFO:batch_size set to: 4 +02/16/2023 05:34:23 PM INFO:epochs set to: 1 +02/16/2023 05:34:23 PM INFO:train_mode set to: finetune +02/16/2023 05:34:23 PM INFO:slurm_config set to: ../../cluster/duke.yaml +02/16/2023 05:34:23 PM INFO:num_validation_per_exp set to: 4 +02/16/2023 05:34:23 PM INFO:vol_size set to: 120 +02/16/2023 05:34:23 PM INFO:nvox set to: 64 +02/16/2023 05:34:23 PM INFO:max_num_samples set to: 10 +02/16/2023 05:34:23 PM INFO:start_batch set to: 0 +02/16/2023 05:34:23 PM INFO:dannce_finetune_weights set to: ../../demo/markerless_mouse_1/DANNCE/train_results/AVG_MAX/ +02/16/2023 05:34:23 PM INFO:metric set to: ['euclidean_distance_3D', 'centered_euclidean_distance_3D'] +02/16/2023 05:34:23 PM INFO:log_dest set to: ../logs/ +02/16/2023 05:34:23 PM INFO:log_level set to: INFO +02/16/2023 05:34:23 PM INFO:base_config set to: config_mousetest.yaml +02/16/2023 05:34:23 PM INFO:crop_height set to: None +02/16/2023 05:34:23 PM INFO:crop_width set to: None +02/16/2023 05:34:23 PM INFO:camnames set to: None +02/16/2023 05:34:23 PM INFO:n_channels_out set to: 20 +02/16/2023 05:34:23 PM INFO:sigma set to: 10 +02/16/2023 05:34:23 PM INFO:verbose set to: 1 +02/16/2023 05:34:23 PM INFO:net set to: None +02/16/2023 05:34:23 PM INFO:immode set to: vid +02/16/2023 05:34:23 PM INFO:mono set to: False +02/16/2023 05:34:23 PM INFO:mirror set to: False +02/16/2023 05:34:23 PM INFO:norm_method set to: layer +02/16/2023 05:34:23 PM INFO:start_sample set to: None +02/16/2023 05:34:23 PM INFO:net_type set to: AVG +02/16/2023 05:34:23 PM INFO:com_fromlabels set to: False +02/16/2023 05:34:23 PM INFO:medfilt_window set to: None +02/16/2023 05:34:23 PM INFO:com_file set to: None +02/16/2023 05:34:23 PM INFO:new_last_kernel_size set to: [3, 3, 3] +02/16/2023 05:34:23 PM INFO:n_layers_locked set to: 2 +02/16/2023 05:34:23 PM INFO:vmin set to: None +02/16/2023 05:34:23 PM INFO:vmax set to: None +02/16/2023 05:34:23 PM 
INFO:interp set to: nearest +02/16/2023 05:34:23 PM INFO:depth set to: False +02/16/2023 05:34:23 PM INFO:comthresh set to: 0 +02/16/2023 05:34:23 PM INFO:weighted set to: False +02/16/2023 05:34:23 PM INFO:com_method set to: median +02/16/2023 05:34:23 PM INFO:cthresh set to: None +02/16/2023 05:34:23 PM INFO:channel_combo set to: None +02/16/2023 05:34:23 PM INFO:predict_mode set to: torch +02/16/2023 05:34:23 PM INFO:n_views set to: 6 +02/16/2023 05:34:23 PM INFO:expval set to: None +02/16/2023 05:34:23 PM INFO:from_weights set to: None +02/16/2023 05:34:23 PM INFO:write_npy set to: None +02/16/2023 05:34:23 PM INFO:loss set to: mask_nan_keep_loss +02/16/2023 05:34:23 PM INFO:n_channels_in set to: None +02/16/2023 05:34:23 PM INFO:extension set to: None +02/16/2023 05:34:23 PM INFO:vid_dir_flag set to: None +02/16/2023 05:34:23 PM INFO:num_train_per_exp set to: None +02/16/2023 05:34:23 PM INFO:chunks set to: None +02/16/2023 05:34:23 PM INFO:lockfirst set to: None +02/16/2023 05:34:23 PM INFO:load_valid set to: None +02/16/2023 05:34:23 PM INFO:augment_hue set to: False +02/16/2023 05:34:23 PM INFO:augment_brightness set to: False +02/16/2023 05:34:23 PM INFO:augment_hue_val set to: 0.05 +02/16/2023 05:34:23 PM INFO:augment_bright_val set to: 0.05 +02/16/2023 05:34:23 PM INFO:augment_rotation_val set to: 5 +02/16/2023 05:34:23 PM INFO:mirror_augmentation set to: False +02/16/2023 05:34:23 PM INFO:right_keypoints set to: None +02/16/2023 05:34:23 PM INFO:left_keypoints set to: None +02/16/2023 05:34:23 PM INFO:drop_landmark set to: None +02/16/2023 05:34:23 PM INFO:raw_im_h set to: None +02/16/2023 05:34:23 PM INFO:raw_im_w set to: None +02/16/2023 05:34:23 PM INFO:n_instances set to: 1 +02/16/2023 05:34:23 PM INFO:use_npy set to: False +02/16/2023 05:34:23 PM INFO:data_split_seed set to: None +02/16/2023 05:34:23 PM INFO:valid_exp set to: None +02/16/2023 05:34:23 PM INFO:lr set to: 0.001 +02/16/2023 05:34:23 PM INFO:rotate set to: True +02/16/2023 05:34:23 PM 
INFO:augment_continuous_rotation set to: False +02/16/2023 05:34:23 PM INFO:com_thresh set to: None +02/16/2023 05:34:23 PM INFO:cam3_train set to: None +02/16/2023 05:34:23 PM INFO:debug_volume_tifdir set to: None +02/16/2023 05:34:23 PM INFO:downfac set to: None +02/16/2023 05:34:23 PM INFO:dannce_predict_vol_tifdir set to: None +02/16/2023 05:34:23 PM INFO:n_rand_views set to: 0 +02/16/2023 05:34:23 PM INFO:rand_view_replace set to: True +02/16/2023 05:34:23 PM INFO:multi_gpu_train set to: False +02/16/2023 05:34:23 PM INFO:heatmap_reg set to: False +02/16/2023 05:34:23 PM INFO:heatmap_reg_coeff set to: 0.01 +02/16/2023 05:34:23 PM INFO:save_pred_targets set to: False +02/16/2023 05:34:23 PM INFO:huber-delta set to: 1.35 +02/16/2023 05:34:23 PM INFO:avg+max set to: None +02/16/2023 05:34:23 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting vid_dir_flag to True. +02/16/2023 05:34:23 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting extension to .mp4. +02/16/2023 05:34:23 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting chunks to {'Camera1': array([ 0, 3000, 6000, 9000, 12000, 15000]), 'Camera2': array([ 0, 3000, 6000, 9000, 12000, 15000]), 'Camera3': array([ 0, 3000, 6000, 9000, 12000, 15000]), 'Camera4': array([ 0, 3000, 6000, 9000, 12000, 15000]), 'Camera5': array([ 0, 3000, 6000, 9000, 12000, 15000]), 'Camera6': array([ 0, 3000, 6000, 9000, 12000, 15000])}. +02/16/2023 05:34:23 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting n_channels_in to 3. +02/16/2023 05:34:23 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting raw_im_h to 1024. +02/16/2023 05:34:23 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting raw_im_w to 1152. +02/16/2023 05:34:23 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting expval to True. +02/16/2023 05:34:23 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting net to finetune_AVG. 
+02/16/2023 05:34:24 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting crop_height to [0, 1024]. +02/16/2023 05:34:24 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting crop_width to [0, 1152]. +02/16/2023 05:34:24 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting maxbatch to 2. +02/16/2023 05:34:24 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting start_batch to 0. +02/16/2023 05:34:24 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting vmin to -60.0. +02/16/2023 05:34:24 PM INFO:dannce.engine.preprocessing.py.print_and_set Setting vmax to 60.0. +02/16/2023 05:34:24 PM INFO:dannce.interface.setup_dannce_predict Using torch predict mode +02/16/2023 05:34:24 PM INFO:dannce.interface.setup_dannce_predict Using camnames: ['Camera1', 'Camera2', 'Camera3', 'Camera4', 'Camera5', 'Camera6'] +02/16/2023 05:34:24 PM INFO:dannce.interface.do_COM_load Experiment 0 using com3d: ./alabel3d_temp_dannce.mat +02/16/2023 05:34:24 PM INFO:dannce.interface.do_COM_load Removed 0 samples from the dataset because they either had COM positions over cthresh, or did not have matching sampleIDs in the COM file +02/16/2023 05:34:24 PM INFO:dannce.interface.write_com_file Saving 3D COM to ../../demo/markerless_mouse_1/DANNCE/predict_test/AVG_MAX/com3d_used.mat +02/16/2023 05:34:24 PM INFO:None +02/16/2023 05:34:27 PM INFO:dannce.engine.generator.DataGenerator_3Dconv_torch.__init__Executing eagerly: True +02/16/2023 05:34:27 PM INFO:dannce.engine.generator.DataGenerator_3Dconv_torch.__init__Init took 2.618299961090088 sec. +02/16/2023 05:34:27 PM INFO:dannce.interface.build_model Initializing Network... 
+02/16/2023 05:34:27 PM INFO:dannce.interface.build_model Loading model from ../../demo/markerless_mouse_1/DANNCE/train_results/AVG/weights.1200-12.77642.hdf5 +02/16/2023 05:34:28 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 0_Camera2 +02/16/2023 05:34:28 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 0_Camera5 +02/16/2023 05:34:28 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 0_Camera6 +02/16/2023 05:34:28 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 0_Camera1 +02/16/2023 05:34:28 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 0_Camera3 +02/16/2023 05:34:28 PM INFO:dannce.engine.video.LoadVideoFrame.load_vid_frame Loading new video: 0.mp4 for 0_Camera4 diff --git a/tests/tests.sh b/tests/tests.sh index a33b9fc..d148cfb 100755 --- a/tests/tests.sh +++ b/tests/tests.sh @@ -22,65 +22,72 @@ python setup.py install cd tests/configs -echo "Testing COMfinder training" -cp ./label3d_temp_dannce.mat ./alabel3d_temp_dannce.mat -com-train config_com_mousetest.yaml --com-finetune-weights=../../demo/markerless_mouse_1/COM/weights/ +# echo "Testing COMfinder training" +# cp ./label3d_temp_dannce.mat ./alabel3d_temp_dannce.mat +# com-train config_com_mousetest.yaml --com-finetune-weights=../../demo/markerless_mouse_1/COM/weights/ -echo "Testing COMfinder training w/ mono" -com-train config_com_mousetest.yaml --mono=True +# echo "Testing COMfinder training w/ mono" +# com-train config_com_mousetest.yaml --mono=True -echo "Testing COMfinder prediction" -com-predict config_com_mousetest.yaml -python ../compare_predictions.py ../touchstones/COM3D_undistorted_masternn.mat ../../demo/markerless_mouse_1/COM/predict_test/com3d.mat 0.001 +# echo "Testing COMfinder prediction" +# com-predict config_com_mousetest.yaml +# python ../compare_predictions.py 
../touchstones/COM3D_undistorted_masternn.mat ../../demo/markerless_mouse_1/COM/predict_test/com3d.mat 0.001 -echo "Testing COMfinder prediction, 3 cams" -cp ./label3d_temp_dannce_3cam.mat ./alabel3d_temp_dannce.mat -com-predict config_com_mousetest.yaml --downfac=4 +# echo "Testing COMfinder prediction, 3 cams" +# cp ./label3d_temp_dannce_3cam.mat ./alabel3d_temp_dannce.mat +# com-predict config_com_mousetest.yaml --downfac=4 -echo "Testing COMfinder prediction, 5 cams" -cp ./label3d_temp_dannce_5cam.mat ./alabel3d_temp_dannce.mat -com-predict config_com_mousetest.yaml --downfac=2 +# echo "Testing COMfinder prediction, 5 cams" +# cp ./label3d_temp_dannce_5cam.mat ./alabel3d_temp_dannce.mat +# com-predict config_com_mousetest.yaml --downfac=2 -echo "Testing DANNCE training, finetune_MAX" +# echo "Testing DANNCE training, finetune_MAX" cp ./label3d_temp_dannce.mat ./alabel3d_temp_dannce.mat -dannce-train config_mousetest.yaml --net-type=MAX --dannce-finetune-weights=../../demo/markerless_mouse_1/DANNCE/weights/weights.rat.MAX/ +#dannce-train config_mousetest.yaml --net-type=MAX --dannce-finetune-weights=../../demo/markerless_mouse_1/DANNCE/weights/weights.rat.MAX/ + +# echo "Testing DANNCE training, finetune_AVG" +# dannce-train config_mousetest.yaml --net-type=AVG --dannce-finetune-weights=../../demo/markerless_mouse_1/DANNCE/weights/ + +# echo "Testing DANNCE training, AVG+MAX finetune fromMAX" +# dannce-train config_mousetest.yaml --avg-max=100 --dannce-finetune-weights=../../demo/markerless_mouse_1/DANNCE/weights/weights.rat.MAX/ -echo "Testing DANNCE training, finetune_AVG" -dannce-train config_mousetest.yaml --net-type=AVG --dannce-finetune-weights=../../demo/markerless_mouse_1/DANNCE/weights/ +# echo "Testing DANNCE prediction after AVG+MAX finetune" +# dannce-predict config_mousetest.yaml --max-num-samples=100 --net-type=AVG --dannce-predict-model=../../demo/markerless_mouse_1/DANNCE/train_test/fullmodel_weights/fullmodel_end.hdf5 -echo "Testing DANNCE 
training, finetune_AVG with heatmap regularization" -dannce-train config_mousetest.yaml --net-type=AVG --dannce-finetune-weights=../../demo/markerless_mouse_1/DANNCE/weights/ --heatmap-reg=True -echo "Testing DANNCE training, finetune_AVG from previous finetune" -dannce-train config_mousetest.yaml --net-type=AVG --dannce-finetune-weights=../../demo/markerless_mouse_1/DANNCE/train_results/AVG/ +# echo "Testing DANNCE training, finetune_AVG with heatmap regularization" +# dannce-train config_mousetest.yaml --net-type=AVG --dannce-finetune-weights=../../demo/markerless_mouse_1/DANNCE/weights/ --heatmap-reg=True + +# echo "Testing DANNCE training, finetune_AVG from previous finetune" +# dannce-train config_mousetest.yaml --net-type=AVG --dannce-finetune-weights=../../demo/markerless_mouse_1/DANNCE/train_results/AVG/ echo "Testing DANNCE training, AVG net from scratch" dannce-train config_mousetest.yaml --net=unet3d_big_expectedvalue --train-mode=new --n-channels-out=22 -echo "Testing DANNCE training, MAX net from scratch" -dannce-train config_mousetest.yaml --net=unet3d_big --train-mode=new --n-channels-out=22 +# echo "Testing DANNCE training, MAX net from scratch" +# dannce-train config_mousetest.yaml --net=unet3d_big --train-mode=new --n-channels-out=22 -echo "Testing DANNCE training, AVG net continued" -dannce-train config_mousetest.yaml --net-type=AVG --train-mode=continued --dannce-finetune-weights=../../demo/markerless_mouse_1/DANNCE/train_results/AVG/ +# echo "Testing DANNCE training, AVG net continued" +# dannce-train config_mousetest.yaml --net-type=AVG --train-mode=continued --dannce-finetune-weights=../../demo/markerless_mouse_1/DANNCE/train_results/AVG/ -echo "Testing DANNCE training, MAX net continued" -dannce-train config_mousetest.yaml --net=finetune_MAX --train-mode=continued --dannce-finetune-weights=../../demo/markerless_mouse_1/DANNCE/train_results/ +# echo "Testing DANNCE training, MAX net continued" +# dannce-train config_mousetest.yaml 
--net=finetune_MAX --train-mode=continued --dannce-finetune-weights=../../demo/markerless_mouse_1/DANNCE/train_results/ -echo "Testing DANNCE training, AVG MONO from scratch" -dannce-train config_mousetest.yaml --net-type=AVG --train-mode=new --net=unet3d_big_expectedvalue --mono=True --n-channels-out=22 +# echo "Testing DANNCE training, AVG MONO from scratch" +# dannce-train config_mousetest.yaml --net-type=AVG --train-mode=new --net=unet3d_big_expectedvalue --mono=True --n-channels-out=22 -echo "Testing DANNCE training, AVG MONO from scratch w/ augmentation" -dannce-train config_mousetest.yaml --net-type=AVG --train-mode=new --net=unet3d_big_expectedvalue --mono=True --n-channels-out=22 --augment-brightness=True --augment-continuous-rotation=True --augment-hue=True +# echo "Testing DANNCE training, AVG MONO from scratch w/ augmentation" +# dannce-train config_mousetest.yaml --net-type=AVG --train-mode=new --net=unet3d_big_expectedvalue --mono=True --n-channels-out=22 --augment-brightness=True --augment-continuous-rotation=True --augment-hue=True -echo "Testing DANNCE training, AVG MONO finetune" -dannce-train config_mousetest.yaml --net-type=AVG --mono=True --dannce-finetune-weights=../../demo/markerless_mouse_1/DANNCE/weights/weights.rat.AVG.MONO/ +# echo "Testing DANNCE training, AVG MONO finetune" +# dannce-train config_mousetest.yaml --net-type=AVG --mono=True --dannce-finetune-weights=../../demo/markerless_mouse_1/DANNCE/weights/weights.rat.AVG.MONO/ -echo "Testing DANNCE training, AVG MONO finetune" -dannce-train config_mousetest.yaml --net-type=AVG --mono=True --dannce-finetune-weights=../../demo/markerless_mouse_1/DANNCE/weights/weights.rat.AVG.MONO/ --drop-landmark=[5,7] +# echo "Testing DANNCE training, AVG MONO finetune" +# dannce-train config_mousetest.yaml --net-type=AVG --mono=True --dannce-finetune-weights=../../demo/markerless_mouse_1/DANNCE/weights/weights.rat.AVG.MONO/ --drop-landmark=[5,7] -cp ./label3d_temp_dannce.mat 
./alabel3d_temp_dannce.mat -echo "Testing DANNCE prediction, MONO" -dannce-predict config_mousetest.yaml --net-type=AVG --dannce-predict-model=../../demo/markerless_mouse_1/DANNCE/train_test/fullmodel_weights/fullmodel_end.hdf5 --mono=True +# cp ./label3d_temp_dannce.mat ./alabel3d_temp_dannce.mat +# echo "Testing DANNCE prediction, MONO" +# dannce-predict config_mousetest.yaml --net-type=AVG --dannce-predict-model=../../demo/markerless_mouse_1/DANNCE/train_test/fullmodel_weights/fullmodel_end.hdf5 --mono=True # # 32 NVOX --------- # #MONO @@ -103,33 +110,33 @@ dannce-predict config_mousetest.yaml --net-type=AVG --dannce-predict-model=../.. # # ----------- -cp ./label3d_temp_dannce.mat ./alabel3d_temp_dannce.mat -echo "Testing DANNCE AVG prediction" -dannce-predict config_mousetest.yaml --net-type=AVG -python ../compare_predictions.py ../touchstones/save_data_AVG_torch_nearest.mat ../../demo/markerless_mouse_1/DANNCE/predict_test/save_data_AVG0.mat 0.001 +# cp ./label3d_temp_dannce.mat ./alabel3d_temp_dannce.mat +# echo "Testing DANNCE AVG prediction" +# dannce-predict config_mousetest.yaml --net-type=AVG +# python ../compare_predictions.py ../touchstones/save_data_AVG_torch_nearest.mat ../../demo/markerless_mouse_1/DANNCE/predict_test/save_data_AVG0.mat 0.001 -echo "Testing DANNCE MAX prediction" -dannce-predict config_mousetest.yaml --net-type=MAX --expval=False --dannce-predict-model=../../demo/markerless_mouse_1/DANNCE/train_results/weights.12000-0.00014.hdf5 -python ../compare_predictions.py ../touchstones/save_data_MAX_torchnearest_newtfroutine.mat ../../demo/markerless_mouse_1/DANNCE/predict_test/save_data_MAX0.mat 0.001 +# echo "Testing DANNCE MAX prediction" +# dannce-predict config_mousetest.yaml --net-type=MAX --expval=False --dannce-predict-model=../../demo/markerless_mouse_1/DANNCE/train_results/weights.12000-0.00014.hdf5 +# python ../compare_predictions.py ../touchstones/save_data_MAX_torchnearest_newtfroutine.mat 
../../demo/markerless_mouse_1/DANNCE/predict_test/save_data_MAX0.mat 0.001 -echo "Testing npy volume generation" -cp ./label3d_voltest_dannce_m1.mat ./alabel3d_temp_dannce.mat -dannce-predict config_mousetest.yaml --net-type=AVG --write-npy=../../demo/markerless_mouse_1/npy_volumes/ --batch-size=1 +# echo "Testing npy volume generation" +# cp ./label3d_voltest_dannce_m1.mat ./alabel3d_temp_dannce.mat +# dannce-predict config_mousetest.yaml --net-type=AVG --write-npy=../../demo/markerless_mouse_1/npy_volumes/ --batch-size=1 -cp ./label3d_voltest_dannce_m2.mat ./alabel3d_temp_dannce.mat -dannce-predict config_mousetest.yaml --net-type=AVG --write-npy=../../demo/markerless_mouse_1/npy_volumes/ --batch-size=1 +# cp ./label3d_voltest_dannce_m2.mat ./alabel3d_temp_dannce.mat +# dannce-predict config_mousetest.yaml --net-type=AVG --write-npy=../../demo/markerless_mouse_1/npy_volumes/ --batch-size=1 -echo "Testing npy volume training, with validation recording" -cd ../../demo/markerless_mouse_1/ -dannce-train ../../configs/dannce_mouse_config.yaml --net-type=AVG --use-npy=True --dannce-train-dir=./DANNCE/npy_test/ --epochs=10 --valid-exp=[1] --gpu=1 +# echo "Testing npy volume training, with validation recording" +# cd ../../demo/markerless_mouse_1/ +# dannce-train ../../configs/dannce_mouse_config.yaml --net-type=AVG --use-npy=True --dannce-train-dir=./DANNCE/npy_test/ --epochs=10 --valid-exp=[1] --gpu=1 -echo "Testing npy volume training, with multi gpu support" -dannce-train ../../configs/dannce_mouse_config.yaml --net-type=AVG --batch-size=4 --use-npy=True --dannce-train-dir=./DANNCE/npy_test/ --epochs=10 --multi-gpu-train=True --gpu=1 +# echo "Testing npy volume training, with multi gpu support" +# dannce-train ../../configs/dannce_mouse_config.yaml --net-type=AVG --batch-size=4 --use-npy=True --dannce-train-dir=./DANNCE/npy_test/ --epochs=10 --multi-gpu-train=True --gpu=1 -echo "Testing npy volume testing, with num_train_exp" -dannce-train 
../../configs/dannce_mouse_config.yaml --net-type=AVG --use-npy=True --dannce-train-dir=./DANNCE/npy_test/ --epochs=10 --num-train-per-exp=2 --batch-size=1 --gpu=1 +# echo "Testing npy volume testing, with num_train_exp" +# dannce-train ../../configs/dannce_mouse_config.yaml --net-type=AVG --use-npy=True --dannce-train-dir=./DANNCE/npy_test/ --epochs=10 --num-train-per-exp=2 --batch-size=1 --gpu=1 -echo "Testing npy volume testing, with num_train_exp and validation recording" -dannce-train ../../configs/dannce_mouse_config.yaml --net-type=AVG --use-npy=True --dannce-train-dir=./DANNCE/npy_test/ --epochs=10 --valid-exp=[1] --num-train-per-exp=2 --batch-size=1 --gpu=1 +# echo "Testing npy volume testing, with num_train_exp and validation recording" +# dannce-train ../../configs/dannce_mouse_config.yaml --net-type=AVG --use-npy=True --dannce-train-dir=./DANNCE/npy_test/ --epochs=10 --valid-exp=[1] --num-train-per-exp=2 --batch-size=1 --gpu=1 echo "PASSED WITHOUT ERROR" diff --git a/tests/touchstones/save_data_AVG_MAX.mat b/tests/touchstones/save_data_AVG_MAX.mat new file mode 100644 index 0000000..8daacc0 Binary files /dev/null and b/tests/touchstones/save_data_AVG_MAX.mat differ diff --git a/tests/touchstones/save_data_MAX_torchnearest_newtfroutine.mat b/tests/touchstones/save_data_MAX_torchnearest_newtfroutine.mat index b20ab56..784d1fe 100644 Binary files a/tests/touchstones/save_data_MAX_torchnearest_newtfroutine.mat and b/tests/touchstones/save_data_MAX_torchnearest_newtfroutine.mat differ