Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
16 changes: 11 additions & 5 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -3,14 +3,12 @@

This package (Top2Phase) implements a graph neural network for classification of water phases. It constructs graphs based on the positional information of neighboring water molecules obtained from molecular dynamics trajectories, and trains a graph neural network model to classify these phases using only edge information.

For further information see following paper.
[Top2Phase](https://doi.org)
For further information check out [the paper](https://pubs.acs.org/doi/full/10.1021/acs.jpcc.2c07423).

![](images/image.png)
![Alt text](doc/logo.gif)

## Table of Contents

- [How to cite](#how-to-cite)
- [Installation](#Installation)
- [Usage](#usage)
- [Trajectory to Graph](#MD-Data-Processing)
Expand All @@ -20,7 +18,7 @@ For further information see following paper.
- [Visualize the results](#visualize-the-results)
- [Data](#data)
- [Authors](#authors)
- [License](#license)
- [License and credits](#License-and-credits)

Topological Classification of Water Phases Using Edge-Conditioned Convolutional Graph Neural Network

Expand Down Expand Up @@ -184,3 +182,11 @@ Prediction
```
python run.py # We will add further details for making it more user-friendly
```

## License and credits
This project is licensed under the GNU General Public License v3.0. See the [LICENSE](./license.txt) file for details.
If you use this project in your research, please cite it as follows:
- Alireza Moradzadeh, Hananeh Oliaei, Narayana R. Aluru. "Topology-Based Phase Identification of Bulk, Interface, and Confined Water Using an Edge-Conditioned Convolutional Graph Neural Network" The Journal of Physical Chemistry C 127 (5), 2612-2621 (2023).
[![doi:10.1021/acs.jpcc.2c07423](https://img.shields.io/badge/DOI-10.1021%2Facs.jpcc.2c07423-blue)](https://doi.org/10.1021/acs.jpcc.2c07423)


Binary file added doc/logo.gif
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
674 changes: 674 additions & 0 deletions license.txt

Large diffs are not rendered by default.

93 changes: 93 additions & 0 deletions src/Top2Phase/pytorch/model.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,93 @@
import itertools
import os

import numpy as np
import torch
import torch.nn as nn
import torch_geometric.nn as pyg_nn
from torch_geometric.nn import global_mean_pool
from torch_geometric.transforms import GCNNorm

class PhaseModel(nn.Module):
    """Edge-conditioned GNN for water-phase classification.

    Stacks ``ecc_layers`` NNConv (edge-conditioned convolution) layers whose
    edge kernels are small MLPs, pools node embeddings with a global mean
    pool, and finishes with an MLP head whose last layer emits
    ``Output_dim`` logits (no final activation).

    Args:
        Node_dim: dimensionality of the input node features.
        Edge_dim: dimensionality of the input edge features.
        Output_dim: number of output logits (1 for binary classification).
        kernel_network: hidden-layer widths of each NNConv edge-kernel MLP
            (``None`` means the default ``[30, 60, 30]``).
        ecc_layers: number of edge-conditioned convolution layers.
        ecc_hidden_factor: per-layer widening factor of the node embeddings.
        mlp_layers: number of layers in the read-out MLP (incl. output layer).
        pool_type: stored in the config for compatibility; pooling here is a
            global mean pool.
        activation: one of ``'relu'``, ``'sigmoid'``, ``'tanh'``.
        use_bias: whether the linear layers carry a bias term.
    """

    def __init__(self,
                 Node_dim: int = 2,
                 Edge_dim: int = 1,
                 Output_dim: int = 1,
                 kernel_network: list = None,
                 ecc_layers: int = 3,
                 ecc_hidden_factor: int = 3,
                 mlp_layers: int = 3,
                 pool_type: str = 'sum',
                 activation: str = 'relu',
                 use_bias: bool = True
                 ):
        super().__init__()
        # Avoid the mutable-default-argument pitfall; None stands in for the
        # original default [30, 60, 30].
        if kernel_network is None:
            kernel_network = [30, 60, 30]
        # Store the full architecture so get_config() can rebuild the model.
        self.config = {'Node_dim': Node_dim,
                       'Edge_dim': Edge_dim,
                       'Output_dim': Output_dim,
                       'kernel_network': kernel_network,
                       'ecc_layers': ecc_layers,
                       'ecc_hidden_factor': ecc_hidden_factor,
                       'mlp_layers': mlp_layers,
                       'pool_type': pool_type,
                       'activation': activation,
                       'use_bias': use_bias}
        # Map to activation *classes* so each use gets a fresh instance.
        activation_dict = {
            'relu': nn.ReLU,
            'sigmoid': nn.Sigmoid,
            'tanh': nn.Tanh,
        }
        ################################################################################
        # CREATE NETWORK
        ################################################################################
        # nn.ModuleList (not a plain Python list) so the sub-modules are
        # registered and their parameters seen by optimizers / .to(device).
        self.ECCNet = nn.ModuleList()
        print('information about the model: ')
        for cnt_layer in range(ecc_layers):
            in_channels = int(Node_dim * ecc_hidden_factor ** cnt_layer)
            out_channels = int(Node_dim * ecc_hidden_factor ** (cnt_layer + 1))
            # NNConv requires its kernel to map each edge feature vector to an
            # in_channels*out_channels weight matrix, so the kernel MLP runs
            # Edge_dim -> kernel_network... -> in_channels*out_channels.
            # Build the dims list fresh each iteration (the original prepended
            # Edge_dim to the shared list every loop pass, growing it).
            dims = [Edge_dim] + list(kernel_network) + [in_channels * out_channels]
            mlp = []
            for i in range(len(dims) - 1):
                mlp.append(nn.Linear(dims[i], dims[i + 1], bias=use_bias))
                if i < len(dims) - 2:  # no activation after the final layer
                    mlp.append(activation_dict[activation]())
            kernel = torch.nn.Sequential(*mlp)
            layer = pyg_nn.conv.NNConv(
                in_channels,
                out_channels,
                kernel,
                bias=use_bias
            )
            self.ECCNet.append(layer)
            print("hidden dimensions of layer : ", cnt_layer + 1,
                  " of GNN equals to : ", out_channels)

        # Read-out MLP: mlp_layers-1 hidden blocks that shrink the pooled
        # embedding back towards Output_dim, then a linear output layer.
        self.MLPNet = nn.ModuleList()
        # Width produced by the last ECC layer (input to the first MLP layer).
        in_channels = int(Node_dim * ecc_hidden_factor ** ecc_layers)
        for cnt_layer in range(mlp_layers - 1):
            out_channels = max(Output_dim,
                               int(Node_dim * ecc_hidden_factor ** (ecc_layers - cnt_layer - 1)))
            self.MLPNet.append(nn.Sequential(
                nn.Linear(in_channels, out_channels, bias=use_bias),
                activation_dict[activation](),
            ))
            print("hidden dimensions of layer : ", cnt_layer + 1,
                  ' of MLP equals to : ', out_channels)
            in_channels = out_channels

        # Output layer with no activation (raw logits).
        self.MLPNet.append(nn.Linear(in_channels, Output_dim, bias=use_bias))

    def get_config(self):
        """Return the architecture hyper-parameters as a plain dict."""
        return self.config

    def forward(self, inputs):
        """Run the model.

        Args:
            inputs: tuple ``(x, edge_index, edge_attr)`` of node features,
                COO connectivity, and edge features (PyG conventions).

        Returns:
            Logits of shape ``(num_graphs, Output_dim)``.

        NOTE(review): no per-graph ``batch`` vector is threaded through, so
        the mean pool treats all nodes as a single graph — confirm against
        the data loader before batching multiple graphs.
        """
        x, a, e = inputs

        for conv in self.ECCNet:
            # NNConv takes positional (x, edge_index, edge_attr), not a list.
            x = conv(x, a, e)

        x = global_mean_pool(x, None)

        for mlp_layer in self.MLPNet:
            x = mlp_layer(x)
        return x
54 changes: 16 additions & 38 deletions src/Top2Phase/model.py → src/Top2Phase/tensorflow/model.py
Original file line number Diff line number Diff line change
@@ -1,26 +1,26 @@
import numpy as np
import os
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Dense, Input
from tensorflow.keras.models import Model
import tensorflow as tf
from spektral.layers.pooling import global_pool

from spektral.transforms.normalize_adj import NormalizeAdj
from spektral.layers import ECCConv


class PhaseModel(Model):
def __init__(self,
Node_dim=2,
Edge_dim=1,
Output_dim=1,
kernel_network=[30,60,30],
ecc_layers=3,
ecc_hidden_factor=3,
mlp_layers=3,
pool_type='sum',
activation='relu',
use_bias=True):
Node_dim: int = 2,
Edge_dim: int = 1,
Output_dim: int = 1,
kernel_network: list = [30,60,30],
ecc_layers: int = 3,
ecc_hidden_factor: int = 3,
mlp_layers: int = 3,
pool_type: str = 'sum',
activation: str = 'relu',
use_bias: bool = True
):
# Store all networks architecture in config
self.config = {'Node_dim': Node_dim,
'Edge_dim': Edge_dim,
Expand Down Expand Up @@ -59,10 +59,12 @@ def __init__(self,
if self.non_functional:
pass


def get_config(self):
return self.config

def call(self, inputs,training=True):

def call(self, inputs, training: bool = True):
x, a, e = inputs

for ecc_layer in range(self.config['ecc_layers']):
Expand All @@ -73,27 +75,3 @@ def call(self, inputs,training=True):
for mlp_layer in range(self.config['mlp_layers']):
x = self.MLPNet[mlp_layer](x)
return x
'''
model = PhaseModel()
loss_fn = tf.keras.losses.BinaryCrossentropy(from_logits=True)
opt = Adam(lr=0.0001)
@tf.function(input_signature=loader_tr.tf_signature(), experimental_relax_shapes=True)
def train_step(inputs, target):
with tf.GradientTape() as tape:
predictions = model(inputs, training=True)
loss = loss_fn(target, predictions)
loss += sum(model.losses)
gradients = tape.gradient(loss, model.trainable_variables)
opt.apply_gradients(zip(gradients, model.trainable_variables))
return loss
def train_step(self, seq, conc, gt_expr):
with tf.GradientTape() as tape:
predictions = self.model(inputs = (seq, conc), training=True)
loss = self.loss(gt_expr, predictions)

gradients = tape.gradient(loss, self.model.trainable_variables)
self.optimizer.apply_gradients(zip(gradients, self.model.trainable_variables))

#Note: Train loss is being updated by a loss which obtained by training = True prediction
self.running_loss_train(loss)
'''
File renamed without changes.
File renamed without changes.