spliceai_dropout.py
###############################################################################
# This file has the functions necessary to create the SpliceAI model.
###############################################################################
import tensorflow as tf
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Activation, Conv1D, Cropping1D, BatchNormalization, Add, Dropout
import numpy as np


def ResidualUnit(l, w, ar, dropout_rate):
    # Residual unit proposed in "Identity Mappings in Deep Residual Networks"
    # by He et al., with a dropout layer added between the two convolutions.
    # l: number of convolution kernels, w: window size, ar: dilation (atrous) rate.
    def f(input_node):
        bn1 = BatchNormalization()(input_node)
        act1 = Activation('relu')(bn1)
        conv1 = Conv1D(l, w, dilation_rate=ar, padding='same')(act1)
        bn2 = BatchNormalization()(conv1)
        act2 = Activation('relu')(bn2)
        drop1 = Dropout(dropout_rate)(act2)
        conv2 = Conv1D(l, w, dilation_rate=ar, padding='same')(drop1)
        output_node = Add()([conv2, input_node])
        return output_node

    return f


def SpliceAI(L, W, AR, dropout_rate):
    # L: number of convolution kernels
    # W: window size of each residual unit
    # AR: dilation (atrous) rate of each residual unit
    assert len(W) == len(AR)

    # CL is the total context cropped from the output, i.e. the receptive
    # field contributed by the dilated convolutions on both sides.
    CL = 2 * np.sum(AR * (W - 1))

    input0 = Input(shape=(None, 4))
    conv = Conv1D(L, 1)(input0)
    skip = Conv1D(L, 1)(conv)

    for i in range(len(W)):
        conv = ResidualUnit(int(L), int(W[i]), int(AR[i]), dropout_rate)(conv)

        # Add a skip connection to the output after every 4 residual units
        # and after the final unit.
        if (((i + 1) % 4 == 0) or ((i + 1) == len(W))):
            dense = Conv1D(L, 1)(conv)
            skip = Add()([skip, dense])

    skip = Cropping1D(int(CL / 2))(skip)

    output0 = [[] for t in range(1)]

    for t in range(1):
        bn3 = BatchNormalization()(skip)
        drop2 = Dropout(dropout_rate)(bn3)
        # Three output channels per position: no splice site, acceptor, donor.
        output0[t] = Conv1D(3, 1, activation='softmax')(drop2)

    model = Model(inputs=input0, outputs=output0)

    return model


def categorical_crossentropy_2d(y_true, y_pred):
    # Per-position categorical cross-entropy over the three output channels;
    # the 1e-10 term guards against log(0).
    y_true_float = tf.cast(y_true, dtype=tf.float32)
    return -tf.reduce_mean(
        y_true_float[:, :, 0] * tf.math.log(y_pred[:, :, 0] + 1e-10)
        + y_true_float[:, :, 1] * tf.math.log(y_pred[:, :, 1] + 1e-10)
        + y_true_float[:, :, 2] * tf.math.log(y_pred[:, :, 2] + 1e-10)
    )
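

# -----------------------------------------------------------------------------
# Minimal usage sketch (not part of the original file): builds the smallest
# (80 nt context) configuration of the architecture and compiles it with the
# loss defined above. The hyperparameter values below (L=32, W, AR,
# dropout_rate=0.2) are illustrative assumptions, not values taken from this
# repository.
# -----------------------------------------------------------------------------
if __name__ == '__main__':
    # Four residual units with window 11 and dilation 1 give CL = 80.
    W_example = np.asarray([11, 11, 11, 11])
    AR_example = np.asarray([1, 1, 1, 1])

    model = SpliceAI(32, W_example, AR_example, dropout_rate=0.2)
    model.compile(optimizer='adam', loss=categorical_crossentropy_2d)
    model.summary()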