forked from pbcquoc/cifar_dcgan
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathtf_layers.py
72 lines (50 loc) · 2.32 KB
/
tf_layers.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
import tensorflow as tf
from tensorflow.contrib.layers import batch_norm
def fully_connected(input, output_dim, activation_fn, scope, keep_prob=None, is_bn_training=None):
    """Fully connected layer: activation(input @ W + b), with optional batch
    norm before the activation and optional dropout after it.

    Args:
        input: 2-D tensor of shape [batch, input_dim] (static dim 1 required,
            since it is read via get_shape()).
        output_dim: number of output units.
        activation_fn: callable applied to the (optionally normalized)
            pre-activations, e.g. tf.nn.relu.
        scope: variable-scope name owning this layer's 'W' and 'b'.
        keep_prob: dropout keep probability; None disables dropout.
        is_bn_training: bool (or bool tensor) forwarded to batch_norm as
            is_training; None skips batch norm entirely.

    Returns:
        Tensor of shape [batch, output_dim].
    """
    input_dim = input.get_shape()[1].value
    with tf.variable_scope(scope):
        W = tf.get_variable('W', [input_dim, output_dim],
                            initializer=tf.truncated_normal_initializer(stddev=0.01))
        b = tf.get_variable('b', [output_dim], initializer=tf.constant_initializer(0))
        pre_activations = tf.matmul(input, W) + b
        # Identity check, not `== None`: if is_bn_training is a TF tensor,
        # `== None` builds a graph comparison op instead of a Python bool.
        if is_bn_training is None:
            bn = pre_activations
        else:
            # updates_collections=None forces moving-average updates in place.
            bn = batch_norm(pre_activations, updates_collections=None,
                            is_training=is_bn_training)
        h = activation_fn(bn)
        if keep_prob is None:
            h_drop = h
        else:
            h_drop = tf.nn.dropout(h, keep_prob)
        return h_drop
def transposed_conv2d(input, kernel_shape, output_shape, scope, activation_fn, keep_prob=None, is_bn_training=None, strides = [1,2,2,1]):
    """Transposed (fractionally-strided) conv layer: upsamples `input` via
    conv2d_transpose, adds a bias, then optional batch norm, activation, and
    optional dropout.

    Args:
        input: 4-D NHWC tensor.
        kernel_shape: filter shape [height, width, out_channels, in_channels]
            (conv2d_transpose convention).
        output_shape: full output shape [batch, height, width, out_channels];
            element 3 sizes the bias.
        scope: variable-scope name owning this layer's 'filter' and 'b'.
        activation_fn: callable applied to the (optionally normalized)
            pre-activations.
        keep_prob: dropout keep probability; None disables dropout.
        is_bn_training: bool (or bool tensor) forwarded to batch_norm as
            is_training; None skips batch norm entirely.
        strides: NHWC strides; defaults to 2x2 spatial upsampling.
            NOTE(review): mutable default list — harmless here since it is
            never mutated, but a tuple would be safer.

    Returns:
        Tensor with shape `output_shape`.
    """
    with tf.variable_scope(scope):
        kernel = tf.get_variable('filter', kernel_shape,
                                 initializer=tf.truncated_normal_initializer(stddev=0.01))
        conv = tf.nn.conv2d_transpose(input, kernel, output_shape=output_shape,
                                      strides=strides, padding='SAME')
        b = tf.get_variable('b', [output_shape[3]], initializer=tf.constant_initializer(0))
        pre_activations = conv + b
        # Identity check, not `== None`: if is_bn_training is a TF tensor,
        # `== None` builds a graph comparison op instead of a Python bool.
        if is_bn_training is None:
            bn = pre_activations
        else:
            # updates_collections=None forces moving-average updates in place.
            bn = batch_norm(pre_activations, updates_collections=None,
                            is_training=is_bn_training)
        h = activation_fn(bn)
        if keep_prob is None:
            h_drop = h
        else:
            h_drop = tf.nn.dropout(h, keep_prob)
        return h_drop
def conv2d(input, kernel_shape, strides, activation_fn, scope, keep_prob=None, is_bn_training=None):
    """2-D convolution layer: conv2d(input) + b, then optional batch norm,
    activation, and optional dropout.

    Args:
        input: 4-D NHWC tensor.
        kernel_shape: filter shape [height, width, in_channels, out_channels];
            element 3 sizes the bias.
        strides: NHWC strides for tf.nn.conv2d.
        activation_fn: callable applied to the (optionally normalized)
            pre-activations.
        scope: variable-scope name owning this layer's 'filter' and 'b'.
        keep_prob: dropout keep probability; None disables dropout.
        is_bn_training: bool (or bool tensor) forwarded to batch_norm as
            is_training; None skips batch norm entirely.

    Returns:
        4-D tensor of convolved activations ('SAME' padding).
    """
    with tf.variable_scope(scope):
        # Local renamed from `filter` (shadowed the builtin) to `kernel`, for
        # consistency with transposed_conv2d. The variable name string
        # 'filter' is unchanged, so existing checkpoints still load.
        kernel = tf.get_variable('filter', kernel_shape,
                                 initializer=tf.truncated_normal_initializer(stddev=0.01))
        b = tf.get_variable('b', [kernel_shape[3]], initializer=tf.constant_initializer(0))
        conv = tf.nn.conv2d(input, kernel, strides=strides, padding='SAME')
        pre_activations = conv + b
        # Identity check, not `== None`: if is_bn_training is a TF tensor,
        # `== None` builds a graph comparison op instead of a Python bool.
        if is_bn_training is None:
            bn = pre_activations
        else:
            # updates_collections=None forces moving-average updates in place.
            bn = batch_norm(pre_activations, updates_collections=None,
                            is_training=is_bn_training)
        h = activation_fn(bn)
        if keep_prob is None:
            h_drop = h
        else:
            h_drop = tf.nn.dropout(h, keep_prob)
        return h_drop