from tensorlayerx.nn import Module
import tensorlayerx as tlx
- from tensorlayerx.nn import Conv2d, BatchNorm2d,Elementwise,SubpixelConv2d, UpSampling2d, Flatten, Sequential
+ from tensorlayerx.nn import Conv2d, BatchNorm2d, Elementwise, SubpixelConv2d, UpSampling2d, Flatten, Sequential
from tensorlayerx.nn import Linear, MaxPool2d

W_init = tlx.initializers.TruncatedNormal(stddev=0.02)
@@ -11,10 +11,16 @@ class ResidualBlock(Module):
    def __init__(self):
        super(ResidualBlock, self).__init__()
-         self.conv1 = Conv2d(out_channels=64, kernel_size=(3,3), stride=(1,1), act=None, padding='SAME', W_init=W_init, b_init=None)
-         self.bn1 = BatchNorm2d(num_features=64, act=tlx.ReLU, gamma_init=G_init)
-         self.conv2 = Conv2d(out_channels=64, kernel_size=(3,3), stride=(1,1), act=None, padding='SAME', W_init=W_init, b_init=None)
-         self.bn2 = BatchNorm2d(num_features=64, act=None, gamma_init=G_init)
+         self.conv1 = Conv2d(
+             out_channels=64, kernel_size=(3, 3), stride=(1, 1), act=None, padding='SAME', W_init=W_init,
+             data_format='channels_first', b_init=None
+         )
+         self.bn1 = BatchNorm2d(num_features=64, act=tlx.ReLU, gamma_init=G_init, data_format='channels_first')
+         self.conv2 = Conv2d(
+             out_channels=64, kernel_size=(3, 3), stride=(1, 1), act=None, padding='SAME', W_init=W_init,
+             data_format='channels_first', b_init=None
+         )
+         self.bn2 = BatchNorm2d(num_features=64, act=None, gamma_init=G_init, data_format='channels_first')

    def forward(self, x):
        z = self.conv1(x)
@@ -24,21 +30,30 @@ def forward(self, x):
        x = x + z
        return x

+
class SRGAN_g(Module):
    """ Generator in Photo-Realistic Single Image Super-Resolution Using a Generative Adversarial Network
    feature maps (n) and stride (s) feature maps (n) and stride (s)
    """
+
    def __init__(self):
-         super(SRGAN_g,self).__init__()
-         self.conv1 = Conv2d(out_channels=64, kernel_size=(3,3), stride=(1,1), act=tlx.ReLU, padding='SAME', W_init=W_init)
+         super(SRGAN_g, self).__init__()
+         self.conv1 = Conv2d(
+             out_channels=64, kernel_size=(3, 3), stride=(1, 1), act=tlx.ReLU, padding='SAME', W_init=W_init,
+             data_format='channels_first'
+         )
        self.residual_block = self.make_layer()
-         self.conv2 = Conv2d(out_channels=64, kernel_size=(3,3), stride=(1,1),padding='SAME', W_init=W_init, b_init=None)
-         self.bn1 = BatchNorm2d(num_features=64, act=None, gamma_init=G_init)
-         self.conv3 = Conv2d(out_channels=256, kernel_size=(3,3), stride=(1,1),padding='SAME', W_init=W_init)
-         self.subpiexlconv1 = SubpixelConv2d(scale=2, act=tlx.ReLU)
-         self.conv4 = Conv2d(out_channels=256, kernel_size=(3,3), stride=(1,1), padding='SAME', W_init=W_init)
-         self.subpiexlconv2 = SubpixelConv2d(scale=2, act=tlx.ReLU)
-         self.conv5 = Conv2d(3, kernel_size=(1,1), stride=(1,1), act=tlx.Tanh, padding='SAME', W_init=W_init)
+         self.conv2 = Conv2d(
+             out_channels=64, kernel_size=(3, 3), stride=(1, 1), padding='SAME', W_init=W_init,
+             data_format='channels_first', b_init=None
+         )
+         self.bn1 = BatchNorm2d(num_features=64, act=None, gamma_init=G_init, data_format='channels_first')
+         self.conv3 = Conv2d(out_channels=256, kernel_size=(3, 3), stride=(1, 1), padding='SAME', W_init=W_init, data_format='channels_first')
+         self.subpiexlconv1 = SubpixelConv2d(data_format='channels_first', scale=2, act=tlx.ReLU)
+         self.conv4 = Conv2d(out_channels=256, kernel_size=(3, 3), stride=(1, 1), padding='SAME', W_init=W_init, data_format='channels_first')
+         self.subpiexlconv2 = SubpixelConv2d(data_format='channels_first', scale=2, act=tlx.ReLU)
+         self.conv5 = Conv2d(3, kernel_size=(1, 1), stride=(1, 1), act=tlx.Tanh, padding='SAME', W_init=W_init, data_format='channels_first')
+
    def make_layer(self):
        layer_list = []
        for i in range(16):
@@ -61,7 +76,6 @@ def forward(self, x):
        return x


-
class SRGAN_g2(Module):
    """ Generator in Photo-Realistic Single Image Super-Resolution Using a Generative Adversarial Network
    feature maps (n) and stride (s) feature maps (n) and stride (s)
@@ -70,22 +84,34 @@ class SRGAN_g2(Module):

    Use Resize Conv
    """
+
    def __init__(self):
-         super(SRGAN_g2,self).__init__()
-         self.conv1 = Conv2d(out_channels=64, kernel_size=(3,3), stride=(1,1), act=None, padding='SAME', W_init=W_init)
+         super(SRGAN_g2, self).__init__()
+         self.conv1 = Conv2d(
+             out_channels=64, kernel_size=(3, 3), stride=(1, 1), act=None, padding='SAME', W_init=W_init,
+             data_format='channels_first'
+         )
        self.residual_block = self.make_layer()
-         self.conv2 = Conv2d(out_channels=64, kernel_size=(3, 3), stride=(1, 1), padding='SAME', W_init=W_init,
-                             b_init=None)
-         self.bn1 = BatchNorm2d(act=None, gamma_init=G_init)
-         self.upsample1 = UpSampling2d(scale=(2,2), method='bilinear')
-         self.conv3 = Conv2d(out_channels=64, kernel_size=(3, 3), stride=(1, 1), padding='SAME', W_init=W_init,
-                             b_init=None)
-         self.bn2 = BatchNorm2d(act=tlx.ReLU, gamma_init=G_init)
-         self.upsample2 = UpSampling2d(scale=(4,4),method='bilinear')
-         self.conv4 = Conv2d(out_channels=32, kernel_size=(3, 3), stride=(1, 1), padding='SAME', W_init=W_init,
-                             b_init=None)
-         self.bn3 = BatchNorm2d(act=tlx.ReLU, gamma_init=G_init)
-         self.conv5 = Conv2d(out_channels=3, kernel_size=(1,1), stride=(1,1), act=tlx.Tanh, padding='SAME', W_init=W_init)
+         self.conv2 = Conv2d(
+             out_channels=64, kernel_size=(3, 3), stride=(1, 1), padding='SAME', W_init=W_init,
+             data_format='channels_first', b_init=None
+         )
+         self.bn1 = BatchNorm2d(act=None, gamma_init=G_init, data_format='channels_first')
+         self.upsample1 = UpSampling2d(data_format='channels_first', scale=(2, 2), method='bilinear')
+         self.conv3 = Conv2d(
+             out_channels=64, kernel_size=(3, 3), stride=(1, 1), padding='SAME', W_init=W_init,
+             data_format='channels_first', b_init=None
+         )
+         self.bn2 = BatchNorm2d(act=tlx.ReLU, gamma_init=G_init, data_format='channels_first')
+         self.upsample2 = UpSampling2d(data_format='channels_first', scale=(4, 4), method='bilinear')
+         self.conv4 = Conv2d(
+             out_channels=32, kernel_size=(3, 3), stride=(1, 1), padding='SAME', W_init=W_init,
+             data_format='channels_first', b_init=None
+         )
+         self.bn3 = BatchNorm2d(act=tlx.ReLU, gamma_init=G_init, data_format='channels_first')
+         self.conv5 = Conv2d(
+             out_channels=3, kernel_size=(1, 1), stride=(1, 1), act=tlx.Tanh, padding='SAME', W_init=W_init
+         )

    def make_layer(self):
        layer_list = []
@@ -109,27 +135,53 @@ def forward(self, x):
        x = self.conv5(x)
        return x

+
class SRGAN_d2(Module):
    """ Discriminator in Photo-Realistic Single Image Super-Resolution Using a Generative Adversarial Network
    feature maps (n) and stride (s) feature maps (n) and stride (s)
    """
+
    def __init__(self, ):
        super(SRGAN_d2, self).__init__()
-         self.conv1 = Conv2d(out_channels=64, kernel_size=(3,3), stride=(1,1), act=tlx.LeakyReLU(negative_slope=0.2), padding='SAME', W_init=W_init)
-         self.conv2 = Conv2d(out_channels=64, kernel_size=(3,3), stride=(2,2), act=tlx.LeakyReLU(negative_slope=0.2), padding='SAME', W_init=W_init, b_init=None)
-         self.bn1 = BatchNorm2d(gamma_init=G_init)
-         self.conv3 = Conv2d(out_channels=128, kernel_size=(3,3), stride=(1,1), act=tlx.LeakyReLU(negative_slope=0.2), padding='SAME', W_init=W_init, b_init=None)
-         self.bn2 = BatchNorm2d(gamma_init=G_init)
-         self.conv4 = Conv2d(out_channels=128, kernel_size=(3,3), stride=(2,2), act=tlx.LeakyReLU(negative_slope=0.2), padding='SAME', W_init=W_init, b_init=None)
-         self.bn3 = BatchNorm2d(gamma_init=G_init)
-         self.conv5 = Conv2d(out_channels=256, kernel_size=(3,3), stride=(1,1), act=tlx.LeakyReLU(negative_slope=0.2), padding='SAME', W_init=W_init, b_init=None)
-         self.bn4 = BatchNorm2d(gamma_init=G_init)
-         self.conv6 = Conv2d(out_channels=256, kernel_size=(3,3), stride=(2,2), act=tlx.LeakyReLU(negative_slope=0.2), padding='SAME', W_init=W_init, b_init=None)
-         self.bn5 = BatchNorm2d(gamma_init=G_init)
-         self.conv7 = Conv2d(out_channels=512, kernel_size=(3,3), stride=(1,1), act=tlx.LeakyReLU(negative_slope=0.2), padding='SAME', W_init=W_init, b_init=None)
-         self.bn6 = BatchNorm2d(gamma_init=G_init)
-         self.conv8 = Conv2d(out_channels=512, kernel_size=(3,3), stride=(2,2), act=tlx.LeakyReLU(negative_slope=0.2), padding='SAME', W_init=W_init, b_init=None)
-         self.bn7 = BatchNorm2d(gamma_init=G_init)
+         self.conv1 = Conv2d(
+             out_channels=64, kernel_size=(3, 3), stride=(1, 1), act=tlx.LeakyReLU(negative_slope=0.2), padding='SAME',
+             W_init=W_init, data_format='channels_first'
+         )
+         self.conv2 = Conv2d(
+             out_channels=64, kernel_size=(3, 3), stride=(2, 2), act=tlx.LeakyReLU(negative_slope=0.2), padding='SAME',
+             W_init=W_init, data_format='channels_first', b_init=None
+         )
+         self.bn1 = BatchNorm2d(gamma_init=G_init, data_format='channels_first')
+         self.conv3 = Conv2d(
+             out_channels=128, kernel_size=(3, 3), stride=(1, 1), act=tlx.LeakyReLU(negative_slope=0.2), padding='SAME',
+             W_init=W_init, data_format='channels_first', b_init=None
+         )
+         self.bn2 = BatchNorm2d(gamma_init=G_init, data_format='channels_first')
+         self.conv4 = Conv2d(
+             out_channels=128, kernel_size=(3, 3), stride=(2, 2), act=tlx.LeakyReLU(negative_slope=0.2), padding='SAME',
+             W_init=W_init, data_format='channels_first', b_init=None
+         )
+         self.bn3 = BatchNorm2d(gamma_init=G_init, data_format='channels_first')
+         self.conv5 = Conv2d(
+             out_channels=256, kernel_size=(3, 3), stride=(1, 1), act=tlx.LeakyReLU(negative_slope=0.2), padding='SAME',
+             W_init=W_init, data_format='channels_first', b_init=None
+         )
+         self.bn4 = BatchNorm2d(gamma_init=G_init, data_format='channels_first')
+         self.conv6 = Conv2d(
+             out_channels=256, kernel_size=(3, 3), stride=(2, 2), act=tlx.LeakyReLU(negative_slope=0.2), padding='SAME',
+             W_init=W_init, data_format='channels_first', b_init=None
+         )
+         self.bn5 = BatchNorm2d(gamma_init=G_init, data_format='channels_first')
+         self.conv7 = Conv2d(
+             out_channels=512, kernel_size=(3, 3), stride=(1, 1), act=tlx.LeakyReLU(negative_slope=0.2), padding='SAME',
+             W_init=W_init, data_format='channels_first', b_init=None
+         )
+         self.bn6 = BatchNorm2d(gamma_init=G_init, data_format='channels_first')
+         self.conv8 = Conv2d(
+             out_channels=512, kernel_size=(3, 3), stride=(2, 2), act=tlx.LeakyReLU(negative_slope=0.2), padding='SAME',
+             W_init=W_init, data_format='channels_first', b_init=None
+         )
+         self.bn7 = BatchNorm2d(gamma_init=G_init, data_format='channels_first')
        self.flat = Flatten()
        self.dense1 = Linear(out_features=1024, act=tlx.LeakyReLU(negative_slope=0.2))
        self.dense2 = Linear(out_features=1)
@@ -158,43 +210,67 @@ def forward(self, x):
        return n, logits


-
-
class SRGAN_d(Module):

-     def __init__(self, dim=64):
-         super(SRGAN_d,self).__init__()
-         self.conv1 = Conv2d(out_channels=dim, kernel_size=(4,4), stride=(2,2), act=tlx.LeakyReLU, padding='SAME', W_init=W_init)
-         self.conv2 = Conv2d(out_channels=dim * 2, kernel_size=(4,4), stride=(2,2), act=None, padding='SAME', W_init=W_init, b_init=None)
-         self.bn1 = BatchNorm2d(num_features=dim * 2, act=tlx.LeakyReLU, gamma_init=G_init)
-         self.conv3 = Conv2d(out_channels=dim * 4, kernel_size=(4,4), stride=(2,2), act=None, padding='SAME', W_init=W_init, b_init=None)
-         self.bn2 = BatchNorm2d(num_features=dim * 4, act=tlx.LeakyReLU, gamma_init=G_init)
-         self.conv4 = Conv2d(out_channels=dim * 8, kernel_size=(4, 4), stride=(2, 2), act=None, padding='SAME', W_init=W_init, b_init=None)
-         self.bn3 = BatchNorm2d(num_features=dim * 8, act=tlx.LeakyReLU, gamma_init=G_init)
-         self.conv5 = Conv2d(out_channels=dim * 16, kernel_size=(4, 4), stride=(2, 2), act=None, padding='SAME',
-                             W_init=W_init, b_init=None)
-         self.bn4 = BatchNorm2d(num_features=dim * 16, act=tlx.LeakyReLU, gamma_init=G_init)
-         self.conv6 = Conv2d(out_channels=dim * 32, kernel_size=(4, 4), stride=(2, 2), act=None, padding='SAME',
-                             W_init=W_init, b_init=None)
-         self.bn5 = BatchNorm2d(num_features=dim * 32, act=tlx.LeakyReLU, gamma_init=G_init)
-         self.conv7 = Conv2d(out_channels=dim * 16, kernel_size=(1, 1), stride=(1, 1), act=None, padding='SAME',
-                             W_init=W_init, b_init=None)
-         self.bn6 = BatchNorm2d(num_features=dim * 16, act=tlx.LeakyReLU, gamma_init=G_init)
-         self.conv8 = Conv2d(out_channels=dim * 8, kernel_size=(1, 1), stride=(1, 1), act=None, padding='SAME',
-                             W_init=W_init, b_init=None)
-         self.bn7 = BatchNorm2d(num_features=dim * 8, act=None, gamma_init=G_init)
-         self.conv9 = Conv2d(out_channels=dim * 2, kernel_size=(1, 1), stride=(1, 1), act=None, padding='SAME',
-                             W_init=W_init, b_init=None)
-         self.bn8 = BatchNorm2d(num_features=dim * 2, act=tlx.LeakyReLU, gamma_init=G_init)
-         self.conv10 = Conv2d(out_channels=dim * 2, kernel_size=(3, 3), stride=(1, 1), act=None, padding='SAME',
-                              W_init=W_init, b_init=None)
-         self.bn9 = BatchNorm2d(num_features=dim * 2, act=tlx.LeakyReLU, gamma_init=G_init)
-         self.conv11 = Conv2d(out_channels=dim * 8, kernel_size=(3, 3), stride=(1, 1), act=None, padding='SAME',
-                              W_init=W_init, b_init=None)
-         self.bn10 = BatchNorm2d(num_features=dim * 8, gamma_init=G_init)
+     def __init__(self, dim=64):
+         super(SRGAN_d, self).__init__()
+         self.conv1 = Conv2d(
+             out_channels=dim, kernel_size=(4, 4), stride=(2, 2), act=tlx.LeakyReLU, padding='SAME', W_init=W_init,
+             data_format='channels_first'
+         )
+         self.conv2 = Conv2d(
+             out_channels=dim * 2, kernel_size=(4, 4), stride=(2, 2), act=None, padding='SAME', W_init=W_init,
+             data_format='channels_first', b_init=None
+         )
+         self.bn1 = BatchNorm2d(num_features=dim * 2, act=tlx.LeakyReLU, gamma_init=G_init, data_format='channels_first')
+         self.conv3 = Conv2d(
+             out_channels=dim * 4, kernel_size=(4, 4), stride=(2, 2), act=None, padding='SAME', W_init=W_init,
+             data_format='channels_first', b_init=None
+         )
+         self.bn2 = BatchNorm2d(num_features=dim * 4, act=tlx.LeakyReLU, gamma_init=G_init, data_format='channels_first')
+         self.conv4 = Conv2d(
+             out_channels=dim * 8, kernel_size=(4, 4), stride=(2, 2), act=None, padding='SAME', W_init=W_init,
+             data_format='channels_first', b_init=None
+         )
+         self.bn3 = BatchNorm2d(num_features=dim * 8, act=tlx.LeakyReLU, gamma_init=G_init, data_format='channels_first')
+         self.conv5 = Conv2d(
+             out_channels=dim * 16, kernel_size=(4, 4), stride=(2, 2), act=None, padding='SAME', W_init=W_init,
+             data_format='channels_first', b_init=None
+         )
+         self.bn4 = BatchNorm2d(num_features=dim * 16, act=tlx.LeakyReLU, gamma_init=G_init, data_format='channels_first')
+         self.conv6 = Conv2d(
+             out_channels=dim * 32, kernel_size=(4, 4), stride=(2, 2), act=None, padding='SAME', W_init=W_init,
+             data_format='channels_first', b_init=None
+         )
+         self.bn5 = BatchNorm2d(num_features=dim * 32, act=tlx.LeakyReLU, gamma_init=G_init, data_format='channels_first')
+         self.conv7 = Conv2d(
+             out_channels=dim * 16, kernel_size=(1, 1), stride=(1, 1), act=None, padding='SAME', W_init=W_init,
+             data_format='channels_first', b_init=None
+         )
+         self.bn6 = BatchNorm2d(num_features=dim * 16, act=tlx.LeakyReLU, gamma_init=G_init, data_format='channels_first')
+         self.conv8 = Conv2d(
+             out_channels=dim * 8, kernel_size=(1, 1), stride=(1, 1), act=None, padding='SAME', W_init=W_init,
+             data_format='channels_first', b_init=None
+         )
+         self.bn7 = BatchNorm2d(num_features=dim * 8, act=None, gamma_init=G_init, data_format='channels_first')
+         self.conv9 = Conv2d(
+             out_channels=dim * 2, kernel_size=(1, 1), stride=(1, 1), act=None, padding='SAME', W_init=W_init,
+             data_format='channels_first', b_init=None
+         )
+         self.bn8 = BatchNorm2d(num_features=dim * 2, act=tlx.LeakyReLU, gamma_init=G_init, data_format='channels_first')
+         self.conv10 = Conv2d(
+             out_channels=dim * 2, kernel_size=(3, 3), stride=(1, 1), act=None, padding='SAME', W_init=W_init,
+             data_format='channels_first', b_init=None
+         )
+         self.bn9 = BatchNorm2d(num_features=dim * 2, act=tlx.LeakyReLU, gamma_init=G_init, data_format='channels_first')
+         self.conv11 = Conv2d(
+             out_channels=dim * 8, kernel_size=(3, 3), stride=(1, 1), act=None, padding='SAME', W_init=W_init,
+             data_format='channels_first', b_init=None
+         )
+         self.bn10 = BatchNorm2d(num_features=dim * 8, gamma_init=G_init, data_format='channels_first')
        self.add = Elementwise(combine_fn=tlx.add, act=tlx.LeakyReLU)
        self.flat = Flatten()
-         self.dense = Linear(out_features=1, W_init=W_init)
+         self.dense = Linear(out_features=1, W_init=W_init)

    def forward(self, x):
@@ -227,37 +303,36 @@ def forward(self, x):
        return x


-
class Vgg19_simple_api(Module):

    def __init__(self):
-         super(Vgg19_simple_api,self).__init__()
+         super(Vgg19_simple_api, self).__init__()
        """ conv1 """
        self.conv1 = Conv2d(out_channels=64, kernel_size=(3, 3), stride=(1, 1), act=tlx.ReLU, padding='SAME')
        self.conv2 = Conv2d(out_channels=64, kernel_size=(3, 3), stride=(1, 1), act=tlx.ReLU, padding='SAME')
-         self.maxpool1 = MaxPool2d(kernel_size=(2,2), stride=(2,2), padding='SAME')
+         self.maxpool1 = MaxPool2d(kernel_size=(2, 2), stride=(2, 2), padding='SAME')
        """ conv2 """
        self.conv3 = Conv2d(out_channels=128, kernel_size=(3, 3), stride=(1, 1), act=tlx.ReLU, padding='SAME')
        self.conv4 = Conv2d(out_channels=128, kernel_size=(3, 3), stride=(1, 1), act=tlx.ReLU, padding='SAME')
-         self.maxpool2 = MaxPool2d(kernel_size=(2,2), stride=(2,2), padding='SAME')
+         self.maxpool2 = MaxPool2d(kernel_size=(2, 2), stride=(2, 2), padding='SAME')
        """ conv3 """
        self.conv5 = Conv2d(out_channels=256, kernel_size=(3, 3), stride=(1, 1), act=tlx.ReLU, padding='SAME')
        self.conv6 = Conv2d(out_channels=256, kernel_size=(3, 3), stride=(1, 1), act=tlx.ReLU, padding='SAME')
        self.conv7 = Conv2d(out_channels=256, kernel_size=(3, 3), stride=(1, 1), act=tlx.ReLU, padding='SAME')
        self.conv8 = Conv2d(out_channels=256, kernel_size=(3, 3), stride=(1, 1), act=tlx.ReLU, padding='SAME')
-         self.maxpool3 = MaxPool2d(kernel_size=(2,2), stride=(2,2), padding='SAME')
+         self.maxpool3 = MaxPool2d(kernel_size=(2, 2), stride=(2, 2), padding='SAME')
        """ conv4 """
        self.conv9 = Conv2d(out_channels=512, kernel_size=(3, 3), stride=(1, 1), act=tlx.ReLU, padding='SAME')
        self.conv10 = Conv2d(out_channels=512, kernel_size=(3, 3), stride=(1, 1), act=tlx.ReLU, padding='SAME')
        self.conv11 = Conv2d(out_channels=512, kernel_size=(3, 3), stride=(1, 1), act=tlx.ReLU, padding='SAME')
        self.conv12 = Conv2d(out_channels=512, kernel_size=(3, 3), stride=(1, 1), act=tlx.ReLU, padding='SAME')
-         self.maxpool4 = MaxPool2d(kernel_size=(2,2), stride=(2,2), padding='SAME')  # (batch_size, 14, 14, 512)
+         self.maxpool4 = MaxPool2d(kernel_size=(2, 2), stride=(2, 2), padding='SAME')  # (batch_size, 14, 14, 512)
        """ conv5 """
        self.conv13 = Conv2d(out_channels=512, kernel_size=(3, 3), stride=(1, 1), act=tlx.ReLU, padding='SAME')
        self.conv14 = Conv2d(out_channels=512, kernel_size=(3, 3), stride=(1, 1), act=tlx.ReLU, padding='SAME')
        self.conv15 = Conv2d(out_channels=512, kernel_size=(3, 3), stride=(1, 1), act=tlx.ReLU, padding='SAME')
        self.conv16 = Conv2d(out_channels=512, kernel_size=(3, 3), stride=(1, 1), act=tlx.ReLU, padding='SAME')
-         self.maxpool5 = MaxPool2d(kernel_size=(2,2), stride=(2,2), padding='SAME')  # (batch_size, 7, 7, 512)
+         self.maxpool5 = MaxPool2d(kernel_size=(2, 2), stride=(2, 2), padding='SAME')  # (batch_size, 7, 7, 512)
        """ fc 6~8 """
        self.flat = Flatten()
        self.dense1 = Linear(out_features=4096, act=tlx.ReLU)
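
A minimal usage sketch, not part of the commit above: with data_format='channels_first' the generator now expects NCHW tensors. The snippet assumes a configured TensorLayerX backend and that the SRGAN_g class defined in this file is importable; the input shape is only illustrative.

# Hypothetical usage sketch -- assumptions: a TensorLayerX backend is selected
# (e.g. via the TL_BACKEND environment variable) and SRGAN_g above is importable.
import numpy as np
import tensorlayerx as tlx

G = SRGAN_g()
G.set_eval()  # inference mode, so BatchNorm2d uses its moving statistics
# channels_first input: (batch, channels, height, width)
lr = tlx.convert_to_tensor(np.random.uniform(-1.0, 1.0, (1, 3, 96, 96)).astype(np.float32))
sr = G(lr)  # two 2x subpixel stages, so roughly (1, 3, 384, 384)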