# attention_unet3d.py
import torch
import torch.nn as nn

class ConvBlock(nn.Module):
    """Two 3x3x3 convolutions, each followed by batch norm and ReLU."""

    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.conv = nn.Sequential(
            nn.Conv3d(in_channels, out_channels, kernel_size=3, padding=1),
            nn.BatchNorm3d(out_channels),
            nn.ReLU(inplace=True),
            nn.Conv3d(out_channels, out_channels, kernel_size=3, padding=1),
            nn.BatchNorm3d(out_channels),
            nn.ReLU(inplace=True),
        )

    def forward(self, x):
        return self.conv(x)

class EncoderBlock(nn.Module):
    """ConvBlock followed by 2x2x2 max pooling; returns both the pre-pooling
    feature map (for the skip connection) and the pooled map."""

    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.conv = ConvBlock(in_channels, out_channels)
        self.pool = nn.MaxPool3d((2, 2, 2))

    def forward(self, x):
        s = self.conv(x)   # skip-connection features
        p = self.pool(s)   # downsampled input for the next stage
        return s, p

class AttentionGate(nn.Module):
    """Additive attention gate in the style of Attention U-Net (Oktay et al.).

    `in_channels` is a pair: [gating-signal channels, skip-connection
    channels]. Both inputs are projected to `out_channels`, summed, and
    turned into a sigmoid attention map that reweights the skip features.
    """

    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.Wg = nn.Sequential(
            nn.Conv3d(in_channels[0], out_channels, kernel_size=1, padding=0),
            nn.BatchNorm3d(out_channels),
        )
        self.Ws = nn.Sequential(
            nn.Conv3d(in_channels[1], out_channels, kernel_size=1, padding=0),
            nn.BatchNorm3d(out_channels),
        )
        self.relu = nn.ReLU(inplace=True)
        self.output = nn.Sequential(
            nn.Conv3d(out_channels, out_channels, kernel_size=1, padding=0),
            nn.Sigmoid(),
        )

    def forward(self, g, s):
        Wg = self.Wg(g)           # project the gating signal
        Ws = self.Ws(s)           # project the skip features
        out = self.relu(Wg + Ws)  # additive attention
        out = self.output(out)    # per-voxel coefficients in (0, 1)
        return out * s            # reweight the skip connection

class DecoderBlock(nn.Module):
    """Upsample, gate the skip connection, concatenate, then convolve.

    `in_channels` is a pair: [channels of the upsampled input, channels of
    the skip connection].
    """

    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.up = nn.Upsample(scale_factor=2, mode="trilinear", align_corners=True)
        self.ag = AttentionGate(in_channels, out_channels)
        self.c1 = ConvBlock(in_channels[0] + out_channels, out_channels)

    def forward(self, x, s):
        x = self.up(x)      # double the spatial resolution
        s = self.ag(x, s)   # attention-weighted skip features
        x = torch.cat([x, s], dim=1)
        x = self.c1(x)
        return x

class AttentionUNet3D(nn.Module):
    """3D U-Net with attention-gated skip connections.

    Three encoder stages (64 -> 128 -> 256 channels), a 512-channel
    bottleneck, and three symmetric decoder stages.
    """

    def __init__(self, image_channels, num_classes):
        super().__init__()
        self.e1 = EncoderBlock(image_channels, 64)
        self.e2 = EncoderBlock(64, 128)
        self.e3 = EncoderBlock(128, 256)
        self.b1 = ConvBlock(256, 512)  # bottleneck
        self.d1 = DecoderBlock([512, 256], 256)
        self.d2 = DecoderBlock([256, 128], 128)
        self.d3 = DecoderBlock([128, 64], 64)
        self.output = nn.Conv3d(64, num_classes, kernel_size=1, padding=0)

    def forward(self, x):
        s1, p1 = self.e1(x)
        s2, p2 = self.e2(p1)
        s3, p3 = self.e3(p2)
        b1 = self.b1(p3)
        d1 = self.d1(b1, s3)
        d2 = self.d2(d1, s2)
        d3 = self.d3(d2, s1)
        return self.output(d3)  # raw logits; apply softmax/sigmoid downstream
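
# A minimal smoke test (not part of the original file): it instantiates the
# network and runs a forward pass on a random volume. The single input
# channel, two classes, and 64^3 volume are illustrative assumptions; the
# only real constraint is that each spatial dimension be divisible by 8,
# since the three pooling stages each halve it.
if __name__ == "__main__":
    model = AttentionUNet3D(image_channels=1, num_classes=2)
    x = torch.randn(1, 1, 64, 64, 64)  # (batch, channels, depth, height, width)
    with torch.no_grad():
        y = model(x)
    print(y.shape)  # torch.Size([1, 2, 64, 64, 64])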