Skip to content

Commit 46fe73a

Browse files
Merge branch 'main' into fast_tensor_accesor
2 parents 9e6ba01 + 458b77e commit 46fe73a

File tree

159 files changed

+5123
-6367
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

159 files changed

+5123
-6367
lines changed

RELEASENOTES.md

+26
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,32 @@
22

33
Releases, starting with 9/2/2021, are listed with the most recent release at the top.
44

5+
# NuGet Version 0.104.0
6+
7+
This is a big change in implementation, but not as big in API surface area. Many of the built-in modules, but not all, were re-implemented in managed code calling into native code via the functional APIs. This has several advantages:
8+
9+
1. Align with the PyTorch implementations.<br/>
10+
2. More easily expose module attributes as properties as PyTorch does.<br/>
11+
3. In some cases, avoid native code altogether.<br/>
12+
4. The built-in modules can serve as "best practice" examples for custom module authors.<br/>
13+
14+
__Breaking Changes__:
15+
16+
The names of several arguments have been changed to align better with PyTorch naming. This may break code that passes such arguments by name, but will be caught at compile time.
17+
18+
The argument defaults for `torch.diagonal()` and `Tensor.diagonal()` arguments have been corrected.
19+
20+
__Issues fixed__:
21+
22+
#1397 Look into whether parameter creation from a tensor leads to incorrect dispose scope statistics. This bug was discovered during testing of the PR.<br/>
23+
#1210 Attribute omissions.<br/>
24+
#1400 There may be an error in torchvision.transforms.GaussianBlur<br/>
25+
#1402 diagonal() has incorrect default<br/>
26+
27+
__API Changes__:
28+
29+
#1382: Add support for torch.nn.functional.normalize<br/>
30+
531
# NuGet Version 0.103.1
632

733
__Breaking Changes__:

TorchSharp.sln

+9-10
Original file line numberDiff line numberDiff line change
@@ -34,9 +34,9 @@ Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "TorchSharp", "TorchSharp",
3434
pkg\TorchSharp\TorchSharp.symbols.nupkgproj = pkg\TorchSharp\TorchSharp.symbols.nupkgproj
3535
EndProjectSection
3636
EndProject
37-
Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "LibTorchSharp", "bin\obj\x64.Debug\Native\LibTorchSharp\LibTorchSharp.vcxproj", "{2B359162-062E-3C52-91D3-027A8542A58C}"
37+
Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "LibTorchSharp", "bin\obj\x64.Debug\Native\LibTorchSharp\LibTorchSharp.vcxproj", "{CAD9DB7F-3223-3324-884D-FA2381593DA7}"
3838
EndProject
39-
Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "LibTorchSharp", "bin\obj\x64.Release\Native\LibTorchSharp\LibTorchSharp.vcxproj", "{E4C0DBEE-0815-311B-9065-137BB50BD793}"
39+
Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "LibTorchSharp", "bin\obj\x64.Release\Native\LibTorchSharp\LibTorchSharp.vcxproj", "{BB811429-0DF1-3D22-B664-09C2F5A9E0AB}"
4040
EndProject
4141
Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Native-Debug", "Native-Debug", "{CF2C1A9E-3A8A-4329-8A6E-7880C15AAC3D}"
4242
ProjectSection(SolutionItems) = preProject
@@ -66,9 +66,9 @@ Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Solution Items", "Solution
6666
azure-pipelines.yml = azure-pipelines.yml
6767
build\BranchInfo.props = build\BranchInfo.props
6868
DEVGUIDE.md = DEVGUIDE.md
69+
global.json = global.json
6970
README.md = README.md
7071
RELEASENOTES.md = RELEASENOTES.md
71-
global.json = global.json
7272
EndProjectSection
7373
EndProject
7474
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "TorchVision", "src\TorchVision\TorchVision.csproj", "{DCF01EE5-6431-4115-85E0-1FC4C3DE86A2}"
@@ -107,10 +107,10 @@ Global
107107
{42B45168-476D-4BFA-87B8-81A34E6295CD}.Release|Any CPU.Build.0 = Release|Any CPU
108108
{42B45168-476D-4BFA-87B8-81A34E6295CD}.Release|x64.ActiveCfg = Release|Any CPU
109109
{42B45168-476D-4BFA-87B8-81A34E6295CD}.Release|x64.Build.0 = Release|Any CPU
110-
{2B359162-062E-3C52-91D3-027A8542A58C}.Debug|Any CPU.ActiveCfg = Debug|x64
111-
{2B359162-062E-3C52-91D3-027A8542A58C}.Debug|x64.ActiveCfg = Debug|x64
112-
{2B359162-062E-3C52-91D3-027A8542A58C}.Release|Any CPU.ActiveCfg = Release|x64
113-
{2B359162-062E-3C52-91D3-027A8542A58C}.Release|x64.ActiveCfg = Release|x64
110+
{CAD9DB7F-3223-3324-884D-FA2381593DA7}.Debug|Any CPU.ActiveCfg = Debug|x64
111+
{CAD9DB7F-3223-3324-884D-FA2381593DA7}.Debug|x64.ActiveCfg = Debug|x64
112+
{CAD9DB7F-3223-3324-884D-FA2381593DA7}.Release|Any CPU.ActiveCfg = Release|x64
113+
{CAD9DB7F-3223-3324-884D-FA2381593DA7}.Release|x64.ActiveCfg = Release|x64
114114
{E4C0DBEE-0815-311B-9065-137BB50BD793}.Debug|Any CPU.ActiveCfg = Debug|x64
115115
{E4C0DBEE-0815-311B-9065-137BB50BD793}.Debug|x64.ActiveCfg = Debug|x64
116116
{E4C0DBEE-0815-311B-9065-137BB50BD793}.Release|Any CPU.ActiveCfg = Release|x64
@@ -148,7 +148,6 @@ Global
148148
{95493944-D1AE-414E-964B-B58AEAE672E5}.Release|x64.ActiveCfg = Release|Any CPU
149149
{95493944-D1AE-414E-964B-B58AEAE672E5}.Release|x64.Build.0 = Release|Any CPU
150150
{6D3CE8AA-F369-4D2D-BDA7-9F89D6BE1B2E}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
151-
{6D3CE8AA-F369-4D2D-BDA7-9F89D6BE1B2E}.Debug|Any CPU.Build.0 = Debug|Any CPU
152151
{6D3CE8AA-F369-4D2D-BDA7-9F89D6BE1B2E}.Debug|x64.ActiveCfg = Debug|Any CPU
153152
{6D3CE8AA-F369-4D2D-BDA7-9F89D6BE1B2E}.Debug|x64.Build.0 = Debug|Any CPU
154153
{6D3CE8AA-F369-4D2D-BDA7-9F89D6BE1B2E}.Release|Any CPU.ActiveCfg = Release|Any CPU
@@ -181,8 +180,8 @@ Global
181180
{6C323B05-9028-4B09-911C-3C03AE058BEE} = {AED9C836-31E3-4F3F-8ABC-929555D3F3C4}
182181
{42B45168-476D-4BFA-87B8-81A34E6295CD} = {09EADF06-BE25-4228-AB53-95AE3E15B530}
183182
{567456AD-B026-4CB6-B98D-4FC930C90223} = {D3D38B03-B557-484D-8348-8BADEE4DF592}
184-
{2B359162-062E-3C52-91D3-027A8542A58C} = {CF2C1A9E-3A8A-4329-8A6E-7880C15AAC3D}
185-
{E4C0DBEE-0815-311B-9065-137BB50BD793} = {4DB9E84D-324C-408F-87A6-246E86205540}
183+
{CAD9DB7F-3223-3324-884D-FA2381593DA7} = {CF2C1A9E-3A8A-4329-8A6E-7880C15AAC3D}
184+
{BB811429-0DF1-3D22-B664-09C2F5A9E0AB} = {4DB9E84D-324C-408F-87A6-246E86205540}
186185
{CF2C1A9E-3A8A-4329-8A6E-7880C15AAC3D} = {09EADF06-BE25-4228-AB53-95AE3E15B530}
187186
{D8C60CD8-8429-45F2-A755-47B6CD10FDF8} = {09EADF06-BE25-4228-AB53-95AE3E15B530}
188187
{4DB9E84D-324C-408F-87A6-246E86205540} = {CF2C1A9E-3A8A-4329-8A6E-7880C15AAC3D}

azure-pipelines.yml

+2-2
Original file line numberDiff line numberDiff line change
@@ -323,7 +323,7 @@ jobs:
323323
displayName: Restore package projects
324324

325325
# Pack TorchSharp (and libtorch-cpu if BuildLibTorchPackages is true)
326-
- script: dotnet pack -c $(BuildConfig) --no-build -v:n /p:SkipNative=true /p:SkipTests=true /p:IncludeTorchSharpPackage=true /p:IncludeLibTorchCpuPackages=${{ parameters.BuildLibTorchPackages }} pkg/pack.proj
326+
- script: dotnet pack -c $(BuildConfig) --no-build -v:n /p:SkipNative=true /p:SkipTests=true /p:IncludeTorchSharpPackage=true /p:IncludeLibTorchCpuPackages=${{ parameters.BuildLibTorchPackages }} /p:GenerateCompatibilitySuppressionFile=true pkg/pack.proj
327327
displayName: Create Packages
328328

329329
- script: rmdir /q /s bin\obj
@@ -448,7 +448,7 @@ jobs:
448448
- script: dotnet restore pkg/pack.proj /p:Configuration=Release
449449
displayName: Restore package projects
450450

451-
- script: dotnet pack -c $(BuildConfig) --no-build -v:n /p:SkipNative=true /p:SkipTests=true /p:IncludeTorchSharpPackage=false /p:IncludeLibTorchCpuPackages=false /p:IncludeLibTorchCudaPackages=true pkg/pack.proj
451+
- script: dotnet pack -c $(BuildConfig) --no-build -v:n /p:SkipNative=true /p:SkipTests=true /p:ApiCompatGenerateSuppressionFile=true /p:IncludeTorchSharpPackage=false /p:IncludeLibTorchCpuPackages=false /p:IncludeLibTorchCudaPackages=true pkg/pack.proj
452452
displayName: Create Packages
453453

454454
# We are 10GB space-constrained on the Azure Pipelines CI system so clean up what we can

build/BranchInfo.props

+3-3
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,8 @@
11
<Project>
22
<PropertyGroup>
33
<MajorVersion>0</MajorVersion>
4-
<MinorVersion>103</MinorVersion>
5-
<PatchVersion>1</PatchVersion>
6-
<PreviousPackageVersion>0.103.0</PreviousPackageVersion>
4+
<MinorVersion>104</MinorVersion>
5+
<PatchVersion>0</PatchVersion>
6+
<PreviousPackageVersion>0.103.1</PreviousPackageVersion>
77
</PropertyGroup>
88
</Project>

docfx/articles/modules.md

+3-3
Original file line numberDiff line numberDiff line change
@@ -84,7 +84,7 @@ To illustrate, this is the code for MobileNet from the TorchSharp examples:
8484
var modules = new List<(string, Module)>();
8585

8686
modules.Add(("conv2d-first",
87-
Conv2d(3, 32, kernelSize: 3, stride: 1, padding: 1, bias: false)));
87+
Conv2d(3, 32, kernel_size: 3, stride: 1, padding: 1, bias: false)));
8888
modules.Add(("bnrm2d-first",
8989
BatchNorm2d(32)));
9090
modules.Add(("relu-first",
@@ -110,13 +110,13 @@ To illustrate, this is the code for MobileNet from the TorchSharp examples:
110110
var stride = strides[i];
111111

112112
modules.Add(($"conv2d-{i}a",
113-
Conv2d(in_planes, in_planes, kernelSize: 3, stride: stride, padding: 1, groups: in_planes, bias: false)));
113+
Conv2d(in_planes, in_planes, kernel_size: 3, stride: stride, padding: 1, groups: in_planes, bias: false)));
114114
modules.Add(($"bnrm2d-{i}a",
115115
BatchNorm2d(in_planes)));
116116
modules.Add(($"relu-{i}a",
117117
ReLU()));
118118
modules.Add(($"conv2d-{i}b",
119-
Conv2d(in_planes, out_planes, kernelSize: 1L, stride: 1L, padding: 0L, bias: false)));
119+
Conv2d(in_planes, out_planes, kernel_size: 1L, stride: 1L, padding: 0L, bias: false)));
120120
modules.Add(($"bnrm2d-{i}b",
121121
BatchNorm2d(out_planes)));
122122
modules.Add(($"relu-{i}b",

src/Examples/AlexNet.cs

+8-8
Original file line numberDiff line numberDiff line change
@@ -17,19 +17,19 @@ class AlexNet : Module<Tensor, Tensor>
1717
public AlexNet(string name, int numClasses, torch.Device device = null) : base(name)
1818
{
1919
features = Sequential(
20-
("c1", Conv2d(3, 64, kernelSize: 3, stride: 2, padding: 1)),
20+
("c1", Conv2d(3, 64, kernel_size: 3, stride: 2, padding: 1)),
2121
("r1", ReLU(inplace: true)),
22-
("mp1", MaxPool2d(kernelSize: new long[] { 2, 2 })),
23-
("c2", Conv2d(64, 192, kernelSize: 3, padding: 1)),
22+
("mp1", MaxPool2d(kernel_size: new long[] { 2, 2 })),
23+
("c2", Conv2d(64, 192, kernel_size: 3, padding: 1)),
2424
("r2", ReLU(inplace: true)),
25-
("mp2", MaxPool2d(kernelSize: new long[] { 2, 2 })),
26-
("c3", Conv2d(192, 384, kernelSize: 3, padding: 1)),
25+
("mp2", MaxPool2d(kernel_size: new long[] { 2, 2 })),
26+
("c3", Conv2d(192, 384, kernel_size: 3, padding: 1)),
2727
("r3", ReLU(inplace: true)),
28-
("c4", Conv2d(384, 256, kernelSize: 3, padding: 1)),
28+
("c4", Conv2d(384, 256, kernel_size: 3, padding: 1)),
2929
("r4", ReLU(inplace: true)),
30-
("c5", Conv2d(256, 256, kernelSize: 3, padding: 1)),
30+
("c5", Conv2d(256, 256, kernel_size: 3, padding: 1)),
3131
("r5", ReLU(inplace: true)),
32-
("mp3", MaxPool2d(kernelSize: new long[] { 2, 2 })));
32+
("mp3", MaxPool2d(kernel_size: new long[] { 2, 2 })));
3333

3434
avgPool = AdaptiveAvgPool2d(new long[] { 2, 2 });
3535

src/Examples/MNIST.cs

+1-1
Original file line numberDiff line numberDiff line change
@@ -105,7 +105,7 @@ internal class Model : Module<Tensor, Tensor>
105105

106106
// These don't have any parameters, so the only reason to instantiate
107107
// them is performance, since they will be used over and over.
108-
private Module<Tensor, Tensor> pool1 = MaxPool2d(kernelSize: new long[] { 2, 2 });
108+
private Module<Tensor, Tensor> pool1 = MaxPool2d(kernel_size: new long[] { 2, 2 });
109109

110110
private Module<Tensor, Tensor> relu1 = ReLU();
111111
private Module<Tensor, Tensor> relu2 = ReLU();

src/Examples/MobileNet.cs

+3-3
Original file line numberDiff line numberDiff line change
@@ -30,7 +30,7 @@ public MobileNet(string name, int numClasses, Device device = null) : base(name)
3030

3131
var modules = new List<(string, Module<Tensor, Tensor>)>();
3232

33-
modules.Add(($"conv2d-first", Conv2d(3, 32, kernelSize: 3, stride: 1, padding: 1, bias: false)));
33+
modules.Add(($"conv2d-first", Conv2d(3, 32, kernel_size: 3, stride: 1, padding: 1, bias: false)));
3434
modules.Add(($"bnrm2d-first", BatchNorm2d(32)));
3535
modules.Add(($"relu-first", ReLU()));
3636
MakeLayers(modules, 32);
@@ -53,10 +53,10 @@ private void MakeLayers(List<(string, Module<Tensor, Tensor>)> modules, long in_
5353
var out_planes = planes[i];
5454
var stride = strides[i];
5555

56-
modules.Add(($"conv2d-{i}a", Conv2d(in_planes, in_planes, kernelSize: 3, stride: stride, padding: 1, groups: in_planes, bias: false)));
56+
modules.Add(($"conv2d-{i}a", Conv2d(in_planes, in_planes, kernel_size: 3, stride: stride, padding: 1, groups: in_planes, bias: false)));
5757
modules.Add(($"bnrm2d-{i}a", BatchNorm2d(in_planes)));
5858
modules.Add(($"relu-{i}a", ReLU()));
59-
modules.Add(($"conv2d-{i}b", Conv2d(in_planes, out_planes, kernelSize: 1L, stride: 1L, padding: 0L, bias: false)));
59+
modules.Add(($"conv2d-{i}b", Conv2d(in_planes, out_planes, kernel_size: 1L, stride: 1L, padding: 0L, bias: false)));
6060
modules.Add(($"bnrm2d-{i}b", BatchNorm2d(out_planes)));
6161
modules.Add(($"relu-{i}b", ReLU()));
6262

src/Examples/ResNet.cs

+8-8
Original file line numberDiff line numberDiff line change
@@ -72,7 +72,7 @@ public ResNet(string name, Func<string, int,int,int,Module<Tensor, Tensor>> bloc
7272
{
7373
var modules = new List<(string, Module<Tensor, Tensor>)>();
7474

75-
modules.Add(($"conv2d-first", Conv2d(3, 64, kernelSize: 3, stride: 1, padding: 1, bias: false)));
75+
modules.Add(($"conv2d-first", Conv2d(3, 64, kernel_size: 3, stride: 1, padding: 1, bias: false)));
7676
modules.Add(($"bnrm2d-first", BatchNorm2d(64)));
7777
modules.Add(($"relu-first", ReLU(inplace:true)));
7878
MakeLayer(modules, block, expansion, 64, num_blocks[0], 1);
@@ -124,17 +124,17 @@ public BasicBlock (string name, int in_planes, int planes, int stride) : base(na
124124
{
125125
var modules = new List<(string, Module<Tensor, Tensor>)>();
126126

127-
modules.Add(($"{name}-conv2d-1", Conv2d(in_planes, planes, kernelSize: 3, stride: stride, padding: 1, bias: false)));
127+
modules.Add(($"{name}-conv2d-1", Conv2d(in_planes, planes, kernel_size: 3, stride: stride, padding: 1, bias: false)));
128128
modules.Add(($"{name}-bnrm2d-1", BatchNorm2d(planes)));
129129
modules.Add(($"{name}-relu-1", ReLU(inplace: true)));
130-
modules.Add(($"{name}-conv2d-2", Conv2d(planes, planes, kernelSize: 3, stride: 1, padding: 1, bias: false)));
130+
modules.Add(($"{name}-conv2d-2", Conv2d(planes, planes, kernel_size: 3, stride: 1, padding: 1, bias: false)));
131131
modules.Add(($"{name}-bnrm2d-2", BatchNorm2d(planes)));
132132

133133
layers = Sequential(modules);
134134

135135
if (stride != 1 || in_planes != expansion*planes) {
136136
shortcut = Sequential(
137-
($"{name}-conv2d-3", Conv2d(in_planes, expansion * planes, kernelSize: 1, stride: stride, bias: false)),
137+
($"{name}-conv2d-3", Conv2d(in_planes, expansion * planes, kernel_size: 1, stride: stride, bias: false)),
138138
($"{name}-bnrm2d-3", BatchNorm2d(expansion * planes)));
139139
}
140140
else {
@@ -175,20 +175,20 @@ public Bottleneck(string name, int in_planes, int planes, int stride) : base(nam
175175
{
176176
var modules = new List<(string, Module<Tensor, Tensor>)>();
177177

178-
modules.Add(($"{name}-conv2d-1", Conv2d(in_planes, planes, kernelSize: 1, bias: false)));
178+
modules.Add(($"{name}-conv2d-1", Conv2d(in_planes, planes, kernel_size: 1, bias: false)));
179179
modules.Add(($"{name}-bnrm2d-1", BatchNorm2d(planes)));
180180
modules.Add(($"{name}relu-1", ReLU(inplace:true)));
181-
modules.Add(($"{name}-conv2d-2", Conv2d(planes, planes, kernelSize: 3, stride: stride, padding: 1, bias: false)));
181+
modules.Add(($"{name}-conv2d-2", Conv2d(planes, planes, kernel_size: 3, stride: stride, padding: 1, bias: false)));
182182
modules.Add(($"{name}-bnrm2d-2", BatchNorm2d(planes)));
183183
modules.Add(($"{name}relu-2", ReLU(inplace: true)));
184-
modules.Add(($"{name}-conv2d-3", Conv2d(planes, expansion * planes, kernelSize: 1, bias: false)));
184+
modules.Add(($"{name}-conv2d-3", Conv2d(planes, expansion * planes, kernel_size: 1, bias: false)));
185185
modules.Add(($"{name}-bnrm2d-3", BatchNorm2d(expansion * planes)));
186186

187187
layers = Sequential(modules);
188188

189189
if (stride != 1 || in_planes != expansion * planes) {
190190
shortcut = Sequential(
191-
($"{name}-conv2d-4", Conv2d(in_planes, expansion * planes, kernelSize: 1, stride: stride, bias: false)),
191+
($"{name}-conv2d-4", Conv2d(in_planes, expansion * planes, kernel_size: 1, stride: stride, bias: false)),
192192
($"{name}-bnrm2d-4", BatchNorm2d(expansion * planes)));
193193
} else {
194194
shortcut = Sequential();

src/Examples/SpeechCommands.cs

+4-4
Original file line numberDiff line numberDiff line change
@@ -235,16 +235,16 @@ internal class M5 : Module<Tensor, Tensor>
235235

236236
public M5(string name, int n_input = 1, int n_output = 35, int stride = 16, int n_channel = 32) : base(name)
237237
{
238-
conv1 = nn.Conv1d(n_input, n_channel, kernelSize: 80, stride: stride);
238+
conv1 = nn.Conv1d(n_input, n_channel, kernel_size: 80, stride: stride);
239239
bn1 = nn.BatchNorm1d(n_channel);
240240
pool1 = nn.MaxPool1d(4);
241-
conv2 = nn.Conv1d(n_channel, n_channel, kernelSize: 3);
241+
conv2 = nn.Conv1d(n_channel, n_channel, kernel_size: 3);
242242
bn2 = nn.BatchNorm1d(n_channel);
243243
pool2 = nn.MaxPool1d(4);
244-
conv3 = nn.Conv1d(n_channel, 2 * n_channel, kernelSize: 3);
244+
conv3 = nn.Conv1d(n_channel, 2 * n_channel, kernel_size: 3);
245245
bn3 = nn.BatchNorm1d(2 * n_channel);
246246
pool3 = nn.MaxPool1d(4);
247-
conv4 = nn.Conv1d(2 * n_channel, 2 * n_channel, kernelSize: 3);
247+
conv4 = nn.Conv1d(2 * n_channel, 2 * n_channel, kernel_size: 3);
248248
bn4 = nn.BatchNorm1d(2 * n_channel);
249249
pool4 = nn.MaxPool1d(4);
250250
fc1 = nn.Linear(2 * n_channel, n_output);

src/Examples/VGG.cs

+2-2
Original file line numberDiff line numberDiff line change
@@ -38,9 +38,9 @@ public VGG(string name, int numClasses, Device device = null) : base(name)
3838
for (var i = 0; i < channels.Length; i++) {
3939

4040
if (channels[i] == 0) {
41-
modules.Add(($"MaxPool2d-{i}a", MaxPool2d(kernelSize: 2, stride: 2)));
41+
modules.Add(($"MaxPool2d-{i}a", MaxPool2d(kernel_size: 2, stride: 2)));
4242
} else {
43-
modules.Add(($"conv2d-{i}a", Conv2d(in_channels, channels[i], kernelSize: 3, padding: 1)));
43+
modules.Add(($"conv2d-{i}a", Conv2d(in_channels, channels[i], kernel_size: 3, padding: 1)));
4444
modules.Add(($"bnrm2d-{i}a", BatchNorm2d(channels[i])));
4545
modules.Add(($"relu-{i}b", ReLU(inplace: true)));
4646
in_channels = channels[i];

0 commit comments

Comments
 (0)