加入带洞卷积的resnet结构的构建,以及普通resnet如何通过模块的组合来堆砌深层卷积网络。

第一段代码为deeplab v3+(pytorch版本)中的基本模型改进版resnet的构建过程,

第二段代码为model的全部结构图示,以文字的方式表示;forward的计算过程并未在其中显示。

import math

import torch
import torch.nn as nn
import torch.utils.model_zoo as model_zoo

from modeling.sync_batchnorm.batchnorm import SynchronizedBatchNorm2d
class Bottleneck(nn.Module):
    """ResNet bottleneck block (1x1 -> 3x3 -> 1x1 convolutions) with an
    optional atrous (dilated) 3x3 convolution and a residual shortcut.

    The different ResNet stages are built by stacking this block with
    varying channel counts, strides and dilations.
    """

    # The block outputs `planes * expansion` channels.
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, BatchNorm=None):
        """
        Args:
            inplanes: number of input channels.
            planes: base channel count; the block outputs planes * 4 channels.
            stride: stride of the 3x3 convolution.
            dilation: atrous rate of the 3x3 convolution; 1 means an
                ordinary convolution.
            downsample: optional module that projects the residual to the
                block's output shape (needed when stride != 1 or the
                channel counts differ).
            BatchNorm: normalization layer class (e.g. nn.BatchNorm2d or
                SynchronizedBatchNorm2d).
        """
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = BatchNorm(planes)
        # padding == dilation keeps the spatial size unchanged for a 3x3 kernel.
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               dilation=dilation, padding=dilation, bias=False)
        self.bn2 = BatchNorm(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = BatchNorm(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
        self.dilation = dilation

    def forward(self, x):
        # Residual connection: the input (projected by `downsample` when
        # shapes differ) is added back before the final ReLU.
        residual = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)

        out = self.conv3(out)
        out = self.bn3(out)

        if self.downsample is not None:
            residual = self.downsample(x)

        out += residual
        out = self.relu(out)

        return out


class ResNet(nn.Module):
    """ResNet backbone with atrous (dilated) convolutions for DeepLab v3+:
    dilations enlarge the receptive field and capture more context without
    further reducing spatial resolution."""
# 加入atrous的resnet结构,获取不同的感受野以及上下文信息。
def __init__(self, block, layers, output_stride, BatchNorm, pretrained=True):
# 定义resnet的基本结构,通过前面的几层直接设计加上不同参数的bottleneck模块的组合构成;
# layers参数,在创建resnet类对象的时候,赋予一个数组,在构建多层网络模块的时候调用。
# block代表模块结构,在这里指的是bottleneck.
self.inplanes = 64
super(ResNet, self).__init__()
blocks = [1, 2, 4]
if output_stride == 16:
strides = [1, 2, 2, 1]
dilations = [1, 1, 1, 2]
elif output_stride == 8:
strides = [1, 2, 1, 1]
dilations = [1, 1, 2, 4]
else:
raise NotImplementedError # Modules
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = BatchNorm(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
# 下面通过调用make_layer函数来构造不同参数的bottleneck模块;
self.layer1 = self._make_layer(block, 64, layers[0], stride=strides[0], dilation=dilations[0], BatchNorm=BatchNorm)
self.layer2 = self._make_layer(block, 128, layers[1], stride=strides[1], dilation=dilations[1], BatchNorm=BatchNorm)
self.layer3 = self._make_layer(block, 256, layers[2], stride=strides[2], dilation=dilations[2], BatchNorm=BatchNorm)
self.layer4 = self._make_MG_unit(block, 512, blocks=blocks, stride=strides[3], dilation=dilations[3], BatchNorm=BatchNorm)
# self.layer4 = self._make_layer(block, 512, layers[3], stride=strides[3], dilation=dilations[3], BatchNorm=BatchNorm)
self._init_weight() if pretrained:
self._load_pretrained_model() def _make_layer(self, block, planes, blocks, stride=1, dilation=1, BatchNorm=None):
# block传入的是bottleneck模块;
# planes参数是改变卷积层参数的重要变量,在这里分别传入64.128.256.512,
# 目的是为了给conv2d(in_channels,out_channels,kernel_size,stride,……)传递各种参数, # blocks是定义有几个参数相同的bottleneck模块,即在最下面的Layers参数【3.4.23.3】,在总模型结构图中可以清晰的看出。
# dilation参数是为了设置带洞卷积(atrous),默认为1即普通卷积;
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
BatchNorm(planes * block.expansion),
) layers = []
layers.append(block(self.inplanes, planes, stride, dilation, downsample, BatchNorm))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes, dilation=dilation, BatchNorm=BatchNorm)) return nn.Sequential(*layers) def _make_MG_unit(self, block, planes, blocks, stride=1, dilation=1, BatchNorm=None):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
BatchNorm(planes * block.expansion),
) layers = []
layers.append(block(self.inplanes, planes, stride, dilation=blocks[0]*dilation,
downsample=downsample, BatchNorm=BatchNorm))
self.inplanes = planes * block.expansion
for i in range(1, len(blocks)):
layers.append(block(self.inplanes, planes, stride=1,
dilation=blocks[i]*dilation, BatchNorm=BatchNorm)) return nn.Sequential(*layers) def forward(self, input):
x = self.conv1(input)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x) x = self.layer1(x)
low_level_feat = x
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
return x, low_level_feat def _init_weight(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, SynchronizedBatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_() def _load_pretrained_model(self):
# pretrain_dict = model_zoo.load_url('https://download.pytorch.org/models/resnet101-5d3b4d8f.pth')
pretrain_dict = torch.load('/home/huihua/NewDisk/resnet50-19c8e357.pth')
# 直接加载下载好模型预训练的参数,不用再次下载
model_dict = {}
state_dict = self.state_dict()
for k, v in pretrain_dict.items():
if k in state_dict:
model_dict[k] = v
state_dict.update(model_dict)
self.load_state_dict(state_dict) def ResNet101(output_stride, BatchNorm, pretrained=True):
"""Constructs a ResNet-101 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 23, 3], output_stride, BatchNorm, pretrained=pretrained)
# 【3,4,23,3】代表make_layer中的block(bottleneck)的块数。resnet源代码中以此来确定resnet50或者101以及更深的。
return model if __name__ == "__main__":
import torch
model = ResNet101(BatchNorm=nn.BatchNorm2d, pretrained=True, output_stride=8)
print(model) #打印模型结构,方便观察如何构造,以及各个参数的含义。
input = torch.rand(1, 3, 512, 512)
output, low_level_feat = model(input)
print(output.size())
print(low_level_feat.size())

打印出的model结构如下:

 /home/huihua/anaconda3/bin/python /home/huihua/PycharmProjects/untitled/pytorch-deeplab-xception-master/modeling/backbone/resnet.py
ResNet(
(conv1): Conv2d(3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
(bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace)
(maxpool): MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False)
(layer1): Sequential(
(): Bottleneck(
(conv1): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn1): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(, , kernel_size=(, ), stride=(, ), padding=(, ), bias=False)
(bn2): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn3): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace)
(downsample): Sequential(
(): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(): Bottleneck(
(conv1): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn1): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(, , kernel_size=(, ), stride=(, ), padding=(, ), bias=False)
(bn2): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn3): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace)
)
(): Bottleneck(
(conv1): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn1): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(, , kernel_size=(, ), stride=(, ), padding=(, ), bias=False)
(bn2): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn3): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace)
)
)
(layer2): Sequential(
(): Bottleneck(
(conv1): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn1): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(, , kernel_size=(, ), stride=(, ), padding=(, ), bias=False)
(bn2): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn3): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace)
(downsample): Sequential(
(): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(): Bottleneck(
(conv1): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn1): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(, , kernel_size=(, ), stride=(, ), padding=(, ), bias=False)
(bn2): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn3): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace)
)
(): Bottleneck(
(conv1): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn1): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(, , kernel_size=(, ), stride=(, ), padding=(, ), bias=False)
(bn2): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn3): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace)
)
(): Bottleneck(
(conv1): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn1): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(, , kernel_size=(, ), stride=(, ), padding=(, ), bias=False)
(bn2): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn3): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace)
)
)
(layer3): Sequential(
(): Bottleneck(
(conv1): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn1): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(, , kernel_size=(, ), stride=(, ), padding=(, ), dilation=(, ), bias=False)
(bn2): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn3): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace)
(downsample): Sequential(
(): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(): Bottleneck(
(conv1): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn1): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(, , kernel_size=(, ), stride=(, ), padding=(, ), dilation=(, ), bias=False)
(bn2): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn3): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace)
)
(): Bottleneck(
(conv1): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn1): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(, , kernel_size=(, ), stride=(, ), padding=(, ), dilation=(, ), bias=False)
(bn2): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn3): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace)
)
(): Bottleneck(
(conv1): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn1): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(, , kernel_size=(, ), stride=(, ), padding=(, ), dilation=(, ), bias=False)
(bn2): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn3): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace)
)
(): Bottleneck(
(conv1): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn1): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(, , kernel_size=(, ), stride=(, ), padding=(, ), dilation=(, ), bias=False)
(bn2): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn3): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace)
)
(): Bottleneck(
(conv1): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn1): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(, , kernel_size=(, ), stride=(, ), padding=(, ), dilation=(, ), bias=False)
(bn2): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn3): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace)
)
(): Bottleneck(
(conv1): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn1): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(, , kernel_size=(, ), stride=(, ), padding=(, ), dilation=(, ), bias=False)
(bn2): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn3): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace)
)
(): Bottleneck(
(conv1): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn1): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(, , kernel_size=(, ), stride=(, ), padding=(, ), dilation=(, ), bias=False)
(bn2): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn3): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace)
)
(): Bottleneck(
(conv1): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn1): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(, , kernel_size=(, ), stride=(, ), padding=(, ), dilation=(, ), bias=False)
(bn2): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn3): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace)
)
(): Bottleneck(
(conv1): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn1): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(, , kernel_size=(, ), stride=(, ), padding=(, ), dilation=(, ), bias=False)
(bn2): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn3): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace)
)
(): Bottleneck(
(conv1): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn1): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(, , kernel_size=(, ), stride=(, ), padding=(, ), dilation=(, ), bias=False)
(bn2): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn3): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace)
)
(): Bottleneck(
(conv1): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn1): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(, , kernel_size=(, ), stride=(, ), padding=(, ), dilation=(, ), bias=False)
(bn2): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn3): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace)
)
(): Bottleneck(
(conv1): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn1): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(, , kernel_size=(, ), stride=(, ), padding=(, ), dilation=(, ), bias=False)
(bn2): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn3): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace)
)
(): Bottleneck(
(conv1): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn1): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(, , kernel_size=(, ), stride=(, ), padding=(, ), dilation=(, ), bias=False)
(bn2): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn3): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace)
)
(): Bottleneck(
(conv1): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn1): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(, , kernel_size=(, ), stride=(, ), padding=(, ), dilation=(, ), bias=False)
(bn2): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn3): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace)
)
(): Bottleneck(
(conv1): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn1): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(, , kernel_size=(, ), stride=(, ), padding=(, ), dilation=(, ), bias=False)
(bn2): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn3): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace)
)
(): Bottleneck(
(conv1): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn1): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(, , kernel_size=(, ), stride=(, ), padding=(, ), dilation=(, ), bias=False)
(bn2): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn3): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace)
)
(): Bottleneck(
(conv1): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn1): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(, , kernel_size=(, ), stride=(, ), padding=(, ), dilation=(, ), bias=False)
(bn2): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn3): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace)
)
(): Bottleneck(
(conv1): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn1): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(, , kernel_size=(, ), stride=(, ), padding=(, ), dilation=(, ), bias=False)
(bn2): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn3): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace)
)
(): Bottleneck(
(conv1): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn1): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(, , kernel_size=(, ), stride=(, ), padding=(, ), dilation=(, ), bias=False)
(bn2): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn3): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace)
)
(): Bottleneck(
(conv1): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn1): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(, , kernel_size=(, ), stride=(, ), padding=(, ), dilation=(, ), bias=False)
(bn2): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn3): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace)
)
(): Bottleneck(
(conv1): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn1): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(, , kernel_size=(, ), stride=(, ), padding=(, ), dilation=(, ), bias=False)
(bn2): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn3): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace)
)
(): Bottleneck(
(conv1): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn1): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(, , kernel_size=(, ), stride=(, ), padding=(, ), dilation=(, ), bias=False)
(bn2): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn3): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace)
)
)
(layer4): Sequential(
(): Bottleneck(
(conv1): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn1): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(, , kernel_size=(, ), stride=(, ), padding=(, ), dilation=(, ), bias=False)
(bn2): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn3): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace)
(downsample): Sequential(
(): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(): Bottleneck(
(conv1): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn1): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(, , kernel_size=(, ), stride=(, ), padding=(, ), dilation=(, ), bias=False)
(bn2): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn3): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace)
)
(): Bottleneck(
(conv1): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn1): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv2): Conv2d(, , kernel_size=(, ), stride=(, ), padding=(, ), dilation=(, ), bias=False)
(bn2): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(conv3): Conv2d(, , kernel_size=(, ), stride=(, ), bias=False)
(bn3): BatchNorm2d(, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace)
)
)
)
torch.Size([1, 2048, 64, 64])
torch.Size([1, 256, 128, 128]) Process finished with exit code 0

Deeplab v3+中的骨干模型resnet(加入atrous)的源码解析,以及普通resnet整个结构的构建过程的更多相关文章

  1. Java生鲜电商平台-电商中海量搜索ElasticSearch架构设计实战与源码解析

    Java生鲜电商平台-电商中海量搜索ElasticSearch架构设计实战与源码解析 生鲜电商搜索引擎的特点 众所周知,标准的搜索引擎主要分成三个大的部分,第一步是爬虫系统,第二步是数据分析,第三步才 ...

  2. Java中的容器(集合)之HashMap源码解析

    1.HashMap源码解析(JDK8) 基础原理: 对比上一篇<Java中的容器(集合)之ArrayList源码解析>而言,本篇只解析HashMap常用的核心方法的源码. HashMap是 ...

  3. MapReduce中一次reduce方法的调用中key的值不断变化分析及源码解析

    摘要:mapreduce中执行reduce(KEYIN key, Iterable<VALUEIN> values, Context context),调用一次reduce方法,迭代val ...

  4. 神经网络中 BP 算法的原理与 Python 实现源码解析

    最近这段时间系统性的学习了 BP 算法后写下了这篇学习笔记,因为能力有限,若有明显错误,还请指正. 什么是梯度下降和链式求导法则 假设我们有一个函数 J(w),如下图所示. 梯度下降示意图 现在,我们 ...

  5. Java中的容器(集合)之ArrayList源码解析

    1.ArrayList源码解析 源码解析: 如下源码来自JDK8(如需查看ArrayList扩容源码解析请跳转至<Java中的容器(集合)>第十条):. package java.util ...

  6. 关于原生js中函数的三种角色和jQuery源码解析

    原生js中的函数有三种角色: 分两大种: 1.函数(最主要的角色)2.普通对象(辅助角色):函数也可以像对象一样设置属于本身的私有属性和方法,这些东西和实例或者私有变量没有关系两种角色直接没有必然的关 ...

  7. [源码解析] PyTorch分布式优化器(3)---- 模型并行

    [源码解析] PyTorch分布式优化器(3)---- 模型并行 目录 [源码解析] PyTorch分布式优化器(3)---- 模型并行 0x00 摘要 0x01 前文回顾 0x02 单机模型 2.1 ...

  8. 量化交易中VWAP/TWAP算法的基本原理和简单源码实现(C++和python)(转)

    量化交易中VWAP/TWAP算法的基本原理和简单源码实现(C++和python) 原文地址:http://blog.csdn.net/u012234115/article/details/728300 ...

  9. 谷歌BERT预训练源码解析(二):模型构建

    目录前言源码解析模型配置参数BertModelword embeddingembedding_postprocessorTransformerself_attention模型应用前言BERT的模型主要 ...

随机推荐

  1. java.util.function 中的 Function、Predicate、Consumer

    函数式接口: 函数式接口(Functional Interface)就是一个有且仅有一个抽象方法,但可以有多个非抽象方法的接口. 函数式接口可以被隐式转换为 Lambda 表达式. Function ...

  2. 云游戏真的来了,这次的搅局者是 Google,云游戏平台搭建

    索尼.微软.任天堂和 Steam 等几家平台商的博弈. Google:云计算将会彻底改变我们的游戏方式 名为「Stadia」的全新游戏平台 和我们平时看到的索尼 PS4.微软 Xbox One 以及任 ...

  3. Java8学习笔记(十一)--并发与非并发流下reduce比较

    BinaryOperator<Integer> operator = (l, r) -> l + r; BiFunction<Integer, Integer, Integer ...

  4. 使用ionic2开发一个二维码扫描功能

    界面添加一个按钮: <button ion-button block color="secondary" class="Scan-button" (cli ...

  5. .NET开发人员遇到Maven

    由.NET转向Java开发,总是会带着.NET平台的一些概念和工具想着在对应的Java平台是否也有着相同的解决方案.第一次用Maven随手打开pom.xml,看着里面许多属性描述我的感觉就是这是一个M ...

  6. VS2017 安装打包插件

    安装 打开VS2017:工具 --> 扩展和更新 --> 联机,搜索Microsoft Visual Studio 2017 Installer Projects,如下图: 在搜索中输入: ...

  7. iOS - 如何得到UIImage的大小

    把UIImage 转换为NSData,然后拿到NSData的大小 NSData * imageData = UIImageJPEGRepresentation(image,); length = [i ...

  8. 3D Slicer Reconstruct CT/MRI

    3D Slicer Reconstruct CT/MRI 1. Load DCM file of your CT/MRI 2. Go to Volume Rendering, click the ey ...

  9. Oracle管理常用查询语句

    1.查看表空间的名称及大小 select t.tablespace_name, round(sum(bytes/(1024*1024)),0) ts_sizefrom dba_tablespaces ...

  10. 13 form表单

    form表单中可包括 <input > 类型包括 text(name,value) password(name,value) checkbox(name,value) radio(chec ...