Upsampling generally comes in two flavors:

1) interpolation (e.g. nearest-neighbor or bilinear), usually followed by an ordinary convolution;

2) transposed convolution (also known as deconvolution).

How to implement the second approach in PyTorch is covered in the link above.
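
For reference, here is a minimal sketch of the second approach with nn.ConvTranspose2d (the channel counts, kernel size, and input shape are arbitrary illustrative choices):

import torch
import torch.nn as nn

# kernel_size=4, stride=2, padding=1 exactly doubles H and W:
# out = (in - 1) * stride - 2 * padding + kernel_size
deconv = nn.ConvTranspose2d(64, 32, kernel_size=4, stride=2, padding=1)
x = torch.randn(1, 64, 16, 16)
print(deconv(x).size())  # torch.Size([1, 32, 32, 32])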

What I want to show here is how to implement the first approach in PyTorch.
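
At its core, the first approach is a single interpolation layer; a minimal sketch (shapes are arbitrary):

import torch
import torch.nn as nn

# bilinear interpolation doubles the height and width
up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
x = torch.randn(1, 64, 16, 16)
print(up(x).size())  # torch.Size([1, 64, 32, 32])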

Example:

1) Using the torch.nn module to implement a generator (the concrete channel counts, kernel sizes, strides, and input shape below are illustrative example values):

import torch
import torch.nn as nn

class ConvLayer(nn.Module):
    def __init__(self, in_channels, out_channels, kernel_size, stride):
        super(ConvLayer, self).__init__()
        # reflection-pad by kernel_size // 2 so the convolution itself
        # does not shrink the borders
        padding = kernel_size // 2
        self.reflection_pad = nn.ReflectionPad2d(padding)
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride)

    def forward(self, x):
        out = self.reflection_pad(x)
        out = self.conv(out)
        return out

class Generator(nn.Module):
    def __init__(self, in_channels):
        super(Generator, self).__init__()
        self.in_channels = in_channels
        # three stride-2 convolutions downsample by a factor of 8
        self.encoder = nn.Sequential(
            ConvLayer(self.in_channels, 32, 3, 2),
            nn.BatchNorm2d(32),
            nn.ReLU(),
            ConvLayer(32, 64, 3, 2),
            nn.BatchNorm2d(64),
            nn.ReLU(),
            ConvLayer(64, 128, 3, 2),
        )
        # bilinear upsampling doubles H and W; a 1x1 convolution after
        # each upsampling step adjusts the channel count
        upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
        self.decoder = nn.Sequential(
            upsample,
            nn.Conv2d(128, 64, 1),
            nn.BatchNorm2d(64),
            nn.ReLU(),
            upsample,
            nn.Conv2d(64, 32, 1),
            nn.BatchNorm2d(32),
            nn.ReLU(),
            upsample,
            nn.Conv2d(32, 3, 1),
            nn.Tanh()
        )

    def forward(self, x):
        x = self.encoder(x)
        out = self.decoder(x)
        return out

def test():
    net = Generator(3)
    for module in net.children():
        print(module)
    x = torch.randn(1, 3, 64, 64)
    output = net(x)
    print('output :', output.size())
    print(type(output))

if __name__ == '__main__':
    test()

This returns (for the example values above):

Sequential(
  (0): ConvLayer(
    (reflection_pad): ReflectionPad2d((1, 1, 1, 1))
    (conv): Conv2d(3, 32, kernel_size=(3, 3), stride=(2, 2))
  )
  (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (2): ReLU()
  (3): ConvLayer(
    (reflection_pad): ReflectionPad2d((1, 1, 1, 1))
    (conv): Conv2d(32, 64, kernel_size=(3, 3), stride=(2, 2))
  )
  (4): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (5): ReLU()
  (6): ConvLayer(
    (reflection_pad): ReflectionPad2d((1, 1, 1, 1))
    (conv): Conv2d(64, 128, kernel_size=(3, 3), stride=(2, 2))
  )
)
Sequential(
  (0): Upsample(scale_factor=2, mode=bilinear)
  (1): Conv2d(128, 64, kernel_size=(1, 1), stride=(1, 1))
  (2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (3): ReLU()
  (4): Upsample(scale_factor=2, mode=bilinear)
  (5): Conv2d(64, 32, kernel_size=(1, 1), stride=(1, 1))
  (6): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (7): ReLU()
  (8): Upsample(scale_factor=2, mode=bilinear)
  (9): Conv2d(32, 3, kernel_size=(1, 1), stride=(1, 1))
  (10): Tanh()
)
output : torch.Size([1, 3, 64, 64])
<class 'torch.Tensor'>

However, this raises a warning:

 UserWarning: nn.Upsample is deprecated. Use nn.functional.interpolate instead.

It can be rewritten with the torch.nn.functional module instead:

import torch
import torch.nn as nn
import torch.nn.functional as F

class ConvLayer(nn.Module):
    def __init__(self, in_channels, out_channels, kernel_size, stride):
        super(ConvLayer, self).__init__()
        padding = kernel_size // 2
        self.reflection_pad = nn.ReflectionPad2d(padding)
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride)

    def forward(self, x):
        out = self.reflection_pad(x)
        out = self.conv(out)
        return out

class Generator(nn.Module):
    def __init__(self, in_channels):
        super(Generator, self).__init__()
        self.in_channels = in_channels
        self.encoder = nn.Sequential(
            ConvLayer(self.in_channels, 32, 3, 2),
            nn.BatchNorm2d(32),
            nn.ReLU(),
            ConvLayer(32, 64, 3, 2),
            nn.BatchNorm2d(64),
            nn.ReLU(),
            ConvLayer(64, 128, 3, 2),
        )
        # the upsampling is now done in forward() with F.interpolate,
        # so the decoder holds only the convolutional stages
        self.decoder1 = nn.Sequential(
            nn.Conv2d(128, 64, 1),
            nn.BatchNorm2d(64),
            nn.ReLU()
        )
        self.decoder2 = nn.Sequential(
            nn.Conv2d(64, 32, 1),
            nn.BatchNorm2d(32),
            nn.ReLU()
        )
        self.decoder3 = nn.Sequential(
            nn.Conv2d(32, 3, 1),
            nn.Tanh()
        )

    def forward(self, x):
        x = self.encoder(x)
        x = F.interpolate(x, scale_factor=2, mode='bilinear', align_corners=True)
        x = self.decoder1(x)
        x = F.interpolate(x, scale_factor=2, mode='bilinear', align_corners=True)
        x = self.decoder2(x)
        x = F.interpolate(x, scale_factor=2, mode='bilinear', align_corners=True)
        out = self.decoder3(x)
        return out

def test():
    net = Generator(3)
    for module in net.children():
        print(module)
    x = torch.randn(1, 3, 64, 64)
    output = net(x)
    print('output :', output.size())
    print(type(output))

if __name__ == '__main__':
    test()

This returns:

Sequential(
  (0): ConvLayer(
    (reflection_pad): ReflectionPad2d((1, 1, 1, 1))
    (conv): Conv2d(3, 32, kernel_size=(3, 3), stride=(2, 2))
  )
  (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (2): ReLU()
  (3): ConvLayer(
    (reflection_pad): ReflectionPad2d((1, 1, 1, 1))
    (conv): Conv2d(32, 64, kernel_size=(3, 3), stride=(2, 2))
  )
  (4): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (5): ReLU()
  (6): ConvLayer(
    (reflection_pad): ReflectionPad2d((1, 1, 1, 1))
    (conv): Conv2d(64, 128, kernel_size=(3, 3), stride=(2, 2))
  )
)
Sequential(
  (0): Conv2d(128, 64, kernel_size=(1, 1), stride=(1, 1))
  (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (2): ReLU()
)
Sequential(
  (0): Conv2d(64, 32, kernel_size=(1, 1), stride=(1, 1))
  (1): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (2): ReLU()
)
Sequential(
  (0): Conv2d(32, 3, kernel_size=(1, 1), stride=(1, 1))
  (1): Tanh()
)
output : torch.Size([1, 3, 64, 64])
<class 'torch.Tensor'>
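
The two versions compute exactly the same thing, since nn.Upsample just forwards to F.interpolate internally; a quick check (shapes are arbitrary):

import torch
import torch.nn as nn
import torch.nn.functional as F

x = torch.randn(1, 128, 8, 8)
up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
a = up(x)
b = F.interpolate(x, scale_factor=2, mode='bilinear', align_corners=True)
print(torch.equal(a, b))  # True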
