PaddlePaddle 飞桨复现 ResNet34
import paddle
import paddle.nn as nn
class ResidualBlock(nn.Layer):
    """Basic ResNet residual block (the two-conv variant used by ResNet-18/34).

    Computes ``relu(F(x) + shortcut(x))`` where F is two 3x3 conv+BN stages
    and the shortcut is the identity, or ``downsample`` when the main path
    changes shape.

    Args:
        in_channels: number of channels of the input feature map.
        out_channels: number of channels produced by both conv stages.
        stride: stride of the first conv; a stride > 1 halves the spatial size.
        downsample: optional layer applied to the input so the skip path
            matches the main path's shape (needed when ``stride != 1`` or the
            channel counts differ).
    """

    def __init__(self, in_channels, out_channels, stride=1, downsample=None):
        super(ResidualBlock, self).__init__()
        # Stage 1: (possibly strided) conv -> BN -> ReLU.
        self.conv1 = nn.Sequential(
            nn.Conv2D(in_channels, out_channels, kernel_size=3,
                      stride=stride, padding=1),
            nn.BatchNorm2D(out_channels),
            nn.ReLU())
        # Stage 2: conv -> BN; the final ReLU is applied AFTER the skip add.
        self.conv2 = nn.Sequential(
            nn.Conv2D(out_channels, out_channels, kernel_size=3,
                      stride=1, padding=1),
            nn.BatchNorm2D(out_channels))
        self.downsample = downsample
        self.relu = nn.ReLU()
        self.out_channels = out_channels

    def forward(self, x):
        """Forward pass: main path + (projected) residual, then ReLU."""
        residual = x
        out = self.conv1(x)
        out = self.conv2(out)
        # Explicit None check: the original `if self.downsample:` relied on the
        # truthiness of a Layer, which is fragile (e.g. an empty nn.Sequential
        # is falsy even though it is a valid identity shortcut).
        if self.downsample is not None:
            residual = self.downsample(x)
        out += residual
        out = self.relu(out)
        return out
class ResNet(nn.Layer):
    """ResNet assembled from ``block`` (ResNet-34 when ``layers == [3, 4, 6, 3]``).

    Architecture: 7x7 stem conv -> max pool -> four residual stages
    (64/128/256/512 channels) -> average pool -> fully-connected classifier.

    NOTE(review): the head is hard-coded for 256x256 inputs — after the four
    stages the feature map is 8x8, so ``AvgPool2D(7, stride=1)`` yields a
    2x2 map and ``512 * 2 * 2 = 2048`` features feeding the Linear layer.
    Other input sizes would need an adaptive pool and/or a different fc
    width — confirm the intended input resolution before reusing.

    Args:
        block: residual block class, e.g. ``ResidualBlock``.
        layers: number of blocks per stage, e.g. ``[3, 4, 6, 3]``.
        num_classes: size of the final classification layer.
    """

    def __init__(self, block, layers, num_classes=1000):
        super(ResNet, self).__init__()
        self.inplanes = 64  # channel count entering the next stage; mutated by _make_layer
        # Stem: 7x7 stride-2 conv -> BN -> ReLU, then a stride-2 max pool.
        self.conv1 = nn.Sequential(
            nn.Conv2D(3, 64, kernel_size=7, stride=2, padding=3),
            nn.BatchNorm2D(64),
            nn.ReLU())
        self.maxpool = nn.MaxPool2D(kernel_size=3, stride=2, padding=1)
        # Four residual stages; stages 1-3 downsample spatially via stride=2.
        self.layer0 = self._make_layer(block, 64, layers[0], stride=1)
        self.layer1 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer2 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer3 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = nn.AvgPool2D(7, stride=1)
        self.fc = nn.Linear(2048, num_classes)  # 512 ch * 2 * 2 (256x256 input only)

    def _make_layer(self, block, planes, blocks, stride=1):
        """Build one stage of ``blocks`` residual blocks.

        The first block may downsample / widen (hence the 1x1 projection
        shortcut); the remaining blocks keep shape.
        """
        downsample = None
        if stride != 1 or self.inplanes != planes:
            # 1x1 conv projection so the skip path matches the main path.
            downsample = nn.Sequential(
                nn.Conv2D(self.inplanes, planes, kernel_size=1, stride=stride),
                nn.BatchNorm2D(planes),
            )
        layers = [block(self.inplanes, planes, stride, downsample)]
        self.inplanes = planes
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)

    def forward(self, x):
        """Map a (N, 3, 256, 256) batch to (N, num_classes) logits."""
        x = self.conv1(x)
        x = self.maxpool(x)
        x = self.layer0(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.avgpool(x)
        # Flatten all feature dimensions for the classifier.
        x = paddle.reshape(x, [x.shape[0], -1])
        x = self.fc(x)
        return x
# Instantiate a 2-class ResNet-34 ([3, 4, 6, 3] blocks per stage).
model = ResNet(ResidualBlock, [3, 4, 6, 3], num_classes=2)# instantiate the model
# Print a layer-by-layer summary for a (batch, 3, 256, 256) input.
paddle.Model(model).summary((-1, 3, 256, 256))
W0505 09:07:12.146911 5588 device_context.cc:447] Please NOTE: device: 0, GPU Compute Capability: 7.0, Driver API Version: 11.2, Runtime API Version: 10.1
W0505 09:07:12.151273 5588 device_context.cc:465] device: 0, cuDNN Version: 7.6.
----------------------------------------------------------------------------
Layer (type) Input Shape Output Shape Param #
============================================================================
Conv2D-1 [[1, 3, 256, 256]] [1, 64, 128, 128] 9,472
BatchNorm2D-1 [[1, 64, 128, 128]] [1, 64, 128, 128] 256
ReLU-1 [[1, 64, 128, 128]] [1, 64, 128, 128] 0
MaxPool2D-1 [[1, 64, 128, 128]] [1, 64, 64, 64] 0
Conv2D-2 [[1, 64, 64, 64]] [1, 64, 64, 64] 36,928
BatchNorm2D-2 [[1, 64, 64, 64]] [1, 64, 64, 64] 256
ReLU-2 [[1, 64, 64, 64]] [1, 64, 64, 64] 0
Conv2D-3 [[1, 64, 64, 64]] [1, 64, 64, 64] 36,928
BatchNorm2D-3 [[1, 64, 64, 64]] [1, 64, 64, 64] 256
ReLU-3 [[1, 64, 64, 64]] [1, 64, 64, 64] 0
ResidualBlock-1 [[1, 64, 64, 64]] [1, 64, 64, 64] 0
Conv2D-4 [[1, 64, 64, 64]] [1, 64, 64, 64] 36,928
BatchNorm2D-4 [[1, 64, 64, 64]] [1, 64, 64, 64] 256
ReLU-4 [[1, 64, 64, 64]] [1, 64, 64, 64] 0
Conv2D-5 [[1, 64, 64, 64]] [1, 64, 64, 64] 36,928
BatchNorm2D-5 [[1, 64, 64, 64]] [1, 64, 64, 64] 256
ReLU-5 [[1, 64, 64, 64]] [1, 64, 64, 64] 0
ResidualBlock-2 [[1, 64, 64, 64]] [1, 64, 64, 64] 0
Conv2D-6 [[1, 64, 64, 64]] [1, 64, 64, 64] 36,928
BatchNorm2D-6 [[1, 64, 64, 64]] [1, 64, 64, 64] 256
ReLU-6 [[1, 64, 64, 64]] [1, 64, 64, 64] 0
Conv2D-7 [[1, 64, 64, 64]] [1, 64, 64, 64] 36,928
BatchNorm2D-7 [[1, 64, 64, 64]] [1, 64, 64, 64] 256
ReLU-7 [[1, 64, 64, 64]] [1, 64, 64, 64] 0
ResidualBlock-3 [[1, 64, 64, 64]] [1, 64, 64, 64] 0
Conv2D-9 [[1, 64, 64, 64]] [1, 128, 32, 32] 73,856
BatchNorm2D-9 [[1, 128, 32, 32]] [1, 128, 32, 32] 512
ReLU-8 [[1, 128, 32, 32]] [1, 128, 32, 32] 0
Conv2D-10 [[1, 128, 32, 32]] [1, 128, 32, 32] 147,584
BatchNorm2D-10 [[1, 128, 32, 32]] [1, 128, 32, 32] 512
Conv2D-8 [[1, 64, 64, 64]] [1, 128, 32, 32] 8,320
BatchNorm2D-8 [[1, 128, 32, 32]] [1, 128, 32, 32] 512
ReLU-9 [[1, 128, 32, 32]] [1, 128, 32, 32] 0
ResidualBlock-4 [[1, 64, 64, 64]] [1, 128, 32, 32] 0
Conv2D-11 [[1, 128, 32, 32]] [1, 128, 32, 32] 147,584
BatchNorm2D-11 [[1, 128, 32, 32]] [1, 128, 32, 32] 512
ReLU-10 [[1, 128, 32, 32]] [1, 128, 32, 32] 0
Conv2D-12 [[1, 128, 32, 32]] [1, 128, 32, 32] 147,584
BatchNorm2D-12 [[1, 128, 32, 32]] [1, 128, 32, 32] 512
ReLU-11 [[1, 128, 32, 32]] [1, 128, 32, 32] 0
ResidualBlock-5 [[1, 128, 32, 32]] [1, 128, 32, 32] 0
Conv2D-13 [[1, 128, 32, 32]] [1, 128, 32, 32] 147,584
BatchNorm2D-13 [[1, 128, 32, 32]] [1, 128, 32, 32] 512
ReLU-12 [[1, 128, 32, 32]] [1, 128, 32, 32] 0
Conv2D-14 [[1, 128, 32, 32]] [1, 128, 32, 32] 147,584
BatchNorm2D-14 [[1, 128, 32, 32]] [1, 128, 32, 32] 512
ReLU-13 [[1, 128, 32, 32]] [1, 128, 32, 32] 0
ResidualBlock-6 [[1, 128, 32, 32]] [1, 128, 32, 32] 0
Conv2D-15 [[1, 128, 32, 32]] [1, 128, 32, 32] 147,584
BatchNorm2D-15 [[1, 128, 32, 32]] [1, 128, 32, 32] 512
ReLU-14 [[1, 128, 32, 32]] [1, 128, 32, 32] 0
Conv2D-16 [[1, 128, 32, 32]] [1, 128, 32, 32] 147,584
BatchNorm2D-16 [[1, 128, 32, 32]] [1, 128, 32, 32] 512
ReLU-15 [[1, 128, 32, 32]] [1, 128, 32, 32] 0
ResidualBlock-7 [[1, 128, 32, 32]] [1, 128, 32, 32] 0
Conv2D-18 [[1, 128, 32, 32]] [1, 256, 16, 16] 295,168
BatchNorm2D-18 [[1, 256, 16, 16]] [1, 256, 16, 16] 1,024
ReLU-16 [[1, 256, 16, 16]] [1, 256, 16, 16] 0
Conv2D-19 [[1, 256, 16, 16]] [1, 256, 16, 16] 590,080
BatchNorm2D-19 [[1, 256, 16, 16]] [1, 256, 16, 16] 1,024
Conv2D-17 [[1, 128, 32, 32]] [1, 256, 16, 16] 33,024
BatchNorm2D-17 [[1, 256, 16, 16]] [1, 256, 16, 16] 1,024
ReLU-17 [[1, 256, 16, 16]] [1, 256, 16, 16] 0
ResidualBlock-8 [[1, 128, 32, 32]] [1, 256, 16, 16] 0
Conv2D-20 [[1, 256, 16, 16]] [1, 256, 16, 16] 590,080
BatchNorm2D-20 [[1, 256, 16, 16]] [1, 256, 16, 16] 1,024
ReLU-18 [[1, 256, 16, 16]] [1, 256, 16, 16] 0
Conv2D-21 [[1, 256, 16, 16]] [1, 256, 16, 16] 590,080
BatchNorm2D-21 [[1, 256, 16, 16]] [1, 256, 16, 16] 1,024
ReLU-19 [[1, 256, 16, 16]] [1, 256, 16, 16] 0
ResidualBlock-9 [[1, 256, 16, 16]] [1, 256, 16, 16] 0
Conv2D-22 [[1, 256, 16, 16]] [1, 256, 16, 16] 590,080
BatchNorm2D-22 [[1, 256, 16, 16]] [1, 256, 16, 16] 1,024
ReLU-20 [[1, 256, 16, 16]] [1, 256, 16, 16] 0
Conv2D-23 [[1, 256, 16, 16]] [1, 256, 16, 16] 590,080
BatchNorm2D-23 [[1, 256, 16, 16]] [1, 256, 16, 16] 1,024
ReLU-21 [[1, 256, 16, 16]] [1, 256, 16, 16] 0
ResidualBlock-10 [[1, 256, 16, 16]] [1, 256, 16, 16] 0
Conv2D-24 [[1, 256, 16, 16]] [1, 256, 16, 16] 590,080
BatchNorm2D-24 [[1, 256, 16, 16]] [1, 256, 16, 16] 1,024
ReLU-22 [[1, 256, 16, 16]] [1, 256, 16, 16] 0
Conv2D-25 [[1, 256, 16, 16]] [1, 256, 16, 16] 590,080
BatchNorm2D-25 [[1, 256, 16, 16]] [1, 256, 16, 16] 1,024
ReLU-23 [[1, 256, 16, 16]] [1, 256, 16, 16] 0
ResidualBlock-11 [[1, 256, 16, 16]] [1, 256, 16, 16] 0
Conv2D-26 [[1, 256, 16, 16]] [1, 256, 16, 16] 590,080
BatchNorm2D-26 [[1, 256, 16, 16]] [1, 256, 16, 16] 1,024
ReLU-24 [[1, 256, 16, 16]] [1, 256, 16, 16] 0
Conv2D-27 [[1, 256, 16, 16]] [1, 256, 16, 16] 590,080
BatchNorm2D-27 [[1, 256, 16, 16]] [1, 256, 16, 16] 1,024
ReLU-25 [[1, 256, 16, 16]] [1, 256, 16, 16] 0
ResidualBlock-12 [[1, 256, 16, 16]] [1, 256, 16, 16] 0
Conv2D-28 [[1, 256, 16, 16]] [1, 256, 16, 16] 590,080
BatchNorm2D-28 [[1, 256, 16, 16]] [1, 256, 16, 16] 1,024
ReLU-26 [[1, 256, 16, 16]] [1, 256, 16, 16] 0
Conv2D-29 [[1, 256, 16, 16]] [1, 256, 16, 16] 590,080
BatchNorm2D-29 [[1, 256, 16, 16]] [1, 256, 16, 16] 1,024
ReLU-27 [[1, 256, 16, 16]] [1, 256, 16, 16] 0
ResidualBlock-13 [[1, 256, 16, 16]] [1, 256, 16, 16] 0
Conv2D-31 [[1, 256, 16, 16]] [1, 512, 8, 8] 1,180,160
BatchNorm2D-31 [[1, 512, 8, 8]] [1, 512, 8, 8] 2,048
ReLU-28 [[1, 512, 8, 8]] [1, 512, 8, 8] 0
Conv2D-32 [[1, 512, 8, 8]] [1, 512, 8, 8] 2,359,808
BatchNorm2D-32 [[1, 512, 8, 8]] [1, 512, 8, 8] 2,048
Conv2D-30 [[1, 256, 16, 16]] [1, 512, 8, 8] 131,584
BatchNorm2D-30 [[1, 512, 8, 8]] [1, 512, 8, 8] 2,048
ReLU-29 [[1, 512, 8, 8]] [1, 512, 8, 8] 0
ResidualBlock-14 [[1, 256, 16, 16]] [1, 512, 8, 8] 0
Conv2D-33 [[1, 512, 8, 8]] [1, 512, 8, 8] 2,359,808
BatchNorm2D-33 [[1, 512, 8, 8]] [1, 512, 8, 8] 2,048
ReLU-30 [[1, 512, 8, 8]] [1, 512, 8, 8] 0
Conv2D-34 [[1, 512, 8, 8]] [1, 512, 8, 8] 2,359,808
BatchNorm2D-34 [[1, 512, 8, 8]] [1, 512, 8, 8] 2,048
ReLU-31 [[1, 512, 8, 8]] [1, 512, 8, 8] 0
ResidualBlock-15 [[1, 512, 8, 8]] [1, 512, 8, 8] 0
Conv2D-35 [[1, 512, 8, 8]] [1, 512, 8, 8] 2,359,808
BatchNorm2D-35 [[1, 512, 8, 8]] [1, 512, 8, 8] 2,048
ReLU-32 [[1, 512, 8, 8]] [1, 512, 8, 8] 0
Conv2D-36 [[1, 512, 8, 8]] [1, 512, 8, 8] 2,359,808
BatchNorm2D-36 [[1, 512, 8, 8]] [1, 512, 8, 8] 2,048
ReLU-33 [[1, 512, 8, 8]] [1, 512, 8, 8] 0
ResidualBlock-16 [[1, 512, 8, 8]] [1, 512, 8, 8] 0
AvgPool2D-1 [[1, 512, 8, 8]] [1, 512, 2, 2] 0
Linear-1 [[1, 2048]] [1, 2] 4,098
============================================================================
Total params: 21,314,306
Trainable params: 21,280,258
Non-trainable params: 34,048
----------------------------------------------------------------------------
Input size (MB): 0.75
Forward/backward pass size (MB): 125.77
Params size (MB): 81.31
Estimated Total Size (MB): 207.82
----------------------------------------------------------------------------
{'total_params': 21314306, 'trainable_params': 21280258}
PaddlePaddle 飞桨复现 ResNet34的更多相关文章
- 提速1000倍,预测延迟少于1ms,百度飞桨发布基于ERNIE的语义理解开发套件
提速1000倍,预测延迟少于1ms,百度飞桨发布基于ERNIE的语义理解开发套件 11月5日,在『WAVE Summit+』2019 深度学习开发者秋季峰会上,百度对外发布基于 ERNIE 的语义理解 ...
- 树莓派4B安装 百度飞桨paddlelite 做视频检测 (一、环境安装)
前言: 当前准备重新在树莓派4B8G 上面搭载训练模型进行识别检测,训练采用了百度飞桨的PaddleX再也不用为训练部署环境各种报错发愁了,推荐大家使用. 关于在树莓派4B上面paddlelite的文 ...
- Ubuntu 百度飞桨和 CUDA 的安装
Ubuntu 百度飞桨 和 CUDA 的安装 1.简介 本文主要是 Ubuntu 百度飞桨 和 CUDA 的安装 系统:Ubuntu 20.04 百度飞桨:2.2 为例 2.百度飞桨安装 访问百度飞桨 ...
- 【一】ERNIE:飞桨开源开发套件,入门学习,看看行业顶尖持续学习语义理解框架,如何取得世界多个实战的SOTA效果?
参考文章: 深度剖析知识增强语义表示模型--ERNIE_财神Childe的博客-CSDN博客_ernie模型 ERNIE_ERNIE开源开发套件_飞桨 https://github.com/Pad ...
- 百度飞桨数据处理 API 数据格式 HWC CHW 和 PIL 图像处理之间的关系
使用百度飞桨 API 例如:Resize Normalize,处理数据的时候. Resize:如果输入的图像是 PIL 读取的图像这个数据格式是 HWC ,Resize 就需要 HWC 格式的数据. ...
- 【百度飞桨】手写数字识别模型部署Paddle Inference
从完成一个简单的『手写数字识别任务』开始,快速了解飞桨框架 API 的使用方法. 模型开发 『手写数字识别』是深度学习里的 Hello World 任务,用于对 0 ~ 9 的十类数字进行分类,即输入 ...
- 我做的百度飞桨PaddleOCR .NET调用库
我做的百度飞桨PaddleOCR .NET调用库 .NET Conf 2021中国我做了一次<.NET玩转计算机视觉OpenCV>的分享,其中提到了一个效果特别好的OCR识别引擎--百度飞 ...
- 飞桨paddlespeech语音唤醒推理C实现
上篇(飞桨paddlespeech 语音唤醒初探)初探了paddlespeech下的语音唤醒方案,通过调试也搞清楚了里面的细节.因为是python 下的,不能直接部署,要想在嵌入式上部署需要有C下的推 ...
- 飞桨AI 文本实体抽取 数据准备(excel 文本标注)
网纸: https://ai.baidu.com/easydl/app/deploy/tee/public #!/usr/bin/env python3 # -*- coding: utf-8 -*- ...
- 手把手0基础Centos下安装与部署paddleOcr 教程
!!!以下内容为作者原创,首发于个人博客园&掘金平台.未经原作者同意与许可,任何人.任何组织不得以任何形式转载.原创不易,如果对您的问题提供了些许帮助,希望得到您的点赞支持. 0.paddle ...
随机推荐
- Pytorch中tensor的打印精度
1. 设置打印精 Pytorch中tensor打印的数据长度需要使用torch.set_printoptions(precision=xx)进行设置,否则打印的长度会很短,给人一种精度不够的错觉: & ...
- Postman挂载外部文件,实现参数化
一般来说,对一个接口进行测试,只能用边界值和等价类的方法,因此就会涉及到各种参数,使用Postman参数化,比较便捷: 但是自己这篇写得实在太浅陋了,将流花兄的博客内容附下,可以直接看他的 https ...
- Java学习笔记-封装Java Util包Base64方法
懒人菜鸟入门Java系列-习惯性封装常用方法,方便开发过程中调用 注释: Java版本-1.8 1 * @Author wuwenchao 2 * @Version 1.0.0 3 * @Date ...
- PHP精度计算函数
bcadd - 将两个高精度数字相加 bccomp - 比较两个高精度数字,返回-1, 0, 1 bcdiv - 将两个高精度数字相除 bcmod - 求高精度数字余数 bcmul - 将两个高精度数 ...
- win10修复系统
DISM.exe /Online /Cleanup-image /Restorehealth sfc /scannow
- SpringBoot——实现WebService接口服务端以及客户端开发
参考:https://blog.csdn.net/qq_43842093/article/details/123076587 https://www.cnblogs.com/yinyl/p/14197 ...
- 第七章ssh sftp scp
第七章ssh sftp scp 对数据进行了加密和压缩 版本号协商,可能客户端和服务端的版本号不一致,服务端向客户端发送一个ssh协商,告诉客户端使用的ssh协议的版本号是多少,客户端在接收到了这个协 ...
- SpringBoot3.0 + SpringSecurity6.0+JWT
JWT_SpringSecurity SpringBoot3.0 + SpringSecurity6.0+JWT Spring Security 是 Spring 家族中的一个安全管理框架. 一般We ...
- sql处理重复的列,更好理清分组和分区
一.分组统计.分区排名 1.语法和含义: 如果查询结果看得有疑惑,看第二部分-sql处理重复的列,更好理清分组和分区,有建表插入数据的sql语句 分组统计:GROUP BY 结合 统计/聚合函数一起使 ...
- 性能的极致,Rust的加持,Zed-Dev编辑器快速搭建Python3.10开发环境
快就一个字,甚至比以快著称于世的Sublime 4编辑器都快,这就是Zed.dev编辑器.其底层由 Rust 编写,比基于Electron技术微软开源的编辑器VSCode快一倍有余,性能上无出其右,同 ...