Reproducing ResNet34 with PaddlePaddle (飞桨)
import paddle
import paddle.nn as nn

# Basic residual block (two 3x3 convolutions) used by ResNet-18/34.
class ResidualBlock(nn.Layer):
    def __init__(self, in_channels, out_channels, stride=1, downsample=None):
        super(ResidualBlock, self).__init__()
        self.conv1 = nn.Sequential(
            nn.Conv2D(in_channels, out_channels, kernel_size=3, stride=stride, padding=1),
            nn.BatchNorm2D(out_channels),
            nn.ReLU())
        self.conv2 = nn.Sequential(
            nn.Conv2D(out_channels, out_channels, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2D(out_channels))
        self.downsample = downsample
        self.relu = nn.ReLU()
        self.out_channels = out_channels

    def forward(self, x):
        residual = x
        out = self.conv1(x)
        out = self.conv2(out)
        if self.downsample is not None:
            # Project the identity branch when the spatial size or channel count changes.
            residual = self.downsample(x)
        out += residual
        out = self.relu(out)
        return out

class ResNet(nn.Layer):
    def __init__(self, block, layers, num_classes=1000):
        super(ResNet, self).__init__()
        self.inplanes = 64
        self.conv1 = nn.Sequential(
            nn.Conv2D(3, 64, kernel_size=7, stride=2, padding=3),
            nn.BatchNorm2D(64),
            nn.ReLU())
        self.maxpool = nn.MaxPool2D(kernel_size=3, stride=2, padding=1)
        self.layer0 = self._make_layer(block, 64, layers[0], stride=1)
        self.layer1 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer2 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer3 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = nn.AvgPool2D(7, stride=1)
        # With 256x256 inputs the feature map before the pool is 8x8, so the 7x7
        # average pool leaves 2x2 and the flattened feature size is 512*2*2 = 2048.
        self.fc = nn.Linear(2048, num_classes)

    def _make_layer(self, block, planes, blocks, stride=1):
        downsample = None
        if stride != 1 or self.inplanes != planes:
            downsample = nn.Sequential(
                nn.Conv2D(self.inplanes, planes, kernel_size=1, stride=stride),
                nn.BatchNorm2D(planes),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.conv1(x)
        x = self.maxpool(x)
        x = self.layer0(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.avgpool(x)
        # PyTorch equivalent: x = x.view(x.size(0), -1)
        x = paddle.reshape(x, [x.shape[0], -1])
        x = self.fc(x)
        return x

model = ResNet(ResidualBlock, [3, 4, 6, 3], num_classes=2)  # instantiate the model
paddle.Model(model).summary((-1, 3, 256, 256))
W0505 09:07:12.146911 5588 device_context.cc:447] Please NOTE: device: 0, GPU Compute Capability: 7.0, Driver API Version: 11.2, Runtime API Version: 10.1
W0505 09:07:12.151273 5588 device_context.cc:465] device: 0, cuDNN Version: 7.6.
----------------------------------------------------------------------------
Layer (type) Input Shape Output Shape Param #
============================================================================
Conv2D-1 [[1, 3, 256, 256]] [1, 64, 128, 128] 9,472
BatchNorm2D-1 [[1, 64, 128, 128]] [1, 64, 128, 128] 256
ReLU-1 [[1, 64, 128, 128]] [1, 64, 128, 128] 0
MaxPool2D-1 [[1, 64, 128, 128]] [1, 64, 64, 64] 0
Conv2D-2 [[1, 64, 64, 64]] [1, 64, 64, 64] 36,928
BatchNorm2D-2 [[1, 64, 64, 64]] [1, 64, 64, 64] 256
ReLU-2 [[1, 64, 64, 64]] [1, 64, 64, 64] 0
Conv2D-3 [[1, 64, 64, 64]] [1, 64, 64, 64] 36,928
BatchNorm2D-3 [[1, 64, 64, 64]] [1, 64, 64, 64] 256
ReLU-3 [[1, 64, 64, 64]] [1, 64, 64, 64] 0
ResidualBlock-1 [[1, 64, 64, 64]] [1, 64, 64, 64] 0
Conv2D-4 [[1, 64, 64, 64]] [1, 64, 64, 64] 36,928
BatchNorm2D-4 [[1, 64, 64, 64]] [1, 64, 64, 64] 256
ReLU-4 [[1, 64, 64, 64]] [1, 64, 64, 64] 0
Conv2D-5 [[1, 64, 64, 64]] [1, 64, 64, 64] 36,928
BatchNorm2D-5 [[1, 64, 64, 64]] [1, 64, 64, 64] 256
ReLU-5 [[1, 64, 64, 64]] [1, 64, 64, 64] 0
ResidualBlock-2 [[1, 64, 64, 64]] [1, 64, 64, 64] 0
Conv2D-6 [[1, 64, 64, 64]] [1, 64, 64, 64] 36,928
BatchNorm2D-6 [[1, 64, 64, 64]] [1, 64, 64, 64] 256
ReLU-6 [[1, 64, 64, 64]] [1, 64, 64, 64] 0
Conv2D-7 [[1, 64, 64, 64]] [1, 64, 64, 64] 36,928
BatchNorm2D-7 [[1, 64, 64, 64]] [1, 64, 64, 64] 256
ReLU-7 [[1, 64, 64, 64]] [1, 64, 64, 64] 0
ResidualBlock-3 [[1, 64, 64, 64]] [1, 64, 64, 64] 0
Conv2D-9 [[1, 64, 64, 64]] [1, 128, 32, 32] 73,856
BatchNorm2D-9 [[1, 128, 32, 32]] [1, 128, 32, 32] 512
ReLU-8 [[1, 128, 32, 32]] [1, 128, 32, 32] 0
Conv2D-10 [[1, 128, 32, 32]] [1, 128, 32, 32] 147,584
BatchNorm2D-10 [[1, 128, 32, 32]] [1, 128, 32, 32] 512
Conv2D-8 [[1, 64, 64, 64]] [1, 128, 32, 32] 8,320
BatchNorm2D-8 [[1, 128, 32, 32]] [1, 128, 32, 32] 512
ReLU-9 [[1, 128, 32, 32]] [1, 128, 32, 32] 0
ResidualBlock-4 [[1, 64, 64, 64]] [1, 128, 32, 32] 0
Conv2D-11 [[1, 128, 32, 32]] [1, 128, 32, 32] 147,584
BatchNorm2D-11 [[1, 128, 32, 32]] [1, 128, 32, 32] 512
ReLU-10 [[1, 128, 32, 32]] [1, 128, 32, 32] 0
Conv2D-12 [[1, 128, 32, 32]] [1, 128, 32, 32] 147,584
BatchNorm2D-12 [[1, 128, 32, 32]] [1, 128, 32, 32] 512
ReLU-11 [[1, 128, 32, 32]] [1, 128, 32, 32] 0
ResidualBlock-5 [[1, 128, 32, 32]] [1, 128, 32, 32] 0
Conv2D-13 [[1, 128, 32, 32]] [1, 128, 32, 32] 147,584
BatchNorm2D-13 [[1, 128, 32, 32]] [1, 128, 32, 32] 512
ReLU-12 [[1, 128, 32, 32]] [1, 128, 32, 32] 0
Conv2D-14 [[1, 128, 32, 32]] [1, 128, 32, 32] 147,584
BatchNorm2D-14 [[1, 128, 32, 32]] [1, 128, 32, 32] 512
ReLU-13 [[1, 128, 32, 32]] [1, 128, 32, 32] 0
ResidualBlock-6 [[1, 128, 32, 32]] [1, 128, 32, 32] 0
Conv2D-15 [[1, 128, 32, 32]] [1, 128, 32, 32] 147,584
BatchNorm2D-15 [[1, 128, 32, 32]] [1, 128, 32, 32] 512
ReLU-14 [[1, 128, 32, 32]] [1, 128, 32, 32] 0
Conv2D-16 [[1, 128, 32, 32]] [1, 128, 32, 32] 147,584
BatchNorm2D-16 [[1, 128, 32, 32]] [1, 128, 32, 32] 512
ReLU-15 [[1, 128, 32, 32]] [1, 128, 32, 32] 0
ResidualBlock-7 [[1, 128, 32, 32]] [1, 128, 32, 32] 0
Conv2D-18 [[1, 128, 32, 32]] [1, 256, 16, 16] 295,168
BatchNorm2D-18 [[1, 256, 16, 16]] [1, 256, 16, 16] 1,024
ReLU-16 [[1, 256, 16, 16]] [1, 256, 16, 16] 0
Conv2D-19 [[1, 256, 16, 16]] [1, 256, 16, 16] 590,080
BatchNorm2D-19 [[1, 256, 16, 16]] [1, 256, 16, 16] 1,024
Conv2D-17 [[1, 128, 32, 32]] [1, 256, 16, 16] 33,024
BatchNorm2D-17 [[1, 256, 16, 16]] [1, 256, 16, 16] 1,024
ReLU-17 [[1, 256, 16, 16]] [1, 256, 16, 16] 0
ResidualBlock-8 [[1, 128, 32, 32]] [1, 256, 16, 16] 0
Conv2D-20 [[1, 256, 16, 16]] [1, 256, 16, 16] 590,080
BatchNorm2D-20 [[1, 256, 16, 16]] [1, 256, 16, 16] 1,024
ReLU-18 [[1, 256, 16, 16]] [1, 256, 16, 16] 0
Conv2D-21 [[1, 256, 16, 16]] [1, 256, 16, 16] 590,080
BatchNorm2D-21 [[1, 256, 16, 16]] [1, 256, 16, 16] 1,024
ReLU-19 [[1, 256, 16, 16]] [1, 256, 16, 16] 0
ResidualBlock-9 [[1, 256, 16, 16]] [1, 256, 16, 16] 0
Conv2D-22 [[1, 256, 16, 16]] [1, 256, 16, 16] 590,080
BatchNorm2D-22 [[1, 256, 16, 16]] [1, 256, 16, 16] 1,024
ReLU-20 [[1, 256, 16, 16]] [1, 256, 16, 16] 0
Conv2D-23 [[1, 256, 16, 16]] [1, 256, 16, 16] 590,080
BatchNorm2D-23 [[1, 256, 16, 16]] [1, 256, 16, 16] 1,024
ReLU-21 [[1, 256, 16, 16]] [1, 256, 16, 16] 0
ResidualBlock-10 [[1, 256, 16, 16]] [1, 256, 16, 16] 0
Conv2D-24 [[1, 256, 16, 16]] [1, 256, 16, 16] 590,080
BatchNorm2D-24 [[1, 256, 16, 16]] [1, 256, 16, 16] 1,024
ReLU-22 [[1, 256, 16, 16]] [1, 256, 16, 16] 0
Conv2D-25 [[1, 256, 16, 16]] [1, 256, 16, 16] 590,080
BatchNorm2D-25 [[1, 256, 16, 16]] [1, 256, 16, 16] 1,024
ReLU-23 [[1, 256, 16, 16]] [1, 256, 16, 16] 0
ResidualBlock-11 [[1, 256, 16, 16]] [1, 256, 16, 16] 0
Conv2D-26 [[1, 256, 16, 16]] [1, 256, 16, 16] 590,080
BatchNorm2D-26 [[1, 256, 16, 16]] [1, 256, 16, 16] 1,024
ReLU-24 [[1, 256, 16, 16]] [1, 256, 16, 16] 0
Conv2D-27 [[1, 256, 16, 16]] [1, 256, 16, 16] 590,080
BatchNorm2D-27 [[1, 256, 16, 16]] [1, 256, 16, 16] 1,024
ReLU-25 [[1, 256, 16, 16]] [1, 256, 16, 16] 0
ResidualBlock-12 [[1, 256, 16, 16]] [1, 256, 16, 16] 0
Conv2D-28 [[1, 256, 16, 16]] [1, 256, 16, 16] 590,080
BatchNorm2D-28 [[1, 256, 16, 16]] [1, 256, 16, 16] 1,024
ReLU-26 [[1, 256, 16, 16]] [1, 256, 16, 16] 0
Conv2D-29 [[1, 256, 16, 16]] [1, 256, 16, 16] 590,080
BatchNorm2D-29 [[1, 256, 16, 16]] [1, 256, 16, 16] 1,024
ReLU-27 [[1, 256, 16, 16]] [1, 256, 16, 16] 0
ResidualBlock-13 [[1, 256, 16, 16]] [1, 256, 16, 16] 0
Conv2D-31 [[1, 256, 16, 16]] [1, 512, 8, 8] 1,180,160
BatchNorm2D-31 [[1, 512, 8, 8]] [1, 512, 8, 8] 2,048
ReLU-28 [[1, 512, 8, 8]] [1, 512, 8, 8] 0
Conv2D-32 [[1, 512, 8, 8]] [1, 512, 8, 8] 2,359,808
BatchNorm2D-32 [[1, 512, 8, 8]] [1, 512, 8, 8] 2,048
Conv2D-30 [[1, 256, 16, 16]] [1, 512, 8, 8] 131,584
BatchNorm2D-30 [[1, 512, 8, 8]] [1, 512, 8, 8] 2,048
ReLU-29 [[1, 512, 8, 8]] [1, 512, 8, 8] 0
ResidualBlock-14 [[1, 256, 16, 16]] [1, 512, 8, 8] 0
Conv2D-33 [[1, 512, 8, 8]] [1, 512, 8, 8] 2,359,808
BatchNorm2D-33 [[1, 512, 8, 8]] [1, 512, 8, 8] 2,048
ReLU-30 [[1, 512, 8, 8]] [1, 512, 8, 8] 0
Conv2D-34 [[1, 512, 8, 8]] [1, 512, 8, 8] 2,359,808
BatchNorm2D-34 [[1, 512, 8, 8]] [1, 512, 8, 8] 2,048
ReLU-31 [[1, 512, 8, 8]] [1, 512, 8, 8] 0
ResidualBlock-15 [[1, 512, 8, 8]] [1, 512, 8, 8] 0
Conv2D-35 [[1, 512, 8, 8]] [1, 512, 8, 8] 2,359,808
BatchNorm2D-35 [[1, 512, 8, 8]] [1, 512, 8, 8] 2,048
ReLU-32 [[1, 512, 8, 8]] [1, 512, 8, 8] 0
Conv2D-36 [[1, 512, 8, 8]] [1, 512, 8, 8] 2,359,808
BatchNorm2D-36 [[1, 512, 8, 8]] [1, 512, 8, 8] 2,048
ReLU-33 [[1, 512, 8, 8]] [1, 512, 8, 8] 0
ResidualBlock-16 [[1, 512, 8, 8]] [1, 512, 8, 8] 0
AvgPool2D-1 [[1, 512, 8, 8]] [1, 512, 2, 2] 0
Linear-1 [[1, 2048]] [1, 2] 4,098
============================================================================
Total params: 21,314,306
Trainable params: 21,280,258
Non-trainable params: 34,048
----------------------------------------------------------------------------
Input size (MB): 0.75
Forward/backward pass size (MB): 125.77
Params size (MB): 81.31
Estimated Total Size (MB): 207.82
----------------------------------------------------------------------------
{'total_params': 21314306, 'trainable_params': 21280258}
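The summary confirms that a 256x256 input leaves a 2x2x512 feature map after the 7x7 average pool, which is why the classifier takes 2048 input features. Below is a minimal, hedged sketch of exercising the model: a random-batch shape check followed by the high-level paddle.Model training skeleton. The random input, optimizer settings, batch size, and the placeholder train_dataset are illustrative assumptions, not part of the original post.

import paddle

# Shape sanity check with a random batch (assumption: 256x256 RGB inputs, as in the summary).
x = paddle.randn([4, 3, 256, 256])
print(model(x).shape)  # expected: [4, 2] for the 2-class model instantiated above

# Hedged training skeleton with the high-level API; train_dataset is a placeholder
# paddle.io.Dataset yielding (image, label) pairs and must be supplied by the reader.
hapi_model = paddle.Model(model)
hapi_model.prepare(
    optimizer=paddle.optimizer.Adam(learning_rate=1e-3, parameters=model.parameters()),
    loss=paddle.nn.CrossEntropyLoss(),
    metrics=paddle.metric.Accuracy())
# hapi_model.fit(train_dataset, epochs=10, batch_size=32, verbose=1)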