While training a small classification network, I found that retrieval performance improved after adding BatchNorm layers compared to the original network, so I am recording the network structure here for future reference:
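The only difference between the two configurations below is that a BatchNorm layer is inserted between each convolution and its ReLU (and the two LRN layers from the first version are dropped). As a quick orientation, here is a minimal sketch of that pattern; the numeric hyperparameters were stripped from the listings below, so the values used here (num_output, kernel_size, stride, and lr_mult: 0) are illustrative assumptions, with lr_mult: 0 being the common Caffe convention for BatchNorm's internal blobs rather than something confirmed by the original file:

layer {
  name: "conv1"
  type: "Convolution"
  bottom: "data"
  top: "conv1"
  convolution_param {
    num_output: 64     # illustrative value
    kernel_size: 3     # illustrative value
    stride: 1          # illustrative value
  }
}
layer {
  name: "bn1"
  type: "BatchNorm"
  bottom: "conv1"
  top: "conv1"         # normalize in place
  # The three param blocks correspond to BatchNorm's internal blobs
  # (running mean, running variance, moving-average factor); they are
  # conventionally frozen with lr_mult: 0 since they are statistics,
  # not weights learned by the solver.
  param { lr_mult: 0 }
  param { lr_mult: 0 }
  param { lr_mult: 0 }
}
layer {
  name: "relu1"
  type: "ReLU"
  bottom: "conv1"
  top: "conv1"
}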

Before adding the BatchNorm layer:

layer {
name: "conv1"
type: "Convolution"
bottom: "data"
top: "conv1"
param {
lr_mult:
decay_mult:
}
param {
lr_mult:
decay_mult:
}
convolution_param {
num_output:
kernel_size:
stride:
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value:
}
}
}
layer {
name: "relu1"
type: "ReLU"
bottom: "conv1"
top: "conv1"
}
layer {
name: "pool1"
type: "Pooling"
bottom: "conv1"
top: "pool1"
pooling_param {
pool: MAX
kernel_size:
stride:
}
}
layer {
name: "norm1"
type: "LRN"
bottom: "pool1"
top: "norm1"
lrn_param {
local_size:
alpha: 0.0001
beta: 0.75
}
}
layer {
name: "conv2"
type: "Convolution"
bottom: "norm1"
top: "conv2"
param {
lr_mult:
decay_mult:
}
param {
lr_mult:
decay_mult:
}
convolution_param {
num_output:
pad:
kernel_size:
stride:
group:
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value:
}
}
}
layer {
name: "relu2"
type: "ReLU"
bottom: "conv2"
top: "conv2"
}
layer {
name: "pool2"
type: "Pooling"
bottom: "conv2"
top: "pool2"
pooling_param {
pool: MAX
kernel_size:
stride:
}
}
layer {
name: "norm2"
type: "LRN"
bottom: "pool2"
top: "norm2"
lrn_param {
local_size:
alpha: 0.0001
beta: 0.75
}
}
layer {
name: "conv3"
type: "Convolution"
bottom: "norm2"
top: "conv3"
param {
lr_mult:
decay_mult:
}
param {
lr_mult:
decay_mult:
}
convolution_param {
num_output:
pad:
stride:
kernel_size:
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value:
}
}
}
layer {
name: "relu3"
type: "ReLU"
bottom: "conv3"
top: "conv3"
}
layer {
name: "conv4"
type: "Convolution"
bottom: "conv3"
top: "conv4"
param {
lr_mult:
decay_mult:
}
param {
lr_mult:
decay_mult:
}
convolution_param {
num_output:
pad:
kernel_size:
group:
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value:
}
}
}
layer {
name: "relu4"
type: "ReLU"
bottom: "conv4"
top: "conv4"
}
layer {
name: "conv5"
type: "Convolution"
bottom: "conv4"
top: "conv5"
param {
lr_mult:
decay_mult:
}
param {
lr_mult:
decay_mult:
}
convolution_param {
num_output:
pad:
stride:
kernel_size:
group:
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value:
}
}
}
layer {
name: "relu5"
type: "ReLU"
bottom: "conv5"
top: "conv5"
}
layer {
name: "pool5"
type: "Pooling"
bottom: "conv5"
top: "pool5"
pooling_param {
pool: MAX
kernel_size:
stride:
}
}
layer {
name: "fc6_srx"
type: "InnerProduct"
bottom: "pool5"
top: "fc6"
param {
lr_mult:
decay_mult:
}
param {
lr_mult:
decay_mult:
}
inner_product_param {
num_output:
weight_filler {
type: "gaussian"
std: 0.005
}
bias_filler {
type: "constant"
value:
}
}
}
layer {
name: "relu7"
type: "ReLU"
bottom: "fc6"
top: "fc6"
}
layer {
name: "drop7"
type: "Dropout"
bottom: "fc6"
top: "fc6"
dropout_param {
dropout_ratio: 0.5
}
}
layer {
name: "fc7_srx"
type: "InnerProduct"
bottom: "fc6"
top: "fc7"
param {
lr_mult:
decay_mult:
}
param {
lr_mult:
decay_mult:
}
inner_product_param {
num_output:
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value:
}
}
}
layer {
name: "loss"
type: "SoftmaxWithLoss"
top: "SoftmaxWithLoss"
bottom: "fc7"
bottom: "label"
include {
phase: TRAIN
}
}
layer {
name: "accuracy"
type: "Accuracy"
bottom: "fc7"
bottom: "label"
top: "accuracy"
include {
phase: TEST
}
}

After adding the BatchNorm layer:

layer {
name: "conv1"
type: "Convolution"
bottom: "data"
top: "conv1"
param {
lr_mult:
decay_mult:
}
param {
lr_mult:
decay_mult:
}
convolution_param {
num_output:
kernel_size:
stride:
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value:
}
}
}
##############
layer {
bottom: "conv1"
top: "conv1"
name: "bn1"
type: "BatchNorm"
param {
lr_mult:
}
param {
lr_mult:
}
param {
lr_mult:
}
}
##############
layer {
name: "relu1"
type: "ReLU"
bottom: "conv1"
top: "conv1"
}
layer {
name: "pool1"
type: "Pooling"
bottom: "conv1"
top: "pool1"
pooling_param {
pool: MAX
kernel_size:
stride:
}
}
layer {
name: "conv2"
type: "Convolution"
bottom: "pool1"
top: "conv2"
param {
lr_mult:
decay_mult:
}
param {
lr_mult:
decay_mult:
}
convolution_param {
num_output:
pad:
kernel_size:
stride:
group:
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value:
}
}
}
##############
layer {
bottom: "conv2"
top: "conv2"
name: "bn2"
type: "BatchNorm"
param {
lr_mult:
}
param {
lr_mult:
}
param {
lr_mult:
}
}
##############
layer {
name: "relu2"
type: "ReLU"
bottom: "conv2"
top: "conv2"
}
layer {
name: "pool2"
type: "Pooling"
bottom: "conv2"
top: "pool2"
pooling_param {
pool: MAX
kernel_size:
stride:
}
}
layer {
name: "conv3"
type: "Convolution"
bottom: "pool2"
top: "conv3"
param {
lr_mult:
decay_mult:
}
param {
lr_mult:
decay_mult:
}
convolution_param {
num_output:
pad:
stride:
kernel_size:
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value:
}
}
}
##############
layer {
bottom: "conv3"
top: "conv3"
name: "bn3"
type: "BatchNorm"
param {
lr_mult:
}
param {
lr_mult:
}
param {
lr_mult:
}
}
##############
layer {
name: "relu3"
type: "ReLU"
bottom: "conv3"
top: "conv3"
}
layer {
name: "conv4"
type: "Convolution"
bottom: "conv3"
top: "conv4"
param {
lr_mult:
decay_mult:
}
param {
lr_mult:
decay_mult:
}
convolution_param {
num_output:
pad:
kernel_size:
group:
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value:
}
}
}
##############
layer {
bottom: "conv4"
top: "conv4"
name: "bn4"
type: "BatchNorm"
param {
lr_mult:
}
param {
lr_mult:
}
param {
lr_mult:
}
}
##############
layer {
name: "relu4"
type: "ReLU"
bottom: "conv4"
top: "conv4"
}
layer {
name: "conv5"
type: "Convolution"
bottom: "conv4"
top: "conv5"
param {
lr_mult:
decay_mult:
}
param {
lr_mult:
decay_mult:
}
convolution_param {
num_output:
pad:
stride:
kernel_size:
group:
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value:
}
}
}
##############
layer {
bottom: "conv5"
top: "conv5"
name: "bn5"
type: "BatchNorm"
param {
lr_mult:
}
param {
lr_mult:
}
param {
lr_mult:
}
}
##############
layer {
name: "relu5"
type: "ReLU"
bottom: "conv5"
top: "conv5"
}
layer {
name: "pool5"
type: "Pooling"
bottom: "conv5"
top: "pool5"
pooling_param {
pool: MAX
kernel_size:
stride:
}
}
layer {
name: "fc6_srx"
type: "InnerProduct"
bottom: "pool5"
top: "fc6"
param {
lr_mult:
decay_mult:
}
param {
lr_mult:
decay_mult:
}
inner_product_param {
num_output:
weight_filler {
type: "gaussian"
std: 0.005
}
bias_filler {
type: "constant"
value:
}
}
}
layer {
name: "relu7"
type: "ReLU"
bottom: "fc6"
top: "fc6"
}
layer {
name: "drop7"
type: "Dropout"
bottom: "fc6"
top: "fc6"
dropout_param {
dropout_ratio: 0.5
}
}
layer {
name: "fc7_srx"
type: "InnerProduct"
bottom: "fc6"
top: "fc7"
param {
lr_mult:
decay_mult:
}
param {
lr_mult:
decay_mult:
}
inner_product_param {
num_output:
weight_filler {
type: "gaussian"
std: 0.01
}
bias_filler {
type: "constant"
value:
}
}
}
layer {
name: "loss"
type: "SoftmaxWithLoss"
top: "SoftmaxWithLoss"
bottom: "fc7"
bottom: "label"
include {
phase: TRAIN
}
}
layer {
name: "accuracy"
type: "Accuracy"
bottom: "fc7"
bottom: "label"
top: "accuracy"
include {
phase: TEST
}
}
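One caveat worth noting: Caffe's BatchNorm layer only normalizes its input; it has no learnable per-channel scale and shift. Many Caffe models therefore pair each BatchNorm with a Scale layer (bias_term: true) to recover the gamma/beta of the original BatchNorm formulation. The configuration above uses BatchNorm alone, so the following is only a sketch of that common variant, not something the network above does, and the layer names are illustrative:

layer {
  name: "bn1"
  type: "BatchNorm"
  bottom: "conv1"
  top: "conv1"
  param { lr_mult: 0 }   # blob 0: running mean
  param { lr_mult: 0 }   # blob 1: running variance
  param { lr_mult: 0 }   # blob 2: moving-average factor
  # use_global_stats is left unset, so Caffe uses mini-batch statistics in the
  # TRAIN phase and the accumulated running statistics in the TEST phase.
}
layer {
  name: "scale1"
  type: "Scale"
  bottom: "conv1"
  top: "conv1"
  scale_param {
    bias_term: true      # learnable shift (beta) in addition to the scale (gamma)
  }
}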
