VGG_19 train_vali.prototxt file

This is the train/validation network definition (train_vali.prototxt) used to fine-tune VGG-19 with Caffe on the HAT fine-tuning data referenced in the source paths: ImageData layers feed the images, the convolutional stack follows the standard VGG-19 layout, and the classifier ends in a 27-output layer with a sigmoid and a Euclidean loss.
name: "VGG_ILSVRC_19_layer"
layer {
name: "data"
type: "ImageData"
top: "data"
top: "label"
include {
phase: TRAIN
}
image_data_param {
batch_size: 12
source: "../../fine_tuning_data/HAT_fineTuning_data/train_data_fineTuning.txt"
root_folder: "../../fine_tuning_data/HAT_fineTuning_data/train_data/"
}
}
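# Validation data layer: TEST phase only, batch size 10, mirroring disabled.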
layer {
name: "data"
type: "ImageData"
top: "data"
top: "label"
include {
phase: TEST
}
transform_param {
mirror: false
}
image_data_param {
batch_size: 10
source: "../../fine_tuning_data/HAT_fineTuning_data/test_data_fineTuning.txt"
root_folder: "../../fine_tuning_data/HAT_fineTuning_data/test_data/"
}
}
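# Block 1: two 3x3 convolutions (64 outputs, pad 1) with ReLU, then 2x2 max pooling.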
layer {
bottom:"data"
top:"conv1_1"
name:"conv1_1"
type:"Convolution"
convolution_param {
num_output:64
pad:1
kernel_size:3
}
}
layer {
bottom:"conv1_1"
top:"conv1_1"
name:"relu1_1"
type:"ReLU"
}
layer {
bottom:"conv1_1"
top:"conv1_2"
name:"conv1_2"
type:"Convolution"
convolution_param {
num_output:64
pad:1
kernel_size:3
}
}
layer {
bottom:"conv1_2"
top:"conv1_2"
name:"relu1_2"
type:"ReLU"
}
layer {
bottom:"conv1_2"
top:"pool1"
name:"pool1"
type:"Pooling"
pooling_param {
pool:MAX
kernel_size:2
stride:2
}
}
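# Block 2: two 3x3 convolutions (128 outputs) with ReLU, then 2x2 max pooling.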
layer {
bottom:"pool1"
top:"conv2_1"
name:"conv2_1"
type:"Convolution"
convolution_param {
num_output:128
pad:1
kernel_size:3
}
}
layer {
bottom:"conv2_1"
top:"conv2_1"
name:"relu2_1"
type:"ReLU"
}
layer {
bottom:"conv2_1"
top:"conv2_2"
name:"conv2_2"
type:"Convolution"
convolution_param {
num_output:128
pad:1
kernel_size:3
}
}
layer {
bottom:"conv2_2"
top:"conv2_2"
name:"relu2_2"
type:"ReLU"
}
layer {
bottom:"conv2_2"
top:"pool2"
name:"pool2"
type:"Pooling"
pooling_param {
pool:MAX
kernel_size:2
stride:2
}
}
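# Block 3: four 3x3 convolutions (256 outputs) with ReLU, then 2x2 max pooling.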
layer {
bottom:"pool2"
top:"conv3_1"
name: "conv3_1"
type:"Convolution"
convolution_param {
num_output:256
pad:1
kernel_size:3
}
}
layer {
bottom:"conv3_1"
top:"conv3_1"
name:"relu3_1"
type:"ReLU"
}
layer {
bottom:"conv3_1"
top:"conv3_2"
name:"conv3_2"
type:"Convolution"
convolution_param {
num_output:256
pad:1
kernel_size:3
}
}
layer {
bottom:"conv3_2"
top:"conv3_2"
name:"relu3_2"
type:"ReLU"
}
layer {
bottom:"conv3_2"
top:"conv3_3"
name:"conv3_3"
type:"Convolution"
convolution_param {
num_output:256
pad:1
kernel_size:3
}
}
layer {
bottom:"conv3_3"
top:"conv3_3"
name:"relu3_3"
type:"ReLU"
}
layer {
bottom:"conv3_3"
top:"conv3_4"
name:"conv3_4"
type:"Convolution"
convolution_param {
num_output:256
pad:1
kernel_size:3
}
}
layer {
bottom:"conv3_4"
top:"conv3_4"
name:"relu3_4"
type:"ReLU"
}
layer {
bottom:"conv3_4"
top:"pool3"
name:"pool3"
type:"Pooling"
pooling_param {
pool:MAX
kernel_size: 2
stride: 2
}
}
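# Block 4: four 3x3 convolutions (512 outputs) with ReLU, then 2x2 max pooling.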
layer {
bottom:"pool3"
top:"conv4_1"
name:"conv4_1"
type:"Convolution"
convolution_param {
num_output: 512
pad: 1
kernel_size: 3
}
}
layer {
bottom:"conv4_1"
top:"conv4_1"
name:"relu4_1"
type:"ReLU"
}
layer {
bottom:"conv4_1"
top:"conv4_2"
name:"conv4_2"
type:"Convolution"
convolution_param {
num_output: 512
pad: 1
kernel_size: 3
}
}
layer {
bottom:"conv4_2"
top:"conv4_2"
name:"relu4_2"
type:"ReLU"
}
layer {
bottom:"conv4_2"
top:"conv4_3"
name:"conv4_3"
type:"Convolution"
convolution_param {
num_output: 512
pad: 1
kernel_size: 3
}
}
layer {
bottom:"conv4_3"
top:"conv4_3"
name:"relu4_3"
type:"ReLU"
}
layer {
bottom:"conv4_3"
top:"conv4_4"
name:"conv4_4"
type:"Convolution"
convolution_param {
num_output: 512
pad: 1
kernel_size: 3
}
}
layer {
bottom:"conv4_4"
top:"conv4_4"
name:"relu4_4"
type:"ReLU"
}
layer {
bottom:"conv4_4"
top:"pool4"
name:"pool4"
type:"Pooling"
pooling_param {
pool:MAX
kernel_size: 2
stride: 2
}
}
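# Block 5: four 3x3 convolutions (512 outputs) with ReLU, then 2x2 max pooling.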
layer {
bottom:"pool4"
top:"conv5_1"
name:"conv5_1"
type:"Convolution"
convolution_param {
num_output: 512
pad: 1
kernel_size: 3
}
}
layer {
bottom:"conv5_1"
top:"conv5_1"
name:"relu5_1"
type:"ReLU"
}
layer {
bottom:"conv5_1"
top:"conv5_2"
name:"conv5_2"
type:"Convolution"
convolution_param {
num_output: 512
pad: 1
kernel_size: 3
}
}
layer {
bottom:"conv5_2"
top:"conv5_2"
name:"relu5_2"
type:"ReLU"
}
layer {
bottom:"conv5_2"
top:"conv5_3"
name:"conv5_3"
type:"Convolution"
convolution_param {
num_output: 512
pad: 1
kernel_size: 3
}
}
layer {
bottom:"conv5_3"
top:"conv5_3"
name:"relu5_3"
type:"ReLU"
}
layer {
bottom:"conv5_3"
top:"conv5_4"
name:"conv5_4"
type:"Convolution"
convolution_param {
num_output: 512
pad: 1
kernel_size: 3
}
}
layer {
bottom:"conv5_4"
top:"conv5_4"
name:"relu5_4"
type:"ReLU"
}
layer {
bottom:"conv5_4"
top:"pool5"
name:"pool5"
type:"Pooling"
pooling_param {
pool:MAX
kernel_size: 2
stride: 2
}
}
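# Classifier: fc6_ and fc8_ carry a trailing underscore so their weights are NOT copied from the
# pretrained VGG-19 model (Caffe copies weights by matching layer names) and are re-learned during
# fine-tuning; fc8_ outputs 27 scores instead of the original 1000.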
layer {
bottom:"pool5"
top:"fc6_"
name:"fc6_"
type:"InnerProduct"
inner_product_param {
num_output: 4096
}
}
layer {
bottom:"fc6_"
top:"fc6_"
name:"relu6"
type:"ReLU"
}
layer {
bottom:"fc6_"
top:"fc6_"
name:"drop6"
type:"Dropout"
dropout_param {
dropout_ratio: 0.5
}
}
layer {
bottom:"fc6_"
top:"fc7"
name:"fc7"
type:"InnerProduct"
inner_product_param {
num_output: 4096
}
}
layer {
bottom:"fc7"
top:"fc7"
name:"relu7"
type:"ReLU"
}
layer {
bottom:"fc7"
top:"fc7"
name:"drop7"
type:"Dropout"
dropout_param {
dropout_ratio: 0.5
}
}
layer {
bottom:"fc7"
top:"fc8_"
name:"fc8_"
type:"InnerProduct"
inner_product_param {
num_output: 27
}
}
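# Output head: a sigmoid over the 27 scores, accuracy reported only in the TEST phase,
# and a Euclidean (L2) loss against the labels.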
layer {
name: "sigmoid"
type: "Sigmoid"
bottom: "fc8_"
top: "fc8_"
}
layer {
name: "accuracy"
type: "Accuracy"
bottom: "fc8_"
bottom: "label"
top: "accuracy"
include {
phase: TEST
}
}
layer {
name: "loss"
type: "EuclideanLoss"
bottom: "fc8_"
bottom: "label"
top: "loss"
}
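To actually launch the fine-tuning, this network definition is paired with a solver file and the pretrained VGG-19 weights. The following is only a minimal sketch: the file names, snapshot prefix, and every hyper-parameter value are assumptions chosen as reasonable fine-tuning defaults, not values from the original setup.

# solver.prototxt -- minimal sketch; all paths and values below are illustrative assumptions
net: "train_vali.prototxt"            # the train/val definition shown above
test_iter: 100                        # number of TEST-phase batches per validation pass
test_interval: 500                    # validate every 500 training iterations
base_lr: 0.001                        # small base learning rate, typical for fine-tuning
lr_policy: "step"
gamma: 0.1
stepsize: 5000
momentum: 0.9
weight_decay: 0.0005
max_iter: 20000
snapshot: 5000
snapshot_prefix: "vgg19_hat_finetune"
solver_mode: GPU
# Fine-tuning is then started from the pretrained model (weights file path is an assumption):
#   caffe train --solver=solver.prototxt --weights=VGG_ILSVRC_19_layers.caffemodel

Because fc6_ and fc8_ do not match any layer name in the pretrained .caffemodel, they start from random initialization, while all other layers start from the ImageNet-trained VGG-19 weights.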