c++ caffe 输出 activation map 、 层参数
python输出activation map与层参数:https://blog.csdn.net/tina_ttl/article/details/51033660
caffe::Net文档:
https://caffe.berkeleyvision.org/doxygen/classcaffe_1_1Net.html#a6f6cf9d40637f7576828d856bb1b1826
caffe::Blob文档:
http://caffe.berkeleyvision.org/doxygen/classcaffe_1_1Blob.html
图像通道分离与合并cv::split() cv::merge()
https://blog.csdn.net/guduruyu/article/details/70837779
caffe官方提供的prediction代码
caffe提供了一个用已经训练好的caffemodel来分类单张图片的库(./build/examples/cpp_classification/classification.bin),该库的源码为文件./examples/cpp_classification/classification.cpp
#include <caffe/caffe.hpp>
#ifdef USE_OPENCV
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#endif  // USE_OPENCV
#include <algorithm>
#include <fstream>   /* std::ifstream is used to read the label file */
#include <iomanip>   /* std::setprecision is used when printing predictions */
#include <iosfwd>
#include <memory>
#include <string>
#include <utility>
#include <vector>

/* The whole example requires OpenCV; a stub main() is provided below
 * for builds without it. */
#ifdef USE_OPENCV
using namespace caffe;  // NOLINT(build/namespaces)
using std::string;
/* Pair (label, confidence) representing a prediction. */
typedef std::pair<string, float> Prediction;

/* Wraps a trained Caffe network and classifies single images. */
class Classifier {
 public:
  Classifier(const string& model_file,
             const string& trained_file,
             const string& mean_file,
             const string& label_file);

  /* Return the top-N (label, confidence) predictions, best first. */
  std::vector<Prediction> Classify(const cv::Mat& img, int N = 5);

 private:
  /* Load the binaryproto mean file into mean_. */
  void SetMean(const string& mean_file);

  /* Run one forward pass and return the output layer as a flat vector. */
  std::vector<float> Predict(const cv::Mat& img);

  /* Wrap the network's input blob in per-channel cv::Mat headers. */
  void WrapInputLayer(std::vector<cv::Mat>* input_channels);

  /* Convert, resize and mean-subtract img, writing into input_channels. */
  void Preprocess(const cv::Mat& img,
                  std::vector<cv::Mat>* input_channels);

 private:
  shared_ptr<Net<float> > net_;  // the loaded network
  cv::Size input_geometry_;      // input width/height required by the net
  int num_channels_;             // input channels required by the net (1 or 3)
  cv::Mat mean_;                 // mean image, sized to the input layer
  std::vector<string> labels_;   // class names, one per output node
};
/* Construct a classifier: load the network structure, the trained
 * weights, the mean image and the label names. */
Classifier::Classifier(const string& model_file,
                       const string& trained_file,
                       const string& mean_file,
                       const string& label_file) {
#ifdef CPU_ONLY
  Caffe::set_mode(Caffe::CPU);
#else
  Caffe::set_mode(Caffe::GPU);
#endif

  /* Load the network structure (deploy prototxt) in TEST phase. */
  net_.reset(new Net<float>(model_file, TEST));
  /* Load the trained weights from the caffemodel file. */
  net_->CopyTrainedLayersFrom(trained_file);

  CHECK_EQ(net_->num_inputs(), 1) << "Network should have exactly one input.";
  CHECK_EQ(net_->num_outputs(), 1) << "Network should have exactly one output.";

  Blob<float>* input_layer = net_->input_blobs()[0];
  num_channels_ = input_layer->channels();  /* channels the net expects */
  CHECK(num_channels_ == 3 || num_channels_ == 1)
      << "Input layer should have 1 or 3 channels.";
  /* Image width/height the input layer expects. */
  input_geometry_ = cv::Size(input_layer->width(), input_layer->height());

  /* Load the binaryproto mean file. */
  SetMean(mean_file);

  /* Load the label names, one per line. */
  std::ifstream labels(label_file.c_str());
  CHECK(labels) << "Unable to open labels file " << label_file;
  string line;
  while (std::getline(labels, line))
    labels_.push_back(string(line));

  /* The number of labels must match the number of output nodes. */
  Blob<float>* output_layer = net_->output_blobs()[0];
  CHECK_EQ(labels_.size(), output_layer->channels())
      << "Number of labels is different from the output layer dimension.";
}

/* Order (value, index) pairs by descending value. */
static bool PairCompare(const std::pair<float, int>& lhs,
                        const std::pair<float, int>& rhs) {
  return lhs.first > rhs.first;
}
/* Return the indices of the top N values of vector v, largest first.
 * Requires 0 <= N <= v.size(). */
static std::vector<int> Argmax(const std::vector<float>& v, int N) {
  std::vector<std::pair<float, int> > pairs;
  pairs.reserve(v.size());
  for (size_t i = 0; i < v.size(); ++i)
    pairs.push_back(std::make_pair(v[i], static_cast<int>(i)));
  /* Only the first N positions need to be ordered. The comparator is
   * inlined (descending by value) so this function is self-contained. */
  std::partial_sort(pairs.begin(), pairs.begin() + N, pairs.end(),
                    [](const std::pair<float, int>& lhs,
                       const std::pair<float, int>& rhs) {
                      return lhs.first > rhs.first;
                    });

  std::vector<int> result;
  result.reserve(N);
  for (int i = 0; i < N; ++i)
    result.push_back(pairs[i].second);
  return result;
}
/* Return the top N predictions as (label, confidence) pairs, best first. */
std::vector<Prediction> Classifier::Classify(const cv::Mat& img, int N) {
  std::vector<float> output = Predict(img);  /* forward pass */

  /* Never request more classes than the network produces. */
  N = std::min<int>(labels_.size(), N);
  std::vector<int> maxN = Argmax(output, N);
  std::vector<Prediction> predictions;
  for (int i = 0; i < N; ++i) {
    int idx = maxN[i];
    predictions.push_back(std::make_pair(labels_[idx], output[idx]));
  }

  return predictions;
}
/* Load the mean file in binaryproto format and turn it into a single
 * mean image of the input geometry, filled with the per-channel mean
 * pixel value. */
void Classifier::SetMean(const string& mean_file) {
  BlobProto blob_proto;
  ReadProtoFromBinaryFileOrDie(mean_file.c_str(), &blob_proto);

  /* Convert from BlobProto to Blob<float>. */
  Blob<float> mean_blob;
  mean_blob.FromProto(blob_proto);
  CHECK_EQ(mean_blob.channels(), num_channels_)
      << "Number of channels of mean file doesn't match input layer.";

  /* The format of the mean file is planar 32-bit float BGR or grayscale.
   * Wrap each plane in a cv::Mat header over the blob's memory (no copy). */
  std::vector<cv::Mat> channels;
  float* data = mean_blob.mutable_cpu_data();
  for (int i = 0; i < num_channels_; ++i) {
    /* Extract an individual channel. */
    cv::Mat channel(mean_blob.height(), mean_blob.width(), CV_32FC1, data);
    channels.push_back(channel);
    data += mean_blob.height() * mean_blob.width();
  }

  /* Merge the separate channels into a single image. */
  cv::Mat mean;
  cv::merge(channels, mean);

  /* Compute the global mean pixel value and create a mean image
   * filled with this value. */
  cv::Scalar channel_mean = cv::mean(mean);
  mean_ = cv::Mat(input_geometry_, mean.type(), channel_mean);
}
/* Run one forward pass on img and return the raw output layer values
 * (one score per class). */
std::vector<float> Classifier::Predict(const cv::Mat& img) {
  Blob<float>* input_layer = net_->input_blobs()[0];
  /* Reshape the input blob to hold exactly one image of the geometry
   * the network expects. */
  input_layer->Reshape(1, num_channels_,
                       input_geometry_.height, input_geometry_.width);
  /* Forward dimension change to all layers. */
  net_->Reshape();

  std::vector<cv::Mat> input_channels;
  WrapInputLayer(&input_channels);   /* wrap the input blob's memory */
  Preprocess(img, &input_channels);  /* write the image into the input layer */

  net_->Forward();                   /* compute the class scores */

  /* Copy the output layer to a std::vector. */
  Blob<float>* output_layer = net_->output_blobs()[0];
  const float* begin = output_layer->cpu_data();
  const float* end = begin + output_layer->channels();
  return std::vector<float>(begin, end);
}
void Classifier::WrapInputLayer(std::vector<cv::Mat>* input_channels) {
Blob<float>* input_layer = net_->input_blobs()[]; int width = input_layer->width();
int height = input_layer->height();
float* input_data = input_layer->mutable_cpu_data();
for (int i = ; i < input_layer->channels(); ++i) {
cv::Mat channel(height, width, CV_32FC1, input_data);
input_channels->push_back(channel);
input_data += width * height;
}
} void Classifier::Preprocess(const cv::Mat& img,
std::vector<cv::Mat>* input_channels) {
/* Convert the input image to the input image format of the network. */
cv::Mat sample;
if (img.channels() == && num_channels_ == )
cv::cvtColor(img, sample, cv::COLOR_BGR2GRAY);
else if (img.channels() == && num_channels_ == )
cv::cvtColor(img, sample, cv::COLOR_BGRA2GRAY);
else if (img.channels() == && num_channels_ == )
cv::cvtColor(img, sample, cv::COLOR_BGRA2BGR);
else if (img.channels() == && num_channels_ == )
cv::cvtColor(img, sample, cv::COLOR_GRAY2BGR);
else
sample = img;/*将输入的图像转换成输入层需要的图像格式*/ cv::Mat sample_resized;
if (sample.size() != input_geometry_)
cv::resize(sample, sample_resized, input_geometry_); /*如果大小不一致则需要缩放*/
else
sample_resized = sample; cv::Mat sample_float;
if (num_channels_ == )
sample_resized.convertTo(sample_float, CV_32FC3); /*将数据转化成浮点型*/
else
sample_resized.convertTo(sample_float, CV_32FC1); cv::Mat sample_normalized;
cv::subtract(sample_float, mean_, sample_normalized); /*应该是当前图像减去均值图像*/ /* This operation will write the separate BGR planes directly to the
* input layer of the network because it is wrapped by the cv::Mat
* objects in input_channels. */
cv::split(sample_normalized, *input_channels); /*把测试的图像通过之前的定义的wraper写入到输入层*/ CHECK(reinterpret_cast<float*>(input_channels->at().data)
== net_->input_blobs()[]->cpu_data())
<< "Input channels are not wrapping the input layer of the network.";
} int main(int argc, char** argv) {
if (argc != ) {
std::cerr << "Usage: " << argv[]
<< " deploy.prototxt network.caffemodel"
<< " mean.binaryproto labels.txt img.jpg" << std::endl;
return ;
} ::google::InitGoogleLogging(argv[]); string model_file = argv[]; /*标识网络结构的deploy.prototxt文件*/
string trained_file = argv[]; /*训练出来的模型文件caffemodel*/
string mean_file = argv[]; /*均值.binaryproto文件*/
string label_file = argv[]; /*标签文件:标识类别的名称*/
Classifier classifier(model_file, trained_file, mean_file, label_file); /*创建对象并初始化网络、模型、均值、标签各类对象*/ string file = argv[]; /*传入的待测试图片*/ std::cout << "---------- Prediction for "
<< file << " ----------" << std::endl; cv::Mat img = cv::imread(file, -);
CHECK(!img.empty()) << "Unable to decode image " << file;
std::vector<Prediction> predictions = classifier.Classify(img); /*具体测试传入的图片并返回测试的结果:类别ID与概率值的Prediction类型数组*/ /* Print the top N predictions. *//*将测试的结果打印*/
for (size_t i = ; i < predictions.size(); ++i) {
Prediction p = predictions[i];
std::cout << std::fixed << std::setprecision() << p.second << " - \""
<< p.first << "\"" << std::endl;
}
}
#else
// Fallback when built without OpenCV: this example cannot run, so fail
// fast at startup with a clear message instead of compiling out silently.
int main(int argc, char** argv) {
LOG(FATAL) << "This example requires OpenCV; compile with USE_OPENCV.";
}
#endif // USE_OPENCV
输出activation map代码
输出层参数代码
c++ caffe 输出 activation map 、 层参数的更多相关文章
- caffe fine tune 复制预训练model的参数和freeze指定层参数
复制预训练model的参数,只需要重新copy一个train_val.prototxt.然后把不需要复制的层的名字改一下,如(fc7 -> fc7_new),然后fine tune即可. fre ...
- (原)torch中微调某层参数
转载请注明出处: http://www.cnblogs.com/darkknightzh/p/6221664.html 参考网址: https://github.com/torch/nn/issues ...
- pytorch中网络特征图(feture map)、卷积核权重、卷积核最匹配样本、类别激活图(Class Activation Map/CAM)、网络结构的可视化方法
目录 0,可视化的重要性: 1,特征图(feture map) 2,卷积核权重 3,卷积核最匹配样本 4,类别激活图(Class Activation Map/CAM) 5,网络结构的可视化 0,可视 ...
- caffe添加python数据层
caffe添加python数据层(ImageData) 在caffe中添加自定义层时,必须要实现这四个函数,在C++中是(LayerSetUp,Reshape,Forward_cpu,Backward ...
- 可视化CNN神经网路第一层参数
在上Andrew Ng的课的时候搜集到了课程里面自带的显示NN参数的代码,但是只能显示灰度图,而且NN里的参数没有通道的概念.所以想要获得可视化CNN的参数,并且达到彩色的效果就不行了. 所以就自己写 ...
- [caffe]网络各层参数设置
数据层 数据层是模型最底层,提供提供数据输入和数据从Blobs转换成别的格式进行保存输出,通常数据预处理(减去均值,放大缩小,裁剪和镜像等)也在这一层设置参数实现. 参数设置: name: 名称 ty ...
- caffe(5) 其他常用层及参数
本文讲解一些其它的常用层,包括:softmax_loss层,Inner Product层,accuracy层,reshape层和dropout层及其它们的参数配置. 1.softmax-loss so ...
- caffe中全卷积层和全连接层训练参数如何确定
今天来仔细讲一下卷基层和全连接层训练参数个数如何确定的问题.我们以Mnist为例,首先贴出网络配置文件: name: "LeNet" layer { name: "mni ...
- Caffe常用层参数介绍
版权声明:本文为博主原创文章,转载请注明出处. https://blog.csdn.net/Cheese_pop/article/details/52024980 DATA crop:截取原图像中一个 ...
随机推荐
- saveFile()方法
saveFile的原理就是将流写入到需要写入的文件,通过可以用“FileOutputStream”创建文件实例,之后过“OutputStreamWriter”流的形式进行存储,举例:public vo ...
- Hadoop默认端口表及用途
端口 用途 9000 fs.defaultFS,如:hdfs://172.25.40.171:9000 9001 dfs.namenode.rpc-address,DataNode会连接这个端口 ...
- pyqt加载图片
使用QPixmap可以加载图片,但是图片只能是标准二进制文件格式: bmp,gif,ico,jpeg,jpg,mng,pbm,pgm,png,ppm,svg,svgz,tga,tif,tiff,xbm ...
- 复现IIS6.0远程命令执行漏洞
出这个漏洞有一定时间了,一直没测试,因为知道这个漏洞有条件的,好吧,顺便感谢royal大佬今天晚上日内网的指点. 漏洞要求: 开启Webdav PS:不想刚拿下的内网服务器一下权限掉了,又不想放xx远 ...
- cocos2dx遇到的一些坑
针对2.x 1.CCSprite无法直接用文件名更换图片,可以添加如下函数 bool CCSprite::setWithFile(const char *pszFilename) { CCAssert ...
- crontab中运行python程序出错,提示ImportError: No module named解决全过程
将一个python脚本放入crontab执行时,提示如下错:ImportError: No module named hashlib但是在shell中直接执行时没有任何问题,google之后,得到线索 ...
- 让div也出现滑动框。
<div id="box" style="height: 300px; width: 200px; border:1px solid #CCC; overflow: ...
- Elasticsearch JVM Heap Size大于32G,有什么影响?
0.引言 在规划ES部署的时候,会涉及到data node的分配堆内存大小,而Elasticsearch默认安装后设置的内存是1GB,对于任何一个业务部署来说,这个都太小了. 设置Heap Size的 ...
- C++之函数模板
C++之函数模板与模版函数 直接上代码: C++ Code 12345678910111213141516171819202122232425262728293031323334353637383 ...
- CENTOS --5分钟搞定Nginx安装的教程
1. 安装gcc(centos 7之后一般已自带,可以在第6步失败后再安装) yum install gcc gcc-c++ 2. 安装pcre yum install -y pcre pcre-de ...