softmax_loss.cu and softmax_loss.cpp source code
#include <algorithm>
#include <cfloat>
#include <vector>

#include "caffe/layers/softmax_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"

namespace caffe {

template <typename Dtype>
__global__ void SoftmaxLossForwardGPU(const int nthreads,
          const Dtype* prob_data, const Dtype* label, Dtype* loss,
          const int num, const int dim, const int spatial_dim,
          const bool has_ignore_label_, const int ignore_label_,
          Dtype* counts) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    const int n = index / spatial_dim;
    const int s = index % spatial_dim;
    const int label_value = static_cast<int>(label[n * spatial_dim + s]);
    if (has_ignore_label_ && label_value == ignore_label_) {
      loss[index] = 0;
      counts[index] = 0;
    } else {
      loss[index] = -log(max(prob_data[n * dim + label_value * spatial_dim + s],
                      Dtype(FLT_MIN)));
      counts[index] = 1;
    }
  }
}

template <typename Dtype>
void SoftmaxWithLossLayer<Dtype>::Forward_gpu(
    const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
  softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_);
  const Dtype* prob_data = prob_.gpu_data();
  const Dtype* label = bottom[1]->gpu_data();
  const int dim = prob_.count() / outer_num_;
  const int nthreads = outer_num_ * inner_num_;
  // Since this memory is not used for anything until it is overwritten
  // on the backward pass, we use it here to avoid having to allocate new GPU
  // memory to accumulate intermediate results in the kernel.
  Dtype* loss_data = bottom[0]->mutable_gpu_diff();
  // Similarly, this memory is never used elsewhere, and thus we can use it
  // to avoid having to allocate additional GPU memory.
  Dtype* counts = prob_.mutable_gpu_diff();
  // NOLINT_NEXT_LINE(whitespace/operators)
  SoftmaxLossForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
      CAFFE_CUDA_NUM_THREADS>>>(nthreads, prob_data, label, loss_data,
      outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts);
  Dtype loss;
  caffe_gpu_asum(nthreads, loss_data, &loss);
  Dtype valid_count = -1;
  // Only launch another CUDA kernel if we actually need the count of valid
  // outputs.
  if (normalization_ == LossParameter_NormalizationMode_VALID &&
      has_ignore_label_) {
    caffe_gpu_asum(nthreads, counts, &valid_count);
  }
  top[0]->mutable_cpu_data()[0] = loss / get_normalizer(normalization_,
                                                        valid_count);
  if (top.size() == 2) {
    top[1]->ShareData(prob_);
  }
}

template <typename Dtype>
__global__ void SoftmaxLossBackwardGPU(const int nthreads, const Dtype* top,
          const Dtype* label, Dtype* bottom_diff, const int num, const int dim,
          const int spatial_dim, const bool has_ignore_label_,
          const int ignore_label_, Dtype* counts) {
  const int channels = dim / spatial_dim;
  CUDA_KERNEL_LOOP(index, nthreads) {
    const int n = index / spatial_dim;
    const int s = index % spatial_dim;
    const int label_value = static_cast<int>(label[n * spatial_dim + s]);
    if (has_ignore_label_ && label_value == ignore_label_) {
      for (int c = 0; c < channels; ++c) {
        bottom_diff[n * dim + c * spatial_dim + s] = 0;
      }
      counts[index] = 0;
    } else {
      bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;
      counts[index] = 1;
    }
  }
}

template <typename Dtype>
void SoftmaxWithLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
  if (propagate_down[1]) {
    LOG(FATAL) << this->type()
               << " Layer cannot backpropagate to label inputs.";
  }
  if (propagate_down[0]) {
    Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
    const Dtype* prob_data = prob_.gpu_data();
    const Dtype* top_data = top[0]->gpu_data();
    caffe_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, bottom_diff);
    const Dtype* label = bottom[1]->gpu_data();
    const int dim = prob_.count() / outer_num_;
    const int nthreads = outer_num_ * inner_num_;
    // Since this memory is never used for anything else,
    // we use it to avoid allocating new GPU memory.
    Dtype* counts = prob_.mutable_gpu_diff();
    // NOLINT_NEXT_LINE(whitespace/operators)
    SoftmaxLossBackwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
        CAFFE_CUDA_NUM_THREADS>>>(nthreads, top_data, label, bottom_diff,
        outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts);
    Dtype valid_count = -1;
    // Only launch another CUDA kernel if we actually need the count of valid
    // outputs.
    if (normalization_ == LossParameter_NormalizationMode_VALID &&
        has_ignore_label_) {
      caffe_gpu_asum(nthreads, counts, &valid_count);
    }
    const Dtype loss_weight = top[0]->cpu_diff()[0] /
        (get_normalizer(normalization_, valid_count) * Caffe::getThreadNum());
    caffe_gpu_scal(prob_.count(), loss_weight, bottom_diff);
  }
}

INSTANTIATE_LAYER_GPU_FUNCS_DISABLE_FP16(SoftmaxWithLossLayer);

}  // namespace caffe
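A note on the backward math: since the layer's output probability is p = softmax(z), the gradient of the cross-entropy loss with respect to the logits is dL/dz_c = p_c - 1{c == label}. That is exactly why Backward_gpu first memcpys prob_ into bottom_diff and the kernel then subtracts 1 only at the label channel (and zeroes the whole column for ignored labels). Below is a minimal CPU sketch of the same forward/backward math for a single sample; softmax_xent_single is an illustrative name, not a Caffe API.

#include <algorithm>
#include <cfloat>
#include <cmath>
#include <vector>

// Minimal CPU sketch (not Caffe code): loss and gradient of softmax +
// cross-entropy for one sample with logits.size() classes.
float softmax_xent_single(const std::vector<float>& logits, int label,
                          std::vector<float>* grad) {
  // Softmax with max subtraction for numerical stability.
  const float m = *std::max_element(logits.begin(), logits.end());
  std::vector<float> prob(logits.size());
  float sum = 0.f;
  for (size_t c = 0; c < logits.size(); ++c) {
    prob[c] = std::exp(logits[c] - m);
    sum += prob[c];
  }
  for (float& p : prob) p /= sum;
  // Forward: -log(p[label]), clamped just like the kernel's max(..., FLT_MIN).
  const float loss = -std::log(std::max(prob[label], FLT_MIN));
  // Backward: dL/dz_c = p_c - 1{c == label}; this is why the GPU kernel only
  // needs "-= 1" at the label position after bottom_diff is seeded with prob_.
  *grad = prob;
  (*grad)[label] -= 1.f;
  return loss;
}

The final caffe_gpu_scal in Backward_gpu then multiplies the whole diff by loss_weight, folding in the top diff and the chosen normalization.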
outer_num_: effectively the batch size N
dim: C * H * W
spatial_dim (inner_num_): H * W
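To make those sizes concrete, here is a small sketch with a hypothetical input shape; prob_offset is an illustrative helper, not a Caffe function.

// Hypothetical example, assuming softmax_axis_ = 1 and a bottom blob of
// shape (N, C, H, W) = (32, 10, 8, 8):
//   outer_num_ = N         = 32
//   dim        = C * H * W = 640
//   inner_num_ = H * W     = 64   (passed to the kernels as spatial_dim)
//   nthreads   = outer_num_ * inner_num_ = 2048, one thread per (n, s) pair.
// Flat offset of class c at batch index n and spatial index s, exactly as
// the kernels compute it:
inline int prob_offset(int n, int c, int s, int dim, int spatial_dim) {
  return n * dim + c * spatial_dim + s;
}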
The corresponding code in softmax_loss.cpp:
outer_num_ = bottom[0]->count(0, softmax_axis_);
inner_num_ = bottom[0]->count(softmax_axis_ + 1);
Note that count(start, end) works on a half-open range of axes: it includes start but excludes end, so count(0, softmax_axis_) covers only axis 0 when softmax_axis_ is 1, as shown in the sketch below.
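A tiny stand-alone imitation of Blob::count makes the half-open semantics explicit (the real implementation lives in Caffe's blob.hpp; this toy version and its shape values are only for illustration):

#include <vector>

// Stand-in for Blob::count(start, end): the product of the dimensions in
// the half-open axis range [start, end).
int count(const std::vector<int>& shape, int start, int end) {
  int c = 1;
  for (int i = start; i < end; ++i) c *= shape[i];
  return c;
}
// With shape = {32, 10, 8, 8} and softmax_axis_ = 1:
//   count(shape, 0, 1) == 32   -> outer_num_ (only axis 0, as noted above)
//   count(shape, 2, 4) == 64   -> inner_num_ (all axes after softmax_axis_)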