Before reading this article you should be familiar with softmax: https://blog.csdn.net/yang332233/article/details/88319668
0. Background
Take LeNet as an example: in the forward pass, SoftmaxWithLossLayer calls softmax to compute probabilities. The SoftmaxWithLossLayer class holds an instance of SoftmaxLayer. In its forward function, SoftmaxLayer turns the 64x10 bottom_data into a 64x10 top_data. In other words, the input is 64 samples with 10 features each, and for each of the 64 samples we compute its probability over the 10 classes:

$$p_{nk} = \frac{e^{z_{nk}}}{\sum_{j=1}^{10} e^{z_{nj}}}$$

SoftmaxWithLossLayer then uses the output of SoftmaxLayer to compute the loss, as in the formula below, where N is the batch size (64 for MNIST training, 100 for testing). From the definition of cross-entropy:

$$L = -\frac{1}{N}\sum_{n=1}^{N}\log p_{n,\,l_n} \qquad \text{(formula 1)}$$

where l_n is the ground-truth label of sample n.
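As a quick standalone illustration of the two formulas above (this is not Caffe code; the class count, scores, and labels are made-up values), the following sketch computes the softmax probabilities of each sample and the batch-averaged cross-entropy loss:

// standalone sketch: softmax + cross-entropy loss for a tiny batch (toy values)
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

// p_k = exp(z_k) / sum_j exp(z_j); subtracting the max first keeps exp() from overflowing
std::vector<float> softmax(const std::vector<float>& z) {
  float max_z = *std::max_element(z.begin(), z.end());
  std::vector<float> p(z.size());
  float sum = 0.f;
  for (size_t k = 0; k < z.size(); ++k) { p[k] = std::exp(z[k] - max_z); sum += p[k]; }
  for (size_t k = 0; k < z.size(); ++k) { p[k] /= sum; }
  return p;
}

int main() {
  // two toy samples with 3 classes each; the ground-truth labels are 2 and 0
  std::vector<std::vector<float> > scores = {{1.f, 2.f, 3.f}, {0.5f, -1.f, 0.f}};
  std::vector<int> labels = {2, 0};
  float loss = 0.f;
  for (size_t n = 0; n < scores.size(); ++n) {
    std::vector<float> p = softmax(scores[n]);
    loss -= std::log(std::max(p[labels[n]], 1e-37f));  // per-sample term of formula (1)
  }
  loss /= scores.size();  // average over the N samples of the batch
  std::printf("batch loss = %f\n", loss);
  return 0;
}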
1. Caffe source: SoftmaxWithLossLayer::LayerSetUp and SoftmaxWithLossLayer::Reshape
//loss_layer.cpp
#include <vector>
#include "caffe/layers/loss_layer.hpp"
namespace caffe {
template <typename Dtype>
void LossLayer<Dtype>::LayerSetUp(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
// LossLayers have a non-zero (1) loss by default.
if (this->layer_param_.loss_weight_size() == 0) {
this->layer_param_.add_loss_weight(Dtype(1));
}
}
//layer.hpp
inline void SetLossWeights(const vector<Blob<Dtype>*>& top) {
const int num_loss_weights = layer_param_.loss_weight_size();
if (num_loss_weights) {
CHECK_EQ(top.size(), num_loss_weights) << "loss_weight must be "
"unspecified or specified once per top blob.";
for (int top_id = 0; top_id < top.size(); ++top_id) {
const Dtype loss_weight = layer_param_.loss_weight(top_id);
if (loss_weight == Dtype(0)) { continue; }
this->set_loss(top_id, loss_weight);
const int count = top[top_id]->count();
Dtype* loss_multiplier = top[top_id]->mutable_cpu_diff();
caffe_set(count, loss_weight, loss_multiplier);
}
}
}
//layer.hpp
void SetUp(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
CheckBlobCounts(bottom, top);
LayerSetUp(bottom, top);
Reshape(bottom, top);
SetLossWeights(top);
}
template <typename Dtype>
void SoftmaxWithLossLayer<Dtype>::LayerSetUp(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
LossLayer<Dtype>::LayerSetUp(bottom, top);  // important: adds the default loss_weight of 1
LayerParameter softmax_param(this->layer_param_);
softmax_param.set_type("Softmax");  // set the type to Softmax
// shared_ptr<Layer<Dtype> > softmax_layer_;  create the internal softmax layer
softmax_layer_ = LayerRegistry<Dtype>::CreateLayer(softmax_param);
softmax_bottom_vec_.clear();
softmax_bottom_vec_.push_back(bottom[0]);
softmax_top_vec_.clear();
softmax_top_vec_.push_back(&prob_);  // Blob<Dtype> prob_;
softmax_layer_->SetUp(softmax_bottom_vec_, softmax_top_vec_);
// SetUp runs the base Layer functions shown above; in particular, Reshape dispatches to
// Softmax's own Reshape and finishes the initialization.
// SetLossWeights does real work for this layer and initializes top[0]'s diff to 1; for other
// layers num_loss_weights is 0, the if condition fails, and it does nothing.
// No ignore_label by default; normalization_ defaults to LossParameter_NormalizationMode_VALID (= 1)
has_ignore_label_ =
this->layer_param_.loss_param().has_ignore_label();
if (has_ignore_label_) {
ignore_label_ = this->layer_param_.loss_param().ignore_label();
}
if (!this->layer_param_.loss_param().has_normalization() &&
this->layer_param_.loss_param().has_normalize()) {
normalization_ = this->layer_param_.loss_param().normalize() ?
LossParameter_NormalizationMode_VALID :
LossParameter_NormalizationMode_BATCH_SIZE;
} else {
normalization_ = this->layer_param_.loss_param().normalization();
}
}
template <typename Dtype>
void LossLayer<Dtype>::Reshape(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
CHECK_EQ(bottom[0]->shape(0), bottom[1]->shape(0))
<< "The data and label should have the same first dimension.";
vector<int> loss_shape(0); // Loss layers output a scalar; 0 axes.
top[0]->Reshape(loss_shape);
}
template <typename Dtype>
void SoftmaxWithLossLayer<Dtype>::Reshape(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
LossLayer<Dtype>::Reshape(bottom, top);  // as above, this reshapes top[0] to hold a single scalar
// call the internal Softmax layer's own Reshape
softmax_layer_->Reshape(softmax_bottom_vec_, softmax_top_vec_);
softmax_axis_ =
bottom[0]->CanonicalAxisIndex(this->layer_param_.softmax_param().axis());  // 1
outer_num_ = bottom[0]->count(0, softmax_axis_);  // 64
inner_num_ = bottom[0]->count(softmax_axis_ + 1);  // starts past the last axis of a 64x10 blob, so it equals 1
CHECK_EQ(outer_num_ * inner_num_, bottom[1]->count())
<< "Number of labels must match number of predictions; "
<< "e.g., if softmax axis == 1 and prediction shape is (N, C, H, W), "
<< "label count (number of labels) must be N*H*W, "
<< "with integer values in {0, 1, ..., C-1}.";
if (top.size() >= 2) {
// softmax output
top[1]->ReshapeLike(*bottom[0]);
}
}
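To make outer_num_ and inner_num_ concrete, here is a small standalone sketch (not Caffe code; the shapes are assumed examples) of what count(0, axis) and count(axis + 1) return:

// standalone sketch: how outer_num_ / inner_num_ fall out of the blob shape
#include <cstdio>
#include <vector>

// product of the shape entries in [start, end), mirroring Blob::count(start, end)
int count(const std::vector<int>& shape, int start, int end) {
  int c = 1;
  for (int i = start; i < end; ++i) c *= shape[i];
  return c;
}

int main() {
  const int axis = 1;  // softmax axis
  // LeNet case: bottom[0] is 64x10, so outer_num_ = 64 and inner_num_ = 1
  std::vector<int> lenet = {64, 10};
  std::printf("lenet: outer=%d inner=%d\n",
              count(lenet, 0, axis), count(lenet, axis + 1, (int)lenet.size()));
  // (N, C, H, W) case, e.g. a 2x21x4x4 score map (made-up shape):
  // outer_num_ = 2 and inner_num_ = 16, i.e. one label per spatial position
  std::vector<int> seg = {2, 21, 4, 4};
  std::printf("seg:   outer=%d inner=%d\n",
              count(seg, 0, axis), count(seg, axis + 1, (int)seg.size()));
  return 0;
}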
template <typename Dtype>
Dtype SoftmaxWithLossLayer<Dtype>::get_normalizer(
LossParameter_NormalizationMode normalization_mode, int valid_count) {
Dtype normalizer;
switch (normalization_mode) {
case LossParameter_NormalizationMode_FULL:
normalizer = Dtype(outer_num_ * inner_num_);
break;
case LossParameter_NormalizationMode_VALID:
if (valid_count == -1) {
normalizer = Dtype(outer_num_ * inner_num_);
} else {
normalizer = Dtype(valid_count);
}
break;
case LossParameter_NormalizationMode_BATCH_SIZE:
normalizer = Dtype(outer_num_);
break;
case LossParameter_NormalizationMode_NONE:
normalizer = Dtype(1);
break;
default:
LOG(FATAL) << "Unknown normalization mode: "
<< LossParameter_NormalizationMode_Name(normalization_mode);
}
// Some users will have no labels for some examples in order to 'turn off' a
// particular loss in a multi-task setup. The max prevents NaNs in that case.
return std::max(Dtype(1.0), normalizer);
}
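For a feel of how the four modes differ, a tiny standalone sketch (the 64 / 1 / 60 numbers are made up for illustration) evaluating the normalizer for outer_num_ = 64, inner_num_ = 1 and 60 non-ignored labels:

// standalone sketch: the normalizer each mode would return for one toy case
#include <algorithm>
#include <cstdio>

int main() {
  const int outer_num = 64, inner_num = 1, valid_count = 60;  // assumed values
  double full  = outer_num * inner_num;  // FULL: every prediction counts -> 64
  double valid = valid_count;            // VALID: only non-ignored labels -> 60
  double batch = outer_num;              // BATCH_SIZE: divide by N only -> 64
  double none  = 1.0;                    // NONE: keep the raw summed loss -> 1
  std::printf("FULL=%g VALID=%g BATCH_SIZE=%g NONE=%g\n",
              std::max(1.0, full), std::max(1.0, valid),
              std::max(1.0, batch), std::max(1.0, none));
  return 0;
}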
2. Caffe source: SoftmaxWithLossLayer::Forward_cpu
template <typename Dtype>
void SoftmaxWithLossLayer<Dtype>::Forward_cpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
// The forward pass computes the softmax prob values.
softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_);
const Dtype* prob_data = prob_.cpu_data();
const Dtype* label = bottom[1]->cpu_data();
int dim = prob_.count() / outer_num_;//64*10/64 = 10
int count = 0;
Dtype loss = 0;
for (int i = 0; i < outer_num_; ++i) {//64
for (int j = 0; j < inner_num_; j++) {//1
const int label_value = static_cast<int>(label[i * inner_num_ + j]);
if (has_ignore_label_ && label_value == ignore_label_) {
continue;
}
DCHECK_GE(label_value, 0);
DCHECK_LT(label_value, prob_.shape(softmax_axis_));
// prob_ is 64x10, i.e. every 10 values are one image's class probabilities;
// with inner_num_ = 1 and j = 0 the index reduces to i * dim + label_value,
// so from each group of 10 we pick the probability of the ground-truth label
// and accumulate the loss, as in formula (1) above
loss -= log(std::max(prob_data[i * dim + label_value * inner_num_ + j],
Dtype(FLT_MIN)));
++count;
}
}
// store the normalized (batch-averaged) loss
top[0]->mutable_cpu_data()[0] = loss / get_normalizer(normalization_, count);
if (top.size() == 2) {
top[1]->ShareData(prob_);
}
}
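To see how the flat index i * dim + label_value * inner_num_ + j and the VALID normalizer play together, here is a minimal standalone sketch over plain arrays (the 2x3 probabilities and labels are made-up values, not from the source):

// standalone sketch of the forward accumulation over a flat prob buffer (toy values)
#include <algorithm>
#include <cmath>
#include <cstdio>

int main() {
  const int outer_num = 2, classes = 3, inner_num = 1;
  const int dim = classes * inner_num;             // prob_.count() / outer_num_
  float prob[outer_num * classes] = {0.1f, 0.2f, 0.7f,    // sample 0
                                     0.5f, 0.3f, 0.2f};   // sample 1
  float label[outer_num] = {2, 0};
  const int ignore_label = -1;                     // nothing is ignored in this toy case

  int count = 0;
  float loss = 0.f;
  for (int i = 0; i < outer_num; ++i) {
    for (int j = 0; j < inner_num; ++j) {
      const int label_value = (int)label[i * inner_num + j];
      if (label_value == ignore_label) continue;   // ignored samples add nothing
      loss -= std::log(std::max(prob[i * dim + label_value * inner_num + j], 1e-37f));
      ++count;
    }
  }
  // VALID normalization: divide by the number of non-ignored samples
  std::printf("loss = %f\n", loss / std::max(1, count));
  return 0;
}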
3. SoftmaxWithLossLayer backward pass:
Differentiating the softmax output p_j with respect to the input z_k gives two cases.
When k == j:

$$\frac{\partial p_j}{\partial z_k} = p_j\,(1 - p_j)$$

When k != j:

$$\frac{\partial p_j}{\partial z_k} = -\,p_j\,p_k$$
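Plugging these two cases into formula (1) via the chain rule (a sketch for a single sample with ground-truth label l; this is what the code below implements) gives:

$$\frac{\partial L}{\partial z_k} = -\frac{1}{p_l}\,\frac{\partial p_l}{\partial z_k} =
\begin{cases}
p_l - 1, & k = l \quad \text{(formula 2)}\\
p_k, & k \neq l \quad \text{(formula 3)}
\end{cases}$$

In words: the gradient is simply the softmax probabilities with 1 subtracted at the ground-truth class, which is why Backward_cpu first copies prob_ into bottom_diff and then subtracts 1 at the label.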
4. Caffe source: SoftmaxWithLossLayer::Backward_cpu
template <typename Dtype>
void SoftmaxWithLossLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[1]) {
LOG(FATAL) << this->type()
<< " Layer cannot backpropagate to label inputs.";
}
if (propagate_down[0]) {
Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
const Dtype* prob_data = prob_.cpu_data();
caffe_copy(prob_.count(), prob_data, bottom_diff);  // formula (3): start by copying prob_data into bottom_diff for every element
const Dtype* label = bottom[1]->cpu_data();
int dim = prob_.count() / outer_num_;//64*10/64=10
int count = 0;
for (int i = 0; i < outer_num_; ++i) {//64
for (int j = 0; j < inner_num_; ++j) {//1
const int label_value = static_cast<int>(label[i * inner_num_ + j]);
if (has_ignore_label_ && label_value == ignore_label_) {
for (int c = 0; c < bottom[0]->shape(softmax_axis_); ++c) {
bottom_diff[i * dim + c * inner_num_ + j] = 0;
}
} else {
bottom_diff[i * dim + label_value * inner_num_ + j] -= 1;  // formula (2): subtract 1 at the ground-truth class
++count;
}
}
}
// Scale gradient
// top[0]->cpu_diff()[0] == 1.0, initialized in Layer::SetUp --> SetLossWeights()
Dtype loss_weight = top[0]->cpu_diff()[0] /
get_normalizer(normalization_, count);
// normalize bottom_diff: scale by the loss weight and divide by the batch-level normalizer
caffe_scal(prob_.count(), loss_weight, bottom_diff);
}
}
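As a sanity check on formulas (2) and (3), here is a standalone sketch (toy scores and label, not Caffe code) that builds the analytic gradient p_k - 1{k == label} and compares each entry with a central finite-difference estimate of formula (1):

// standalone sketch: analytic softmax-loss gradient vs. finite differences (toy values)
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

// -log(softmax(z)[label]): the per-sample loss of formula (1)
static double loss_of(std::vector<double> z, int label) {
  double m = *std::max_element(z.begin(), z.end()), sum = 0.0;
  for (double& v : z) { v = std::exp(v - m); sum += v; }
  return -std::log(z[label] / sum);
}

int main() {
  std::vector<double> z = {1.0, 2.0, 0.5};  // toy scores
  const int label = 1;

  // softmax probabilities of the sample
  double m = *std::max_element(z.begin(), z.end()), sum = 0.0;
  std::vector<double> p(z.size());
  for (size_t k = 0; k < z.size(); ++k) { p[k] = std::exp(z[k] - m); sum += p[k]; }
  for (size_t k = 0; k < z.size(); ++k) { p[k] /= sum; }

  for (size_t k = 0; k < z.size(); ++k) {
    const double analytic = p[k] - (k == (size_t)label ? 1.0 : 0.0);  // formulas (2)/(3)
    const double eps = 1e-6;                                          // finite-difference step
    std::vector<double> zp = z, zm = z;
    zp[k] += eps; zm[k] -= eps;
    const double numeric = (loss_of(zp, label) - loss_of(zm, label)) / (2 * eps);
    std::printf("k=%zu analytic=%.6f numeric=%.6f\n", k, analytic, numeric);
  }
  return 0;
}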