An Analysis of the Caffe Implementation of Batch Normalization

It is recommended to read the paper Batch Normalization: Accelerating Deep Network Training by
Reducing Internal Covariate Shift first; it will give you a much better grounding for this article.
The version of GoogLeNet that uses Batch Normalization is also known as Inception v2.

BatchNormParameter has three parameters:

message BatchNormParameter {
  // If false, accumulate global mean/variance values via a moving average. If
  // true, use those accumulated values instead of computing mean/variance
  // across the batch.
  optional bool use_global_stats = 1;
  // How much does the moving average decay each iteration?
  optional float moving_average_fraction = 2 [default = .999];
  // Small value to add to the variance estimate so that we don't divide by
  // zero.
  optional float eps = 3 [default = 1e-5];
}

use_global_stats indicates whether the layer runs in the Train or Test phase; if it is true, the layer is in the Test phase and uses the accumulated statistics.
eps is the small constant in the normalization formula that prevents division by zero.
moving_average_fraction is the decay factor applied to the accumulated mean and variance each time a mini-batch's statistics are folded in.
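For reference, eps is the ε in the normalization formula of the referenced paper; in this layer the per-channel statistics are taken over the m = N·H·W values of each channel:

\mu_{\mathcal{B}} = \frac{1}{m}\sum_{i=1}^{m} x_i, \qquad
\sigma_{\mathcal{B}}^{2} = \frac{1}{m}\sum_{i=1}^{m}\bigl(x_i - \mu_{\mathcal{B}}\bigr)^{2}, \qquad
\hat{x}_i = \frac{x_i - \mu_{\mathcal{B}}}{\sqrt{\sigma_{\mathcal{B}}^{2} + \epsilon}}

Note that BatchNormLayer performs only this normalization; the paper's learnable scale and shift (γ, β) are left to a separate Scale layer in Caffe.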

The LayerSetUp module:

template <typename Dtype>
void BatchNormLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  // Fetch the BatchNormParameter list
  BatchNormParameter param = this->layer_param_.batch_norm_param();
  // Read the moving_average_fraction parameter
  moving_average_fraction_ = param.moving_average_fraction();
  // use_global_stats_ defaults to true in the TEST phase
  use_global_stats_ = this->phase_ == TEST;
  // If use_global_stats is set explicitly in the parameter list, use that value instead
  if (param.has_use_global_stats())
    use_global_stats_ = param.use_global_stats();
  // Determine channels_
  if (bottom[0]->num_axes() == 1)
    channels_ = 1;
  else
    channels_ = bottom[0]->shape(1);
  // Read the eps constant from the parameter list
  eps_ = param.eps();
  // Initialize three blobs: blobs_[0] (accumulated mean) and blobs_[1] (accumulated
  // variance) have size channels_; blobs_[2] (the moving-average normalization factor)
  // has size 1.
  if (this->blobs_.size() > 0) {
    LOG(INFO) << "Skipping parameter initialization";
  } else {
    this->blobs_.resize(3);
    vector<int> sz;
    sz.push_back(channels_);
    this->blobs_[0].reset(new Blob<Dtype>(sz));
    this->blobs_[1].reset(new Blob<Dtype>(sz));
    sz[0]=1;
    this->blobs_[2].reset(new Blob<Dtype>(sz));
    // Initialize all three blobs to 0
    for (int i = 0; i < 3; ++i) {
      caffe_set(this->blobs_[i]->count(), Dtype(0),
                this->blobs_[i]->mutable_cpu_data());
    }
  }
}

The Reshape module:

template <typename Dtype>
void BatchNormLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  if (bottom[0]->num_axes() >= 1)
    CHECK_EQ(bottom[0]->shape(1), channels_);
  // The output has the same shape as the input
  top[0]->ReshapeLike(*bottom[0]);
  // Set up the shapes of mean_, variance_, temp_, x_norm_ and batch_sum_multiplier_
  vector<int> sz;
  sz.push_back(channels_);
  mean_.Reshape(sz);
  variance_.Reshape(sz);
  temp_.ReshapeLike(*bottom[0]);
  x_norm_.ReshapeLike(*bottom[0]);
  sz[0]=bottom[0]->shape(0);
  // batch_sum_multiplier_ has N entries, one per sample in the batch
  batch_sum_multiplier_.Reshape(sz);
  // Set up the shape of spatial_sum_multiplier_
  int spatial_dim = bottom[0]->count()/(channels_*bottom[0]->shape(0));
  if (spatial_sum_multiplier_.num_axes() == 0 ||
      spatial_sum_multiplier_.shape(0) != spatial_dim) {
    sz[0] = spatial_dim;
    spatial_sum_multiplier_.Reshape(sz);
    Dtype* multiplier_data = spatial_sum_multiplier_.mutable_cpu_data();
    // Fill spatial_sum_multiplier_ with ones
    caffe_set(spatial_sum_multiplier_.count(), Dtype(1), multiplier_data);
  }
  // num_by_chans_ has channels_ * bottom[0]->shape(0) entries
  int numbychans = channels_*bottom[0]->shape(0);
  if (num_by_chans_.num_axes() == 0 ||
      num_by_chans_.shape(0) != numbychans) {
    sz[0] = numbychans;
    num_by_chans_.Reshape(sz);
    // Fill batch_sum_multiplier_ with ones
    caffe_set(batch_sum_multiplier_.count(), Dtype(1),
        batch_sum_multiplier_.mutable_cpu_data());
  }
}
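The two multiplier blobs exist purely to turn BLAS products into sums: multiplying a (channels_*num) x spatial_dim matrix by an all-ones vector of length spatial_dim sums each row. A minimal sketch of that equivalence in plain loops (not the Caffe code; the sizes are made up for illustration):

#include <cstdio>
#include <vector>

int main() {
  // Toy dimensions standing in for (num*channels_) rows and (H*W) columns.
  const int rows = 6, spatial_dim = 4;
  std::vector<float> x(rows * spatial_dim);
  for (int i = 0; i < rows * spatial_dim; ++i) x[i] = 0.1f * i;

  // Roughly what caffe_cpu_gemv(CblasNoTrans, rows, spatial_dim, alpha, x, ones, 0., out)
  // computes: out[r] = alpha * sum_c x[r][c], because the "vector" operand is all ones.
  std::vector<float> ones(spatial_dim, 1.0f), out(rows, 0.0f);
  const float alpha = 1.0f / spatial_dim;  // folding the averaging factor into alpha
  for (int r = 0; r < rows; ++r) {
    float acc = 0.0f;
    for (int c = 0; c < spatial_dim; ++c) acc += x[r * spatial_dim + c] * ones[c];
    out[r] = alpha * acc;  // mean of row r
  }
  for (int r = 0; r < rows; ++r) std::printf("row %d mean = %f\n", r, out[r]);
  return 0;
}

The same trick, with batch_sum_multiplier_ as the ones vector, sums over the batch dimension in the second gemv of each reduction below.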

The Forward_cpu module:

template <typename Dtype>
void BatchNormLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  // Pointer to the input blob's data (const)
  const Dtype* bottom_data = bottom[0]->cpu_data();
  // Pointer to the output blob's data, which is what we compute here
  Dtype* top_data = top[0]->mutable_cpu_data();
  // Batch size: the N in NCHW
  int num = bottom[0]->shape(0);
  // The H*W part of NCHW
  int spatial_dim = bottom[0]->count()/(bottom[0]->shape(0)*channels_);
  // If bottom and top are not the same blob (i.e. not in-place), copy bottom into top
  if (bottom[0] != top[0]) {
    caffe_copy(bottom[0]->count(), bottom_data, top_data);
  }
  // If we use the precomputed statistics: the accumulated mean is stored in blobs_[0]
  // and the accumulated variance in blobs_[1]
  if (use_global_stats_) {
    // use the stored mean/variance estimates.
    const Dtype scale_factor = this->blobs_[2]->cpu_data()[0] == 0 ?
        0 : 1 / this->blobs_[2]->cpu_data()[0];
    caffe_cpu_scale(variance_.count(), scale_factor,
        this->blobs_[0]->cpu_data(), mean_.mutable_cpu_data());
    caffe_cpu_scale(variance_.count(), scale_factor,
        this->blobs_[1]->cpu_data(), variance_.mutable_cpu_data());
  } else {  // No stored mean/variance available, so compute them from the current batch
    // compute mean
    // This matrix-vector product sums each feature map and scales by 1./(num*spatial_dim):
    //   bottom_data:             (channels_*num) x spatial_dim
    //   spatial_sum_multiplier_: spatial_dim x 1 (all ones)
    //   alpha = 1./(num*spatial_dim); beta = 0
    //   num_by_chans_ = alpha * (bottom_data x spatial_sum_multiplier_) + beta * num_by_chans_
    caffe_cpu_gemv<Dtype>(CblasNoTrans, channels_ * num, spatial_dim,
        1. / (num * spatial_dim), bottom_data,
        spatial_sum_multiplier_.cpu_data(), 0.,
        num_by_chans_.mutable_cpu_data());
    // Note the CblasTrans keyword here!
    //   num_by_chans_:         num x channels_ (viewed as channels_ x num via the transpose)
    //   batch_sum_multiplier_: num x 1 (all ones)
    //   mean_ = 1. * (num_by_chans_^T x batch_sum_multiplier_)
    //   mean_: channels_ x 1
    // This yields the per-channel mean, and explains why the previous step divided by
    // 1./(num*spatial_dim) rather than just 1./spatial_dim: folding the factor into the
    // first gemv avoids an extra scaling pass.
    caffe_cpu_gemv<Dtype>(CblasTrans, num, channels_, 1.,
        num_by_chans_.cpu_data(), batch_sum_multiplier_.cpu_data(), 0.,
        mean_.mutable_cpu_data());
  }

  // subtract mean
  //   batch_sum_multiplier_: num x 1
  //   mean_:                 1 x channels_
  //   num_by_chans_:         num x channels_
  // num_by_chans_ is laid out as:
  //         channels_
  // -----------------------
  // mean00 mean01 ... mean0x
  // ........................
  // ........................
  // meany0 meany1 ... meanyx
  // ------------------------
  // where x = channels_ and y = num
  caffe_cpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num, channels_, 1, 1,
      batch_sum_multiplier_.cpu_data(), mean_.cpu_data(), 0.,
      num_by_chans_.mutable_cpu_data());
  //   num_by_chans_:           (channels_*num) x 1
  //   spatial_sum_multiplier_: 1 x spatial_dim (all ones)
  //   top_data = 1 * top_data + (-1) * (num_by_chans_ x spatial_sum_multiplier_)
  // num_by_chans_ x spatial_sum_multiplier_ broadcasts each channel mean over the
  // spatial positions, so top_data ends up holding every value minus its channel mean.
  caffe_cpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, channels_ * num,
      spatial_dim, 1, -1, num_by_chans_.cpu_data(),
      spatial_sum_multiplier_.cpu_data(), 1., top_data);

  if (!use_global_stats_) {
    // compute variance using var(X) = E((X-EX)^2)
    // Square X-E(X) element-wise and store the result in temp_
    caffe_powx(top[0]->count(), top_data, Dtype(2),
        temp_.mutable_cpu_data());  // (X-EX)^2
    // Sum (X-EX)^2 over each feature map and scale by 1./(num*spatial_dim),
    // exactly like the mean computation above
    caffe_cpu_gemv<Dtype>(CblasNoTrans, channels_ * num, spatial_dim,
        1. / (num * spatial_dim), temp_.cpu_data(),
        spatial_sum_multiplier_.cpu_data(), 0.,
        num_by_chans_.mutable_cpu_data());
    // Sum (X-EX)^2 over the batch dimension; no further division by num is needed
    // because the previous step already applied it. This gives the per-channel variance.
    caffe_cpu_gemv<Dtype>(CblasTrans, num, channels_, 1.,
        num_by_chans_.cpu_data(), batch_sum_multiplier_.cpu_data(), 0.,
        variance_.mutable_cpu_data());  // E((X_EX)^2)

    // compute and save moving average
    // blobs_[2] holds a single value, initially 0; each iteration it becomes
    // blobs_[2] = moving_average_fraction_ * blobs_[2] + 1
    this->blobs_[2]->mutable_cpu_data()[0] *= moving_average_fraction_;
    this->blobs_[2]->mutable_cpu_data()[0] += 1;
    // caffe_cpu_axpby: Y = alpha * X + beta * Y
    // blobs_[0] = 1 * mean_ + moving_average_fraction_ * blobs_[0]
    // blobs_[0] starts at all zeros; this step accumulates (with decay) the mean
    // computed at every iteration into blobs_[0]
    caffe_cpu_axpby(mean_.count(), Dtype(1), mean_.cpu_data(),
        moving_average_fraction_, this->blobs_[0]->mutable_cpu_data());
    // m = batch size * spatial size (N*H*W)
    int m = bottom[0]->count()/channels_;
    // blobs_[1] = m/(m-1) * variance_ + moving_average_fraction_ * blobs_[1],
    // where m/(m-1) is the unbiased-variance correction
    Dtype bias_correction_factor = m > 1 ? Dtype(m)/(m-1) : 1;
    caffe_cpu_axpby(variance_.count(), bias_correction_factor,
        variance_.cpu_data(), moving_average_fraction_,
        this->blobs_[1]->mutable_cpu_data());
  }

  // normalize variance
  // Compute variance_ + eps_
  caffe_add_scalar(variance_.count(), eps_, variance_.mutable_cpu_data());
  // Compute sqrt(variance_ + eps_)
  caffe_powx(variance_.count(), variance_.cpu_data(), Dtype(0.5),
             variance_.mutable_cpu_data());

  // replicate variance to input size
  // Broadcast variance_ across the batch dimension
  caffe_cpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num, channels_, 1, 1,
      batch_sum_multiplier_.cpu_data(), variance_.cpu_data(), 0.,
      num_by_chans_.mutable_cpu_data());
  // Broadcast variance_ across the batch x feature-map dimensions
  caffe_cpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, channels_ * num,
      spatial_dim, 1, 1., num_by_chans_.cpu_data(),
      spatial_sum_multiplier_.cpu_data(), 0., temp_.mutable_cpu_data());
  // Here top_data = top_data / temp_
  caffe_div(temp_.count(), top_data, temp_.cpu_data(), top_data);
  // TODO(cdoersch): The caching is only needed because later in-place layers
  //                 might clobber the data.  Can we skip this if they won't?
  // Cache the result in x_norm_; later operations, in particular the backward pass, need it
  caffe_copy(x_norm_.count(), top_data,
      x_norm_.mutable_cpu_data());
}
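To make the sequence of gemv/gemm calls above more concrete, here is a naive loop version of the training-time path (per-channel mean and variance, then normalization) for a single float array in NCHW layout. This is an illustrative sketch, not the Caffe code; the function name and parameters are chosen for this example only, and y must be pre-sized to match x:

#include <cmath>
#include <vector>

// Naive reference for the training-time forward pass of BatchNorm (no scale/shift),
// matching what the BLAS calls above compute. x and y hold N*C*spatial_dim values.
void batch_norm_forward_ref(const std::vector<float>& x, std::vector<float>& y,
                            int N, int C, int spatial_dim, float eps) {
  std::vector<float> mean(C, 0.0f), var(C, 0.0f);
  // Per-channel mean over the batch and spatial dimensions
  for (int n = 0; n < N; ++n)
    for (int c = 0; c < C; ++c)
      for (int s = 0; s < spatial_dim; ++s)
        mean[c] += x[(n * C + c) * spatial_dim + s];
  for (int c = 0; c < C; ++c) mean[c] /= N * spatial_dim;
  // Per-channel (biased) variance: E((X - EX)^2)
  for (int n = 0; n < N; ++n)
    for (int c = 0; c < C; ++c)
      for (int s = 0; s < spatial_dim; ++s) {
        float d = x[(n * C + c) * spatial_dim + s] - mean[c];
        var[c] += d * d;
      }
  for (int c = 0; c < C; ++c) var[c] /= N * spatial_dim;
  // Normalize: y = (x - mean) / sqrt(var + eps)
  for (int n = 0; n < N; ++n)
    for (int c = 0; c < C; ++c)
      for (int s = 0; s < spatial_dim; ++s) {
        int i = (n * C + c) * spatial_dim + s;
        y[i] = (x[i] - mean[c]) / std::sqrt(var[c] + eps);
      }
}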

A note on how blobs_ is maintained: the statistics in blobs_ are accumulated during training, and training is not a single forward pass; instead, mini-batches are drawn from the full training set and many forward passes are run. Each pass produces its own mean and variance, and all of them need to be taken into account. The first idea that comes to mind is simple accumulation, but Caffe does not just add up every mean and variance it computes; it first shrinks the contribution of the previous passes (multiplying the accumulator by a factor smaller than one) and then adds the result of the current pass.
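A minimal scalar sketch of this decayed accumulation, with lambda standing for moving_average_fraction_ and made-up per-iteration statistics m_t (mean) and v_t (bias-corrected variance); the final division by the decayed count is exactly what the use_global_stats_ branch of Forward_cpu does with blobs_[2]:

#include <cstdio>

int main() {
  const float lambda = 0.999f;                 // moving_average_fraction_
  float S_mean = 0.0f, S_var = 0.0f, N = 0.0f; // blobs_[0], blobs_[1], blobs_[2]

  // Pretend three training iterations produced these per-batch statistics.
  const float m_t[3] = {0.5f, 0.7f, 0.6f};     // per-batch means
  const float v_t[3] = {1.0f, 1.2f, 1.1f};     // per-batch (bias-corrected) variances
  for (int t = 0; t < 3; ++t) {
    N      = lambda * N      + 1.0f;           // blobs_[2]: decayed count
    S_mean = lambda * S_mean + m_t[t];         // blobs_[0]: decayed sum of means
    S_var  = lambda * S_var  + v_t[t];         // blobs_[1]: decayed sum of variances
  }

  // At test time (use_global_stats_ == true) the stored statistics are recovered as
  // mean = blobs_[0] / blobs_[2] and variance = blobs_[1] / blobs_[2].
  std::printf("running mean = %f, running variance = %f\n", S_mean / N, S_var / N);
  return 0;
}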

This completes the forward (Forward) pass of Batch Normalization.

The Backward_cpu module:

For the backward pass, let us first derive the formulas:
\frac{\partial E}{\partial \mathrm{var}} = \sum_{i}^{m} \frac{\partial E}{\partial y_i}\,\frac{\partial y_i}{\partial \mathrm{var}} = \sum_{i}^{m} \frac{\partial E}{\partial y_i}\,(x_i - \mathrm{mean})\left(-\tfrac{1}{2}\right)(\mathrm{var} + eps)^{-\frac{3}{2}}

\frac{\partial E}{\partial \mathrm{mean}} = \sum_{i}^{m} \frac{\partial E}{\partial y_i}\left(\frac{\partial y_i}{\partial \mathrm{mean}} + \frac{\partial y_i}{\partial \mathrm{var}}\,\frac{\partial \mathrm{var}}{\partial \mathrm{mean}}\right) = \sum_{i}^{m} \frac{\partial E}{\partial y_i}\,\frac{-1}{\sqrt{\mathrm{var} + eps}}

\frac{\partial E}{\partial x_i} = \frac{\partial E}{\partial y_i}\,\frac{1}{\sqrt{\mathrm{var}+eps}} + \frac{\partial E}{\partial \mathrm{var}}\,\frac{\partial \mathrm{var}}{\partial x_i} + \frac{\partial E}{\partial \mathrm{mean}}\,\frac{\partial \mathrm{mean}}{\partial x_i} = \frac{\partial E}{\partial y_i}\,\frac{1}{\sqrt{\mathrm{var}+eps}} + \frac{\partial E}{\partial \mathrm{var}}\,\frac{2}{m}(x_i - \mathrm{mean}) + \frac{\partial E}{\partial \mathrm{mean}}\,\frac{1}{m}

= \frac{1}{\sqrt{\mathrm{var}+eps}}\left(\frac{\partial E}{\partial y_i} - \frac{1}{m}\sum_{i}^{m}\frac{\partial E}{\partial y_i} - \left(\frac{1}{m}\sum_{i}^{m}\frac{\partial E}{\partial y_i}\, y_i\right) y_i\right)

= \frac{1}{\sqrt{\mathrm{var}+eps}}\left(\frac{\partial E}{\partial y_i} - \mathrm{mean}\!\left(\frac{\partial E}{\partial y}\right) - \mathrm{mean}\!\left(\frac{\partial E}{\partial y}\cdot y\right) y_i\right)

template <typename Dtype>
void BatchNormLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom) {
  const Dtype* top_diff;
  if (bottom[0] != top[0]) {
    top_diff = top[0]->cpu_diff();
  } else {
    caffe_copy(x_norm_.count(), top[0]->cpu_diff(), x_norm_.mutable_cpu_diff());
    top_diff = x_norm_.cpu_diff();
  }
  Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
  if (use_global_stats_) {
    caffe_div(temp_.count(), top_diff, temp_.cpu_data(), bottom_diff);
    return;
  }
  const Dtype* top_data = x_norm_.cpu_data();
  // batch size (the N in NCHW)
  int num = bottom[0]->shape()[0];
  // size of one feature map (H*W)
  int spatial_dim = bottom[0]->count()/(bottom[0]->shape(0)*channels_);
  // if Y = (X-mean(X))/(sqrt(var(X)+eps)), then
  //
  // dE(Y)/dX =
  //   (dE/dY - mean(dE/dY) - mean(dE/dY \cdot Y) \cdot Y)
  //     ./ sqrt(var(X) + eps)
  //
  // where \cdot and ./ are hadamard product and elementwise division,
  // respectively, dE/dY is the top diff, and mean/var/sum are all computed
  // along all dimensions except the channels dimension.  In the above
  // equation, the operations allow for expansion (i.e. broadcast) along all
  // dimensions except the channels dimension where required.

  // sum(dE/dY \cdot Y)
  caffe_mul(temp_.count(), top_data, top_diff, bottom_diff);
  caffe_cpu_gemv<Dtype>(CblasNoTrans, channels_ * num, spatial_dim, 1.,
      bottom_diff, spatial_sum_multiplier_.cpu_data(), 0.,
      num_by_chans_.mutable_cpu_data());
  caffe_cpu_gemv<Dtype>(CblasTrans, num, channels_, 1.,
      num_by_chans_.cpu_data(), batch_sum_multiplier_.cpu_data(), 0.,
      mean_.mutable_cpu_data());

  // reshape (broadcast) the above
  caffe_cpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num, channels_, 1, 1,
      batch_sum_multiplier_.cpu_data(), mean_.cpu_data(), 0.,
      num_by_chans_.mutable_cpu_data());
  caffe_cpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, channels_ * num,
      spatial_dim, 1, 1., num_by_chans_.cpu_data(),
      spatial_sum_multiplier_.cpu_data(), 0., bottom_diff);

  // sum(dE/dY \cdot Y) \cdot Y
  caffe_mul(temp_.count(), top_data, bottom_diff, bottom_diff);

  // sum(dE/dY)-sum(dE/dY \cdot Y) \cdot Y
  caffe_cpu_gemv<Dtype>(CblasNoTrans, channels_ * num, spatial_dim, 1.,
      top_diff, spatial_sum_multiplier_.cpu_data(), 0.,
      num_by_chans_.mutable_cpu_data());
  caffe_cpu_gemv<Dtype>(CblasTrans, num, channels_, 1.,
      num_by_chans_.cpu_data(), batch_sum_multiplier_.cpu_data(), 0.,
      mean_.mutable_cpu_data());
  // reshape (broadcast) the above to make
  // sum(dE/dY)-sum(dE/dY \cdot Y) \cdot Y
  caffe_cpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num, channels_, 1, 1,
      batch_sum_multiplier_.cpu_data(), mean_.cpu_data(), 0.,
      num_by_chans_.mutable_cpu_data());
  caffe_cpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num * channels_,
      spatial_dim, 1, 1., num_by_chans_.cpu_data(),
      spatial_sum_multiplier_.cpu_data(), 1., bottom_diff);

  // dE/dY - mean(dE/dY)-mean(dE/dY \cdot Y) \cdot Y
  caffe_cpu_axpby(temp_.count(), Dtype(1), top_diff,
      Dtype(-1. / (num * spatial_dim)), bottom_diff);

  // note: temp_ still contains sqrt(var(X)+eps), computed during the forward
  // pass.
  caffe_div(temp_.count(), bottom_diff, temp_.cpu_data(), bottom_diff);
}
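As a cross-check of the last formula above, here is a naive loop version of the same gradient, dE/dx = (dE/dy - mean(dE/dy) - mean(dE/dy · y) · y) / sqrt(var + eps), with the means taken per channel over the batch and spatial dimensions. Again, this is only an illustrative sketch with hypothetical names, not the Caffe code:

#include <vector>

// Naive reference for the training-time backward pass of BatchNorm.
// y is the normalized output cached in x_norm_, inv_std[c] = 1/sqrt(var[c] + eps),
// top_diff is dE/dy and bottom_diff receives dE/dx; all data arrays hold N*C*spatial_dim values.
void batch_norm_backward_ref(const std::vector<float>& y,
                             const std::vector<float>& top_diff,
                             const std::vector<float>& inv_std,
                             std::vector<float>& bottom_diff,
                             int N, int C, int spatial_dim) {
  const float m = static_cast<float>(N) * spatial_dim;
  std::vector<float> mean_dy(C, 0.0f), mean_dy_y(C, 0.0f);
  // Per-channel mean of dE/dy and of dE/dy * y
  for (int n = 0; n < N; ++n)
    for (int c = 0; c < C; ++c)
      for (int s = 0; s < spatial_dim; ++s) {
        int i = (n * C + c) * spatial_dim + s;
        mean_dy[c]   += top_diff[i];
        mean_dy_y[c] += top_diff[i] * y[i];
      }
  for (int c = 0; c < C; ++c) { mean_dy[c] /= m; mean_dy_y[c] /= m; }
  // dE/dx = (dE/dy - mean(dE/dy) - mean(dE/dy * y) * y) / sqrt(var + eps)
  for (int n = 0; n < N; ++n)
    for (int c = 0; c < C; ++c)
      for (int s = 0; s < spatial_dim; ++s) {
        int i = (n * C + c) * spatial_dim + s;
        bottom_diff[i] = (top_diff[i] - mean_dy[c] - mean_dy_y[c] * y[i]) * inv_std[c];
      }
}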

 
