Reading the Caffe Code, Part 1: Implementation Details of Blob - 2016.3.14
The Implementation of Blob in Caffe
1. Foreword
While Caffe has not yet grown too large, it is worth taking the time to go through its code and sort it out.
(1) On a first read of the Caffe source, what stands out is the heavy use of glog CHECK assertions, which indeed keeps the code compact and easy to follow.
(2) Caffe's documentation is generated with doxygen, which shows in the comment style; it is a convention worth borrowing for one's own projects.
2. Background:
(1) The explicit keyword forbids implicit conversions: a constructor marked explicit can only be invoked directly, never through an implicit conversion.
For example, suppose class B declares explicit B(const A& a):
A a;
B b = a;   // compile error: explicit disables the implicit conversion
B b2(a);   // OK: direct initialization
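A self-contained sketch of the same point (classes A and B here are my own illustration, not Caffe code):

```cpp
#include <iostream>

struct A {};

struct B {
  // explicit: B can only be constructed from A deliberately,
  // never through an implicit conversion.
  explicit B(const A&) {}
};

void consume(const B&) {}

int main() {
  A a;
  // B b1 = a;    // error: copy-initialization needs an implicit A -> B conversion
  // consume(a);  // error: same reason
  B b2(a);        // OK: direct initialization calls the explicit constructor
  (void)b2;
  consume(B(a));  // OK: the conversion is written out explicitly
  std::cout << "explicit forbids implicit A -> B conversions\n";
  return 0;
}
```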
(2) For details on the usage of const, see:
3. Walkthrough
About Blob:
Having read the code, a Blob actually holds three kinds of data:
(1) data, the data used in the forward pass;
(2) diff, the gradients used in the backward pass;
(3) shape, the shape information that describes data and diff.
The class then provides methods built around these three kinds of data; a short usage sketch follows.
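A minimal usage sketch of those three pieces (my own example, based on the API declared in the annotated blob.hpp below; it assumes a working Caffe build):

```cpp
#include <vector>
#include "caffe/blob.hpp"

int main() {
  // A 4-D blob: 2 images, 3 channels, 4 x 5 pixels each.
  caffe::Blob<float> blob(std::vector<int>{2, 3, 4, 5});

  const int total = blob.count();  // 2*3*4*5 == 120 elements

  // data: filled in the forward pass, accessed here through the CPU pointer.
  float* data = blob.mutable_cpu_data();
  for (int i = 0; i < total; ++i) data[i] = 1.0f;

  // diff: same layout, filled during backpropagation.
  float* diff = blob.mutable_cpu_diff();
  for (int i = 0; i < total; ++i) diff[i] = 0.1f;

  // Update() applies data = data - diff (see Blob::Update() in blob.cpp).
  blob.Update();
  return 0;
}
```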
My detailed annotations are given below.
First, the annotated blob.hpp:
- #ifndef CAFFE_BLOB_HPP_
- #define CAFFE_BLOB_HPP_
- #include <algorithm>
- #include <string>
- #include <vector>
- #include "caffe/common.hpp"
- #include "caffe/proto/caffe.pb.h"
- #include "caffe/syncedmem.hpp"
- #include "caffe/util/math_functions.hpp"
- const int kMaxBlobAxes = 32;
- namespace caffe {
- /**
- * @brief A wrapper around SyncedMemory holders serving as the basic
- * computational unit through which Layer%s, Net%s, and Solver%s
- * interact.
- * (A Blob is a wrapper around SyncedMemory.)
- *
- * TODO(dox): more thorough description.
- */
- template <typename Dtype>
- class Blob {
- public:
- // Constructors
- Blob()
- : data_(), diff_(), count_(0), capacity_(0) {}
- /// @brief Deprecated; use <code>Blob(const vector<int>& shape)</code>.
- explicit Blob(const int num, const int channels, const int height,
- const int width);
- explicit Blob(const vector<int>& shape);  // the preferred constructor
- // Member functions
- /// @brief Deprecated; use <code>Reshape(const vector<int>& shape)</code>.
- void Reshape(const int num, const int channels, const int height,
- const int width);
- /**
- * @brief Change the dimensions of the blob, allocating new memory if
- * necessary.
- *
- * This function can be called both to create an initial allocation
- * of memory, and to adjust the dimensions of a top blob during Layer::Reshape
- * or Layer::Forward. When changing the size of blob, memory will only be
- * reallocated if sufficient memory does not already exist, and excess memory
- * will never be freed.
- *
- * Note that reshaping an input blob and immediately calling Net::Backward is
- * an error; either Net::Forward or Net::Reshape need to be called to
- * propagate the new input shape to higher layers.
- */
- void Reshape(const vector<int>& shape);  // the preferred overload
- void Reshape(const BlobShape& shape);
- void ReshapeLike(const Blob& other);
- // Returns the shape as a string: the dimensions separated by spaces, followed by the total element count in parentheses.
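- // e.g. a blob of shape (2, 3, 4, 5) yields "2 3 4 5 (120)"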
- inline string shape_string() const {
- ostringstream stream;
- for (int i = 0; i < shape_.size(); ++i) {
- stream << shape_[i] << " ";
- }
- stream << "(" << count_ << ")";
- return stream.str();
- }
- inline const vector<int>& shape() const { return shape_; }
- /**
- * @brief Returns the dimension of the index-th axis (or the negative index-th
- * axis from the end, if index is negative).
- *
- * @param index the axis index, which may be negative as it will be
- * "canonicalized" using CanonicalAxisIndex.
- * Dies on out of range index.
- */
- // Returns the size of the given axis (negative indices count from the end).
- inline int shape(int index) const {
- return shape_[CanonicalAxisIndex(index)];
- }
- // Returns the number of axes.
- inline int num_axes() const { return shape_.size(); }
- // Returns the product of all dimensions, i.e. the total number of elements.
- inline int count() const { return count_; }
- /**
- * @brief Compute the volume of a slice; i.e., the product of dimensions
- * among a range of axes.
- *
- * @param start_axis The first axis to include in the slice.
- *
- * @param end_axis The first axis to exclude from the slice.
- */
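- // e.g. for a blob of shape (2, 3, 4, 5): count(1, 3) == 3 * 4 == 12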
- inline int count(int start_axis, int end_axis) const {
- // Check that the axis range is valid.
- CHECK_LE(start_axis, end_axis);
- CHECK_GE(start_axis, 0);
- CHECK_GE(end_axis, 0);
- CHECK_LE(start_axis, num_axes());
- CHECK_LE(end_axis, num_axes());
- int count = 1;
- for (int i = start_axis; i < end_axis; ++i) {
- count *= shape(i);
- }
- return count;
- }
- /**
- * @brief Compute the volume of a slice spanning from a particular first
- * axis to the final axis.
- *
- * @param start_axis The first axis to include in the slice.
- */
- // Number of elements from the given axis through the last axis.
- inline int count(int start_axis) const {
- return count(start_axis, num_axes());
- }
- /**
- * @brief Returns the 'canonical' version of a (usually) user-specified axis,
- * allowing for negative indexing (e.g., -1 for the last axis).
- *
- * @param axis_index the axis index.
- * If 0 <= index < num_axes(), return index.
- * If -num_axes <= index <= -1, return (num_axes() - (-index)),
- * e.g., the last axis index (num_axes() - 1) if index == -1,
- * the second to last if index == -2, etc.
- * Dies on out of range index.
- */
- // Supports negative axis indices (counting from the end) and converts them to the corresponding non-negative index.
- inline int CanonicalAxisIndex(int axis_index) const {
- // Check that the index lies in [-num_axes(), num_axes()).
- CHECK_GE(axis_index, -num_axes())
- << "axis " << axis_index << " out of range for " << num_axes()
- << "-D Blob with shape " << shape_string();
- CHECK_LT(axis_index, num_axes())
- << "axis " << axis_index << " out of range for " << num_axes()
- << "-D Blob with shape " << shape_string();
- if (axis_index < 0) {
- return axis_index + num_axes();
- }
- return axis_index;
- }
- /// @brief Deprecated legacy shape accessor num: use shape(0) instead.
- inline int num() const { return LegacyShape(0); }
- /// @brief Deprecated legacy shape accessor channels: use shape(1) instead.
- inline int channels() const { return LegacyShape(1); }
- /// @brief Deprecated legacy shape accessor height: use shape(2) instead.
- inline int height() const { return LegacyShape(2); }
- /// @brief Deprecated legacy shape accessor width: use shape(3) instead.
- inline int width() const { return LegacyShape(3); }
- inline int LegacyShape(int index) const {
- CHECK_LE(num_axes(), 4)  // legacy blobs were fixed at 4 axes, so the legacy accessors only work when num_axes() <= 4
- << "Cannot use legacy accessors on Blobs with > 4 axes.";
- CHECK_LT(index, 4);   // the legacy index must be < 4
- CHECK_GE(index, -4);  // ... and >= -4
- if (index >= num_axes() || index < -num_axes()) {
- // Axis is out of range, but still in [0, 3] (or [-4, -1] for reverse
- // indexing) -- this special case simulates the one-padding used to fill
- // extraneous axes of legacy blobs.
- return 1;
- }
- return shape(index);
- }
- // Computes the linear (flattened) offset of an element from its (n, c, h, w) indices.
- inline int offset(const int n, const int c = 0, const int h = 0,
- const int w = 0) const {
- CHECK_GE(n, 0);
- CHECK_LE(n, num());
- CHECK_GE(channels(), 0);
- CHECK_LE(c, channels());
- CHECK_GE(height(), 0);
- CHECK_LE(h, height());
- CHECK_GE(width(), 0);
- CHECK_LE(w, width());
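- // e.g. for shape (2, 3, 4, 5): offset(1, 2, 3, 4) == ((1*3+2)*4+3)*5+4 == 119, the last element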
- return ((n * channels() + c) * height() + h) * width() + w;
- }
- // Same linear offset computation, but with the indices passed as a vector<int>.
- inline int offset(const vector<int>& indices) const {
- CHECK_LE(indices.size(), num_axes());
- int offset = 0;
- for (int i = 0; i < num_axes(); ++i) {
- offset *= shape(i);
- if (indices.size() > i) {
- CHECK_GE(indices[i], 0);
- CHECK_LT(indices[i], shape(i));
- offset += indices[i];
- }
- }
- return offset;
- }
- /**
- * @brief Copy from a source Blob.
- *
- * @param source the Blob to copy from
- * @param copy_diff if false, copy the data; if true, copy the diff
- * @param reshape if false, require this Blob to be pre-shaped to the shape
- * of other (and die otherwise); if true, Reshape this Blob to other's
- * shape if necessary
- * Copies from the given blob: if copy_diff is true the diff is copied instead of the data; if reshape is true this blob is reshaped to match the source.
- */
- void CopyFrom(const Blob<Dtype>& source, bool copy_diff = false,
- bool reshape = false);
- // Reads a single data element (forward-pass data) from CPU memory.
- inline Dtype data_at(const int n, const int c, const int h,
- const int w) const {
- return cpu_data()[offset(n, c, h, w)];
- }
- // Reads a single diff element (backward-pass gradient) from CPU memory.
- inline Dtype diff_at(const int n, const int c, const int h,
- const int w) const {
- return cpu_diff()[offset(n, c, h, w)];
- }
- // Same as above, indexed by a vector<int>.
- inline Dtype data_at(const vector<int>& index) const {
- return cpu_data()[offset(index)];
- }
- // Same as above, for the diff.
- inline Dtype diff_at(const vector<int>& index) const {
- return cpu_diff()[offset(index)];
- }
- // Accessors for the underlying SyncedMemory shared_ptr (reference-counted ownership).
- inline const shared_ptr<SyncedMemory>& data() const {
- CHECK(data_);
- return data_;
- }
- inline const shared_ptr<SyncedMemory>& diff() const {
- CHECK(diff_);
- return diff_;
- }
- // Raw data accessors
- const Dtype* cpu_data() const;
- void set_cpu_data(Dtype* data);
- const int* gpu_shape() const;
- const Dtype* gpu_data() const;
- const Dtype* cpu_diff() const;
- const Dtype* gpu_diff() const;
- Dtype* mutable_cpu_data();
- Dtype* mutable_gpu_data();
- Dtype* mutable_cpu_diff();
- Dtype* mutable_gpu_diff();
- // Computes data = data - diff, i.e. Y = alpha * X + Y with X = diff, Y = data and alpha = -1.
- void Update();
- // Deserializes the blob from a protobuf BlobProto message.
- void FromProto(const BlobProto& proto, bool reshape = true);
- // Serializes the blob into a protobuf BlobProto message.
- void ToProto(BlobProto* proto, bool write_diff = false) const;
- /// @brief Compute the sum of absolute values (L1 norm) of the data.
- Dtype asum_data() const;
- /// @brief Compute the sum of absolute values (L1 norm) of the diff.
- Dtype asum_diff() const;
- /// @brief Compute the sum of squares (L2 norm squared) of the data.
- Dtype sumsq_data() const;
- /// @brief Compute the sum of squares (L2 norm squared) of the diff.
- Dtype sumsq_diff() const;
- /// @brief Scale the blob data by a constant factor.
- void scale_data(Dtype scale_factor);
- /// @brief Scale the blob diff by a constant factor.
- void scale_diff(Dtype scale_factor);
- /**
- * @brief Set the data_ shared_ptr to point to the SyncedMemory holding the
- * data_ of Blob other -- useful in Layer%s which simply perform a copy
- * in their Forward pass.
- *
- * This deallocates the SyncedMemory holding this Blob's data_, as
- * shared_ptr calls its destructor when reset with the "=" operator.
- */
- void ShareData(const Blob& other);
- /**
- * @brief Set the diff_ shared_ptr to point to the SyncedMemory holding the
- * diff_ of Blob other -- useful in Layer%s which simply perform a copy
- * in their Forward pass.
- *
- * This deallocates the SyncedMemory holding this Blob's diff_, as
- * shared_ptr calls its destructor when reset with the "=" operator.
- * (Points this blob's diff_ at the other blob's SyncedMemory so the two blobs share it.
- * Note that this releases the SyncedMemory previously held by this blob, because
- * assigning to a shared_ptr drops the old reference and may run its destructor.)
- */
- void ShareDiff(const Blob& other);
- // Checks whether this blob's shape matches the shape stored in a BlobProto.
- bool ShapeEquals(const BlobProto& other);
- protected:
- // data used in the forward pass
- shared_ptr<SyncedMemory> data_;
- // diff: gradients used in the backward pass
- shared_ptr<SyncedMemory> diff_;
- // shape held in SyncedMemory so it can also be read on the GPU (see gpu_shape())
- shared_ptr<SyncedMemory> shape_data_;
- // shape as a vector<int>
- vector<int> shape_;
- // total number of elements
- int count_;
- // allocated capacity in elements (only ever grows; see Reshape)
- int capacity_;
- DISABLE_COPY_AND_ASSIGN(Blob);
- }; // class Blob
- } // namespace caffe
- #endif // CAFFE_BLOB_HPP_
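To make the indexing conventions from the header concrete, here is a small sketch (again my own example, not Caffe code) of how shape(), count() and offset() relate on a 4-D blob:

```cpp
#include <vector>
#include "caffe/blob.hpp"

int main() {
  caffe::Blob<float> blob(std::vector<int>{2, 3, 4, 5});  // (num, channels, height, width)

  // Negative axis indices count from the end (via CanonicalAxisIndex):
  int w = blob.shape(-1);              // 5, same as blob.shape(3)

  // count() multiplies dimensions over a half-open axis range:
  int total   = blob.count();          // 2*3*4*5 == 120
  int per_img = blob.count(1);         // 3*4*5   == 60
  int spatial = blob.count(2, 4);      // 4*5     == 20

  // offset() flattens (n, c, h, w) in row-major order:
  int last = blob.offset(1, 2, 3, 4);  // ((1*3+2)*4+3)*5+4 == 119 == total - 1

  (void)w; (void)total; (void)per_img; (void)spatial; (void)last;
  return 0;
}
```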
Next comes the corresponding implementation.
The annotated blob.cpp:
- #include <climits>
- #include <vector>
- #include "caffe/blob.hpp"
- #include "caffe/common.hpp"
- #include "caffe/syncedmem.hpp"
- #include "caffe/util/math_functions.hpp"
- namespace caffe {
- // Implementation of Reshape.
- // The deprecated overload simply forwards to the new Reshape(vector<int>).
- template <typename Dtype>
- void Blob<Dtype>::Reshape(const int num, const int channels, const int height,
- const int width) {
- vector<int> shape(4);
- shape[0] = num;
- shape[1] = channels;
- shape[2] = height;
- shape[3] = width;
- Reshape(shape);
- }
- // The main Reshape implementation.
- template <typename Dtype>
- void Blob<Dtype>::Reshape(const vector<int>& shape) {
- CHECK_LE(shape.size(), kMaxBlobAxes);  // at most kMaxBlobAxes (32) axes
- count_ = 1;
- shape_.resize(shape.size());  // resize the vector<int> shape_ member to the new number of axes
- if (!shape_data_ || shape_data_->size() < shape.size() * sizeof(int)) {
- shape_data_.reset(new SyncedMemory(shape.size() * sizeof(int)));// shared_ptr<SyncedMemory> shape_data_;
- }
- int* shape_data = static_cast<int*>(shape_data_->mutable_cpu_data());
- for (int i = 0; i < shape.size(); ++i) {
- // validate each dimension
- CHECK_GE(shape[i], 0);
- CHECK_LE(shape[i], INT_MAX / count_) << "blob size exceeds INT_MAX";
- // accumulate the total element count
- count_ *= shape[i];
- // copy the dimension into both shape_ and shape_data_
- shape_[i] = shape[i];
- shape_data[i] = shape[i];
- }
- // reallocate only if the new count exceeds the current capacity
- if (count_ > capacity_) {
- capacity_ = count_;
- // allocate new memory for data and diff
- data_.reset(new SyncedMemory(capacity_ * sizeof(Dtype)));
- diff_.reset(new SyncedMemory(capacity_ * sizeof(Dtype)));
- }
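- // Note: capacity_ only ever grows, so reshaping to a smaller size reuses the existing allocation (excess memory is never freed).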
- }
- // Reshape itself only copies the shape information; the element data is untouched,
- // and offsets are simply recomputed from the new shape on access.
- template <typename Dtype>
- void Blob<Dtype>::Reshape(const BlobShape& shape) {
- // at most kMaxBlobAxes (32) axes
- CHECK_LE(shape.dim_size(), kMaxBlobAxes);
- // copy the dimensions into a vector<int>
- vector<int> shape_vec(shape.dim_size());
- for (int i = 0; i < shape.dim_size(); ++i) {
- shape_vec[i] = shape.dim(i);
- }
- // delegate to Reshape(vector<int>)
- Reshape(shape_vec);
- }
- template <typename Dtype>
- void Blob<Dtype>::ReshapeLike(const Blob<Dtype>& other) {
- Reshape(other.shape());
- }
- template <typename Dtype>
- Blob<Dtype>::Blob(const int num, const int channels, const int height,
- const int width)
- // capacity_ must be initialized before calling Reshape
- // trick: capacity_ starts at 0 so that Reshape performs the initial allocation
- : capacity_(0) {
- Reshape(num, channels, height, width);
- }
- template <typename Dtype>
- Blob<Dtype>::Blob(const vector<int>& shape)
- // capacity_ must be initialized before calling Reshape
- : capacity_(0) {
- Reshape(shape);
- }
- template <typename Dtype>
- const int* Blob<Dtype>::gpu_shape() const {
- CHECK(shape_data_);
- // shared_ptr<SyncedMemory> shape_data_;
- // shape_data_ is a SyncedMemory, so it has both cpu_data and gpu_data views
- return (const int*)shape_data_->gpu_data();
- }
- template <typename Dtype>
- const Dtype* Blob<Dtype>::cpu_data() const {
- CHECK(data_);
- // shared_ptr<SyncedMemory> data_;
- return (const Dtype*)data_->cpu_data();
- }
- template <typename Dtype>
- void Blob<Dtype>::set_cpu_data(Dtype* data) {
- CHECK(data);
- data_->set_cpu_data(data);
- }
- template <typename Dtype>
- const Dtype* Blob<Dtype>::gpu_data() const {
- CHECK(data_);
- return (const Dtype*)data_->gpu_data();
- }
- template <typename Dtype>
- const Dtype* Blob<Dtype>::cpu_diff() const {
- CHECK(diff_);
- return (const Dtype*)diff_->cpu_data();
- }
- template <typename Dtype>
- const Dtype* Blob<Dtype>::gpu_diff() const {
- CHECK(diff_);
- return (const Dtype*)diff_->gpu_data();
- }
- template <typename Dtype>
- Dtype* Blob<Dtype>::mutable_cpu_data() {
- CHECK(data_);
- return static_cast<Dtype*>(data_->mutable_cpu_data());
- }
- template <typename Dtype>
- Dtype* Blob<Dtype>::mutable_gpu_data() {
- CHECK(data_);
- return static_cast<Dtype*>(data_->mutable_gpu_data());
- }
- template <typename Dtype>
- Dtype* Blob<Dtype>::mutable_cpu_diff() {
- CHECK(diff_);
- return static_cast<Dtype*>(diff_->mutable_cpu_data());
- }
- template <typename Dtype>
- Dtype* Blob<Dtype>::mutable_gpu_diff() {
- CHECK(diff_);
- return static_cast<Dtype*>(diff_->mutable_gpu_data());
- }
- // Shares another blob's data: data_ is re-pointed at the other blob's SyncedMemory (no copy is made).
- template <typename Dtype>
- void Blob<Dtype>::ShareData(const Blob& other) {
- CHECK_EQ(count_, other.count());
- data_ = other.data();
- }
- // Shares another blob's diff in the same way.
- template <typename Dtype>
- void Blob<Dtype>::ShareDiff(const Blob& other) {
- CHECK_EQ(count_, other.count());
- diff_ = other.diff();
- }
- // The "update" method is used for parameter blobs in a Net, which are stored
- // as Blob<float> or Blob<double> -- hence we do not define it for
- // Blob<int> or Blob<unsigned int>.
- template <> void Blob<unsigned int>::Update() { NOT_IMPLEMENTED; }
- template <> void Blob<int>::Update() { NOT_IMPLEMENTED; }
- // Update() computes data = data - diff (i.e. data = -1 * diff + data).
- template <typename Dtype>
- void Blob<Dtype>::Update() {
- // We will perform update based on where the data is located.
- switch (data_->head()) {
- case SyncedMemory::HEAD_AT_CPU:
- // perform computation on CPU
- // axpy stands for "alpha * X plus Y"; the BLAS naming is quite self-descriptive
- // template <> void caffe_axpy<float>(const int N, const float alpha, const float* X, float* Y) { cblas_saxpy(N, alpha, X, 1, Y, 1); }
- // caffe_axpy computes Y = alpha * X + Y, with alpha = -1 here
- // the result is written through mutable_cpu_data(), which marks the CPU copy as the up-to-date one
- caffe_axpy<Dtype>(count_, Dtype(-1),
- static_cast<const Dtype*>(diff_->cpu_data()),
- static_cast<Dtype*>(data_->mutable_cpu_data()));
- break;
- case SyncedMemory::HEAD_AT_GPU:
- case SyncedMemory::SYNCED:
- #ifndef CPU_ONLY
- // perform computation on GPU
- // Y = alpha * X + Y, with alpha = -1 here
- caffe_gpu_axpy<Dtype>(count_, Dtype(-1),
- static_cast<const Dtype*>(diff_->gpu_data()),
- static_cast<Dtype*>(data_->mutable_gpu_data()));
- #else
- NO_GPU;
- #endif
- break;
- default:
- LOG(FATAL) << "Syncedmem not initialized.";
- }
- }
- template <> unsigned int Blob<unsigned int>::asum_data() const {
- NOT_IMPLEMENTED;
- return 0;
- }
- template <> int Blob<int>::asum_data() const {
- NOT_IMPLEMENTED;
- return 0;
- }
- // Computes the L1 norm (sum of absolute values) of the data.
- template <typename Dtype>
- Dtype Blob<Dtype>::asum_data() const {
- if (!data_) { return 0; }
- switch (data_->head()) {
- case SyncedMemory::HEAD_AT_CPU:
- return caffe_cpu_asum(count_, cpu_data());
- case SyncedMemory::HEAD_AT_GPU:
- case SyncedMemory::SYNCED:
- #ifndef CPU_ONLY
- {
- Dtype asum;
- caffe_gpu_asum(count_, gpu_data(), &asum);
- return asum;
- }
- #else
- NO_GPU;
- #endif
- case SyncedMemory::UNINITIALIZED:
- return 0;
- default:
- LOG(FATAL) << "Unknown SyncedMemory head state: " << data_->head();
- }
- return 0;
- }
- template <> unsigned int Blob<unsigned int>::asum_diff() const {
- NOT_IMPLEMENTED;
- return 0;
- }
- template <> int Blob<int>::asum_diff() const {
- NOT_IMPLEMENTED;
- return 0;
- }
- // Computes the L1 norm (sum of absolute values) of the diff.
- template <typename Dtype>
- Dtype Blob<Dtype>::asum_diff() const {
- if (!diff_) { return 0; }
- switch (diff_->head()) {
- case SyncedMemory::HEAD_AT_CPU:
- return caffe_cpu_asum(count_, cpu_diff());
- case SyncedMemory::HEAD_AT_GPU:
- case SyncedMemory::SYNCED:
- #ifndef CPU_ONLY
- {
- Dtype asum;
- caffe_gpu_asum(count_, gpu_diff(), &asum);
- return asum;
- }
- #else
- NO_GPU;
- #endif
- case SyncedMemory::UNINITIALIZED:
- return 0;
- default:
- LOG(FATAL) << "Unknown SyncedMemory head state: " << diff_->head();
- }
- return 0;
- }
- template <> unsigned int Blob<unsigned int>::sumsq_data() const {
- NOT_IMPLEMENTED;
- return 0;
- }
- template <> int Blob<int>::sumsq_data() const {
- NOT_IMPLEMENTED;
- return 0;
- }
- // Computes the sum of squares of the data (the squared L2 norm).
- template <typename Dtype>
- Dtype Blob<Dtype>::sumsq_data() const {
- Dtype sumsq;
- const Dtype* data;
- if (!data_) { return 0; }
- switch (data_->head()) {
- case SyncedMemory::HEAD_AT_CPU:
- data = cpu_data();
- sumsq = caffe_cpu_dot(count_, data, data);
- break;
- case SyncedMemory::HEAD_AT_GPU:
- case SyncedMemory::SYNCED:
- #ifndef CPU_ONLY
- data = gpu_data();
- caffe_gpu_dot(count_, data, data, &sumsq);
- #else
- NO_GPU;
- #endif
- break;
- case SyncedMemory::UNINITIALIZED:
- return 0;
- default:
- LOG(FATAL) << "Unknown SyncedMemory head state: " << data_->head();
- }
- return sumsq;
- }
- template <> unsigned int Blob<unsigned int>::sumsq_diff() const {
- NOT_IMPLEMENTED;
- return 0;
- }
- template <> int Blob<int>::sumsq_diff() const {
- NOT_IMPLEMENTED;
- return 0;
- }
- // Computes the sum of squares of the diff (the squared L2 norm).
- template <typename Dtype>
- Dtype Blob<Dtype>::sumsq_diff() const {
- Dtype sumsq;
- const Dtype* diff;
- if (!diff_) { return 0; }
- switch (diff_->head()) {
- case SyncedMemory::HEAD_AT_CPU:
- diff = cpu_diff();
- sumsq = caffe_cpu_dot(count_, diff, diff);
- break;
- case SyncedMemory::HEAD_AT_GPU:
- case SyncedMemory::SYNCED:
- #ifndef CPU_ONLY
- diff = gpu_diff();
- caffe_gpu_dot(count_, diff, diff, &sumsq);
- break;
- #else
- NO_GPU;
- #endif
- case SyncedMemory::UNINITIALIZED:
- return 0;
- default:
- LOG(FATAL) << "Unknown SyncedMemory head state: " << data_->head();
- }
- return sumsq;
- }
- template <> void Blob<unsigned int>::scale_data(unsigned int scale_factor) {
- NOT_IMPLEMENTED;
- }
- template <> void Blob<int>::scale_data(int scale_factor) {
- NOT_IMPLEMENTED;
- }
- // Scales the data by the factor scale_factor.
- template <typename Dtype>
- void Blob<Dtype>::scale_data(Dtype scale_factor) {
- Dtype* data;
- if (!data_) { return; }
- switch (data_->head()) {
- case SyncedMemory::HEAD_AT_CPU:
- data = mutable_cpu_data();
- caffe_scal(count_, scale_factor, data);
- return;
- case SyncedMemory::HEAD_AT_GPU:
- case SyncedMemory::SYNCED:
- #ifndef CPU_ONLY
- data = mutable_gpu_data();
- caffe_gpu_scal(count_, scale_factor, data);
- return;
- #else
- NO_GPU;
- #endif
- case SyncedMemory::UNINITIALIZED:
- return;
- default:
- LOG(FATAL) << "Unknown SyncedMemory head state: " << data_->head();
- }
- }
- template <> void Blob<unsigned int>::scale_diff(unsigned int scale_factor) {
- NOT_IMPLEMENTED;
- }
- template <> void Blob<int>::scale_diff(int scale_factor) {
- NOT_IMPLEMENTED;
- }
- // Scales the diff by the factor scale_factor.
- template <typename Dtype>
- void Blob<Dtype>::scale_diff(Dtype scale_factor) {
- Dtype* diff;
- if (!diff_) { return; }
- switch (diff_->head()) {
- case SyncedMemory::HEAD_AT_CPU:
- diff = mutable_cpu_diff();
- caffe_scal(count_, scale_factor, diff);
- return;
- case SyncedMemory::HEAD_AT_GPU:
- case SyncedMemory::SYNCED:
- #ifndef CPU_ONLY
- diff = mutable_gpu_diff();
- caffe_gpu_scal(count_, scale_factor, diff);
- return;
- #else
- NO_GPU;
- #endif
- case SyncedMemory::UNINITIALIZED:
- return;
- default:
- LOG(FATAL) << "Unknown SyncedMemory head state: " << diff_->head();
- }
- }
- // Checks whether this blob's shape equals the shape recorded in a BlobProto.
- template <typename Dtype>
- bool Blob<Dtype>::ShapeEquals(const BlobProto& other) {
- // If the proto uses the deprecated 4-D fields, compare against the legacy shape.
- if (other.has_num() || other.has_channels() ||
- other.has_height() || other.has_width()) {
- // Using deprecated 4D Blob dimensions --
- // shape is (num, channels, height, width).
- // Note: we do not use the normal Blob::num(), Blob::channels(), etc.
- // methods as these index from the beginning of the blob shape, where legacy
- // parameter blobs were indexed from the end of the blob shape (e.g., bias
- // Blob shape (1 x 1 x 1 x N), IP layer weight Blob shape (1 x 1 x M x N)).
- return shape_.size() <= 4 &&
- LegacyShape(-4) == other.num() &&
- LegacyShape(-3) == other.channels() &&
- LegacyShape(-2) == other.height() &&
- LegacyShape(-1) == other.width();
- }
- // Otherwise compare the shape vectors directly.
- vector<int> other_shape(other.shape().dim_size());
- for (int i = 0; i < other.shape().dim_size(); ++i) {
- other_shape[i] = other.shape().dim(i);
- }
- return shape_ == other_shape;
- }
- // Copies data (or diff) from another blob.
- template <typename Dtype>
- void Blob<Dtype>::CopyFrom(const Blob& source, bool copy_diff, bool reshape) {
- if (source.count() != count_ || source.shape() != shape_) {
- if (reshape) {
- ReshapeLike(source);  // adopt the source blob's shape
- } else {
- LOG(FATAL) << "Trying to copy blobs of different sizes.";
- }
- }
- switch (Caffe::mode()) {
- case Caffe::GPU:
- // GPU mode: copy diff or data on the GPU
- if (copy_diff) {
- // caffe_copy<float> is defined as: template <> void caffe_copy<float>(const int N, const float* X, float* Y) { cblas_scopy(N, X, 1, Y, 1); }
- // using a BLAS routine just to copy memory feels like overkill...
- caffe_copy(count_, source.gpu_diff(),
- static_cast<Dtype*>(diff_->mutable_gpu_data()));
- } else {
- caffe_copy(count_, source.gpu_data(),
- static_cast<Dtype*>(data_->mutable_gpu_data()));
- }
- break;
- case Caffe::CPU:
- // CPU mode: copy diff or data on the CPU
- if (copy_diff) {
- caffe_copy(count_, source.cpu_diff(),
- static_cast<Dtype*>(diff_->mutable_cpu_data()));
- } else {
- caffe_copy(count_, source.cpu_data(),
- static_cast<Dtype*>(data_->mutable_cpu_data()));
- }
- break;
- default:
- LOG(FATAL) << "Unknown caffe mode.";
- }
- }
- template <typename Dtype>
- void Blob<Dtype>::FromProto(const BlobProto& proto, bool reshape) {
- // copy shape
- if (reshape) {
- vector<int> shape;
- if (proto.has_num() || proto.has_channels() ||
- proto.has_height() || proto.has_width()) {
- // Using deprecated 4D Blob dimensions --
- // shape is (num, channels, height, width).
- // If the proto uses the legacy 4-D fields, convert them into a shape vector.
- shape.resize(4);
- shape[0] = proto.num();
- shape[1] = proto.channels();
- shape[2] = proto.height();
- shape[3] = proto.width();
- } else {
- shape.resize(proto.shape().dim_size());
- for (int i = 0; i < proto.shape().dim_size(); ++i) {
- shape[i] = proto.shape().dim(i);
- }
- }
- Reshape(shape);  // apply the shape to this blob
- } else {
- CHECK(ShapeEquals(proto)) << "shape mismatch (reshape not set)";
- }
- // copy data
- Dtype* data_vec = mutable_cpu_data();  // get a writable CPU pointer (marks the CPU copy as up to date)
- if (proto.double_data_size() > 0) {// data
- CHECK_EQ(count_, proto.double_data_size());
- for (int i = 0; i < count_; ++i) {
- data_vec[i] = proto.double_data(i);
- }
- } else {
- CHECK_EQ(count_, proto.data_size());
- for (int i = 0; i < count_; ++i) {
- data_vec[i] = proto.data(i);
- }
- }
- // copy diff
- if (proto.double_diff_size() > 0) {// diff
- CHECK_EQ(count_, proto.double_diff_size());
- Dtype* diff_vec = mutable_cpu_diff();  // get a writable CPU pointer to the diff
- for (int i = 0; i < count_; ++i) {
- diff_vec[i] = proto.double_diff(i);
- }
- } else if (proto.diff_size() > 0) {
- CHECK_EQ(count_, proto.diff_size());
- Dtype* diff_vec = mutable_cpu_diff();
- for (int i = 0; i < count_; ++i) {
- diff_vec[i] = proto.diff(i);
- }
- }
- }
- // BlobProto and BlobShape are defined in protobuf; accessors such as
- // mutable_shape, add_dim, clear_double_data, clear_double_diff, add_double_data,
- // add_double_diff, etc. are auto-generated.
- // See src/caffe/proto/caffe.proto.
- template <>
- void Blob<double>::ToProto(BlobProto* proto, bool write_diff) const {
- proto->clear_shape();
- // write the shape
- for (int i = 0; i < shape_.size(); ++i) {
- proto->mutable_shape()->add_dim(shape_[i]);
- }
- proto->clear_double_data();
- proto->clear_double_diff();
- // write the data
- const double* data_vec = cpu_data();
- for (int i = 0; i < count_; ++i) {
- proto->add_double_data(data_vec[i]);
- }
- // optionally write the diff
- if (write_diff) {
- const double* diff_vec = cpu_diff();
- for (int i = 0; i < count_; ++i) {
- proto->add_double_diff(diff_vec[i]);
- }
- }
- }
- template <>
- void Blob<float>::ToProto(BlobProto* proto, bool write_diff) const {
- proto->clear_shape();
- for (int i = 0; i < shape_.size(); ++i) {
- proto->mutable_shape()->add_dim(shape_[i]);
- }
- proto->clear_data();
- proto->clear_diff();
- const float* data_vec = cpu_data();
- for (int i = 0; i < count_; ++i) {
- proto->add_data(data_vec[i]);
- }
- if (write_diff) {
- const float* diff_vec = cpu_diff();
- for (int i = 0; i < count_; ++i) {
- proto->add_diff(diff_vec[i]);
- }
- }
- }
- INSTANTIATE_CLASS(Blob);
- template class Blob<int>;
- template class Blob<unsigned int>;
- } // namespace caffe
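As a sanity check on the Update() path above, this is a plain-C++ sketch (my own stand-in, not the Caffe implementation) of what caffe_axpy(count_, -1, diff, data) effectively computes on the CPU:

```cpp
#include <cstddef>

// Stand-in for caffe_axpy(N, alpha, X, Y): Y = alpha * X + Y.
// With alpha = -1, X = diff and Y = data, this is exactly the
// data = data - diff update performed by Blob::Update().
void axpy(std::size_t n, float alpha, const float* x, float* y) {
  for (std::size_t i = 0; i < n; ++i) {
    y[i] = alpha * x[i] + y[i];
  }
}

int main() {
  float data[4] = {1.0f, 2.0f, 3.0f, 4.0f};
  float diff[4] = {0.5f, 0.5f, 0.5f, 0.5f};
  axpy(4, -1.0f, diff, data);  // data becomes {0.5, 1.5, 2.5, 3.5}
  return 0;
}
```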
Summary:
As the old saying goes: read the fxxx source code.
And browse Caffe's GitHub issues often.