caffe Blob.hpp

Reading through the Blob header file, annotating the purpose of each method.

#ifndef CAFFE_BLOB_HPP_
#define CAFFE_BLOB_HPP_

#include <algorithm>
#include <string>
#include <vector>

#include "caffe/common.hpp"
#include "caffe/proto/caffe.pb.h"
#include "caffe/syncedmem.hpp"

// Maximum number of axes (dimensions) a Blob can have: 32
const int kMaxBlobAxes = 32;

namespace caffe {
template <typename Dtype>
class Blob {
public:
// Constructors
Blob(): data_(), diff_(), count_(0), capacity_(0) {}
explicit Blob(const int num, const int channels,
const int height, const int width);
explicit Blob(const vector<int>& shape);

// Change the blob's shape
void Reshape(const int num, const int channels,
const int height, const int width);
void Reshape(const vector<int>& shape);
void Reshape(const BlobShape& shape);
void ReshapeLike(const Blob& other);

// Return the shape as a human-readable string
inline string shape_string() const {
ostringstream stream;
for (int i = 0; i < shape_.size(); ++i) {
stream << shape_[i] << " ";
}
stream << "(" << count_ << ")";
return stream.str();
}
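// Example (illustrative): a blob of shape 2 x 3 x 4 x 5 would print as "2 3 4 5 (120)".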

// Accessors for the shape and related dimension information
inline const vector<int>& shape() const { return shape_; }
inline int shape(int index) const {
return shape_[CanonicalAxisIndex(index)];
}
inline int num_axes() const { return shape_.size(); }
inline int count() const { return count_; }
// Number of elements in a range of axes:
// returns the count of elements spanned by axes [start_axis, end_axis) of this Blob
inline int count(int start_axis, int end_axis) const {
CHECK_LE(start_axis, end_axis); // require start_axis <= end_axis
CHECK_GE(start_axis, 0); // both axes must be non-negative
CHECK_GE(end_axis, 0);
CHECK_LE(start_axis, num_axes());
CHECK_LE(end_axis, num_axes());
int count = 1;
for (int i = start_axis; i < end_axis; ++i) {
count *= shape(i);
}
return count;
}
// Number of elements from start_axis up to the last axis
inline int count(int start_axis) const {
return count(start_axis, num_axes());
}
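// Worked example (illustrative, for a hypothetical 4-D blob of shape 2 x 3 x 4 x 5):
//   count()     == 2*3*4*5 == 120
//   count(1)    == 3*4*5   == 60
//   count(1, 3) == 3*4     == 12   (end_axis is exclusive)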

// Canonicalize an axis index; a negative index counts from the end (e.g. -1 is the last axis).
inline int CanonicalAxisIndex(int axis_index) const {
CHECK_GE(axis_index, -num_axes())
<< "axis " << axis_index << " out of range for " << num_axes()
<< "-D Blob with shape " << shape_string();
CHECK_LT(axis_index, num_axes())
<< "axis " << axis_index << " out of range for " << num_axes()
<< "-D Blob with shape " << shape_string();
if (axis_index < 0) {
return axis_index + num_axes();
}
return axis_index;
}
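// Example (illustrative): for a 4-D blob, CanonicalAxisIndex(-1) == 3 and
// CanonicalAxisIndex(0) == 0; anything outside [-4, 3] fails the CHECKs above.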

// Legacy accessors for the sizes of the four classic axes
inline int num() const { return LegacyShape(0); }
inline int channels() const { return LegacyShape(1); }
inline int height() const { return LegacyShape(2); }
inline int width() const { return LegacyShape(3); }
inline int LegacyShape(int index) const {
CHECK_LE(num_axes(), 4)
<< "Cannot use l4egacy accessors on Blobs with > 4 axes.";
CHECK_LT(index, 4);
CHECK_GE(index, -4);
if (index >= num_axes() || index < -num_axes()) {
return 1;
}
return shape(index);
}

// Compute the flat (global) index from the (n, c, h, w) indices
inline int offset(const int n, const int c = 0,
const int h = 0, const int w = 0) const {
CHECK_GE(n, 0);
CHECK_LE(n, num());
CHECK_GE(channels(), 0);
CHECK_LE(c, channels());
CHECK_GE(height(), 0);
CHECK_LE(h, height());
CHECK_GE(width(), 0);
CHECK_LE(w, width());
// Note that the rightmost axis varies fastest (row-major memory layout)
return ((n * channels() + c) * height() + h) * width() + w;
}
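// Worked example (illustrative): for a blob of shape (N, C, H, W) = (2, 3, 4, 5),
// offset(1, 2, 3, 4) = ((1*3 + 2)*4 + 3)*5 + 4 = 119, i.e. the last of the 120 elements.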
// Also computes the flat index, but the indices are passed as a vector
inline int offset(const vector<int>& indices) const {
CHECK_LE(indices.size(), num_axes());
int offset = 0;
for (int i = 0; i < num_axes(); ++i) {
offset *= shape(i);
if (indices.size() > i) {
CHECK_GE(indices[i], 0);
CHECK_LT(indices[i], shape(i));
offset += indices[i];
}
}
return offset;
}

// Copy another Blob's data (and optionally its diff) into this Blob
void CopyFrom(const Blob<Dtype>& source, bool copy_diff = false,
bool reshape = false);

// The following four functions read the value at a given position: they take per-axis
// indices, call offset() to compute the flat index, and then read from the CPU buffer
inline Dtype data_at(const int n, const int c,
const int h, const int w) const {
return cpu_data()[offset(n, c, h, w)];
}
inline Dtype diff_at(const int n, const int c,
const int h, const int w) const {
return cpu_diff()[offset(n, c, h, w)];
}
inline Dtype data_at(const vector<int>& index) const {
return cpu_data()[offset(index)];
}
inline Dtype diff_at(const vector<int>& index) const {
return cpu_diff()[offset(index)];
}

// Access the underlying data_ SyncedMemory
inline const shared_ptr<SyncedMemory>& data() const {
CHECK(data_);
return data_;
}
// Access the underlying diff_ SyncedMemory
inline const shared_ptr<SyncedMemory>& diff() const {
CHECK(diff_);
return diff_;
}

// Read-only (const) access
const Dtype* cpu_data() const;
const Dtype* cpu_diff() const;
// Read-only access to the shape data on the GPU
const int* gpu_shape() const;
const Dtype* gpu_data() const;
const Dtype* gpu_diff() const;
// Mutable (read-write) access
Dtype* mutable_cpu_data();
Dtype* mutable_gpu_data();
Dtype* mutable_cpu_diff();
Dtype* mutable_gpu_diff();
// Set the CPU / GPU data pointers
void set_cpu_data(Dtype* data);
void set_gpu_data(Dtype* data);
// Perform data = data - diff, i.e. apply the accumulated update (the learning step)
void Update();
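// (In Caffe's solvers the computed update step is stored in diff_, so one SGD iteration
//  amounts to filling diff_ with the scaled gradient and then calling Update().)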
// Deserialize: read the blob back from a BlobProto (e.g. loaded from disk)
void FromProto(const BlobProto& proto, bool reshape = true);
// Serialize: write the blob into a BlobProto (e.g. for saving to disk)
void ToProto(BlobProto* proto, bool write_diff = false) const;

// L1 norm: sum of the absolute values of the elements
Dtype asum_data() const;
Dtype asum_diff() const;
// Squared L2 norm: sum of the squares of the elements
Dtype sumsq_data() const;
Dtype sumsq_diff() const;
// Scale the elements by a constant factor
void scale_data(Dtype scale_factor);
void scale_diff(Dtype scale_factor);
// Share the data_ / diff_ of another Blob instead of owning a copy
void ShareData(const Blob& other);
void ShareDiff(const Blob& other);
// Whether this blob's shape matches the shape stored in the given BlobProto
bool ShapeEquals(const BlobProto& other);

protected:
// Member variables:
// smart pointers to the SyncedMemory blocks holding data, diff and shape_data
shared_ptr<SyncedMemory> data_;
shared_ptr<SyncedMemory> diff_;
shared_ptr<SyncedMemory> shape_data_;
vector<int> shape_;
int count_; // current number of elements in the blob
int capacity_; // allocated capacity, in elements

// Disable the copy constructor and assignment operator for the Blob class.
DISABLE_COPY_AND_ASSIGN(Blob);
}; // end of class Blob

} // namespace caffe

#endif // CAFFE_BLOB_HPP_

The Blob class relies on syncedmem.hpp, which manages host (CPU) and device (GPU) memory and keeps the two copies synchronized.
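To show how these pieces fit together, here is a minimal usage sketch (not part of the header; the float instantiation, the shape, and the values are chosen purely for illustration):

#include <vector>
#include "caffe/blob.hpp"

using caffe::Blob;

int main() {
  // Construct a 4-D blob and fill its data through the mutable CPU pointer.
  Blob<float> blob(1, 2, 3, 4);           // num, channels, height, width
  float* data = blob.mutable_cpu_data();  // allocates / syncs the CPU buffer
  for (int i = 0; i < blob.count(); ++i) {
    data[i] = static_cast<float>(i);
  }

  // Indexed read: data_at(0, 1, 2, 3) reads data[offset(0, 1, 2, 3)] == data[23].
  float last = blob.data_at(0, 1, 2, 3);

  // Reshape to a 2-D view; the memory is reused because the element count (24) is unchanged.
  std::vector<int> new_shape(2);
  new_shape[0] = 2;
  new_shape[1] = 12;
  blob.Reshape(new_shape);

  return last == 23.0f ? 0 : 1;
}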