[caffe Net] Usage example and annotated source code


First, a small example of using Net:

#include <iostream>
#include <string>
#include <vector>
#include <caffe/net.hpp>
using namespace std;
using namespace caffe;
int main()
{
    std::string proto("./bambootry/deploy.prototxt");
    Net<float> nn(proto, caffe::TEST);
    const vector<string>& bn = nn.blob_names();  // get the names of all Blob objects in the Net
    for (size_t i = 0; i < bn.size(); ++i)
    {
        cout << "Blob #" << i << " : " << bn[i] << endl;
    }
    return 0;
}

Compiling on Linux (bambootry is a directory created by the author):

g++ -o ./bambootry/netapp ./bambootry/net.cpp -I ./include -D CPU_ONLY \
-I ./.build_release/src/ -L ./build/lib -lcaffe -lglog -lboost_system \
-lprotobuf
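Because the binary links against libcaffe.so dynamically, the runtime linker may also need to be pointed at the library. This is an assumption based on the -L path above; adjust it to your actual build layout:

export LD_LIBRARY_PATH=./build/lib:$LD_LIBRARY_PATH
./bambootry/netapp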

Result:

…… (program output omitted) ……

Code annotations

NetParameter in src/caffe/proto/caffe.proto

message NetParameter {
  optional string name = 1; // consider giving the network a name
  // DEPRECATED. See InputParameter. The input blobs to the network.
  repeated string input = 3; // names of the network's input blobs; there may be more than one
  // DEPRECATED. See InputParameter. The shape of the input blobs.
  repeated BlobShape input_shape = 8; // shape information for the input blobs

  // 4D input dimensions -- deprecated.  Use "input_shape" instead.
  // If specified, for each input blob there should be four
  // values specifying the num, channels, height and width of the input blob.
  // Thus, there should be a total of (4 * #input) numbers.
  repeated int32 input_dim = 4; // old-style dimension information

  // Whether the network will force every layer to carry out backward operation.
  // If set False, then whether to carry out backward is determined
  // automatically according to the net structure and learning rates.
  optional bool force_backward = 5 [default = false];
  // The current "state" of the network, including the phase, level, and stage.
  // Some layers may be included/excluded depending on this state and the states
  // specified in the layers' include and exclude fields.
  optional NetState state = 6; // used to decide whether a given layer is included

  // Print debugging information about results while running Net::Forward,
  // Net::Backward, and Net::Update.
  optional bool debug_info = 7 [default = false];

  // The layers that make up the net.  Each of their configurations, including
  // connectivity and behavior, is specified as a LayerParameter.
  repeated LayerParameter layer = 100;  // ID 100 so layers are printed last.

  // DEPRECATED: use 'layer' instead.
  repeated V1LayerParameter layers = 2;
}
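The fields above map one-to-one onto the setters that protoc generates. The following is a minimal sketch of filling a NetParameter programmatically (the name "my_net" is made up for illustration; in practice the message is usually populated by parsing a .prototxt file):

#include <caffe/proto/caffe.pb.h>

int main()
{
    caffe::NetParameter param;
    param.set_name("my_net");                       // optional string name = 1
    param.set_force_backward(false);                // optional bool force_backward = 5
    param.mutable_state()->set_phase(caffe::TEST);  // optional NetState state = 6
    param.add_layer();                              // repeated LayerParameter layer = 100
    // A fully specified param can be passed straight to the
    // Net(const NetParameter&) constructor declared in net.hpp below.
    return 0;
}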

 include/caffe/net.hpp

#ifndef CAFFE_NET_HPP_
#define CAFFE_NET_HPP_

#include <map>
#include <set>
#include <string>
#include <utility>
#include <vector>

#include "caffe/blob.hpp"
#include "caffe/common.hpp"
#include "caffe/layer.hpp"
#include "caffe/proto/caffe.pb.h"

namespace caffe {

/**
 * @brief Connects Layer%s together into a directed acyclic graph (DAG)
 *        specified by a NetParameter.
 *
 * TODO(dox): more thorough description.
 */
template <typename Dtype>
class Net {
 public:
  explicit Net(const NetParameter& param);
  explicit Net(const string& param_file, Phase phase,
      const int level = 0, const vector<string>* stages = NULL);
  virtual ~Net() {}

  /// @brief Initialize a network with a NetParameter.
  void Init(const NetParameter& param);  // initialize the Net from a NetParameter object

  /**
   * @brief Run Forward and return the result.
   */
  // forward propagation
  const vector<Blob<Dtype>*>& Forward(Dtype* loss = NULL);
  /// @brief DEPRECATED; use Forward() instead.
  const vector<Blob<Dtype>*>& ForwardPrefilled(Dtype* loss = NULL) {
    LOG_EVERY_N(WARNING, 1000) << "DEPRECATED: ForwardPrefilled() "
        << "will be removed in a future version. Use Forward().";
    return Forward(loss);
  }

  /**
   * The From and To variants of Forward and Backward operate on the
   * (topological) ordering by which the net is specified. For general DAG
   * networks, note that (1) computing from one layer to another might entail
   * extra computation on unrelated branches, and (2) computation starting in
   * the middle may be incorrect if all of the layers of a fan-in are not
   * included.
   */
  // the From/To variants of forward propagation
  Dtype ForwardFromTo(int start, int end);
  Dtype ForwardFrom(int start);
  Dtype ForwardTo(int end);
  /// @brief DEPRECATED; set input blobs then use Forward() instead.
  // deprecated: takes the input blobs and returns the output blobs;
  // set the input blobs first and call Forward() instead
  const vector<Blob<Dtype>*>& Forward(const vector<Blob<Dtype>* > & bottom,
      Dtype* loss = NULL);

  /**
   * @brief Zeroes out the diffs of all net parameters.
   *        Should be run before Backward.
   */
  void ClearParamDiffs();

  /**
   * The network backward should take no input and output, since it solely
   * computes the gradient w.r.t the parameters, and the data has already been
   * provided during the forward pass.
   */
  // backward-propagation variants
  void Backward();
  void BackwardFromTo(int start, int end);
  void BackwardFrom(int start);
  void BackwardTo(int end);

  /**
   * @brief Reshape all layers from bottom to top.
   *
   * This is useful to propagate changes to layer sizes without running
   * a forward pass, e.g. to compute output feature size.
   */
  void Reshape();
  // forward pass + backward pass; takes the bottom blobs as input and returns the loss
  Dtype ForwardBackward() {
    Dtype loss;
    Forward(&loss);
    Backward();
    return loss;
  }

  /// @brief Updates the network weights based on the diff values computed.
  void Update();  // applies the diffs prepared by the Solver to the weights
  /**
   * @brief Shares weight data of owner blobs with shared blobs.
   *
   * Note: this is called by Net::Init, and thus should normally not be
   * called manually.
   */
  void ShareWeights();

  /**
   * @brief For an already initialized net, implicitly copies (i.e., using no
   *        additional memory) the pre-trained layers from another Net.
   */
  void ShareTrainedLayersWith(const Net* other);
  // For an already initialized net, CopyTrainedLayersFrom() copies the already
  // trained layers from another net parameter instance.
  /**
   * @brief For an already initialized net, copies the pre-trained layers from
   *        another Net.
   */
  void CopyTrainedLayersFrom(const NetParameter& param);
  void CopyTrainedLayersFrom(const string trained_filename);
  void CopyTrainedLayersFromBinaryProto(const string trained_filename);
  void CopyTrainedLayersFromHDF5(const string trained_filename);
  /// @brief Writes the net to a proto.
  void ToProto(NetParameter* param, bool write_diff = false) const;
  /// @brief Writes the net to an HDF5 file.
  void ToHDF5(const string& filename, bool write_diff = false) const;

  /// @brief returns the network name.
  inline const string& name() const { return name_; }
  /// @brief returns the layer names
  inline const vector<string>& layer_names() const { return layer_names_; }
  /// @brief returns the blob names
  inline const vector<string>& blob_names() const { return blob_names_; }
  /// @brief returns the blobs
  inline const vector<shared_ptr<Blob<Dtype> > >& blobs() const {
    return blobs_;
  }
  /// @brief returns the layers
  inline const vector<shared_ptr<Layer<Dtype> > >& layers() const {
    return layers_;
  }
  /// @brief returns the phase: TRAIN or TEST
  inline Phase phase() const { return phase_; }
  /**
   * @brief returns the bottom vecs for each layer -- usually you won't
   *        need this unless you do per-layer checks such as gradients.
   */
  inline const vector<vector<Blob<Dtype>*> >& bottom_vecs() const {
    return bottom_vecs_;  // each layer's inputs; needed for gradient checks, rarely otherwise
  }
  /**
   * @brief returns the top vecs for each layer -- usually you won't
   *        need this unless you do per-layer checks such as gradients.
   */
  inline const vector<vector<Blob<Dtype>*> >& top_vecs() const {
    return top_vecs_;  // each layer's outputs; needed for gradient checks, rarely otherwise
  }
  /// @brief returns the ids of the top blobs of layer i
  inline const vector<int> & top_ids(int i) const {
    CHECK_GE(i, 0) << "Invalid layer id";
    CHECK_LT(i, top_id_vecs_.size()) << "Invalid layer id";
    return top_id_vecs_[i];
  }
  /// @brief returns the ids of the bottom blobs of layer i
  inline const vector<int> & bottom_ids(int i) const {
    CHECK_GE(i, 0) << "Invalid layer id";
    CHECK_LT(i, bottom_id_vecs_.size()) << "Invalid layer id";
    return bottom_id_vecs_[i];
  }
  inline const vector<vector<bool> >& bottom_need_backward() const {
    return bottom_need_backward_;
  }
  inline const vector<Dtype>& blob_loss_weights() const {
    return blob_loss_weights_;
  }
  inline const vector<bool>& layer_need_backward() const {
    return layer_need_backward_;
  }
  /// @brief returns the parameters
  inline const vector<shared_ptr<Blob<Dtype> > >& params() const {
    return params_;
  }
  inline const vector<Blob<Dtype>*>& learnable_params() const {
    return learnable_params_;
  }
  /// @brief returns the learnable parameter learning rate multipliers
  inline const vector<float>& params_lr() const { return params_lr_; }
  inline const vector<bool>& has_params_lr() const { return has_params_lr_; }
  /// @brief returns the learnable parameter decay multipliers
  inline const vector<float>& params_weight_decay() const {
    return params_weight_decay_;
  }
  inline const vector<bool>& has_params_decay() const {
    return has_params_decay_;
  }
  const map<string, int>& param_names_index() const {
    return param_names_index_;
  }
  inline const vector<int>& param_owners() const { return param_owners_; }
  inline const vector<string>& param_display_names() const {
    return param_display_names_;
  }
  /// @brief Input and output blob numbers
  inline int num_inputs() const { return net_input_blobs_.size(); }
  inline int num_outputs() const { return net_output_blobs_.size(); }
  inline const vector<Blob<Dtype>*>& input_blobs() const {
    return net_input_blobs_;  // the net's input blobs
  }
  inline const vector<Blob<Dtype>*>& output_blobs() const {
    return net_output_blobs_;  // the net's output blobs
  }
  inline const vector<int>& input_blob_indices() const {
    return net_input_blob_indices_;  // indices of the input blobs
  }
  inline const vector<int>& output_blob_indices() const {
    return net_output_blob_indices_;  // indices of the output blobs
  }
  bool has_blob(const string& blob_name) const;  // does the net contain a blob with this name?
  const shared_ptr<Blob<Dtype> > blob_by_name(const string& blob_name) const;  // fetch it if so
  bool has_layer(const string& layer_name) const;  // does the net contain a layer with this name?
  const shared_ptr<Layer<Dtype> > layer_by_name(const string& layer_name) const;  // fetch it if so

  void set_debug_info(const bool value) { debug_info_ = value; }

  // Helpers for Init.
  /**
   * @brief Remove layers that the user specified should be excluded given the current
   *        phase, level, and stage.
   */
  static void FilterNet(const NetParameter& param,
      NetParameter* param_filtered);
  /// @brief return whether NetState state meets NetStateRule rule
  static bool StateMeetsRule(const NetState& state, const NetStateRule& rule,
      const string& layer_name);

  // Invoked at specific points during an iteration
  class Callback {
   protected:
    virtual void run(int layer) = 0;

    template <typename T>
    friend class Net;
  };
  const vector<Callback*>& before_forward() const { return before_forward_; }
  void add_before_forward(Callback* value) {
    before_forward_.push_back(value);
  }
  const vector<Callback*>& after_forward() const { return after_forward_; }
  void add_after_forward(Callback* value) {
    after_forward_.push_back(value);
  }
  const vector<Callback*>& before_backward() const { return before_backward_; }
  void add_before_backward(Callback* value) {
    before_backward_.push_back(value);
  }
  const vector<Callback*>& after_backward() const { return after_backward_; }
  void add_after_backward(Callback* value) {
    after_backward_.push_back(value);
  }

 protected:
  // Helpers for Init.
  /// @brief Append a new top (output) blob to the net.
  void AppendTop(const NetParameter& param, const int layer_id,
                 const int top_id, set<string>* available_blobs,
                 map<string, int>* blob_name_to_idx);
  /// @brief Append a new bottom (input) blob to the net.
  int AppendBottom(const NetParameter& param, const int layer_id,
                   const int bottom_id, set<string>* available_blobs,
                   map<string, int>* blob_name_to_idx);
  /// @brief Append a new parameter (weight) blob to the net.
  void AppendParam(const NetParameter& param, const int layer_id,
                   const int param_id);
  // the following helpers display debug information
  /// @brief Helper for displaying debug info in Forward.
  void ForwardDebugInfo(const int layer_id);
  /// @brief Helper for displaying debug info in Backward.
  void BackwardDebugInfo(const int layer_id);
  /// @brief Helper for displaying debug info in Update.
  void UpdateDebugInfo(const int param_id);

  /// @brief The network name
  string name_;
  /// @brief The phase: TRAIN or TEST
  Phase phase_;
  /// @brief Individual layers in the net
  vector<shared_ptr<Layer<Dtype> > > layers_;
  vector<string> layer_names_;  // layer names
  map<string, int> layer_names_index_;  // map from layer name to index
  vector<bool> layer_need_backward_;  // marks whether each layer needs backward propagation
  /// @brief the blobs storing intermediate results between the layer.
  vector<shared_ptr<Blob<Dtype> > > blobs_;  // the data channels between layers
  vector<string> blob_names_;  // blob names
  map<string, int> blob_names_index_;  // map from blob name to index
  vector<bool> blob_need_backward_;  // marks whether each blob needs backward propagation
  /// bottom_vecs stores the vectors containing the input for each layer.
  /// They don't actually host the blobs (blobs_ does), so we simply store
  /// pointers.
  // pointers to each layer's input blobs (the actual data is owned by blobs_)
  vector<vector<Blob<Dtype>*> > bottom_vecs_;
  vector<vector<int> > bottom_id_vecs_;
  vector<vector<bool> > bottom_need_backward_;
  /// top_vecs stores the vectors containing the output for each layer
  // pointers to each layer's output blobs (the actual data is owned by blobs_)
  vector<vector<Blob<Dtype>*> > top_vecs_;
  vector<vector<int> > top_id_vecs_;
  /// Vector of weight in the loss (or objective) function of each net blob,
  /// indexed by blob_id.
  // each blob's weight in the global loss (objective) function
  vector<Dtype> blob_loss_weights_;
  vector<vector<int> > param_id_vecs_;
  vector<int> param_owners_;
  vector<string> param_display_names_;
  vector<pair<int, int> > param_layer_indices_;
  map<string, int> param_names_index_;
  /// blob indices for the input and the output of the net
  vector<int> net_input_blob_indices_;
  vector<int> net_output_blob_indices_;
  vector<Blob<Dtype>*> net_input_blobs_;
  vector<Blob<Dtype>*> net_output_blobs_;
  /// The parameters in the network.
  vector<shared_ptr<Blob<Dtype> > > params_;  // the network weights
  vector<Blob<Dtype>*> learnable_params_;  // the trainable network weights
  /**
   * The mapping from params_ -> learnable_params_: we have
   * learnable_param_ids_.size() == params_.size(),
   * and learnable_params_[learnable_param_ids_[i]] == params_[i].get()
   * if and only if params_[i] is an "owner"; otherwise, params_[i] is a sharer
   * and learnable_params_[learnable_param_ids_[i]] gives its owner.
   */
  vector<int> learnable_param_ids_;
  /// the learning rate multipliers for learnable_params_
  vector<float> params_lr_;  // learning-rate multipliers
  vector<bool> has_params_lr_;
  /// the weight decay multipliers for learnable_params_
  vector<float> params_weight_decay_;  // weight-decay multipliers
  vector<bool> has_params_decay_;
  /// The bytes of memory used by this net
  size_t memory_used_;
  /// Whether to compute and display debug info for the net.
  bool debug_info_;
  // Callbacks
  vector<Callback*> before_forward_;
  vector<Callback*> after_forward_;
  vector<Callback*> before_backward_;
  vector<Callback*> after_backward_;

  DISABLE_COPY_AND_ASSIGN(Net);  // disallow copy construction and assignment
};


}  // namespace caffe

#endif  // CAFFE_NET_HPP_
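Putting the interface above together, the sketch below shows how a Net is typically driven. This is a hedged example, not part of the original post: the blob name "prob" is hypothetical (use one of the names printed by the first example), and a real run would first copy input data into net.input_blobs().

#include <iostream>
#include <boost/shared_ptr.hpp>
#include <caffe/net.hpp>

int main()
{
    caffe::Net<float> net("./bambootry/deploy.prototxt", caffe::TEST);

    // Run one forward pass. loss stays 0 unless the net defines a loss layer,
    // and the outputs are meaningless until the input blobs hold real data.
    float loss = 0;
    net.Forward(&loss);
    std::cout << "loss = " << loss << std::endl;

    // Fetch an intermediate or output blob by name ("prob" is hypothetical).
    if (net.has_blob("prob")) {
        boost::shared_ptr<caffe::Blob<float> > prob = net.blob_by_name("prob");
        std::cout << "prob holds " << prob->count() << " elements" << std::endl;
    }

    // In a TRAIN-phase net, a Solver drives one iteration essentially as:
    //   net.ClearParamDiffs();           // zero all parameter diffs
    //   float l = net.ForwardBackward(); // forward + backward, returns loss
    //   net.Update();                    // apply the diffs to the weights
    return 0;
}

The Callback hooks can be exercised in the same way. A minimal subclass (again a sketch; the class name is made up) overrides the protected run(), whose argument is the index of the layer about to execute, and is registered through one of the public add_* functions:

class PrintLayerCallback : public caffe::Net<float>::Callback {
 protected:
    virtual void run(int layer) {
        std::cout << "entering layer " << layer << std::endl;
    }
};

// usage:  PrintLayerCallback cb;  net.add_before_forward(&cb);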
To be continued……

Content from Zhao Yongke, 《深度學習 21天實戰caffe》 (Deep Learning: 21 Days of Hands-On Caffe).