Caffe C++ Tutorial

by Shicai Yang (@星空下的巫師) on 2015/08/06

 

Initializing the Network

#include "caffe/caffe.hpp"
#include <string>
#include <vector>
using namespace caffe;

char *proto = "H:\\Models\\Caffe\\deploy.prototxt"; /* CaffeNet deploy configuration */
Phase phase = TEST; /* or TRAIN */
Caffe::set_mode(Caffe::CPU);
// Caffe::set_mode(Caffe::GPU);
// Caffe::SetDevice(0);

//! Note: every "net" referred to below is this net
boost::shared_ptr< Net<float> > net(new caffe::Net<float>(proto, phase));

Loading a Trained Model

char *model = "H:\\Models\\Caffe\\bvlc_reference_caffenet.caffemodel";    
net->CopyTrainedLayersFrom(model);

Reading the Image Mean

char *mean_file = "H:\\Models\\Caffe\\imagenet_mean.binaryproto";
Blob<float> image_mean;
BlobProto blob_proto;
const float *mean_ptr;
unsigned int num_pixel;

bool succeed = ReadProtoFromBinaryFile(mean_file, &blob_proto);
if (succeed)
{
    image_mean.FromProto(blob_proto);
    num_pixel = image_mean.count(); /* NCHW=1x3x256x256=196608 */
    mean_ptr = (const float *) image_mean.cpu_data();
}
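
Before forwarding, this mean must be subtracted from each input image. The following is a minimal sketch, not from the original post; it assumes the image has already been converted to a float buffer in CHW/BGR layout at the network's input size (227x227 for CaffeNet), so the 256x256 mean is center-cropped to match. The function name and parameters are illustrative.

//! Sketch (assumption): subtract the center crop of the 3x256x256 mean
//! from a 3x227x227 float image stored in CHW order.
void subtract_mean(float *image_data, const float *mean_data,
                   int channels, int input_h, int input_w,
                   int mean_h, int mean_w)
{
    int off_h = (mean_h - input_h) / 2; /* center-crop offset, e.g. (256-227)/2 */
    int off_w = (mean_w - input_w) / 2;
    for (int c = 0; c < channels; ++c)
        for (int h = 0; h < input_h; ++h)
            for (int w = 0; w < input_w; ++w)
                image_data[(c * input_h + h) * input_w + w] -=
                    mean_data[(c * mean_h + h + off_h) * mean_w + w + off_w];
}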

Forward-Propagating the Network on Given Data

//! Note: data_ptr points to data that has already been preprocessed (mean-subtracted, and matching the network's input image height/width and batch size)
void caffe_forward(boost::shared_ptr< Net<float> > & net, float *data_ptr)
{
    Blob<float>* input_blobs = net->input_blobs()[0];
    switch (Caffe::mode())
    {
    case Caffe::CPU:
        memcpy(input_blobs->mutable_cpu_data(), data_ptr,
            sizeof(float) * input_blobs->count());
        break;
    case Caffe::GPU:
        cudaMemcpy(input_blobs->mutable_gpu_data(), data_ptr,
            sizeof(float) * input_blobs->count(), cudaMemcpyHostToDevice);
        break;
    default:
        LOG(FATAL) << "Unknown Caffe mode.";
    } 
    net->ForwardPrefilled();
}
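
A usage sketch (an assumption, not part of the original post): batch_data is filled with mean-subtracted images matching the input blob's NCHW shape before the call.

Blob<float>* input_blob = net->input_blobs()[0];
std::vector<float> batch_data(input_blob->count()); /* NCHW, e.g. 10x3x227x227 */
/* ... fill batch_data with preprocessed images (see subtract_mean above) ... */
caffe_forward(net, &batch_data[0]);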

Getting a Feature Blob's Index in the Network by Name

//! Note: a Net blob is the output data of a layer, i.e. its feature maps
// char *query_blob_name = "conv1";
unsigned int get_blob_index(boost::shared_ptr< Net<float> > & net, char *query_blob_name)
{
    std::string str_query(query_blob_name);    
    vector< string > const & blob_names = net->blob_names();
    for( unsigned int i = 0; i != blob_names.size(); ++i ) 
    { 
        if( str_query == blob_names[i] ) 
        { 
            return i;
        } 
    }
    LOG(FATAL) << "Unknown blob name: " << str_query;
}

Reading Data from a Specified Feature Blob

//! Note: according to CaffeNet's deploy.prototxt, this Net has 15 blobs in total, from data through prob
char *query_blob_name = "conv1"; /* data, conv1, pool1, norm1, fc6, prob, etc */
unsigned int blob_id = get_blob_index(net, query_blob_name);

boost::shared_ptr<Blob<float> > blob = net->blobs()[blob_id];
unsigned int num_data = blob->count(); /* NCHW=10x96x55x55 */
const float *blob_ptr = (const float *) blob->cpu_data();
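
For example, after a forward pass the top-1 prediction for each image can be read from the prob blob. This is a sketch, not part of the original post; it assumes a CaffeNet-style prob blob of shape num x 1000.

char *prob_blob_name = "prob";
boost::shared_ptr<Blob<float> > prob = net->blobs()[get_blob_index(net, prob_blob_name)];
const float *prob_data = prob->cpu_data();
int num_images = prob->num();                   /* batch size, e.g. 10 */
int num_classes = prob->count() / num_images;   /* e.g. 1000 ImageNet classes */
for (int n = 0; n < num_images; ++n)
{
    const float *p = prob_data + n * num_classes;
    int best = 0;
    for (int c = 1; c < num_classes; ++c)
        if (p[c] > p[best]) best = c;
    LOG(INFO) << "image " << n << ": top-1 class " << best << ", prob " << p[best];
}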

Extracting Features from a File List and Saving Them as Binary Files

See get_features.cpp for details.

There are three main steps:

1. Generate a file list in the same format used for training: one image per line, containing the full file path, a space, and a label (use 0 if there is none).
2. Rewrite the train_val or deploy prototxt into a feat.prototxt, mainly by replacing the input layer with an image_data layer and appending prob and argmax layers at the end (so that probabilities and Top-1/Top-5 predicted labels are output).
3. Run the program with the desired parameters; it produces a set of binary files whose data can be read and analyzed in MATLAB (a sketch of this dump follows the list).
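
The actual get_features.cpp is not reproduced here; as a minimal sketch of step 3, a blob's raw float data can be dumped to a binary file that MATLAB reads back with fread(fid, inf, 'single'). The function name is illustrative.

#include <cstdio>

//! Sketch (assumption): dump a feature blob's data as raw floats.
void save_blob_to_file(boost::shared_ptr< Net<float> > & net,
                       char *blob_name, const char *file_name)
{
    unsigned int blob_id = get_blob_index(net, blob_name);
    boost::shared_ptr<Blob<float> > blob = net->blobs()[blob_id];
    FILE *fp = fopen(file_name, "wb");
    CHECK(fp != NULL) << "Cannot open " << file_name;
    fwrite(blob->cpu_data(), sizeof(float), blob->count(), fp);
    fclose(fp);
}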

Getting a Layer's Index in the Network by Name

//! Note: Layers cover every layer of the network; CaffeNet, for example, has 23 layers in total
// char *query_layer_name = "conv1";
unsigned int get_layer_index(boost::shared_ptr< Net<float> > & net, char *query_layer_name)
{
    std::string str_query(query_layer_name);    
    vector< string > const & layer_names = net->layer_names();
    for( unsigned int i = 0; i != layer_names.size(); ++i ) 
    { 
        if( str_query == layer_names[i] ) 
        { 
            return i;
        } 
    }
    LOG(FATAL) << "Unknown layer name: " << str_query;
}

Reading a Specified Layer's Weight Data

//! Note: unlike a Net blob, which holds feature maps, a Layer's blobs hold the weights and biases of layers such as Conv and FC
char *query_layer_name = "conv1";
const float *weight_ptr, *bias_ptr;
unsigned int layer_id = get_layer_index(net, query_layer_name);
boost::shared_ptr<Layer<float> > layer = net->layers()[layer_id];
std::vector<boost::shared_ptr<Blob<float> > > blobs = layer->blobs();
if (blobs.size() > 0)
{
    weight_ptr = (const float *) blobs[0]->cpu_data();
    bias_ptr = (const float *) blobs[1]->cpu_data();
}

//! Note: in training mode, reading a layer's gradient data works the same way; the only difference is changing cpu_data to cpu_diff
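
For example, a sketch of reading the gradients instead (assumes the same blobs vector as above, a TRAIN-phase net, and that Backward has been run):

const float *weight_diff_ptr = (const float *) blobs[0]->cpu_diff();
const float *bias_diff_ptr   = (const float *) blobs[1]->cpu_diff();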

Modifying a Layer's Weight Data

const float* data_ptr;          /* pointer to the data to write (source) */
float* weight_ptr = NULL;       /* pointer to the layer's weights inside the network (destination) */
unsigned int data_size;         /* number of values to write */
char *layer_name = "conv1";     /* name of the layer to modify */

unsigned int layer_id = get_layer_index(net, layer_name);
boost::shared_ptr<Blob<float> > blob = net->layers()[layer_id]->blobs()[0];

CHECK(data_size == blob->count());
switch (Caffe::mode())
{
case Caffe::CPU:
    weight_ptr = blob->mutable_cpu_data();
    break;
case Caffe::GPU:
    weight_ptr = blob->mutable_gpu_data();
    break;
default:
    LOG(FATAL) << "Unknown Caffe mode";
}
caffe_copy(blob->count(), data_ptr, weight_ptr);

//! Note: in training mode, manually modifying a layer's gradient data works the same way:
// change mutable_cpu_data to mutable_cpu_diff, and mutable_gpu_data to mutable_gpu_diff

Saving the New Model

char* weights_file = "bvlc_reference_caffenet_new.caffemodel";
NetParameter net_param;
net->ToProto(&net_param, false);
WriteProtoToBinaryFile(net_param, weights_file);