//For a window of size winsize, compute the rects of every HaarFeature and return them in features, i.e. enumerate the coordinates of all features
CvIntHaarFeatures* icvCreateIntHaarFeatures( CvSize winsize,int mode,int symmetric )
//The training sample set
//Create the CvHaarTrainigData structure and allocate its buffers; no sample data is filled in yet
CvHaarTrainigData* icvCreateHaarTrainingData( CvSize winsize, int maxnumsamples )
//Compute the feature values over the training sample set; calls icvGetTrainingDataCallback
//Given the samples' integral images, compute the feature values valcache and the sorted index cache idxcache
void icvPrecalculate( CvHaarTrainingData* data, CvIntHaarFeatures* haarFeatures,int numprecalculated )//numprecalculated: the number of features to precompute
//userdata holds the training data and the Haar features
//Compute num features starting at first; the results are stored in mat
//If sampleIdx is NULL all samples are evaluated, otherwise only those selected by sampleIdx
void icvGetTrainingDataCallback( CvMat* mat, CvMat* sampleIdx, CvMat*,int first, int num, void* userdata )
//Evaluate one HaarFeature on one sample (whose integral images are sum and tilted) and return the value
CV_INLINE float cvEvalFastHaarFeature( CvFastHaarFeature* feature,sum_type* sum, sum_type* tilted )
//Convert a feature's rects from coordinate form to pixel-index form, as the sketch after the prototype shows
//Each Haar feature consists of at most 3 rects, but storage for all 3 is always kept
void icvConvertToFastHaarFeature( CvTHaarFeature* haarFeature,CvFastHaarFeature* fastHaarFeature,int size, int step )
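To make the index form concrete, here is a minimal standalone sketch of the conversion for an upright rect; the helper name rect_to_indices is ours, and in the real source the CV_SUM_OFFSETS macro performs this arithmetic with step = winsize.width + 1 (the integral image has one extra row and column):
/* Sketch: convert a rect given in window coordinates into the four
 * corner offsets used by CvFastHaarFeature.  `step` is the number of
 * entries per row of the integral image. */
typedef struct { int x, y, width, height; } Rect;
static void rect_to_indices( Rect r, int step,
                             int* p0, int* p1, int* p2, int* p3 )
{
    *p0 = r.y * step + r.x;                          /* (x,     y)     */
    *p1 = r.y * step + r.x + r.width;                /* (x + w, y)     */
    *p2 = (r.y + r.height) * step + r.x;             /* (x,     y + h) */
    *p3 = (r.y + r.height) * step + r.x + r.width;   /* (x + w, y + h) */
}
/* The rect's pixel sum is then img[p0] - img[p1] - img[p2] + img[p3],
 * exactly the per-rect expression cvEvalFastHaarFeature evaluates. */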
Where in the OpenCV source are the Haar feature values actually computed?
I think what I am looking for is inside CvIntHaarFeatures* icvCreateIntHaarFeatures
I have looked there before and did not see different scales being computed. Does OpenCV compute features at different scales?
Yes. The big loop inside that function iterates over Haar features at every scale and position, as the sketch below illustrates
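A standalone sketch for one feature type (haar_x2, two side-by-side rects) shows the enumeration over every position and every size; the helper is ours, and the real loop additionally emits the other feature types, the tilted variants and the rect weights:
#include <stdio.h>

/* Count every placement of a haar_x2 feature (white rect (x,y,dx,dy)
 * next to black rect (x+dx,y,dx,dy)) inside a width x height window. */
static int count_haar_x2( int width, int height )
{
    int x, y, dx, dy, count = 0;
    for( y = 0; y < height; y++ )
        for( x = 0; x < width; x++ )
            for( dy = 1; y + dy <= height; dy++ )
                for( dx = 1; x + 2 * dx <= width; dx++ )
                    count++;
    return count;
}

int main( void )
{
    /* prints 43200 for the classic 24x24 window */
    printf( "haar_x2 placements in 24x24: %d\n", count_haar_x2( 24, 24 ) );
    return 0;
}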
Part 2
cvCreateTreeCascadeClassifier
//CvTreeCascadeNode contains CvStageHaarClassifier* stage; that is, the last stage is found and used as the deepest leaf;
CV_CALL( leaves = icvFindDeepestLeaves( tcc ) );
CV_CALL( icvPrintTreeCascade( tcc->root ) );
//Build the Haar features for the given mode, symmetry and winsize; each feature is formed by adding/subtracting at most three rectangles,
//This yields all permitted features; the permitted features are a subset of all possible ones, and those with relatively small area are filtered out
haar_features = icvCreateIntHaarFeatures( winsize, mode, symmetric );
printf( "Number of features used : %d\n", haar_features->count );
//Allocate the training buffers, including the upright and tilted integral images of the positive and negative samples
// CvMat normfactor;
// CvMat cls;
// CvMat weights;
training_data = icvCreateHaarTrainingData( winsize, npos + nneg );
sprintf( stage_name, "%s/", dirname );
suffix = stage_name + strlen( stage_name );
//Obtain the background information: read the file names listed in the background description file and index into that file,
consumed = 0;
//Read the positive samples and count how many pass all preceding stages, from which the detection rate can be computed
//Call chain:
//icvGetHaarTrainingDataFromVec internally calls icvGetHaarTrainingData
//icvGetHaarTrainingData obtains samples from the callback icvGetHaarTraininDataFromVecCallback,
//filters out a small number of positives using the previously trained classifiers, then computes the integral images,
//which are stored in the training_data structure
poscount = icvGetHaarTrainingDataFromVec( training_data, 0, npos,
(CvIntHaarClassifier*) tcc, vecfilename, &consumed );
proctime = -TIME( 0 );
//Read the negative samples and return the false alarm rate
//Negatives are read from file and filtered by the previously trained stages to collect negatives misclassified as positives; if fewer than nneg are found,
//the negatives are scanned repeatedly until nneg are collected, so if no negative is misclassified as positive after the current stage, this becomes an infinite loop.
//One fix is to check the reader's round counter.
//That situation really means training has converged: the false alarm rate is 0, which satisfies if( leaf_fa_rate <= required_leaf_fa_rate ), so one may simply stop training
nneg = (int) (neg_ratio * poscount);
//icvGetHaarTrainingDataFromBG internally calls
//icvGetBackgroundImage to fetch data and compute integral images, stored in the memory allocated in training_data: nneg of them starting at position poscount
//training_data has room for npos + nneg integral images and weights
negcount = icvGetHaarTrainingDataFromBG( training_data, poscount, nneg,
(CvIntHaarClassifier*) tcc, &false_alarm );
printf( "NEG: %d %g\n", negcount, false_alarm );
icvSetNumSamples( training_data, poscount + negcount );
posweight = (equalweights) ? 1.0F / (poscount + negcount) : (0.5F/poscount);
negweight = (equalweights) ? 1.0F / (poscount + negcount) : (0.5F/negcount);
//Positives are labeled 1 and negatives 0; these labels later tell the sample types apart when the hit and false alarm rates are computed
//The weights are set here as well (sketched after the call below)
icvSetWeightsAndClasses( training_data,
poscount, posweight, 1.0F, negcount, negweight, 0.0F );
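What that call amounts to can be sketched as follows (a minimal standalone version; the helper name is ours, and the real icvSetWeightsAndClasses writes into data->weights and data->cls):
/* Standalone sketch of icvSetWeightsAndClasses' effect. */
static void set_weights_and_classes( float* weights, float* cls,
                                     int npos, float wpos,
                                     int nneg, float wneg )
{
    int i;
    for( i = 0; i < npos; i++ )
    {
        weights[i] = wpos;   /* e.g. 0.5F / poscount */
        cls[i] = 1.0F;       /* positive */
    }
    for( i = npos; i < npos + nneg; i++ )
    {
        weights[i] = wneg;   /* e.g. 0.5F / negcount */
        cls[i] = 0.0F;       /* negative */
    }
}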
//Precompute the first numprecalculated features of every sample (positive and negative)
//Internally calls cvGetSortedIndices to sort every feature's values over all samples in ascending order; the indices and values are stored in
//training_data's *data->idxcache and *data->valcache respectively
icvPrecalculate( training_data, haar_features, numprecalculated );
//Train the strong classifier, a boosted combination of several weak classifiers
single_cluster->stage =
(CvStageHaarClassifier*) icvCreateCARTStageClassifier
//Is cluster_idx big enough? CV_CALL( ptr = cvCreateMat( num, total, CV_32FC1 ) ); ptr is vals, where total is the feature count and num is poscount,
//while cluster_idx is only npos + nneg long
//CV_CALL( cluster_idx = cvCreateMat( 1, npos + nneg, CV_32SC1 ) );
//Or is it how cvKMeans2 is used? vals is poscount x featurecount,
//i.e. each positive sample's stage feature responses are combined into one vector, one vector per sample; vals supplies poscount input vectors and the cluster labels are written to cluster_idx.
//Read this way there is no out-of-bounds access and cluster_idx is indeed big enough
CV_CALL( cvKMeans2( vals, k, cluster_idx, CV_TERM_CRITERIA() ) );
//Count the number of samples in each cluster
for( cluster = 0; cluster < k; cluster++ )
num[cluster] = 0;
//If minpos exceeds the size of any cluster, abandon clustering
for( cluster = 0; cluster < k; cluster++ )
//Here the positive-sample clusters that meet the requirement are obtained,
cur_num = 0;
cur_node = last_node = NULL;
//best_num holds the total number of weak-classifier features (splits) used by the current best strong classifier
for( cluster = 0; (cluster < k) && (cur_num < best_num); cluster++ )
{
CvTreeCascadeNode* new_node;
int num_splits;
int last_pos;
int total_pos;
printf( "Cluster: %d\n", cluster );
last_pos = negcount;
//Combine each cluster of positives with the negatives and train k nodes;
//unlike before, the positives now go after the negatives
//Recompute the weights
icvSetWeightsAndClasses( training_data,
poscount, posweight, 1.0F, negcount, negweight, 0.0F );
//Note that idx here differs from the call above: it is no longer NULL
new_node->stage = (CvStageHaarClassifier*)
static
CvIntHaarClassifier* icvCreateCARTStageClassifier( CvHaarTrainingData* data,
CvMat* sampleIdx,
CvIntHaarFeatures* haarFeatures,
float minhitrate,
float maxfalsealarm,
int symmetric,
float weightfraction,
int numsplits,
CvBoostType boosttype,
CvStumpError stumperror,
int maxsplits )
int num_splits;
//N is the weak classifier's index
//%SMP is set by weightfraction: the percentage of the total samples kept after the small-weight ones are dropped,
//weightfraction changes how many samples (the large-weight ones) take part in the next weak classifier
//Recall AdaBoost: in the weight update, correctly classified samples are scaled by a factor that shrinks their weights. Does that reduce the number of positives taking part in training?
//Since cvTrimWeights keeps every sample whose weight equals the threshold, the reduction is limited (consider the extreme case where all correctly classified samples get dropped). A sketch of the trimming rule follows.
//F marks the odd-numbered weak classifiers in the symmetric setting (the even-numbered ones when counting from 0). Does it indicate that mirrored features are used???
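A standalone sketch of the trimming rule just described (our helper; the real cvTrimWeights returns an index matrix over the kept samples rather than a threshold): weights are dropped from the smallest upward while the kept mass stays at least weightfraction of the total, and ties at the threshold are all kept:
#include <stdlib.h>

static int cmp_float( const void* a, const void* b )
{
    float fa = *(const float*) a, fb = *(const float*) b;
    return (fa > fb) - (fa < fb);
}

/* Returns the smallest weight still kept; samples with
 * weight >= the returned threshold take part in the next round. */
static float trim_threshold( const float* weights, int n, float weightfraction )
{
    float* sorted = (float*) malloc( n * sizeof( *sorted ) );
    float total = 0.0F, dropped = 0.0F, thr;
    int i;
    for( i = 0; i < n; i++ ) { sorted[i] = weights[i]; total += weights[i]; }
    qsort( sorted, n, sizeof( *sorted ), cmp_float );
    thr = sorted[0];                       /* default: keep everything */
    for( i = 0; i + 1 < n; i++ )
    {
        dropped += sorted[i];
        if( dropped > (1.0F - weightfraction) * total )
            break;                         /* dropping this one loses too much mass */
        thr = sorted[i + 1];
    }
    free( sorted );
    return thr;
}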
#ifdef CV_VERBOSE
printf( "+----+----+-+---------+---------+---------+---------+\n" );
printf( "| N |%%SMP|F| ST.THR | HR | FA | EXP. ERR|\n" );
printf( "+----+----+-+---------+---------+---------+---------+\n" );
#endif
//userdata holds the integral images and the features used in training; its pointer presumably serves as the user parameter of the callback
userdata = cvUserdata( data, haarFeatures );
//The weights only play a role for the LB boost type (icvBoostStartTrainingLB).
//In the non-LB case the function computes weakTrainVals from cls for the training samples; weakTrainVals is either +1 or -1
trainer = cvBoostStartTraining( &data->cls, weakTrainVals, &data->weights,
sampleIdx, boosttype );
//Drop the small-weight samples; the indices of the large-weight ones go into the trimmedIdx structure
trimmedIdx = cvTrimWeights( &data->weights, sampleIdx, weightfraction );
numtrimmed = (trimmedIdx) ? MAX( trimmedIdx->rows, trimmedIdx->cols ) : m;
//data->valcache holds the precomputed feature values
//flags is CV_COL_SAMPLE or CV_ROW_SAMPLE
//weakTrainVals?
//
//Obtain the mirrored (symmetric) features
if( classifier->feature[i].desc[0] == 'h' )
{//upright ('h'...) features: mirror by flipping x
int tmp = 0;
//Compute each mirrored feature's value on the trimmed samples
for( j = 0; j < numtrimmed; j++ )
//Call icvEvalCARTHaarClassifier to compute the trained weak classifier's response for every sample taking part in training
eval.data.fl[idx] = classifier->eval_r( (CvIntHaarClassifier*) classifier,
(sum_type*) (data->sum.data.ptr + idx * data->sum.step),
(sum_type*) (data->tilted.data.ptr + idx * data->tilted.step),
data->normfactor.data.fl[idx] );
//Here each sample's weight is updated
alpha = cvBoostNextWeakClassifier( &eval, &data->cls, weakTrainVals,
&data->weights, trainer );
//Count the positives and compute each positive's response to the weak classifiers (one weak classifier per feature)
//Are the left/right fields in icvEvalCARTHaarClassifier numbered in a fixed order to handle the multi-split case?
//The best-suited feature value is found and returned
eval.data.fl[numpos] = 0.0F;
for( j = 0; j < seq->total; j++ )
{
classifier = *((CvCARTHaarClassifier**) cvGetSeqElem( seq, j ));
eval.data.fl[numpos] += classifier->eval_r(
(CvIntHaarClassifier*) classifier,
(sum_type*) (data->sum.data.ptr + idx * data->sum.step),
(sum_type*) (data->tilted.data.ptr + idx * data->tilted.step),
data->normfactor.data.fl[idx] );
}
//Pick the stage threshold from the required hit rate, then measure the false alarm rate at that threshold.
//Example (hypothetical numbers): with numpos = 1000 and minhitrate = 0.995, the sorted stage sums give threshold = eval[5], so at most 5 positives fall below it and the hit rate stays >= 99.5%
threshold = eval.data.fl[(int) ((1.0F - minhitrate) * numpos)];
CvClassifier* cvCreateCARTClassifier( CvMat* trainData,
//holds every sample's precomputed feature values; the index arrays sorted by ascending feature value are in trainParams' sortedIdx
int flags,
CvMat* trainClasses,//+1,-1
CvMat* typeMask,
CvMat* missedMeasurementsMask,
CvMat* compIdx,
CvMat* sampleIdx,//if not NULL, the sample order, sorted by ascending weight
CvMat* weights,//sample weights
CvClassifierTrainParams* trainParams )
CvClassifier* cvCreateMTStumpClassifier( CvMat* trainData,//precomputed feature values
int flags,
CvMat* trainClasses,//sample class, +1 or -1
CvMat* typeMask,
CvMat* missedMeasurementsMask,
CvMat* compIdx,
CvMat* sampleIdx,//index array left after dropping the small-weight samples
CvMat* weights,//sample weights
CvClassifierTrainParams* trainParams )
//datan is the number of precomputed features; when new features can be fetched (getTrainData != NULL), n is the number of all obtainable features, so n >= datan always holds
//When training the mirrored features, getTrainData passed into cvCreateMTStumpClassifier is NULL and only the remaining features are trained
assert( datan <= n );
//m is the number of samples that may take part in training; when small-weight samples have been dropped, sampleIdx != NULL and l is the number actually taking part;
//sorting the samples by weight yields sampleIdx, sorting them by feature value yields sorteddata
if( sampleIdx != NULL )
//filter != NULL means the small-weight samples were dropped and the samples are sorted by feature value
if( filter != NULL || sortedn < n )
Q: Why run cluster analysis on the positive samples?
http://blog.sina.com.cn/s/blog_75e063c10100za53.html
opencv haartraining analysis 1: cvCreateTreeCascadeClassifier
void cvCreateTreeCascadeClassifier( const char* dirname,//directory holding the text data of the n stages, e.g. c:\data
const char* vecfilename,//path of the .vec file holding the positive samples
const char* bgfilename, //path of the background (negative sample) description file
int npos, int nneg, int nstages,
int numprecalculated,
int numsplits,
float minhitrate, float maxfalsealarm,
float weightfraction,
int mode, int symmetric,
int equalweights,
int winwidth, int winheight,
int boosttype, int stumperror,
int maxtreesplits, int minpos, bool bg_vecfile )//maxtreesplits – maximum number of nodes in tree. If maxtreesplits < nsplits, tree will not be built
{
CvTreeCascadeClassifier* tcc = NULL;
CvIntHaarFeatures* haar_features = NULL;
CvHaarTrainingData* training_data = NULL;
CvMat* vals = NULL;
CvMat* cluster_idx = NULL;
CvMat* idx = NULL;
CvMat* features_idx = NULL;
CV_FUNCNAME( "cvCreateTreeCascadeClassifier" );
__BEGIN__;
int i, k;
CvTreeCascadeNode* leaves;
int best_num, cur_num;
CvSize winsize;
char stage_name[PATH_MAX];
char buf[PATH_MAX];
char* suffix;
int total_splits;
int poscount;
int negcount;
int consumed;
double false_alarm;
double proctime;
int nleaves;
double required_leaf_fa_rate;
float neg_ratio;
int max_clusters;
max_clusters = CV_MAX_CLUSTERS;
neg_ratio = (float) nneg / npos;
nleaves = 1 + MAX( 0, maxtreesplits );
required_leaf_fa_rate = pow( (double) maxfalsealarm, (double) nstages ) / nleaves;//leaf false alarm rate: the stage false alarm rate raised to the nstages-th power, divided by the number of leaves (e.g. 0.5^20 / 1 is about 9.5e-7)
printf( "Required leaf false alarm rate: %g\n", required_leaf_fa_rate );
total_splits = 0;
winsize = cvSize( winwidth, winheight );
CV_CALL( cluster_idx = cvCreateMat( 1, npos + nneg, CV_32SC1 ) );
CV_CALL( idx = cvCreateMat( 1, npos + nneg, CV_32SC1 ) );
CV_CALL( tcc = (CvTreeCascadeClassifier*)
icvLoadTreeCascadeClassifier( dirname, winwidth + 1, &total_splits ) );
CV_CALL( leaves = icvFindDeepestLeaves( tcc ) );//the last stage is taken as the deepest leaf
CV_CALL( icvPrintTreeCascade( tcc->root ) );
haar_features = icvCreateIntHaarFeatures( winsize, mode, symmetric );//computes the number of Haar features and, for each, the parameters of its evaluation formula,
typedef struct CvTHaarFeature
{
char desc[CV_HAAR_FEATURE_DESC_MAX];
int tilted;
struct
{
CvRect r;
float weight;
} rect[CV_HAAR_FEATURE_MAX];
} CvTHaarFeature;
typedef struct CvFastHaarFeature
{
int tilted;
struct
{
int p0, p1, p2, p3;
float weight;
} rect[CV_HAAR_FEATURE_MAX];
} CvFastHaarFeature;
typedef struct CvIntHaarFeatures
{
CvSize winsize;
int count;
CvTHaarFeature* feature;
CvFastHaarFeature* fastfeature;
} CvIntHaarFeatures;
The difference between CvTHaarFeature and CvFastHaarFeature: CvTHaarFeature describes the windows a feature covers by coordinates (CvRect r), while in CvFastHaarFeature those windows have been flattened into pixel offsets, on which cvEvalFastHaarFeature is then computed.
CV_INLINE float cvEvalFastHaarFeature( const CvFastHaarFeature* feature,
const sum_type* sum, const sum_type* tilted )
{
const sum_type* img = feature->tilted ? tilted : sum;//choose the tilted or the upright integral image; in either case every entry already holds the integral sum up to that position
float ret = feature->rect[0].weight*
(img[feature->rect[0].p0] - img[feature->rect[0].p1] -
img[feature->rect[0].p2] + img[feature->rect[0].p3]) +
feature->rect[1].weight*
(img[feature->rect[1].p0] - img[feature->rect[1].p1] -
img[feature->rect[1].p2] + img[feature->rect[1].p3]);
if( feature->rect[2].weight != 0.0f )
ret += feature->rect[2].weight *
( img[feature->rect[2].p0] - img[feature->rect[2].p1] -
img[feature->rect[2].p2] + img[feature->rect[2].p3] );
return ret;
}
printf( "Number of features used : %d\n", haar_features->count );//輸出32*32的子窗口所有特征的數目
training_data = icvCreateHaarTrainingData( winsize, npos + nneg );
sprintf( stage_name, "%s/", dirname );
suffix = stage_name + strlen( stage_name );
if (! bg_vecfile)
if( !icvInitBackgroundReaders( bgfilename, winsize ) && nstages > 0 )//icvInitBackgroundReaders creates the background readers and returns 1 on success
CV_ERROR( CV_StsError, "Unable to read negative images" );
if( nstages > 0 )
{
do
{
CvSplit* first_split;
CvSplit* last_split;
CvSplit* cur_split;
CvTreeCascadeNode* parent;
CvTreeCascadeNode* cur_node;
CvTreeCascadeNode* last_node;
first_split = last_split = cur_split = NULL;
parent = leaves;
leaves = NULL;
do
{
int best_clusters;
float posweight, negweight;
double leaf_fa_rate;
if( parent ) sprintf( buf, "%d", parent->idx );
else sprintf( buf, "NULL" );
printf( "\nParent node: %s\n\n", buf );
printf( "*** 1 cluster ***\n" );
tcc->eval = icvEvalTreeCascadeClassifierFilter;//set so that samples are filtered below: only samples that pass the strong classifiers of all preceding stages get through
icvSetLeafNode( tcc, parent );//set the path from the root node to this leaf
consumed = 0;
poscount = icvGetHaarTrainingDataFromVec( training_data, 0, npos,
(CvIntHaarClassifier*) tcc, vecfilename, &consumed );//load positives with computed integral images from the .vec file into training_data
printf( "POS: %d %d %f\n", poscount, consumed, ((double) poscount)/consumed );
if( poscount <= 0 )
CV_ERROR( CV_StsError, "Unable to obtain positive samples" );
fflush( stdout );
proctime = -TIME( 0 );
nneg = (int) (neg_ratio * poscount);
negcount = icvGetHaarTrainingDataFromBG( training_data, poscount, nneg,
(CvIntHaarClassifier*) tcc, &false_alarm, bg_vecfile ? bgfilename : NULL );
printf( "NEG: %d %g\n", negcount, false_alarm );
printf( "BACKGROUND PROCESSING TIME: %.2f\n", (proctime + TIME( 0 )) );
if( negcount <= 0 )
CV_ERROR( CV_StsError, "Unable to obtain negative samples" );
leaf_fa_rate = false_alarm;
if( leaf_fa_rate <= required_leaf_fa_rate )//reaching the required leaf false alarm rate ends this branch
{
printf( "Required leaf false alarm rate achieved. "
"Branch training terminated.\n" );
}
else if( nleaves == 1 && tcc->next_idx == nstages )
{
printf( "Required number of stages achieved. "
"Branch training terminated.\n" );//達到設定的stages也結束。
}
else
{
CvTreeCascadeNode* single_cluster;
CvTreeCascadeNode* multiple_clusters;
CvSplit* cur_split;
int single_num;
icvSetNumSamples( training_data, poscount + negcount );
posweight = (equalweights) ? 1.0F / (poscount + negcount) : (0.5F/poscount);
negweight = (equalweights) ? 1.0F / (poscount + negcount) : (0.5F/negcount);
icvSetWeightsAndClasses( training_data,
poscount, posweight, 1.0F, negcount, negweight, 0.0F );//set each sample's weight and class label
fflush( stdout );
proctime = -TIME( 0 );
icvPrecalculate( training_data, haar_features, numprecalculated );//computes the first numprecalculated Haar feature values of every loaded positive and negative sample (done by icvGetTrainingDataCallback),
for( i = 0; i < num_samples; i++ )
{
for( j = 0; j < num; j++ )//num=numprecalculated
{
val = cvEvalFastHaarFeature(
( haar_features->fastfeature
+ first + j ),
(sum_type*) (training_data->sum.data.ptr
+ i * training_data->sum.step),
(sum_type*) (training_data->tilted.data.ptr
+ i * training_data->tilted.step) );
normfactor = training_data->normfactor.data.fl[i];
val = ( normfactor == 0.0F ) ? 0.0F : (val / normfactor);
#ifdef CV_COL_ARRANGEMENT
CV_MAT_ELEM( *mat, float, j, i ) = val;
#else
CV_MAT_ELEM( *mat, float, i, j ) = val;//store the computed feature value in the matrix mat points to
#endif
}
}
and, for every feature, sorts the sample indices by ascending feature value (done by cvGetSortedIndices; each feature is sorted separately).
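A minimal standalone sketch of that per-feature argsort (the helpers are ours; the real cvGetSortedIndices fills data->idxcache with one sorted index row per precalculated feature):
#include <stdlib.h>

static const float* g_vals;   /* feature values the comparator sorts on */

static int cmp_by_value( const void* a, const void* b )
{
    float va = g_vals[*(const int*) a];
    float vb = g_vals[*(const int*) b];
    return (va > vb) - (va < vb);
}

/* Fill idx[0..n-1] with the sample numbers 0..n-1 ordered by
 * ascending feature value; one such array exists per feature. */
static void sorted_indices( const float* vals, int* idx, int n )
{
    int i;
    for( i = 0; i < n; i++ )
        idx[i] = i;
    g_vals = vals;
    qsort( idx, n, sizeof( int ), cmp_by_value );
}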
printf( "Precalculation time: %.2f\n", (proctime + TIME( 0 )) );
CV_CALL( single_cluster = icvCreateTreeCascadeNode() );
fflush( stdout );
proctime = -TIME( 0 );
single_cluster->stage =
(CvStageHaarClassifier*) icvCreateCARTStageClassifier(
training_data, NULL, haar_features,
minhitrate, maxfalsealarm, symmetric,
weightfraction, numsplits, (CvBoostType) boosttype,
(CvStumpError) stumperror, 0 );//here the CART stage strong classifier is built; numsplits is the number of split nodes in each weak tree
printf( "Stage training time: %.2f\n", (proctime + TIME( 0 )) );
single_num = icvNumSplits( single_cluster->stage );//the number of weak classifiers the strong classifier is built from
best_num = single_num;
best_clusters = 1;
multiple_clusters = NULL;
printf( "Number of used features: %d\n", single_num );
if( maxtreesplits >= 0 )
{
max_clusters = MIN( max_clusters, maxtreesplits - total_splits + 1 );
}
vals = NULL;
for( k = 2; k <= max_clusters; k++ )
{
int cluster;
int stop_clustering;
printf( "*** %d clusters ***\n", k );
stop_clustering = ( k * minpos > poscount );
if( !stop_clustering )
{
int num[CV_MAX_CLUSTERS];
if( k == 2 )
{
proctime = -TIME( 0 );
CV_CALL( vals = icvGetUsedValues( training_data, 0, poscount,
haar_features, single_cluster->stage ) );
printf( "Getting values for clustering time: %.2f\n", (proctime + TIME(0)) );
printf( "Value matirx size: %d x %d\n", vals->rows, vals->cols );
fflush( stdout );
cluster_idx->cols = vals->rows;
for( i = 0; i < negcount; i++ ) idx->data.i[i] = poscount + i;
}
proctime = -TIME( 0 );
CV_CALL( cvKMeans2( vals, k, cluster_idx, CV_TERM_CRITERIA() ) );
printf( "Clustering time: %.2f\n", (proctime + TIME( 0 )) );
for( cluster = 0; cluster < k; cluster++ ) num[cluster] = 0;
for( i = 0; i < cluster_idx->cols; i++ )
num[cluster_idx->data.i[i]]++;
for( cluster = 0; cluster < k; cluster++ )
{
if( num[cluster] < minpos )
{
stop_clustering = 1;
break;
}
}
}
if( stop_clustering )
{
printf( "Clusters are too small. Clustering aborted.\n" );
break;
}
cur_num = 0;
cur_node = last_node = NULL;
for( cluster = 0; (cluster < k) && (cur_num < best_num); cluster++ )
{
CvTreeCascadeNode* new_node;
int num_splits;
int last_pos;
int total_pos;
printf( "Cluster: %d\n", cluster );
last_pos = negcount;
for( i = 0; i < cluster_idx->cols; i++ )
{
if( cluster_idx->data.i[i] == cluster )
{
idx->data.i[last_pos++] = i;
}
}
idx->cols = last_pos;
total_pos = idx->cols - negcount;
printf( "# pos: %d of %d. (%d%%)\n", total_pos, poscount,
100 * total_pos / poscount );
CV_CALL( new_node = icvCreateTreeCascadeNode() );
if( last_node ) last_node->next = new_node;
else cur_node = new_node;
last_node = new_node;
posweight = (equalweights)
? 1.0F / (total_pos + negcount) : (0.5F / total_pos);
negweight = (equalweights)
? 1.0F / (total_pos + negcount) : (0.5F / negcount);
icvSetWeightsAndClasses( training_data,
poscount, posweight, 1.0F, negcount, negweight, 0.0F );
fflush( stdout );
proctime = -TIME( 0 );
new_node->stage = (CvStageHaarClassifier*)
icvCreateCARTStageClassifier( training_data, idx, haar_features,
minhitrate, maxfalsealarm, symmetric,
weightfraction, numsplits, (CvBoostType) boosttype,
(CvStumpError) stumperror, best_num - cur_num );
printf( "Stage training time: %.2f\n", (proctime + TIME( 0 )) );
if( !(new_node->stage) )
{
printf( "Stage training aborted.\n" );
cur_num = best_num + 1;
}
else
{
num_splits = icvNumSplits( new_node->stage );
cur_num += num_splits;
printf( "Number of used features: %d\n", num_splits );
}
}
if( cur_num < best_num )
{
icvReleaseTreeCascadeNodes( &multiple_clusters );
best_num = cur_num;
best_clusters = k;
multiple_clusters = cur_node;
}
else
{
icvReleaseTreeCascadeNodes( &cur_node );
}
}
cvReleaseMat( &vals );
CV_CALL( cur_split = (CvSplit*) cvAlloc( sizeof( *cur_split ) ) );
CV_ZERO_OBJ( cur_split );
if( last_split ) last_split->next = cur_split;
else first_split = cur_split;
last_split = cur_split;
cur_split->single_cluster = single_cluster;
cur_split->multiple_clusters = multiple_clusters;
cur_split->num_clusters = best_clusters;
cur_split->parent = parent;
cur_split->single_multiple_ratio = (float) single_num / best_num;
}
if( parent ) parent = parent->next_same_level;
} while( parent );
do
{
float max_single_multiple_ratio;
cur_split = NULL;
max_single_multiple_ratio = 0.0F;
last_split = first_split;
while( last_split )
{
if( last_split->single_cluster && last_split->multiple_clusters &&
last_split->single_multiple_ratio > max_single_multiple_ratio )
{
max_single_multiple_ratio = last_split->single_multiple_ratio;
cur_split = last_split;
}
last_split = last_split->next;
}
if( cur_split )
{
if( maxtreesplits < 0 ||
cur_split->num_clusters <= maxtreesplits - total_splits + 1 )
{
cur_split->single_cluster = NULL;
total_splits += cur_split->num_clusters - 1;
}
else
{
icvReleaseTreeCascadeNodes( &(cur_split->multiple_clusters) );
cur_split->multiple_clusters = NULL;
}
}
} while( cur_split );
leaves = last_node = NULL;
last_split = first_split;
while( last_split )
{
cur_node = (last_split->multiple_clusters)
? last_split->multiple_clusters : last_split->single_cluster;
parent = last_split->parent;
if( parent ) parent->child = cur_node;
for( ; cur_node; cur_node = cur_node->next )
{
FILE* file;
if( last_node ) last_node->next_same_level = cur_node;
else leaves = cur_node;
last_node = cur_node;
cur_node->parent = parent;
cur_node->idx = tcc->next_idx;
tcc->next_idx++;
sprintf( suffix, "%d/%s", cur_node->idx, CV_STAGE_CART_FILE_NAME );
file = NULL;
if( icvMkDir( stage_name ) && (file = fopen( stage_name, "w" )) != 0 )
{
cur_node->stage->save( (CvIntHaarClassifier*) cur_node->stage, file );
fprintf( file, "\n%d\n%d\n",
((parent) ? parent->idx : -1),
((cur_node->next) ? tcc->next_idx : -1) );
}
else
{
printf( "Failed to save classifier into %s\n", stage_name );
}
if( file ) fclose( file );
}
if( parent ) sprintf( buf, "%d", parent->idx );
else sprintf( buf, "NULL" );
printf( "\nParent node: %s\n", buf );
printf( "Chosen number of splits: %d\n\n", (last_split->multiple_clusters)
? (last_split->num_clusters - 1) : 0 );
cur_split = last_split;
last_split = last_split->next;
cvFree( &cur_split );
}
printf( "Total number of splits: %d\n", total_splits );
if( !(tcc->root) ) tcc->root = leaves;
CV_CALL( icvPrintTreeCascade( tcc->root ) );
} while( leaves );
{
char xml_path[1024];
int len = (int)strlen(dirname);
CvHaarClassifierCascade* cascade = 0;
strcpy( xml_path, dirname );
if( xml_path[len-1] == '\\' || xml_path[len-1] == '/' )
len--;
strcpy( xml_path + len, ".xml" );
cascade = cvLoadHaarClassifierCascade( dirname, cvSize(winwidth,winheight) );
if( cascade )
cvSave( xml_path, cascade );
cvReleaseHaarClassifierCascade( &cascade );
}
}
printf( "\nCascade performance\n" );
tcc->eval = icvEvalTreeCascadeClassifier;
consumed = 0;
poscount = icvGetHaarTrainingDataFromVec( training_data, 0, npos,
(CvIntHaarClassifier*) tcc, vecfilename, &consumed );
printf( "POS: %d %d %f\n", poscount, consumed,
(consumed > 0) ? (((float) poscount)/consumed) : 0 );
if( poscount <= 0 )
fprintf( stderr, "Warning: unable to obtain positive samples\n" );
proctime = -TIME( 0 );
negcount = icvGetHaarTrainingDataFromBG( training_data, poscount, nneg,
(CvIntHaarClassifier*) tcc, &false_alarm, bg_vecfile ? bgfilename : NULL );
printf( "NEG: %d %g\n", negcount, false_alarm );
printf( "BACKGROUND PROCESSING TIME: %.2f\n", (proctime + TIME( 0 )) );
if( negcount <= 0 )
fprintf( stderr, "Warning: unable to obtain negative samples\n" );
__END__;
if (! bg_vecfile)
icvDestroyBackgroundReaders();
if( tcc ) tcc->release( (CvIntHaarClassifier**) &tcc );
icvReleaseIntHaarFeatures( &haar_features );
icvReleaseHaarTrainingData( &training_data );
cvReleaseMat( &cluster_idx );
cvReleaseMat( &idx );
cvReleaseMat( &vals );
cvReleaseMat( &features_idx );
}
I. The tree classifier
1. Building a decision tree, CvCARTClassifier
//Hierarchy: CvCARTClassifier > CvCARTNode > CvStumpClassifier
//A CvCARTClassifier consists of count CvCARTNodes, and each CvCARTNode holds one CvStumpClassifier,
CvClassifier* cvCreateCARTClassifier( CvMat* trainData,//all feature values of all samples
int flags, //whether the matrix is organized by row or by column
CvMat* trainClasses,
CvMat* typeMask,
CvMat* missedMeasurementsMask,
CvMat* compIdx,
CvMat* sampleIdx,//sample indices when only part of the samples are used
CvMat* weights,
CvClassifierTrainParams* trainParams )
#define CV_CLASSIFIER_FIELDS() \
int flags; \
float(*eval)( struct CvClassifier*, CvMat* ); \
void (*tune)( struct CvClassifier*, CvMat*, int flags, CvMat*, CvMat*, CvMat*, \
CvMat*, CvMat* ); \
int (*save)( struct CvClassifier*, const char* file_name ); \
void (*release)( struct CvClassifier** );
typedef struct CvClassifier
{
CV_CLASSIFIER_FIELDS()
} CvClassifier;
typedef struct CvCARTNode
{
CvMat* sampleIdx;
CvStumpClassifier* stump;
int parent; //index of the parent node
int leftflag; //1 for a left child; 0 for a right child
float errdrop;//remaining error drop
} CvCARTNode;
//One weak classifier: the index of the feature it uses, its threshold, and which side is positive
typedef struct CvStumpClassifier
{
CV_CLASSIFIER_FIELDS()
int compidx; //index of the corresponding feature
float lerror;
float rerror;
float threshold; //該特征閾值
float left; //the mean, or the positive ratio on the left side, left = p(y=1)/(p(y=1)+p(y=-1)); for classification, 1 if the left side is positive, otherwise 0
float right;
} CvStumpClassifier;
typedef struct CvCARTClassifier
{
CV_CLASSIFIER_FIELDS()
int count;
int* compidx;
float* threshold;
int* left;//if this node's left child is a leaf, holds the negated leaf number (counting from 0); otherwise the child node's index
int* right;//if this node's right child is a leaf, holds the negated leaf number (counting from 0); otherwise the child node's index
float* val;//per leaf, stump->left or stump->right: the positive ratio p(y=1)/(p(y=1)+p(y=-1))
} CvCARTClassifier;
typedef struct CvCARTTrainParams
{
CV_CLASSIFIER_TRAIN_PARAM_FIELDS()
int count;//number of split nodes
CvClassifierTrainParams* stumpTrainParams;
CvClassifierConstructor stumpConstructor;
//Function pointer member named splitIdx: splits the samples indexed by idx into left and right by the threshold on feature compidx
void (*splitIdx)( int compidx, float threshold,
CvMat* idx, CvMat** left, CvMat** right,
void* userdata );
void* userdata;
} CvCARTTrainParams;
2. Detection with the tree classifier
//sample is a single sample; determine which leaf of the CvCARTClassifier tree it falls on
//and return that leaf's positive ratio p(y=1)/(p(y=1)+p(y=-1))
float cvEvalCARTClassifier( CvClassifier* classifier, CvMat* sample )
//Walk the tree to locate the sample's leaf; return the leaf number
float cvEvalCARTClassifierIdx( CvClassifier* classifier, CvMat* sample )
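A hedged sketch of that walk, using the CvCARTClassifier fields documented above (positive left/right entries are internal node indices, non-positive entries are negated leaf numbers; the real cvEvalCARTClassifier reads the sample out of a CvMat instead of a plain array):
/* Evaluate a trained CART on one sample's feature vector. */
static float eval_cart( const CvCARTClassifier* cart, const float* sample )
{
    int idx = 0;                                   /* start at the root   */
    do
    {
        if( sample[cart->compidx[idx]] < cart->threshold[idx] )
            idx = cart->left[idx];
        else
            idx = cart->right[idx];
    } while( idx > 0 );                            /* > 0: internal node  */
    return cart->val[-idx];                        /* leaf: positive ratio */
}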
II. Boosting
1. The boosting flow: all variants are funneled through a single pair of functions, cvBoostStartTraining / cvBoostNextWeakClassifier,
with a parameter selecting the boost type. Both are invoked once the best weak classifier is known and every sample's response to it has been computed and stored in weakEvalVals
typedef struct CvBoostTrainer
{
CvBoostType type;
int count;
int* idx;//either NULL or the sample indices
float* F;//stores LogitBoost's F
} CvBoostTrainer;
Call chain: cvBoostStartTraining -> startTraining[type] -> icvBoostStartTraining etc.
//Definition of cvBoostStartTraining
CvBoostTrainer* cvBoostStartTraining( ...,CvBoostType type )
{
return startTraining[type]( trainClasses, weakTrainVals, weights, sampleIdx, type );
}
//Array of function pointers, startTraining[4]
CvBoostStartTraining startTraining[4] = {
icvBoostStartTraining,
icvBoostStartTraining,
icvBoostStartTrainingLB,
icvBoostStartTraining
};
//The function pointer type CvBoostStartTraining
typedef CvBoostTrainer* (*CvBoostStartTraining)( CvMat* trainClasses,
CvMat* weakTrainVals,
CvMat* weights,
CvMat* sampleIdx,
CvBoostType type );
Call chain: cvBoostNextWeakClassifier -> nextWeakClassifier[trainer->type] -> icvBoostNextWeakClassifierLB etc.
//Definition of cvBoostNextWeakClassifier
float cvBoostNextWeakClassifier( ..., CvBoostTrainer* trainer )
{
return nextWeakClassifier[trainer->type]( weakEvalVals, trainClasses,weakTrainVals, weights, trainer);
}
//Array of function pointers, nextWeakClassifier[4]
CvBoostNextWeakClassifier nextWeakClassifier[4] = {
icvBoostNextWeakClassifierDAB,
icvBoostNextWeakClassifierRAB,
icvBoostNextWeakClassifierLB,
icvBoostNextWeakClassifierGAB
};
//The function pointer type CvBoostNextWeakClassifier
typedef float (*CvBoostNextWeakClassifier)( CvMat* weakEvalVals,
CvMat* trainClasses,
CvMat* weakTrainVals,
CvMat* weights,
CvBoostTrainer* data );
2. The concrete startTraining and nextWeakClassifier functions
//y* = 2y - 1: the class labels are mapped from {0,1} to {-1,1} and written into weakTrainVals
//Returns a CvBoostTrainer with F = NULL;
CvBoostTrainer* icvBoostStartTraining( CvMat* trainClasses,//class labels {0,1}
CvMat* weakTrainVals,//class labels {-1,1}
CvMat* weights,
CvMat* sampleIdx,//either NULL or the sample indices
CvBoostType type )
//Update the weights; the weak classifier is already fixed and its responses are in weakEvalVals
float icvBoostNextWeakClassifierDAB( CvMat* weakEvalVals,//responses {1,-1}
CvMat* trainClasses,//class labels {0,1}
CvMat* weakTrainVals,//unused; would be {1,-1}
CvMat* weights, //updated in place
CvBoostTrainer* trainer )//identifies the samples whose weights get updated
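A standalone sketch of the Discrete AdaBoost step this performs (our helper, assuming normalized weights; the real icvBoostNextWeakClassifierDAB also guards the degenerate cases err = 0 and err >= 0.5):
#include <float.h>
#include <math.h>

/* f[i], y[i] in {-1,+1}; w[i] are the sample weights (sum assumed 1). */
static float dab_next_weak_classifier( const float* f, const float* y,
                                       float* w, int n )
{
    float err = 0.0F, sumw = 0.0F, alpha;
    int i;
    for( i = 0; i < n; i++ )               /* weighted training error */
        if( f[i] != y[i] )
            err += w[i];
    alpha = (float) log( (1.0 - err) / (err + FLT_EPSILON) );
    for( i = 0; i < n; i++ )
    {
        if( f[i] != y[i] )
            w[i] *= (float) exp( alpha );  /* boost misclassified samples */
        sumw += w[i];
    }
    for( i = 0; i < n; i++ )
        w[i] /= sumw;                      /* renormalize */
    return alpha;                          /* the weak classifier's vote */
}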
//Update the Real AdaBoost weights; the classifier is fixed and its responses are in weakEvalVals
float icvBoostNextWeakClassifierRAB( CvMat* weakEvalVals,//responses, presumably the fraction classified correctly (= correct / total)
CvMat* trainClasses,//class labels {0,1}
CvMat* weakTrainVals,//unused
CvMat* weights, //updated: w = w * exp(-1/2 * log(evaldata / (1 - evaldata)))
CvBoostTrainer* trainer )//identifies the samples to update
//Sample count, weights, class labels, responses F, z values, sample indices
//Compute LogitBoost's w and z from F; z is returned through traindata
void icvResponsesAndWeightsLB( int num, uchar* wdata, int wstep,
uchar* ydata, int ystep, //class labels
uchar* fdata, int fstep, //responses F
uchar* traindata, int trainstep,//stores z
int* indices ) //sample indices
//Initially F = 0, hence p = 1/2; compute w and z from that
CvBoostTrainer* icvBoostStartTrainingLB( CvMat* trainClasses,//class labels {0,1}
CvMat* weakTrainVals, //stores the z values
CvMat* weights,
CvMat* sampleIdx,//either NULL or the sample indices
CvBoostType type )
//Given f: first F = F + f, then p = 1/(1 + exp(-F)), then z and w
float icvBoostNextWeakClassifierLB( CvMat* weakEvalVals,//f, the regression fit to z
CvMat* trainClasses,//class labels {0,1}
CvMat* weakTrainVals,//stores the z values
CvMat* weights,
CvBoostTrainer* trainer )
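The math in that comment, as a standalone sketch (our helper; the clipping bound of 4 is an assumption, added because the real code also bounds the working response to keep the regression stable):
#include <math.h>

/* LogitBoost working weights and responses from the current model F.
 * y01[i] in {0,1}.  w = p(1-p), z = (y - p) / (p(1-p)), both guarded. */
static void logitboost_w_and_z( const float* F, const float* y01,
                                float* w, float* z, int n )
{
    int i;
    for( i = 0; i < n; i++ )
    {
        float p = 1.0F / (1.0F + (float) exp( -F[i] ));
        float wp = p * (1.0F - p);
        w[i] = wp;
        z[i] = (y01[i] - p) / (wp > 1e-6F ? wp : 1e-6F);
        if( z[i] >  4.0F ) z[i] =  4.0F;   /* assumed clip bound */
        if( z[i] < -4.0F ) z[i] = -4.0F;
    }
}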
//Gentle AdaBoost: given f, update w = w * exp(-y * f)
CV_BOOST_IMPL
float icvBoostNextWeakClassifierGAB( CvMat* weakEvalVals,//f = p(y=1|x) - p(y=-1|x)
CvMat* trainClasses,//class labels {0,1}
CvMat* weakTrainVals,//unused
CvMat* weights,
CvBoostTrainer* trainer )
typedef struct CvTreeCascadeNode
{
CvStageHaarClassifier* stage; //the classifier attached to this node
struct CvTreeCascadeNode* next;
struct CvTreeCascadeNode* child;
struct CvTreeCascadeNode* parent;
struct CvTreeCascadeNode* next_same_level;
struct CvTreeCascadeNode* child_eval;
int idx;
int leaf;
} CvTreeCascadeNode;
opencv haartraining analysis 2: obtaining the positive and negative samples for each stage
The function poscount = icvGetHaarTrainingDataFromVec( training_data, 0, npos, (CvIntHaarClassifier*) tcc, vecfilename, &consumed ) loads count (npos) positive samples from the *.vec positive sample file. The first time execution reaches this point (before the first classifier has been trained), count positives can always be fetched as long as the positive set contains that many. On later passes it may fail to collect count samples, because
only samples that the preceding cascade of strong classifiers ((CvIntHaarClassifier*) tcc) classifies as positive (i.e. classifies correctly) are taken as training samples for the next strong classifier; see icvGetHaarTrainingData and icvEvalTreeCascadeClassifierFilter for the details.
For the negative samples, see icvGetHaarTrainingDataFromBG and icvEvalTreeCascadeClassifierFilter.
int icvGetHaarTrainingDataFromBG( CvHaarTrainingData* data, int first, int count,
CvIntHaarClassifier* cascade, double* acceptance_ratio, const char * filename = NULL )
The acceptance_ratio output parameter records the ratio of negatives actually taken to negatives examined (when few negatives pass the preceding cascade stages, the program re-reads the negative set repeatedly, counting with thread_consumed_count): acceptance_ratio = ((double) count) / consumed_count. This is the false alarm rate, and it is used to decide whether the trained cascade has reached its target; if it has, training stops.
Note the main for loop in icvGetHaarTrainingData:
for( i = first; i < first + count; i++ ) //reads count negatives in total; when that many
{ //cannot be found, this becomes an infinite loop!
A further note on the comment above: the infinite loop only occurs when the preceding strong classifiers classify every sample in the negative set correctly. As long as a single negative is misclassified as positive, count passes over the whole set will yield count negatives, although those are then really count copies of that one sample. To avoid this, the negative set must be large enough.
When the negative images are exactly the size of the positives, if the final cascade must reach false alarm rate falsealarm and count negatives take part in training per stage, the total number of negatives needed is: TotalCount = count / falsealarm
Taking parameters from Rainer Lienhart's paper as an example: falsealarm = 0.5^20 = 9.6e-07, count = 3000,
so TotalCount = 3000 / (0.5^20) = 3,145,728,000, about 3.1 billion.
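The arithmetic checks out (parameter values as quoted above):
#include <math.h>
#include <stdio.h>

int main( void )
{
    double falsealarm = pow( 0.5, 20 );          /* 20 stages at 0.5 each */
    int count = 3000;                            /* negatives per stage   */
    printf( "falsealarm = %g\n", falsealarm );   /* 9.53674e-07           */
    printf( "TotalCount = %.0f\n", count / falsealarm );  /* 3145728000   */
    return 0;
}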
The function icvGetHaarTrainingDataFromBG() loads count negatives from the negative set. The first time execution reaches it (before the first classifier has been trained), count negatives can always be fetched as long as the set holds that many. Later it may not, because only samples that the preceding cascade classifies as positive (i.e. misclassifies) are taken as negative input for the next strong classifier.
As for int icvGetHaarTrainingData( CvHaarTrainingData* data, int first, int count, CvIntHaarClassifier* cascade, CvGetHaarTrainingDataCallback callback, void* userdata,
int* consumed, double* acceptance_ratio )
an explanation of this function:
It is the common routine for reading both positives and negatives; the difference lies in which callback is invoked. Its variable thread_getcount counts the samples classified as positive (regardless of whether a sample is actually positive or negative).
The consumed output parameter returns the total number of positives examined in order to fetch count positives; for negatives it is passed as NULL and nothing is returned through it.
opencv haartraining analysis 3: icvCreateCARTStageClassifier
CvIntHaarClassifier* icvCreateCARTStageClassifier( CvHaarTrainingData* data,
CvMat* sampleIdx,
CvIntHaarFeatures* haarFeatures,
float minhitrate,
float maxfalsealarm,
int symmetric,
float weightfraction,
int numsplits,
CvBoostType boosttype,
CvStumpError stumperror,
int maxsplits )
{
#ifdef CV_COL_ARRANGEMENT
int flags = CV_COL_SAMPLE;
#else
int flags = CV_ROW_SAMPLE;
#endif
CvStageHaarClassifier* stage = NULL;
CvBoostTrainer* trainer;
CvCARTClassifier* cart = NULL;
CvCARTTrainParams trainParams;
CvMTStumpTrainParams stumpTrainParams;
//CvMat* trainData = NULL;
//CvMat* sortedIdx = NULL;
CvMat eval;
int n = 0;
int m = 0;
int numpos = 0;
int numneg = 0;
int numfalse = 0;
float sum_stage = 0.0F;
float threshold = 0.0F;
float falsealarm = 0.0F;
//CvMat* sampleIdx = NULL;
CvMat* trimmedIdx;
//float* idxdata = NULL;
//float* tempweights = NULL;
//int idxcount = 0;
CvUserdata userdata;
int i = 0;
int j = 0;
int idx;
int numsamples;
int numtrimmed;
CvCARTHaarClassifier* classifier;
CvSeq* seq = NULL;
CvMemStorage* storage = NULL;
CvMat* weakTrainVals;
float alpha;
float sumalpha;
int num_splits;
#ifdef CV_VERBOSE
printf( "+----+----+-+---------+---------+---------+---------+\n" );
printf( "| N |%%SMP|F| ST.THR | HR | FA | EXP. ERR|\n" );
printf( "+----+----+-+---------+---------+---------+---------+\n" );
#endif
n = haarFeatures->count;//the number of Haar features; a 32x32 sub-window has over 260,000
m = data->sum.rows;
numsamples = (sampleIdx) ? MAX( sampleIdx->rows, sampleIdx->cols ) : m;
userdata = cvUserdata( data, haarFeatures );
stumpTrainParams.type = ( boosttype == CV_DABCLASS )
? CV_CLASSIFICATION_CLASS : CV_REGRESSION;
stumpTrainParams.error = ( boosttype == CV_LBCLASS || boosttype == CV_GABCLASS )
? CV_SQUARE : stumperror;
stumpTrainParams.portion = CV_STUMP_TRAIN_PORTION;
stumpTrainParams.getTrainData = icvGetTrainingDataCallback;
stumpTrainParams.numcomp = n;
stumpTrainParams.userdata = &userdata;
stumpTrainParams.sortedIdx = data->idxcache;//parameters of the stump (one-level decision tree) trained at each CART node
trainParams.count = numsplits;
trainParams.stumpTrainParams = (CvClassifierTrainParams*) &stumpTrainParams;
trainParams.stumpConstructor = cvCreateMTStumpClassifier;
trainParams.splitIdx = icvSplitIndicesCallback;
trainParams.userdata = &userdata;//parameters of the CART weak classifier
eval = cvMat( 1, m, CV_32FC1, cvAlloc( sizeof( float ) * m ) );
storage = cvCreateMemStorage();
seq = cvCreateSeq( 0, sizeof( *seq ), sizeof( classifier ), storage );
weakTrainVals = cvCreateMat( 1, m, CV_32FC1 );
trainer = cvBoostStartTraining( &data->cls, weakTrainVals, &data->weights,
sampleIdx, boosttype );//weakTrainVals is computed from data->cls: weakTrainVals = 2*cls - 1, so cls in {0,1} maps to weakTrainVals in {-1,1}
num_splits = 0;
sumalpha = 0.0F;
do
{
#ifdef CV_VERBOSE
int v_wt = 0;
int v_flipped = 0;
#endif
trimmedIdx = cvTrimWeights( &data->weights, sampleIdx, weightfraction );//drop the small weights, controlled by weightfraction
numtrimmed = (trimmedIdx) ? MAX( trimmedIdx->rows, trimmedIdx->cols ) : m;
#ifdef CV_VERBOSE
v_wt = 100 * numtrimmed / numsamples;
v_flipped = 0;
#endif
cart = (CvCARTClassifier*) cvCreateCARTClassifier( data->valcache,
flags,
weakTrainVals, 0, 0, 0, trimmedIdx,
&(data->weights),
(CvClassifierTrainParams*) &trainParams );//build the CART-tree weak classifier
classifier = (CvCARTHaarClassifier*) icvCreateCARTHaarClassifier( numsplits );
icvInitCARTHaarClassifier( classifier, cart, haarFeatures );
num_splits += classifier->count;
cart->release( (CvClassifier**) &cart );
if( symmetric && (seq->total % 2) )
{
float normfactor = 0.0F;
CvStumpClassifier* stump;
for( i = 0; i < classifier->count; i++ )
{
if( classifier->feature[i].desc[0] == 'h' )
{
for( j = 0; j < CV_HAAR_FEATURE_MAX &&
classifier->feature[i].rect[j].weight != 0.0F; j++ )
{
classifier->feature[i].rect[j].r.x = data->winsize.width -
classifier->feature[i].rect[j].r.x -
classifier->feature[i].rect[j].r.width;
}
}
else
{
int tmp = 0;
for( j = 0; j < CV_HAAR_FEATURE_MAX &&
classifier->feature[i].rect[j].weight != 0.0F; j++ )
{
classifier->feature[i].rect[j].r.x = data->winsize.width -
classifier->feature[i].rect[j].r.x;
CV_SWAP( classifier->feature[i].rect[j].r.width,
classifier->feature[i].rect[j].r.height, tmp );
}
}
}
icvConvertToFastHaarFeature( classifier->feature,
classifier->fastfeature,
classifier->count, data->winsize.width + 1 );
stumpTrainParams.getTrainData = NULL;
stumpTrainParams.numcomp = 1;
stumpTrainParams.userdata = NULL;
stumpTrainParams.sortedIdx = NULL;
for( i = 0; i < classifier->count; i++ )
{
for( j = 0; j < numtrimmed; j++ )
{
idx = icvGetIdxAt( trimmedIdx, j );
eval.data.fl[idx] = cvEvalFastHaarFeature( &classifier->fastfeature[i],
(sum_type*) (data->sum.data.ptr + idx * data->sum.step),
(sum_type*) (data->tilted.data.ptr + idx * data->tilted.step) );
normfactor = data->normfactor.data.fl[idx];
eval.data.fl[idx] = ( normfactor == 0.0F )
? 0.0F : (eval.data.fl[idx] / normfactor);
}
stump = (CvStumpClassifier*) trainParams.stumpConstructor( &eval,
CV_COL_SAMPLE,
weakTrainVals, 0, 0, 0, trimmedIdx,
&(data->weights),
trainParams.stumpTrainParams );
classifier->threshold[i] = stump->threshold;
if( classifier->left[i] <= 0 )
{
classifier->val[-classifier->left[i]] = stump->left;
}
if( classifier->right[i] <= 0 )
{
classifier->val[-classifier->right[i]] = stump->right;
}
stump->release( (CvClassifier**) &stump );
}
stumpTrainParams.getTrainData = icvGetTrainingDataCallback;
stumpTrainParams.numcomp = n;
stumpTrainParams.userdata = &userdata;
stumpTrainParams.sortedIdx = data->idxcache;
#ifdef CV_VERBOSE
v_flipped = 1;
#endif
}
if( trimmedIdx != sampleIdx )
{
cvReleaseMat( &trimmedIdx );
trimmedIdx = NULL;
}
for( i = 0; i < numsamples; i++ )
{
idx = icvGetIdxAt( sampleIdx, i );
eval.data.fl[idx] = classifier->eval_r( (CvIntHaarClassifier*) classifier,
(sum_type*) (data->sum.data.ptr + idx * data->sum.step),
(sum_type*) (data->tilted.data.ptr + idx * data->tilted.step),
data->normfactor.data.fl[idx] );
}
alpha = cvBoostNextWeakClassifier( &eval, &data->cls, weakTrainVals,
&data->weights, trainer );
sumalpha += alpha;
for( i = 0; i <= classifier->count; i++ )
{
if( boosttype == CV_RABCLASS )
{
classifier->val[i] = cvLogRatio( classifier->val[i] );
}
classifier->val[i] *= alpha;
}
cvSeqPush( seq, (void*) &classifier );
numpos = 0;
for( i = 0; i < numsamples; i++ )
{
idx = icvGetIdxAt( sampleIdx, i );
if( data->cls.data.fl[idx] == 1.0F )
{
eval.data.fl[numpos] = 0.0F;
for( j = 0; j < seq->total; j++ )
{
classifier = *((CvCARTHaarClassifier**) cvGetSeqElem( seq, j ));
eval.data.fl[numpos] += classifier->eval_r(
(CvIntHaarClassifier*) classifier,
(sum_type*) (data->sum.data.ptr + idx * data->sum.step),
(sum_type*) (data->tilted.data.ptr + idx * data->tilted.step),
data->normfactor.data.fl[idx] );
}
numpos++;
}
}
icvSort_32f( eval.data.fl, numpos, 0 );
threshold = eval.data.fl[(int) ((1.0F - minhitrate) * numpos)];
numneg = 0;
numfalse = 0;
for( i = 0; i < numsamples; i++ )
{
idx = icvGetIdxAt( sampleIdx, i );
if( data->cls.data.fl[idx] == 0.0F )
{
numneg++;
sum_stage = 0.0F;
for( j = 0; j < seq->total; j++ )
{
classifier = *((CvCARTHaarClassifier**) cvGetSeqElem( seq, j ));
sum_stage += classifier->eval_r( (CvIntHaarClassifier*) classifier,
(sum_type*) (data->sum.data.ptr + idx * data->sum.step),
(sum_type*) (data->tilted.data.ptr + idx * data->tilted.step),
data->normfactor.data.fl[idx] );
}
if( sum_stage >= (threshold - CV_THRESHOLD_EPS) )
{
numfalse++;
}
}
}
falsealarm = ((float) numfalse) / ((float) numneg);
#ifdef CV_VERBOSE
{
float v_hitrate = 0.0F;
float v_falsealarm = 0.0F;
float v_experr = 0.0F;
for( i = 0; i < numsamples; i++ )
{
idx = icvGetIdxAt( sampleIdx, i );
sum_stage = 0.0F;
for( j = 0; j < seq->total; j++ )
{
classifier = *((CvCARTHaarClassifier**) cvGetSeqElem( seq, j ));
sum_stage += classifier->eval_r( (CvIntHaarClassifier*) classifier,
(sum_type*) (data->sum.data.ptr + idx * data->sum.step),
(sum_type*) (data->tilted.data.ptr + idx * data->tilted.step),
data->normfactor.data.fl[idx] );
}
if( sum_stage >= (threshold - CV_THRESHOLD_EPS) )
{
if( data->cls.data.fl[idx] == 1.0F )
{
v_hitrate += 1.0F;
}
else
{
v_falsealarm += 1.0F;
}
}
if( ( sum_stage >= 0.0F ) != (data->cls.data.fl[idx] == 1.0F) )
{
v_experr += 1.0F;
}
}
v_experr /= numsamples;
printf( "|M|=%%|%c|�|�|�|�|\n",
seq->total, v_wt, ( (v_flipped) ? '+' : '-' ),
threshold, v_hitrate / numpos, v_falsealarm / numneg,
v_experr );
printf( "+----+----+-+---------+---------+---------+---------+\n" );
fflush( stdout );
}
#endif
} while( falsealarm > maxfalsealarm && (!maxsplits || (num_splits < maxsplits) ) );
cvBoostEndTraining( &trainer );
if( falsealarm > maxfalsealarm )
{
stage = NULL;
}
else
{
stage = (CvStageHaarClassifier*) icvCreateStageHaarClassifier( seq->total,
threshold );
cvCvtSeqToArray( seq, (CvArr*) stage->classifier );
}
cvReleaseMemStorage( &storage );
cvReleaseMat( &weakTrainVals );
cvFree( &(eval.data.ptr) );
return (CvIntHaarClassifier*) stage;
}