Learning OpenCV: BOW Feature Extraction Functions (Feature Points)


from:  http://www.xuebuyuan.com/582331.html

A simple approach to classification using feature points:

I. Train

1. Extract features from the +/- samples; the number of SIFT features per image varies (assume each feature is 128-dimensional).

2. Use a clustering method (e.g. k-means) to group the variable number of features into a fixed number of words (say 10), i.e. the BOW (bag of words) vocabulary.

(This article mainly covers the work up to this point!)

3. Normalize, and build the histogram over these 10 words, e.g. [0.1, 0.2, 0.7, 0, ..., 0].

4. Feed each image's 10-word histogram as the feature instance, together with its (hand-labeled) +/- label, into SVM training.

 

II. Predict

1. Extract the features of test_img (say 137 of them).

2. For each feature, compute its distance to the 10 cluster centers (e.g. 128-dimensional Euclidean distance) to decide which word it belongs to.

3. Normalize, and build the histogram over the 10 words, e.g. [0, 0.2, 0.2, 0.6, 0, ..., 0].

4. Run SVM prediction on the histogram to obtain the result (a rough sketch of steps 3-4 follows below).
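
The code in this article only covers steps 1 and 2; steps 3 and 4 (histogram building and SVM training/prediction) are not implemented here. Below is a rough, untested sketch of how they could look with the OpenCV 2.x C++ API. The helper name buildHistogram and the choice of CvSVM with a linear kernel are my own assumptions, not part of the original post.

#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/ml/ml.hpp"
#include <vector>

using namespace cv;
using namespace std;

// Assign each descriptor to its nearest cluster center (word), count the hits
// per word, and normalize so the histogram sums to 1.
Mat buildHistogram(const Mat& descriptors, const Mat& centers)
{
	Mat hist = Mat::zeros(1, centers.rows, CV_32F);
	Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("BruteForce"); // L2 distance
	vector<DMatch> matches;
	matcher->match(descriptors, centers, matches);  // query = descriptors, train = centers
	for (size_t i = 0; i < matches.size(); i++)
		hist.at<float>(0, matches[i].trainIdx) += 1.0f;
	hist /= (float)descriptors.rows;                // normalize
	return hist;
}

// trainData: one normalized histogram per row (CV_32F);
// labels: one +1 / -1 label per row.
float trainAndPredict(const Mat& trainData, const Mat& labels, const Mat& testHist)
{
	CvSVMParams params;
	params.svm_type = CvSVM::C_SVC;
	params.kernel_type = CvSVM::LINEAR;
	CvSVM svm;
	svm.train(trainData, labels, Mat(), Mat(), params);
	return svm.predict(testHist);                   // predicted label of the test image
}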

 

 

Implementing feature clustering (BOW) with OpenCV

First, an overview of OpenCV's generic interfaces for feature descriptors and BOW.

The main generic interfaces are:

 

1. Feature point detection

Ptr<FeatureDetector> FeatureDetector::create(const string& detectorType)
// 	"FAST" – FastFeatureDetector 
// 	"STAR" – StarFeatureDetector 
// 	"SIFT" – SIFT (nonfree module)	// initModule_nonfree() must be called first
// 	"SURF" – SURF (nonfree module)	// same as above
// 	"ORB" – ORB 
// 	"MSER" – MSER 
// 	"GFTT" – GoodFeaturesToTrackDetector 
// 	"HARRIS" – GoodFeaturesToTrackDetector with Harris detector enabled 
// 	"Dense" – DenseFeatureDetector 
// 	"SimpleBlob" – SimpleBlobDetector 

 

Using the interface above, test the different detector types: detect feature points on an image and on its horizontally flipped copy.

The coordinate type of the detected points is pt: int / float (it depends on the KeyPoint properties), and the counts on the two images are num1 and num2 respectively (a minimal sketch of this test follows the table):

 

 "FAST" – FastFeatureDetector           pt:int (num1:615  num2:618)
 "STAR" – StarFeatureDetector           pt:int (num1:43   num2:42 )
 "SIFT" – SIFT (nonfree module)          pt:float(num1:155  num2:135)            //必須使用 initModule_nonfree()初始化
 "SURF" – SURF (nonfree module)     pt:float(num1:344  num2:342)           
//同上; 
 "ORB" – ORB                                        pt:float(num1:496  num2:497)
 "MSER" – MSER                                 pt:float(num1:51   num2:45 )
 "GFTT" – GoodFeaturesToTrackDetector        pt:int (num1:744  num2:771)
 "HARRIS" – GoodFeaturesToTrackDetector with Harris detector enabled         pt:float(num1:162  num2:160)
 "Dense" – DenseFeatureDetector          pt:int (num1:3350 num2:3350)

 

2. Descriptor extraction

Ptr<DescriptorExtractor> DescriptorExtractor::create(const string& descriptorExtractorType)
// 	"SIFT" – SIFT 
// 	"SURF" – SURF 
// 	"ORB" – ORB 
// 	"BRIEF" – BriefDescriptorExtractor 

 

3. Descriptor matching

Ptr<DescriptorMatcher> descriptorMatcher = DescriptorMatcher::create(const string& descriptorMatcherType)

// 	descriptorMatcherType – Descriptor matcher type. 
//	Now the following matcher types are supported: 
// 		BruteForce (it uses L2 ) 
// 		BruteForce-L1 
// 		BruteForce-Hamming 
// 		BruteForce-Hamming(2) 
// 		FlannBased 
	Ptr<DescriptorMatcher> descriptorMatcher = DescriptorMatcher::create( "BruteForce" );
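
The matcher type should match the descriptor type: the L2-based BruteForce and FlannBased matchers suit float descriptors such as SIFT/SURF, while the Hamming variants are for binary descriptors such as ORB/BRIEF. A small sketch of matching ORB descriptors with a Hamming matcher (my own example, not from the original post):

#include "opencv2/features2d/features2d.hpp"
#include <vector>

using namespace cv;
using namespace std;

// Detect, describe and match with ORB; binary descriptors need Hamming distance.
void matchWithOrb(const Mat& img1, const Mat& img2, vector<DMatch>& matches)
{
	ORB orb;                                    // detector + extractor in one object
	vector<KeyPoint> kp1, kp2;
	Mat desc1, desc2;
	orb(img1, Mat(), kp1, desc1);               // detect keypoints and compute descriptors
	orb(img2, Mat(), kp2, desc2);

	// ORB descriptors are binary (CV_8U), so L2 ("BruteForce") would be the wrong metric here.
	Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("BruteForce-Hamming");
	matcher->match(desc1, desc2, matches);
}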

 

4.class BOWTrainer

class BOWKMeansTrainer : public BOWTrainer  (trains the vocabulary with the k-means algorithm)

BOWKMeansTrainer::BOWKMeansTrainer(int clusterCount, const TermCriteria& termcrit=TermCriteria(), int attempts=3, int flags=KMEANS_PP_CENTERS)

The parameters are the same as those of kmeans().
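
Besides BOWKMeansTrainer, the features2d module also provides BOWImgDescriptorExtractor, which takes the cluster centers as a vocabulary and computes the normalized word histogram of an image in one call. A rough sketch (the function name computeBowHistogram is my own; SIFT is assumed and requires initModule_nonfree()):

#include "opencv2/features2d/features2d.hpp"
#include "opencv2/nonfree/nonfree.hpp"
#include <vector>

using namespace cv;
using namespace std;

// vocabulary: the output of BOWKMeansTrainer::cluster(), one row per word.
// Returns a 1 x clusterCount histogram, normalized by the number of keypoints.
Mat computeBowHistogram(const Mat& img, const Mat& vocabulary)
{
	Ptr<FeatureDetector> detector = FeatureDetector::create("SIFT");
	Ptr<DescriptorExtractor> extractor = DescriptorExtractor::create("SIFT");
	Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("BruteForce");

	BOWImgDescriptorExtractor bowExtractor(extractor, matcher);
	bowExtractor.setVocabulary(vocabulary);

	vector<KeyPoint> keypoints;
	detector->detect(img, keypoints);

	Mat bowHistogram;
	bowExtractor.compute(img, keypoints, bowHistogram);
	return bowHistogram;
}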

 

Code implementation:

1. Draw the feature points.

2. K-means clustering of the feature points; each color represents one cluster.

 

#include "opencv2/highgui/highgui.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/nonfree/nonfree.hpp"

#include <iostream>

using namespace cv;
using namespace std;

#define ClusterNum 10

void DrawAndMatchKeypoints(const Mat& Img1,const Mat& Img2,const vector<KeyPoint>& Keypoints1,
	const vector<KeyPoint>& Keypoints2,const Mat& Descriptors1,const Mat& Descriptors2)
{
	Mat keyP1,keyP2;
	drawKeypoints(Img1,Keypoints1,keyP1,Scalar::all(-1),0);
	drawKeypoints(Img2,Keypoints2,keyP2,Scalar::all(-1),0);
	putText(keyP1, "drawKeyPoints", cvPoint(10,30), FONT_HERSHEY_SIMPLEX, 1 ,Scalar :: all(-1));
	putText(keyP2, "drawKeyPoints", cvPoint(10,30), FONT_HERSHEY_SIMPLEX, 1 ,Scalar :: all(-1));
	imshow("img1 keyPoints",keyP1);
	imshow("img2 keyPoints",keyP2);

	Ptr<DescriptorMatcher> descriptorMatcher = DescriptorMatcher::create( "BruteForce" );
	vector<DMatch> matches;
	descriptorMatcher->match( Descriptors1, Descriptors2, matches );
	Mat show;
	drawMatches(Img1,Keypoints1,Img2,Keypoints2,matches,show,Scalar::all(-1),CV_RGB(255,255,255),Mat(),4);
	putText(show, "drawMatchKeyPoints", cvPoint(10,30), FONT_HERSHEY_SIMPLEX, 1 ,Scalar :: all(-1));  
	imshow("match",show);
}

//test of OpenCV's BOWTrainer class
void BOWKmeans(const Mat& img, const vector<KeyPoint>& Keypoints, 
	const Mat& Descriptors, Mat& centers)
{
	//k-means clustering for BOW;
	BOWKMeansTrainer bowK(ClusterNum, 
		cvTermCriteria (CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 10, 0.1),3,2);
	centers = bowK.cluster(Descriptors);
	cout<<endl<<"< cluster num: "<<centers.rows<<" >"<<endl;
	
	Ptr<DescriptorMatcher> descriptorMatcher = DescriptorMatcher::create( "BruteForce" );
	vector<DMatch> matches;
	descriptorMatcher->match(Descriptors,centers,matches);// match(queryDescriptors, trainDescriptors): the first argument holds the descriptors to be assigned, the second the cluster centers;
	Mat demoCluster;
	img.copyTo(demoCluster);
	
	//one color per keypoint cluster
	Scalar color[]={CV_RGB(255,255,255),
     CV_RGB(255,0,0),CV_RGB(0,255,0),CV_RGB(0,0,255),
     CV_RGB(255,255,0),CV_RGB(255,0,255),CV_RGB(0,255,255),
     CV_RGB(123,123,0),CV_RGB(0,123,123),CV_RGB(123,0,123)};


	for (vector<DMatch>::iterator iter=matches.begin();iter!=matches.end();iter++)
	{
		cout<<"< descriptorsIdx:"<<iter->queryIdx<<"  centersIdx:"<<iter->trainIdx
			<<" distincs:"<<iter->distance<<" >"<<endl;
		Point center= Keypoints[iter->queryIdx].pt;
		circle(demoCluster,center,2,color[iter->trainIdx],-1);
	}
	putText(demoCluster, "KeyPoints Clustering: 一種顏色代表一種類型",
		cvPoint(10,30), FONT_HERSHEY_SIMPLEX, 1 ,Scalar :: all(-1));
	imshow("KeyPoints Clusrtering",demoCluster);
	
}




int main()
{
	cv::initModule_nonfree();// initModule_<modulename>() must be called before creating SIFT/SURF; 

	cout << "< Creating detector, descriptor extractor and descriptor matcher ...";
	Ptr<FeatureDetector> detector = FeatureDetector::create( "SIFT" );

	Ptr<DescriptorExtractor> descriptorExtractor = DescriptorExtractor::create( "SIFT" );

	Ptr<DescriptorMatcher> descriptorMatcher = DescriptorMatcher::create( "BruteForce" );



	cout << ">" << endl;

	if( detector.empty() || descriptorExtractor.empty() )
	{
		cout << "Can not create detector or descriptor exstractor or descriptor matcher of given types" << endl;
		return -1;
	}
	cout << endl << "< Reading images..." << endl;
	Mat img1 = imread("D:/demo0.jpg");
	Mat img2 = imread("D:/demo1.jpg");
	cout<<endl<<">"<<endl;


	//detect keypoints;
	cout << endl << "< Extracting keypoints from images..." << endl;
	vector<KeyPoint> keypoints1,keypoints2;
	detector->detect( img1, keypoints1 );
	detector->detect( img2, keypoints2 );
	cout <<"img1:"<< keypoints1.size() << " points  img2:" <<keypoints2.size() 
		<< " points" << endl << ">" << endl;
	
	//compute descriptors for keypoints;
	cout << "< Computing descriptors for keypoints from images..." << endl;
	Mat descriptors1,descriptors2;
	descriptorExtractor->compute( img1, keypoints1, descriptors1 );
	descriptorExtractor->compute( img2, keypoints2, descriptors2 );

	cout<<endl<<"< Descriptoers Size: "<<descriptors2.size()<<" >"<<endl;
	cout<<endl<<"descriptor's col: "<<descriptors2.cols<<endl
		<<"descriptor's row: "<<descriptors2.rows<<endl;
	cout << ">" << endl;

	//Draw and match img1, img2 keypoints
	//matching is performed on the keypoints' descriptors;
	DrawAndMatchKeypoints(img1,img2,keypoints1,keypoints2,descriptors1,descriptors2);

	Mat center;
	//extract feature points from img1 and cluster them
	//test of OpenCV's BOWTrainer class
	BOWKmeans(img1,keypoints1,descriptors1,center);


	waitKey();

}

 

 

Implementing DrawKeypoints in Qt:

void Qt_test1::on_DrawKeypoints_clicked()
{
	//initModule_nonfree();
	Ptr<FeatureDetector> detector = FeatureDetector::create( "FAST" );
	vector<KeyPoint> keypoints;
	detector->detect( src, keypoints );

	Mat DrawKeyP;
	drawKeypoints(src,keypoints,DrawKeyP,Scalar::all(-1),0);
	putText(DrawKeyP, "drawKeyPoints", cvPoint(10,30), 
		FONT_HERSHEY_SIMPLEX, 0.5 ,Scalar :: all(255));
	cvtColor(DrawKeyP, image, CV_RGB2RGBA);
	QImage img = QImage((const unsigned char*)(image.data), 
		image.cols, image.rows, QImage::Format_RGB32);
	QLabel *label = new QLabel(this);
	label->move(50, 50);// position of the image inside the window;
	label->setPixmap(QPixmap::fromImage(img));
	label->resize(label->pixmap()->size());	
	label->show();
}

In the Qt build, initModule_nonfree() always throws an error, so SIFT and SURF feature points cannot be extracted.

The clustering also fails: running the BOW k-means clustering, BOWKMeansTrainer bowK(ClusterNum, cvTermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 10, 0.1), 3, 2);, always errors out, and I do not know how to fix it yet ~~~~~(>_<)~~~~ More learning needed.

