OpenCV Feature Point Detection and Matching: Adding a Bounding Box





Final result: (screenshot not reproduced here)


This small addition is surprisingly useful: with the bounding box drawn around the matched object, the output looks almost like a finished face-detection or object-detection demo. The key code is as follows:
// localize the object
std::vector<Point2f> obj;
std::vector<Point2f> scene;

for (size_t i = 0; i < good_matches.size(); ++i)
{
    // get the keypoints from the good matches
    obj.push_back(keyPoints_1[ good_matches[i].queryIdx ].pt);
    scene.push_back(keyPoints_2[ good_matches[i].trainIdx ].pt);
}
Mat H = findHomography( obj, scene, CV_RANSAC );

// get the corners from image_1 (the object to be "detected")
std::vector<Point2f> obj_corners(4);
obj_corners[0] = Point2f( 0, 0 );
obj_corners[1] = Point2f( img_1.cols, 0 );
obj_corners[2] = Point2f( img_1.cols, img_1.rows );
obj_corners[3] = Point2f( 0, img_1.rows );
std::vector<Point2f> scene_corners(4);

// map the object corners into the scene image
perspectiveTransform( obj_corners, scene_corners, H );

// draw lines between the corners (the mapped object in the scene - image_2);
// the x-offset img_1.cols accounts for image_1 being drawn on the left of img_matches
line( img_matches, scene_corners[0] + Point2f( img_1.cols, 0 ), scene_corners[1] + Point2f( img_1.cols, 0 ), Scalar( 0, 255, 0 ) );
line( img_matches, scene_corners[1] + Point2f( img_1.cols, 0 ), scene_corners[2] + Point2f( img_1.cols, 0 ), Scalar( 0, 255, 0 ) );
line( img_matches, scene_corners[2] + Point2f( img_1.cols, 0 ), scene_corners[3] + Point2f( img_1.cols, 0 ), Scalar( 0, 255, 0 ) );
line( img_matches, scene_corners[3] + Point2f( img_1.cols, 0 ), scene_corners[0] + Point2f( img_1.cols, 0 ), Scalar( 0, 255, 0 ) );




The core of this technique is findHomography, which computes the homography (also called the projective mapping) between the two images, a 3x3 matrix. findHomography finds the optimal homography H (3 rows x 3 columns) relating multiple pairs of 2D points, using either least squares or the RANSAC method.

Each corner projected through the homography, e.g. scene_corners[0], is the corresponding point's position in the matched (scene) image. Because the side-by-side match visualization is widened by the full width of the query image, every projected point must be shifted by Point2f( img_1.cols, 0 ) before drawing.
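Note that the four projected corners form a general quadrilateral, not an upright rectangle. If an axis-aligned bounding box is wanted instead (the classic detection-demo look), one option is to wrap the corners with cv::boundingRect. A minimal sketch, assuming the scene_corners and img_matches variables from the code above; this step is not part of the original demo:

// collapse the projected quadrilateral into an axis-aligned bounding box
Rect box = boundingRect( scene_corners );   // smallest upright rectangle containing the corners
box.x += img_1.cols;                        // same x-offset as the line() calls above
rectangle( img_matches, box, Scalar( 0, 0, 255 ), 2 );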

A closer look at the two key functions:

findHomography

Purpose: finds the homography (projective transformation) matrix between two planes.

Signature:

Mat findHomography(InputArray srcPoints, InputArray dstPoints, int method=0, double ransacReprojThreshold=3, OutputArray mask=noArray() )

srcPoints: coordinates of the points in the source plane; a CV_32FC2 matrix or a vector<Point2f>.
dstPoints: coordinates of the points in the target plane; a CV_32FC2 matrix or a vector<Point2f>.
method: the method used to compute the homography matrix. One of:
0 - the regular method using all the points
CV_RANSAC - the RANSAC-based robust method
CV_LMEDS - the Least-Median robust method

ransacReprojThreshold: the maximum reprojection error allowed for a point pair to be treated as an inlier (used in the RANSAC method only). That is, if

$$\left\| \texttt{dstPoints}_i - \texttt{convertPointsHomogeneous}\left( H \cdot \texttt{srcPoints}_i \right) \right\| > \texttt{ransacReprojThreshold}$$

then point i is considered an outlier. If srcPoints and dstPoints are measured in pixels, it usually makes sense to set this parameter somewhere in the range of 1 to 10.

The function finds and returns the homography matrix between the source and the destination planes,

$$s_i \begin{bmatrix} x'_i \\ y'_i \\ 1 \end{bmatrix} \sim H \begin{bmatrix} x_i \\ y_i \\ 1 \end{bmatrix},$$

so that the back-projection error

$$\sum_i \left( x'_i - \frac{h_{11} x_i + h_{12} y_i + h_{13}}{h_{31} x_i + h_{32} y_i + h_{33}} \right)^2 + \left( y'_i - \frac{h_{21} x_i + h_{22} y_i + h_{23}}{h_{31} x_i + h_{32} y_i + h_{33}} \right)^2$$

is minimized.

If the method parameter is set to 0, the function uses all the point pairs and a simple least-squares scheme to compute the initial homography estimate. However, if not all of the point pairs fit the perspective transformation well, this initial estimate will be poor. In that case, one of the two robust methods can be used. Both RANSAC and LMeDS try many different random subsets of the corresponding point pairs (four pairs each), estimate the homography from each subset with the simple least-squares algorithm, and then score the quality of that homography; the best subset is used to produce the initial homography estimate and the inlier mask.

The RANSAC method can handle practically any ratio of outliers, but it needs a threshold to distinguish inliers from outliers. The LMeDS method does not need any threshold, but it works correctly only when inliers make up more than 50% of the points. Finally, if there are no outliers and the noise is small, the default method (all points, least squares) can be used.
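Both robust methods also fill the optional mask output with per-point inlier flags, which makes it easy to check how well the estimated homography is supported. A minimal sketch, assuming the obj and scene vectors from the code above (the 3.0 is simply the default threshold written out explicitly):

vector<uchar> inliersMask;  // one flag per point pair: 1 = inlier, 0 = outlier
Mat H = findHomography( obj, scene, CV_RANSAC, 3.0, inliersMask );
int inliers = countNonZero( inliersMask );  // pairs consistent with H
printf("-- RANSAC kept %d of %d matches \n", inliers, (int)inliersMask.size());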

perspectiveTransform

Purpose: performs the perspective matrix transformation of a vector of points.

Signature:

void perspectiveTransform(InputArray src, OutputArray dst, InputArray m)

src: input two-channel or three-channel floating-point array; each element is a 2D/3D vector to be transformed.

dst: output array of the same size and type as src.
m: 3x3 or 4x4 floating-point transformation matrix.
Each vector is transformed as:

$$(x, y, z) \rightarrow (x'/w,\; y'/w,\; z'/w)$$
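In other words, perspectiveTransform multiplies each point by m in homogeneous coordinates and divides the result by w. As a sanity check, the same computation can be done by hand for a single point; a minimal sketch, assuming the 3x3 homography H and obj_corners from the code above (the result matches scene_corners[0] up to floating-point rounding):

// manually apply a 3x3 homography to one 2D point
Point2f p = obj_corners[0];
Mat v = (Mat_<double>(3, 1) << p.x, p.y, 1.0);  // homogeneous coordinates
Mat r = H * v;                                  // (x', y', w'); H is CV_64F
double w = r.at<double>(2, 0);
Point2f q( (float)(r.at<double>(0, 0) / w), (float)(r.at<double>(1, 0) / w) );  // == scene_corners[0]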


As the official documentation defines it:

$$(x', y', z', w') = \texttt{m} \cdot \begin{bmatrix} x & y & z & 1 \end{bmatrix}^{T}$$

$$w = \begin{cases} w' & \text{if } w' \ne 0 \\ \infty & \text{otherwise} \end{cases}$$

Implementation:


// OpenCV_sift.cpp : defines the entry point for the console application.
//

#include "stdafx.h"
#include <iostream>

#include <vector>
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/legacy/legacy.hpp"
#include "opencv2/calib3d/calib3d.hpp"

using namespace cv;
using namespace std;

#pragma comment(lib,"opencv_core2410d.lib")                  
#pragma comment(lib,"opencv_highgui2410d.lib")                  
#pragma comment(lib,"opencv_objdetect2410d.lib")     
#pragma comment(lib,"opencv_imgproc2410d.lib")    
#pragma comment(lib,"opencv_features2d2410d.lib")
#pragma comment(lib,"opencv_legacy2410d.lib")
#pragma comment(lib,"opencv_calib3d2410d.lib")

int main()
{
    Mat img_1 = imread("1.jpg");
    Mat img_2 = imread("2.jpg");
    if (!img_1.data || !img_2.data)
    {
        cout << "error reading images " << endl;
        return -1;
    }

    ORB orb;
    vector<KeyPoint> keyPoints_1, keyPoints_2;
    Mat descriptors_1, descriptors_2;

    orb(img_1, Mat(), keyPoints_1, descriptors_1);
    orb(img_2, Mat(), keyPoints_2, descriptors_2);

    BruteForceMatcher<HammingLUT> matcher;
    vector<DMatch> matches;
    matcher.match(descriptors_1, descriptors_2, matches);

    double max_dist = 0; double min_dist = 100;
    //-- Quick calculation of max and min distances between keypoints
    for( int i = 0; i < descriptors_1.rows; i++ )
    { 
        double dist = matches[i].distance;
        if( dist < min_dist ) min_dist = dist;
        if( dist > max_dist ) max_dist = dist;
    }
    printf("-- Max dist : %f \n", max_dist );
    printf("-- Min dist : %f \n", min_dist );
    //-- Draw only "good" matches (i.e. whose distance is less than 0.6*max_dist )
    //-- PS.- radiusMatch can also be used here.
    std::vector< DMatch > good_matches;
    for( int i = 0; i < descriptors_1.rows; i++ )
    { 
        if( matches[i].distance < 0.6*max_dist )
        { 
            good_matches.push_back( matches[i]); 
        }
    }

    Mat img_matches;
    drawMatches(img_1, keyPoints_1, img_2, keyPoints_2,
        good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
        vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);

    // localize the object
    std::vector<Point2f> obj;
    std::vector<Point2f> scene;

    for (size_t i = 0; i < good_matches.size(); ++i)
    {
        // get the keypoints from the good matches
        obj.push_back(keyPoints_1[ good_matches[i].queryIdx ].pt);
        scene.push_back(keyPoints_2[ good_matches[i].trainIdx ].pt);
    }
    // findHomography needs at least 4 point pairs to estimate H
    if (good_matches.size() < 4)
    {
        cout << "not enough good matches to estimate a homography" << endl;
        return -1;
    }
    Mat H = findHomography( obj, scene, CV_RANSAC );

    // get the corners from the image_1
    std::vector<Point2f> obj_corners(4);
    obj_corners[0] = Point2f( 0, 0 );
    obj_corners[1] = Point2f( img_1.cols, 0 );
    obj_corners[2] = Point2f( img_1.cols, img_1.rows );
    obj_corners[3] = Point2f( 0, img_1.rows );
    std::vector<Point2f> scene_corners(4);

    perspectiveTransform( obj_corners, scene_corners, H);

    // draw lines between the corners (the mapped object in the scene - image_2)
    line( img_matches, scene_corners[0] + Point2f( img_1.cols, 0), scene_corners[1] + Point2f( img_1.cols, 0),Scalar(0,255,0));
    line( img_matches, scene_corners[1] + Point2f( img_1.cols, 0), scene_corners[2] + Point2f( img_1.cols, 0),Scalar(0,255,0));
    line( img_matches, scene_corners[2] + Point2f( img_1.cols, 0), scene_corners[3] + Point2f( img_1.cols, 0),Scalar(0,255,0));
    line( img_matches, scene_corners[3] + Point2f( img_1.cols, 0), scene_corners[0] + Point2f( img_1.cols, 0),Scalar(0,255,0));


    imshow( "Match", img_matches);
    waitKey();
    return 0;
}



Of course, other feature detection algorithms can be used as well (note that in OpenCV 2.4, SIFT and SURF live in the nonfree module, so opencv2/nonfree/nonfree.hpp and the matching library are also required):
/*
SIFT sift;
sift(img_1, Mat(), keyPoints_1, descriptors_1);
sift(img_2, Mat(), keyPoints_2, descriptors_2);
BruteForceMatcher<L2<float> >  matcher;
*/

/*
SURF surf;
surf(img_1, Mat(), keyPoints_1);
surf(img_2, Mat(), keyPoints_2);
SurfDescriptorExtractor extractor;
extractor.compute(img_1, keyPoints_1, descriptors_1);
extractor.compute(img_2, keyPoints_2, descriptors_2);
BruteForceMatcher<L2<float> >  matcher;
*/
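The distance filter used in the main program (keeping matches with distance below 0.6 * max_dist) is simple but fairly ad hoc. A common alternative is Lowe's ratio test via knnMatch, which all DescriptorMatcher subclasses in OpenCV 2.4 provide. A minimal sketch using the same matcher as above (the 0.75 ratio is a conventional choice, not taken from the original code):

vector< vector<DMatch> > knnMatches;
matcher.knnMatch( descriptors_1, descriptors_2, knnMatches, 2 );  // 2 nearest neighbors per query

vector<DMatch> good_matches;
for (size_t i = 0; i < knnMatches.size(); ++i)
{
    // keep a match only if it is clearly better than the runner-up
    if (knnMatches[i].size() == 2 &&
        knnMatches[i][0].distance < 0.75f * knnMatches[i][1].distance)
    {
        good_matches.push_back( knnMatches[i][0] );
    }
}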




Images: (the sample inputs 1.jpg and 2.jpg are not reproduced here)

