Research and Implementation of Image and Video Stitching Based on SURF Features (Part 1)
I had long planned to look into real-time image stitching, but only recently did I read Zhang Yajuan's 2013 Xidian University thesis, "Research and Implementation of Image and Video Stitching Based on SURF Features". It is well organized and complete, and the techniques it implements have real market value. I therefore resolved to take this thesis as my guiding thread and, adapting it to my own circumstances, carry out my own research and implementation of image and video stitching based on SURF features.
1. A SURF implementation based on OpenCV
From OpenCV 3.0 on, SURF was moved out into the "opencv_contrib-master" repository, which I find awkward to work with, so here I stay with OpenCV 2.4.8, which I have been using all along.
// raw_surf.cpp : adapted from the relevant OpenCV 2.4.8 sample
//
#include "stdafx.h"
#include <iostream>
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/nonfree/features2d.hpp"
using namespace std;
using namespace cv;
int main( int argc, char** argv )
{
Mat img_1 = imread( "img_opencv_1.png", 0 );
Mat img_2 = imread( "img_opencv_2.png", 0 );
if( !img_1.data || !img_2.data )
{ std::cout << " --(!) Error reading images " << std::endl; return -1; }
//-- Step 1: Detect the keypoints using SURF Detector
int minHessian = 10000;
SurfFeatureDetector detector( minHessian );
std::vector<KeyPoint> keypoints_1, keypoints_2;
detector.detect( img_1, keypoints_1 );
detector.detect( img_2, keypoints_2 );
//-- Draw keypoints
Mat img_keypoints_1; Mat img_keypoints_2;
drawKeypoints( img_1, keypoints_1, img_keypoints_1, Scalar::all(-1), DrawMatchesFlags::DEFAULT );
drawKeypoints( img_2, keypoints_2, img_keypoints_2, Scalar::all(-1), DrawMatchesFlags::DEFAULT );
//-- Step 2: Calculate descriptors (feature vectors)
SurfDescriptorExtractor extractor;
Mat descriptors_1, descriptors_2;
extractor.compute( img_1, keypoints_1, descriptors_1 );
extractor.compute( img_2, keypoints_2, descriptors_2 );
//-- Step 3: Matching descriptor vectors with a brute force matcher
BFMatcher matcher(NORM_L2);
std::vector< DMatch > matches;
matcher.match( descriptors_1, descriptors_2, matches );
//-- Draw matches
Mat img_matches;
drawMatches( img_1, keypoints_1, img_2, keypoints_2, matches, img_matches );
//-- Show detected (drawn) keypoints
imshow( "Keypoints 1", img_keypoints_1 );
imshow( "Keypoints 2", img_keypoints_2 );
//-- Show detected matches
imshow( "Matches", img_matches );
waitKey(0);
return 0;
}
Here SurfFeatureDetector is used to find the keypoints and BFMatcher to compare the descriptors, but BFMatcher produces quite a few false matches. OpenCV also provides a FLANN-based matcher for the comparison:
// raw_surf.cpp : adapted from the relevant OpenCV 2.4.8 sample
//
#include "stdafx.h"
#include <iostream>
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/nonfree/features2d.hpp"
using namespace std;
using namespace cv;
int main( int argc, char** argv )
{
Mat img_1 = imread( "img_opencv_1.png", 0 );
Mat img_2 = imread( "img_opencv_2.png", 0 );
if( !img_1.data || !img_2.data )
{ std::cout << " --(!) Error reading images " << std::endl; return -1; }
//-- Step 1: Detect the keypoints using SURF Detector
int minHessian = 400;
SurfFeatureDetector detector( minHessian );
std::vector<KeyPoint> keypoints_1, keypoints_2;
detector.detect( img_1, keypoints_1 );
detector.detect( img_2, keypoints_2 );
//-- Draw keypoints
Mat img_keypoints_1; Mat img_keypoints_2;
drawKeypoints( img_1, keypoints_1, img_keypoints_1, Scalar::all(-1), DrawMatchesFlags::DEFAULT );
drawKeypoints( img_2, keypoints_2, img_keypoints_2, Scalar::all(-1), DrawMatchesFlags::DEFAULT );
//-- Step 2: Calculate descriptors (feature vectors)
SurfDescriptorExtractor extractor;
Mat descriptors_1, descriptors_2;
extractor.compute( img_1, keypoints_1, descriptors_1 );
extractor.compute( img_2, keypoints_2, descriptors_2 );
//-- Step 3: Matching descriptor vectors using FLANN matcher
FlannBasedMatcher matcher;
std::vector< DMatch > matches;
matcher.match( descriptors_1, descriptors_2, matches );
double max_dist = 0; double min_dist = 100;
//-- Quick calculation of max and min distances between keypoints
for( int i = 0; i < descriptors_1.rows; i++ )
{ double dist = matches[i].distance;
if( dist < min_dist ) min_dist = dist;
if( dist > max_dist ) max_dist = dist;
}
printf( "-- Max dist : %f \n", max_dist );
printf( "-- Min dist : %f \n", min_dist );
//-- Draw only "good" matches (i.e. whose distance is less than 2*min_dist,
//-- or a small arbitrary value ( 0.02 ) in the event that min_dist is very
//-- small)
//-- PS.- radiusMatch can also be used here.
std::vector< DMatch > good_matches;
for( int i = 0; i < descriptors_1.rows; i++ )
{ if( matches[i].distance <= max( 2*min_dist, 0.02 ) )
{ good_matches.push_back( matches[i]); }
}
//-- Draw only "good" matches
Mat img_matches;
drawMatches( img_1, keypoints_1, img_2, keypoints_2,
good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
//-- Show detected matches
imshow( "Good Matches", img_matches );
for( int i = 0; i < (int)good_matches.size(); i++ )
{ printf( "-- Good Match [%d] Keypoint 1: %d -- Keypoint 2: %d \n", i, good_matches[i].queryIdx, good_matches[i].trainIdx ); }
waitKey(0);
return 0;
}

As the result shows, apart from a single mismatch, all of the matches are correct.
Continuing, compute the homography matrix:
// raw_surf.cpp : adapted from the relevant OpenCV 2.4.8 sample
//
#include "stdafx.h"
#include <iostream>
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/nonfree/features2d.hpp"
#include "opencv2/calib3d/calib3d.hpp"
using namespace std;
using namespace cv;
int main( int argc, char** argv )
{
Mat img_1 = imread( "img_opencv_1.png", 0 );
Mat img_2 = imread( "img_opencv_2.png", 0 );
if( !img_1.data || !img_2.data )
{ std::cout << " --(!) Error reading images " << std::endl; return -1; }
//-- Step 1: Detect the keypoints using SURF Detector
int minHessian = 400;
SurfFeatureDetector detector( minHessian );
std::vector<KeyPoint> keypoints_1, keypoints_2;
detector.detect( img_1, keypoints_1 );
detector.detect( img_2, keypoints_2 );
//-- Draw keypoints
Mat img_keypoints_1; Mat img_keypoints_2;
drawKeypoints( img_1, keypoints_1, img_keypoints_1, Scalar::all(-1), DrawMatchesFlags::DEFAULT );
drawKeypoints( img_2, keypoints_2, img_keypoints_2, Scalar::all(-1), DrawMatchesFlags::DEFAULT );
//-- Step 2: Calculate descriptors (feature vectors)
SurfDescriptorExtractor extractor;
Mat descriptors_1, descriptors_2;
extractor.compute( img_1, keypoints_1, descriptors_1 );
extractor.compute( img_2, keypoints_2, descriptors_2 );
//-- Step 3: Matching descriptor vectors using FLANN matcher
FlannBasedMatcher matcher;
std::vector< DMatch > matches;
matcher.match( descriptors_1, descriptors_2, matches );
double max_dist = 0; double min_dist = 100;
//-- Quick calculation of max and min distances between keypoints
for( int i = 0; i < descriptors_1.rows; i++ )
{ double dist = matches[i].distance;
if( dist < min_dist ) min_dist = dist;
if( dist > max_dist ) max_dist = dist;
}
printf( "-- Max dist : %f \n", max_dist );
printf( "-- Min dist : %f \n", min_dist );
//-- Keep only "good" matches (i.e. whose distance is less than 3*min_dist)
std::vector< DMatch > good_matches;
for( int i = 0; i < descriptors_1.rows; i++ )
{ if( matches[i].distance <= /*max(2*min_dist, 0.02)*/ 3*min_dist )
{ good_matches.push_back( matches[i]); }
}
//-- Draw only "good" matches
Mat img_matches;
drawMatches( img_1, keypoints_1, img_2, keypoints_2,
good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
//-- Localize the object from img_1 in img_2
std::vector<Point2f> obj;
std::vector<Point2f> scene;
for( int i = 0; i < (int)good_matches.size(); i++ )
{
obj.push_back( keypoints_1[ good_matches[i].queryIdx ].pt );
scene.push_back( keypoints_2[ good_matches[i].trainIdx ].pt );
printf( "-- Good Match [%d] Keypoint 1: %d -- Keypoint 2: %d \n", i, good_matches[i].queryIdx, good_matches[i].trainIdx );
}
// call RANSAC directly
Mat H = findHomography( obj, scene, CV_RANSAC );
//-- Get the corners from the image_1 ( the object to be "detected" )
std::vector<Point2f> obj_corners(4);
obj_corners[0] = Point(0,0); obj_corners[1] = Point( img_1.cols, 0 );
obj_corners[2] = Point( img_1.cols, img_1.rows ); obj_corners[3] = Point( 0, img_1.rows );
std::vector<Point2f> scene_corners(4);
perspectiveTransform( obj_corners, scene_corners, H);
//-- Draw lines between the corners (the mapped object in the scene - image_2 )
Point2f offset( (float)img_1.cols, 0);
line( img_matches, scene_corners[0] + offset, scene_corners[1] + offset, Scalar( 0, 255, 0), 4 );
line( img_matches, scene_corners[1] + offset, scene_corners[2] + offset, Scalar( 0, 255, 0), 4 );
line( img_matches, scene_corners[2] + offset, scene_corners[3] + offset, Scalar( 0, 255, 0), 4 );
line( img_matches, scene_corners[3] + offset, scene_corners[0] + offset, Scalar( 0, 255, 0), 4 );
//-- Show detected matches
imshow( "Good Matches & Object detection", img_matches );
waitKey(0);
return 0;
}

A simplified and annotated version:
// raw_surf.cpp : adapted from the relevant OpenCV 2.4.8 sample
//
#include "stdafx.h"
#include <iostream>
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/nonfree/features2d.hpp"
#include "opencv2/calib3d/calib3d.hpp"
using namespace std;
using namespace cv;
int main( int argc, char** argv )
{
Mat img_1 = imread( "img_opencv_1.png", 0 );
Mat img_2 = imread( "img_opencv_2.png", 0 );
if( !img_1.data || !img_2.data )
{ std::cout<< " --(!) Error reading images " << std::endl; return -1; }
//-- Step 1: Detect keypoints with SURF
int minHessian = 400;
SurfFeatureDetector detector( minHessian );
std::vector<KeyPoint> keypoints_1, keypoints_2;
detector.detect( img_1, keypoints_1 );
detector.detect( img_2, keypoints_2 );
//-- Step 2: Compute SURF descriptors
SurfDescriptorExtractor extractor;
Mat descriptors_1, descriptors_2;
extractor.compute( img_1, keypoints_1, descriptors_1 );
extractor.compute( img_2, keypoints_2, descriptors_2 );
//-- Step 3: Match descriptors
FlannBasedMatcher matcher; // BFMatcher would do brute-force matching instead
std::vector< DMatch > matches;
matcher.match( descriptors_1, descriptors_2, matches );
// find the max and min descriptor distances
double max_dist = 0; double min_dist = 100;
for( int i = 0; i < descriptors_1.rows; i++ )
{
double dist = matches[i].distance;
if( dist < min_dist ) min_dist = dist;
if( dist > max_dist ) max_dist = dist;
}
std::vector< DMatch > good_matches;
for( int i = 0; i < descriptors_1.rows; i++ )
{
if( matches[i].distance <= 3*min_dist ) // the threshold here is 3x min_dist
{
good_matches.push_back( matches[i]);
}
}
// draw the "good" matches
Mat img_matches;
drawMatches( img_1, keypoints_1, img_2, keypoints_2,
good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
//-- Localize the object from img_1 in img_2
std::vector<Point2f> obj;
std::vector<Point2f> scene;
for( int i = 0; i < (int)good_matches.size(); i++ )
{
obj.push_back( keypoints_1[ good_matches[i].queryIdx ].pt );
scene.push_back( keypoints_2[ good_matches[i].trainIdx ].pt );
}
// call RANSAC directly to compute the homography
Mat H = findHomography( obj, scene, CV_RANSAC );
//-- Get the corners from the image_1 ( the object to be "detected" )
std::vector<Point2f> obj_corners(4);
obj_corners[0] = Point(0,0);
obj_corners[1] = Point( img_1.cols, 0 );
obj_corners[2] = Point( img_1.cols, img_1.rows );
obj_corners[3] = Point( 0, img_1.rows );
std::vector<Point2f> scene_corners(4);
perspectiveTransform( obj_corners, scene_corners, H);
//-- Draw lines between the corners (the mapped object in the scene - image_2 )
Point2f offset( (float)img_1.cols, 0);
line( img_matches, scene_corners[0] + offset, scene_corners[1] + offset, Scalar(0, 255, 0), 4 );
line( img_matches, scene_corners[1] + offset, scene_corners[2] + offset, Scalar( 0, 255, 0), 4 );
line( img_matches, scene_corners[2] + offset, scene_corners[3] + offset, Scalar( 0, 255, 0), 4 );
line( img_matches, scene_corners[3] + offset, scene_corners[0] + offset, Scalar( 0, 255, 0), 4 );
//-- Show detected matches
imshow( "Good Matches & Object detection", img_matches );
waitKey(0);
return 0;
}

Two things deserve attention here. One: besides FlannBasedMatcher there is another matcher, BFMatcher, which does brute-force matching. Two: when selecting the so-called good features, the 3*min_dist criterion is used; I take this to be consistent with the thesis's statement that "the error threshold is set to 3". If I have misunderstood this, please point it out, thanks!
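An alternative filter worth trying here (not from the thesis; it is the ratio test from Lowe's SIFT paper, a common practice) is to take the two nearest neighbors of each descriptor with knnMatch and keep a match only when the best distance is clearly smaller than the second best. A minimal sketch, reusing the matcher and descriptors from above:
// Sketch: ratio-test filtering as an alternative to the 3*min_dist rule.
std::vector< std::vector<DMatch> > knn_matches;
matcher.knnMatch( descriptors_1, descriptors_2, knn_matches, 2 ); // 2 nearest neighbors
std::vector<DMatch> ratio_matches;
for ( size_t i = 0; i < knn_matches.size(); i++ )
{
if ( knn_matches[i].size() == 2 &&
knn_matches[i][0].distance < 0.7f * knn_matches[i][1].distance ) // 0.7 is a typical ratio
{
ratio_matches.push_back( knn_matches[i][0] );
}
}
Whether this beats the 3*min_dist rule on these images would need testing.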
I tested both aerial photographs and continuous-casting (production-line) images. The aerial photographs are natural images with rich features;

for the continuous-casting images, surface noise dominates the underlying texture, and no homography can be obtained.


Finally, add code to classify the RANSAC inliers and outliers, using 3 (pixels of reprojection error) as the dividing line:
// raw_surf.cpp : adapted from the relevant OpenCV 2.4.8 sample
//
#include "stdafx.h"
#include <iostream>
#include "opencv2/core/core.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/nonfree/features2d.hpp"
#include "opencv2/calib3d/calib3d.hpp"
using namespace std;
using namespace cv;
// Euclidean distance between two Point2f
float fDistance(Point2f p1, Point2f p2)
{
float ftmp = (p1.x - p2.x)*(p1.x - p2.x) + (p1.y - p2.y)*(p1.y - p2.y);
ftmp = sqrt((float)ftmp);
return ftmp;
}
int main( int argc, char** argv )
{
Mat img_1 = imread( "img_opencv_1.png", 0 );
Mat img_2 = imread( "img_opencv_2.png", 0 );
//// added for the continuous-casting images
//img_1 = img_1(Rect(20,0,img_1.cols-40,img_1.rows));
//img_2 = img_2(Rect(20,0,img_1.cols-40,img_1.rows));
// cv::Canny(img_1,img_1,100,200);
// cv::Canny(img_2,img_2,100,200);
if( !img_1.data || !img_2.data )
{ std::cout << " --(!) Error reading images " << std::endl; return -1; }
//-- Step 1: Detect keypoints with SURF
int minHessian = 400;
SurfFeatureDetector detector( minHessian );
std::vector<KeyPoint> keypoints_1, keypoints_2;
detector.detect( img_1, keypoints_1 );
detector.detect( img_2, keypoints_2 );
//-- Step 2: Compute SURF descriptors
SurfDescriptorExtractor extractor;
Mat descriptors_1, descriptors_2;
extractor.compute( img_1, keypoints_1, descriptors_1 );
extractor.compute( img_2, keypoints_2, descriptors_2 );
//-- Step 3: Match descriptors
FlannBasedMatcher matcher; // BFMatcher would do brute-force matching instead
std::vector< DMatch > matches;
matcher.match( descriptors_1, descriptors_2, matches );
// find the max and min descriptor distances
double max_dist = 0; double min_dist = 100;
for( int i = 0; i < descriptors_1.rows; i++ )
{
double dist = matches[i].distance;
if( dist < min_dist ) min_dist = dist;
if( dist > max_dist ) max_dist = dist;
}
std::vector< DMatch > good_matches;
for( int i = 0; i < descriptors_1.rows; i++ )
{
if( matches[i].distance <= 3*min_dist ) // the threshold here is 3x min_dist
{
good_matches.push_back( matches[i]);
}
}
// draw the "good" matches
Mat img_matches;
drawMatches( img_1, keypoints_1, img_2, keypoints_2,
good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
//-- Localize the object from img_1 in img_2
std::vector<Point2f> obj;
std::vector<Point2f> scene;
for( int i = 0; i < (int)good_matches.size(); i++ )
{
obj.push_back( keypoints_1[ good_matches[i].queryIdx ].pt );
scene.push_back( keypoints_2[ good_matches[i].trainIdx ].pt );
}
// call RANSAC directly to compute the homography
Mat H = findHomography( obj, scene, CV_RANSAC );
//-- Get the corners from the image_1 ( the object to be "detected" )
std::vector<Point2f> obj_corners(4);
obj_corners[0] = Point(0,0);
obj_corners[1] = Point( img_1.cols, 0 );
obj_corners[2] = Point( img_1.cols, img_1.rows );
obj_corners[3] = Point( 0, img_1.rows );
std::vector<Point2f> scene_corners(4);
perspectiveTransform( obj_corners, scene_corners, H);
// classify inliers vs outliers: reproject obj through H and measure the error
std::vector<Point2f> scene_test(obj.size());
perspectiveTransform(obj, scene_test, H);
for (int i = 0; i < (int)scene_test.size(); i++)
{
printf( "%d is %f \n", i+1, fDistance(scene[i], scene_test[i]));
}
//-- Draw lines between the corners (the mapped object in the scene - image_2 )
Point2f offset( (float)img_1.cols, 0);
line( img_matches, scene_corners[0] + offset, scene_corners[1] + offset, Scalar( 0, 255, 0), 4 );
line( img_matches, scene_corners[1] + offset, scene_corners[2] + offset, Scalar( 0, 255, 0), 4 );
line( img_matches, scene_corners[2] + offset, scene_corners[3] + offset, Scalar( 0, 255, 0), 4 );
line( img_matches, scene_corners[3] + offset, scene_corners[0] + offset, Scalar( 0, 255, 0), 4 );
//-- Show detected matches
imshow( "Good Matches & Object detection", img_matches );
waitKey(0);
return 0;
}
# include "stdafx.h"
# include <iostream >
# include "opencv2/core/core.hpp"
# include "opencv2/imgproc/imgproc.hpp"
# include "opencv2/features2d/features2d.hpp"
# include "opencv2/highgui/highgui.hpp"
# include "opencv2/nonfree/features2d.hpp"
# include "opencv2/calib3d/calib3d.hpp"
using namespace std;
using namespace cv;
//獲得兩個pointf之間的距離
float fDistance(Point2f p1,Point2f p2)
{
float ftmp = (p1.x -p2.x) *(p1.x -p2.x) + (p1.y -p2.y) *(p1.y -p2.y);
ftmp = sqrt(( float)ftmp);
return ftmp;
}
int main( int argc, char * * argv )
{
Mat img_1 = imread( "img_opencv_1.png", 0 );
Mat img_2 = imread( "img_opencv_2.png", 0 );
////添加於連鑄圖像
//img_1 = img_1(Rect(20,0,img_1.cols-40,img_1.rows));
//img_2 = img_2(Rect(20,0,img_1.cols-40,img_1.rows));
// cv::Canny(img_1,img_1,100,200);
// cv::Canny(img_2,img_2,100,200);
if( !img_1.data || !img_2.data )
{ std : :cout << " --(!) Error reading images " << std : :endl; return - 1; }
//-- Step 1: 使用SURF識別出特征點
int minHessian = 400;
SurfFeatureDetector detector( minHessian );
std : :vector <KeyPoint > keypoints_1, keypoints_2;
detector.detect( img_1, keypoints_1 );
detector.detect( img_2, keypoints_2 );
//-- Step 2: 描述SURF特征
SurfDescriptorExtractor extractor;
Mat descriptors_1, descriptors_2;
extractor.compute( img_1, keypoints_1, descriptors_1 );
extractor.compute( img_2, keypoints_2, descriptors_2 );
//-- Step 3: 匹配
FlannBasedMatcher matcher; //BFMatcher為強制匹配
std : :vector < DMatch > matches;
matcher.match( descriptors_1, descriptors_2, matches );
//取最大最小距離
double max_dist = 0; double min_dist = 100;
for( int i = 0; i < descriptors_1.rows; i ++ )
{
double dist = matches[i].distance;
if( dist < min_dist ) min_dist = dist;
if( dist > max_dist ) max_dist = dist;
}
std : :vector < DMatch > good_matches;
for( int i = 0; i < descriptors_1.rows; i ++ )
{
if( matches[i].distance < = 3 *min_dist ) //這里的閾值選擇了3倍的min_dist
{
good_matches.push_back( matches[i]);
}
}
//畫出"good match"
Mat img_matches;
drawMatches( img_1, keypoints_1, img_2, keypoints_2,
good_matches, img_matches, Scalar : :all( - 1), Scalar : :all( - 1),
vector < char >(), DrawMatchesFlags : :NOT_DRAW_SINGLE_POINTS );
//-- Localize the object from img_1 in img_2
std : :vector <Point2f > obj;
std : :vector <Point2f > scene;
for( int i = 0; i < ( int)good_matches.size(); i ++ )
{
obj.push_back( keypoints_1[ good_matches[i].queryIdx ].pt );
scene.push_back( keypoints_2[ good_matches[i].trainIdx ].pt );
}
//直接調用ransac,計算單應矩陣
Mat H = findHomography( obj, scene, CV_RANSAC );
//-- Get the corners from the image_1 ( the object to be "detected" )
std : :vector <Point2f > obj_corners( 4);
obj_corners[ 0] = Point( 0, 0);
obj_corners[ 1] = Point( img_1.cols, 0 );
obj_corners[ 2] = Point( img_1.cols, img_1.rows );
obj_corners[ 3] = Point( 0, img_1.rows );
std : :vector <Point2f > scene_corners( 4);
perspectiveTransform( obj_corners, scene_corners, H);
//計算內點外點
std : :vector <Point2f > scene_test(obj.size());
perspectiveTransform(obj,scene_test,H);
for ( int i = 0;i <scene_test.size();i ++)
{
printf( "%d is %f \n",i + 1,fDistance(scene[i],scene_test[i]));
}
//-- Draw lines between the corners (the mapped object in the scene - image_2 )
Point2f offset( ( float)img_1.cols, 0);
line( img_matches, scene_corners[ 0] + offset, scene_corners[ 1] + offset, Scalar( 0, 255, 0), 4 );
line( img_matches, scene_corners[ 1] + offset, scene_corners[ 2] + offset, Scalar( 0, 255, 0), 4 );
line( img_matches, scene_corners[ 2] + offset, scene_corners[ 3] + offset, Scalar( 0, 255, 0), 4 );
line( img_matches, scene_corners[ 3] + offset, scene_corners[ 0] + offset, Scalar( 0, 255, 0), 4 );
//-- Show detected matches
imshow( "Good Matches & Object detection", img_matches );
waitKey( 0);
return 0;
}
The results:

The points with large reprojection error now stand out clearly.
To sum up, this shows how to obtain the homography between two images with OpenCV. Not every image pair yields a homography: the two images must genuinely be related, and preferably be natural images. For images like these production-line ones, stitching has to rely on other methods.
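One way to detect the failure case automatically (my own addition, not from the thesis) is the optional inlier mask that findHomography can fill in: if too few matches survive RANSAC, the homography should not be trusted. A sketch, reusing the obj and scene vectors from above, with 15 as an assumed minimum:
// Sketch: sanity-check the homography via the RANSAC inlier mask.
std::vector<uchar> inlier_mask;
Mat H2 = findHomography( obj, scene, CV_RANSAC, 3.0, inlier_mask ); // 3.0 = reprojection threshold
int inliers = countNonZero( inlier_mask );
if ( inliers < 15 ) // assumed cutoff, tune to taste
{
std::cout << "homography unreliable: only " << inliers << " inliers" << std::endl;
}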
2. Stitching and blending
Since the homography has already been computed, we simply reuse it here. One point to keep straight is the relationship between a single frame and the growing mosaic. In general, either cylindrical or planar coordinates are used. In the thesis, the images are laid out roughly in a horizontal row, so planar coordinates apply. Under its frame-to-mosaic scheme, the stitching order runs from left to right: the alignment is computed one image at a time, and each image is then overlaid.
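As an aside (my own formulation, not spelled out in the thesis): if H_i denotes the pairwise homography mapping frame i+1 into frame i, then frame k is mapped into mosaic (frame 0) coordinates by the cumulative product of the pairwise matrices. A sketch, where H_pairwise is a hypothetical vector of the 3x3 CV_64F matrices returned by findHomography:
// Sketch: compose pairwise homographies into mosaic coordinates.
std::vector<Mat> H_pairwise; // hypothetical: filled with pairwise homographies
Mat H_total = Mat::eye(3, 3, CV_64F);
for (size_t i = 0; i < H_pairwise.size(); i++)
{
H_total = H_total * H_pairwise[i]; // frame i+1 -> frame 0, composed left to right
}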
To illustrate the algorithm, I use the church images that come with "Learning OpenCV".


The outcome is that, after SURF matching, the right-hand image is warped into a state fit for overlaying.
On this basis, align the images:
// raw_surf.cpp : adapted from the relevant OpenCV 2.4.8 sample
//
#include "stdafx.h"
#include <iostream>
#include "opencv2/core/core.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/nonfree/features2d.hpp"
#include "opencv2/calib3d/calib3d.hpp"
using namespace std;
using namespace cv;
int main( int argc, char** argv )
{
Mat img_1;
Mat img_2;
Mat img_raw_1 = imread( "c1.bmp");
Mat img_raw_2 = imread( "c3.bmp");
cvtColor(img_raw_1, img_1, CV_BGR2GRAY);
cvtColor(img_raw_2, img_2, CV_BGR2GRAY);
//-- Step 1: Detect keypoints with SURF
int minHessian = 400;
SurfFeatureDetector detector( minHessian );
std::vector<KeyPoint> keypoints_1, keypoints_2;
detector.detect( img_1, keypoints_1 );
detector.detect( img_2, keypoints_2 );
//-- Step 2: Compute SURF descriptors
SurfDescriptorExtractor extractor;
Mat descriptors_1, descriptors_2;
extractor.compute( img_1, keypoints_1, descriptors_1 );
extractor.compute( img_2, keypoints_2, descriptors_2 );
//-- Step 3: Match descriptors
FlannBasedMatcher matcher; // BFMatcher would do brute-force matching instead
std::vector< DMatch > matches;
matcher.match( descriptors_1, descriptors_2, matches );
// find the max and min descriptor distances
double max_dist = 0; double min_dist = 100;
for( int i = 0; i < descriptors_1.rows; i++ )
{
double dist = matches[i].distance;
if( dist < min_dist ) min_dist = dist;
if( dist > max_dist ) max_dist = dist;
}
std::vector< DMatch > good_matches;
for( int i = 0; i < descriptors_1.rows; i++ )
{
if( matches[i].distance <= 3*min_dist ) // the threshold here is 3x min_dist
{
good_matches.push_back( matches[i]);
}
}
//-- Localize the object from img_1 in img_2
std::vector<Point2f> obj;
std::vector<Point2f> scene;
for( int i = 0; i < (int)good_matches.size(); i++ )
{
// following the "add the frame to the mosaic" scheme: the left image is the scene, the right one is the obj
scene.push_back( keypoints_1[ good_matches[i].queryIdx ].pt );
obj.push_back( keypoints_2[ good_matches[i].trainIdx ].pt );
}
// call RANSAC directly to compute the homography
Mat H = findHomography( obj, scene, CV_RANSAC );
// image alignment
Mat result;
warpPerspective(img_raw_2, result, H, Size(2*img_2.cols, img_2.rows));
Mat half(result, cv::Rect(0, 0, img_2.cols, img_2.rows));
img_raw_1.copyTo(half);
imshow( "result", result);
waitKey(0);
return 0;
}

Next, blend using the three methods mentioned in the thesis: gradual fade-in/fade-out (linear) blending, maximum-value blending, and threshold-weighted smoothing.
// raw_surf.cpp : adapted from the relevant OpenCV 2.4.8 sample
//
#include "stdafx.h"
#include <iostream>
#include "opencv2/core/core.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/nonfree/features2d.hpp"
#include "opencv2/calib3d/calib3d.hpp"
using namespace std;
using namespace cv;
int main( int argc, char** argv )
{
Mat img_1;
Mat img_2;
Mat img_raw_1 = imread( "c1.bmp");
Mat img_raw_2 = imread( "c3.bmp");
cvtColor(img_raw_1, img_1, CV_BGR2GRAY);
cvtColor(img_raw_2, img_2, CV_BGR2GRAY);
//-- Step 1: Detect keypoints with SURF
int minHessian = 400;
SurfFeatureDetector detector( minHessian );
std::vector<KeyPoint> keypoints_1, keypoints_2;
detector.detect( img_1, keypoints_1 );
detector.detect( img_2, keypoints_2 );
//-- Step 2: Compute SURF descriptors
SurfDescriptorExtractor extractor;
Mat descriptors_1, descriptors_2;
extractor.compute( img_1, keypoints_1, descriptors_1 );
extractor.compute( img_2, keypoints_2, descriptors_2 );
//-- Step 3: Match descriptors
FlannBasedMatcher matcher; // BFMatcher would do brute-force matching instead
std::vector< DMatch > matches;
matcher.match( descriptors_1, descriptors_2, matches );
// find the max and min descriptor distances
double max_dist = 0; double min_dist = 100;
for( int i = 0; i < descriptors_1.rows; i++ )
{
double dist = matches[i].distance;
if( dist < min_dist ) min_dist = dist;
if( dist > max_dist ) max_dist = dist;
}
std::vector< DMatch > good_matches;
for( int i = 0; i < descriptors_1.rows; i++ )
{
if( matches[i].distance <= 3*min_dist ) // the threshold here is 3x min_dist
{
good_matches.push_back( matches[i]);
}
}
//-- Localize the object from img_1 in img_2
std::vector<Point2f> obj;
std::vector<Point2f> scene;
for( int i = 0; i < (int)good_matches.size(); i++ )
{
// following the "add the frame to the mosaic" scheme: the left image is the scene, the right one is the obj
scene.push_back( keypoints_1[ good_matches[i].queryIdx ].pt );
obj.push_back( keypoints_2[ good_matches[i].trainIdx ].pt );
}
// call RANSAC directly to compute the homography
Mat H = findHomography( obj, scene, CV_RANSAC );
// image alignment
Mat result;
Mat resultback; // keeps the warped new frame before the left image is pasted in
warpPerspective(img_raw_2, result, H, Size(2*img_2.cols, img_2.rows));
result.copyTo(resultback);
Mat half(result, cv::Rect(0, 0, img_2.cols, img_2.rows));
img_raw_1.copyTo(half);
imshow( "adjust", result);
// gradual fade-in/fade-out (linear) blending over a 100-pixel overlap
Mat result_linerblend = result.clone();
double dblend = 0.0;
int ioffset = img_2.cols - 100;
for (int i = 0; i < 100; i++)
{
result_linerblend.col(ioffset+i) = result.col(ioffset+i)*(1-dblend) + resultback.col(ioffset+i)*dblend;
dblend = dblend + 0.01;
}
imshow( "result_linerblend", result_linerblend);
// maximum-value blending: keep the brighter of the two pixels
Mat result_maxvalue = result.clone();
for (int i = 0; i < img_2.rows; i++)
{
for (int j = 0; j < 100; j++)
{
int iresult = result.at<Vec3b>(i,ioffset+j)[0] + result.at<Vec3b>(i,ioffset+j)[1] + result.at<Vec3b>(i,ioffset+j)[2];
int iresultback = resultback.at<Vec3b>(i,ioffset+j)[0] + resultback.at<Vec3b>(i,ioffset+j)[1] + resultback.at<Vec3b>(i,ioffset+j)[2];
if (iresultback > iresult)
{
result_maxvalue.at<Vec3b>(i,ioffset+j) = resultback.at<Vec3b>(i,ioffset+j);
}
}
}
imshow( "result_maxvalue", result_maxvalue);
// threshold-weighted smoothing
Mat result_advance = result.clone();
for (int i = 0; i < img_2.rows; i++)
{
for (int j = 0; j < 33; j++)
{
int iimg1 = result.at<Vec3b>(i,ioffset+j)[0] + result.at<Vec3b>(i,ioffset+j)[1] + result.at<Vec3b>(i,ioffset+j)[2];
//int iimg2 = resultback.at<Vec3b>(i,ioffset+j)[0] + resultback.at<Vec3b>(i,ioffset+j)[1] + resultback.at<Vec3b>(i,ioffset+j)[2];
int ilinerblend = result_linerblend.at<Vec3b>(i,ioffset+j)[0] + result_linerblend.at<Vec3b>(i,ioffset+j)[1] + result_linerblend.at<Vec3b>(i,ioffset+j)[2];
if (abs(iimg1 - ilinerblend) < 3)
{
result_advance.at<Vec3b>(i,ioffset+j) = result_linerblend.at<Vec3b>(i,ioffset+j);
}
}
}
for (int i = 0; i < img_2.rows; i++)
{
for (int j = 33; j < 66; j++)
{
int iimg1 = result.at<Vec3b>(i,ioffset+j)[0] + result.at<Vec3b>(i,ioffset+j)[1] + result.at<Vec3b>(i,ioffset+j)[2];
int iimg2 = resultback.at<Vec3b>(i,ioffset+j)[0] + resultback.at<Vec3b>(i,ioffset+j)[1] + resultback.at<Vec3b>(i,ioffset+j)[2];
int ilinerblend = result_linerblend.at<Vec3b>(i,ioffset+j)[0] + result_linerblend.at<Vec3b>(i,ioffset+j)[1] + result_linerblend.at<Vec3b>(i,ioffset+j)[2];
if (abs(max(iimg1,iimg2) - ilinerblend) < 3)
{
result_advance.at<Vec3b>(i,ioffset+j) = result_linerblend.at<Vec3b>(i,ioffset+j);
}
else if (iimg2 > iimg1)
{
result_advance.at<Vec3b>(i,ioffset+j) = resultback.at<Vec3b>(i,ioffset+j);
}
}
}
for (int i = 0; i < img_2.rows; i++)
{
for (int j = 66; j < 100; j++)
{
//int iimg1 = result.at<Vec3b>(i,ioffset+j)[0] + result.at<Vec3b>(i,ioffset+j)[1] + result.at<Vec3b>(i,ioffset+j)[2];
int iimg2 = resultback.at<Vec3b>(i,ioffset+j)[0] + resultback.at<Vec3b>(i,ioffset+j)[1] + resultback.at<Vec3b>(i,ioffset+j)[2];
int ilinerblend = result_linerblend.at<Vec3b>(i,ioffset+j)[0] + result_linerblend.at<Vec3b>(i,ioffset+j)[1] + result_linerblend.at<Vec3b>(i,ioffset+j)[2];
if (abs(iimg2 - ilinerblend) < 3)
{
result_advance.at<Vec3b>(i,ioffset+j) = result_linerblend.at<Vec3b>(i,ioffset+j);
}
else
{
result_advance.at<Vec3b>(i,ioffset+j) = resultback.at<Vec3b>(i,ioffset+j);
}
}
}
imshow( "result_advance", result_advance);
waitKey(0);
return 0;
}




So far, maxvalue looks like the best blending method here, but, as the thesis also notes, images like these do not really bring out the character of the blending algorithms, so I shot pictures similar to those in the thesis. It turns out that capturing good source pictures takes suitable hardware and some technique; software and hardware have to be used together.
In addition, using the images from the thesis, the results are as follows:




With a different set of pictures, the results differ:




By comparison, linerblend holds up with decent quality throughout; which stitching method to adopt in practice must be decided by the actual situation.
3. Continuous multi-image stitching
The examples so far handle two images; extending to at least three is needed before a uniform procedure emerges.
Continuous stitching is not merely adding one more picture on top of the finished result; the crux is how to handle the mosaic that has already been assembled.

Then m2, i.e. H.at<char>(0,2), should be the horizontal translation. In practice, however, I could never read this value out correctly:

Mat outImage = H.clone();
uchar* outData = outImage.ptr<uchar>(0);
int itemp = outData[2]; // read the offset
line(result_linerblend, Point(result_linerblend.cols-itemp, 0), Point(result_linerblend.cols-itemp, img_2.rows), Scalar(255,255,255), 2);
imshow( "result_linerblend", result_linerblend);
At the time, though, I fell back on writing dedicated code to find the boundary instead:
// find the boundary of the already-stitched image
Mat matmask = result_linerblend.clone();
int idaterow0 = 0; int idaterowend = 0; // first non-zero column in the top and bottom rows, scanning from the right
for (int j = matmask.cols-1; j >= 0; j--)
{
if (matmask.at<Vec3b>(0,j)[0] > 0)
{
idaterow0 = j;
break;
}
}
for (int j = matmask.cols-1; j >= 0; j--)
{
if (matmask.at<Vec3b>(matmask.rows-1,j)[0] > 0)
{
idaterowend = j;
break;
}
}
line(matmask, Point(min(idaterow0,idaterowend), 0), Point(min(idaterow0,idaterowend), img_2.rows), Scalar(255,255,255), 2);
imshow( "result_linerblend", matmask);

This works well and is stable. The current implementation cuts off the region to the left of the white line and stitches onto that.
On this basis I wrote the 3-image stitching; the result is shown below. The image quality probably still needs work on the interpolation side, which is the next step.

// blend_series.cpp : multi-image stitching
//
#include "stdafx.h"
#include <iostream>
#include "opencv2/core/core.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/nonfree/features2d.hpp"
#include "opencv2/calib3d/calib3d.hpp"
using namespace std;
using namespace cv;
int main( int argc, char** argv )
{
Mat img_1;
Mat img_2;
Mat img_raw_1 = imread( "Univ3.jpg");
Mat img_raw_2 = imread( "Univ2.jpg");
cvtColor(img_raw_1, img_1, CV_BGR2GRAY);
cvtColor(img_raw_2, img_2, CV_BGR2GRAY);
//-- Step 1: Detect keypoints with SURF
int minHessian = 400;
SurfFeatureDetector detector( minHessian );
std::vector<KeyPoint> keypoints_1, keypoints_2;
detector.detect( img_1, keypoints_1 );
detector.detect( img_2, keypoints_2 );
//-- Step 2: Compute SURF descriptors
SurfDescriptorExtractor extractor;
Mat descriptors_1, descriptors_2;
extractor.compute( img_1, keypoints_1, descriptors_1 );
extractor.compute( img_2, keypoints_2, descriptors_2 );
//-- Step 3: Match descriptors
FlannBasedMatcher matcher; // BFMatcher would do brute-force matching instead
std::vector< DMatch > matches;
matcher.match( descriptors_1, descriptors_2, matches );
// find the max and min descriptor distances
double max_dist = 0; double min_dist = 100;
for( int i = 0; i < descriptors_1.rows; i++ )
{
double dist = matches[i].distance;
if( dist < min_dist ) min_dist = dist;
if( dist > max_dist ) max_dist = dist;
}
std::vector< DMatch > good_matches;
for( int i = 0; i < descriptors_1.rows; i++ )
{
if( matches[i].distance <= 3*min_dist ) // the threshold here is 3x min_dist
{
good_matches.push_back( matches[i]);
}
}
//-- Localize the object from img_1 in img_2
std::vector<Point2f> obj;
std::vector<Point2f> scene;
for( int i = 0; i < (int)good_matches.size(); i++ )
{
// following the "add the frame to the mosaic" scheme: the left image is the scene, the right one is the obj
scene.push_back( keypoints_1[ good_matches[i].queryIdx ].pt );
obj.push_back( keypoints_2[ good_matches[i].trainIdx ].pt );
}
// call RANSAC directly to compute the homography
Mat H = findHomography( obj, scene, CV_RANSAC );
// image alignment
Mat result;
Mat resultback; // keeps the warped new frame before the left image is pasted in
warpPerspective(img_raw_2, result, H, Size(2*img_2.cols, img_2.rows));
result.copyTo(resultback);
Mat half(result, cv::Rect(0, 0, img_2.cols, img_2.rows));
img_raw_1.copyTo(half);
//imshow("adjust",result);
// gradual fade-in/fade-out (linear) blending
Mat result_linerblend = result.clone();
double dblend = 0.0;
int ioffset = img_2.cols - 100;
for (int i = 0; i < 100; i++)
{
result_linerblend.col(ioffset+i) = result.col(ioffset+i)*(1-dblend) + resultback.col(ioffset+i)*dblend;
dblend = dblend + 0.01;
}
// find the boundary of the already-stitched image
Mat matmask = result_linerblend.clone();
int idaterow0 = 0; int idaterowend = 0; // first non-zero column in the top and bottom rows, scanning from the right
for (int j = matmask.cols-1; j >= 0; j--)
{
if (matmask.at<Vec3b>(0,j)[0] > 0)
{
idaterow0 = j;
break;
}
}
for (int j = matmask.cols-1; j >= 0; j--)
{
if (matmask.at<Vec3b>(matmask.rows-1,j)[0] > 0)
{
idaterowend = j;
break;
}
}
line(matmask, Point(min(idaterow0,idaterowend), 0), Point(min(idaterow0,idaterowend), img_2.rows), Scalar(255,255,255), 2);
imshow( "result_linerblend", matmask);
///////////////--------------- continue processing the result image ---------------///////////////
img_raw_1 = result_linerblend(Rect(0, 0, min(idaterow0,idaterowend), img_2.rows));
img_raw_2 = imread( "Univ1.jpg");
cvtColor(img_raw_1, img_1, CV_BGR2GRAY);
cvtColor(img_raw_2, img_2, CV_BGR2GRAY);
//-- Step 1: Detect keypoints with SURF
SurfFeatureDetector detector2( minHessian );
keypoints_1.clear();
keypoints_2.clear();
detector2.detect( img_1, keypoints_1 );
detector2.detect( img_2, keypoints_2 );
//-- Step 2: Compute SURF descriptors
SurfDescriptorExtractor extractor2;
extractor2.compute( img_1, keypoints_1, descriptors_1 );
extractor2.compute( img_2, keypoints_2, descriptors_2 );
//-- Step 3: Match descriptors
FlannBasedMatcher matcher2; // BFMatcher would do brute-force matching instead
matcher2.match( descriptors_1, descriptors_2, matches );
// find the max and min descriptor distances
max_dist = 0; min_dist = 100;
for( int i = 0; i < descriptors_1.rows; i++ )
{
double dist = matches[i].distance;
if( dist < min_dist ) min_dist = dist;
if( dist > max_dist ) max_dist = dist;
}
good_matches.clear();
for( int i = 0; i < descriptors_1.rows; i++ )
{
if( matches[i].distance <= 3*min_dist ) // the threshold here is 3x min_dist
{
good_matches.push_back( matches[i]);
}
}
//-- Localize the object from img_1 in img_2
obj.clear();
scene.clear();
for( int i = 0; i < (int)good_matches.size(); i++ )
{
// following the "add the frame to the mosaic" scheme: the left image is the scene, the right one is the obj
scene.push_back( keypoints_1[ good_matches[i].queryIdx ].pt );
obj.push_back( keypoints_2[ good_matches[i].trainIdx ].pt );
}
// call RANSAC directly to compute the homography
H = findHomography( obj, scene, CV_RANSAC );
// image alignment
warpPerspective(img_raw_2, result, H, Size(img_1.cols + img_2.cols, img_2.rows));
result.copyTo(resultback);
Mat half2(result, cv::Rect(0, 0, img_1.cols, img_1.rows));
img_raw_1.copyTo(half2);
imshow( "adjust", result);
// gradual fade-in/fade-out (linear) blending
result_linerblend = result.clone();
dblend = 0.0;
ioffset = img_1.cols - 100;
for (int i = 0; i < 100; i++)
{
result_linerblend.col(ioffset+i) = result.col(ioffset+i)*(1-dblend) + resultback.col(ioffset+i)*dblend;
dblend = dblend + 0.01;
}
imshow( "result_linerblend", result_linerblend);
waitKey(0);
return 0;
}
By copying and pasting the same pattern, I extended this to 5-image stitching. At that point it became apparent that 3 images is often a practical limit (which may be why the OpenCV sample provides 3 images): by the time the fourth image arrives, the homography becomes very poor.

Why does this happen? On reflection: the thesis works in planar coordinates, i.e. all the pictures lie roughly in one plane, which is especially clear from the Logitech camera rig it deploys later on. In reality the more common case is a person standing in one spot and shooting through 360 degrees, which calls for cylindrical coordinates: the images must first be given a cylindrical projection.

This produces the effect below. Whether it is fully correct is debatable, but it is certainly a basis for pushing further.
// column_transoform.cpp : cylindrical (barrel) projection
//
#include "stdafx.h"
#include <iostream>
#include "opencv2/core/core.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/nonfree/features2d.hpp"
#include "opencv2/calib3d/calib3d.hpp"
using namespace std;
using namespace cv;
#define PI 3.14159
int main( int argc, char** argv )
{
Mat img_1 = imread( "Univ1.jpg");
Mat img_result = img_1.clone();
for (int i = 0; i < img_result.rows; i++)
{ for (int j = 0; j < img_result.cols; j++)
{
img_result.at<Vec3b>(i,j) = 0;
}
}
int W = img_1.cols;
int H = img_1.rows;
float r = W/(2*tan(PI/6)); // focal length in pixels, assuming a 60-degree field of view
float k = 0;
float fx = 0;
float fy = 0;
for (int i = 0; i < img_1.rows; i++)
{ for (int j = 0; j < img_1.cols; j++)
{
k = sqrt((float)(r*r + (W/2-j)*(W/2-j)));
fx = r*sin(PI/6) + r*sin(atan((j - W/2)/r));
fy = H/2 + r*(i - H/2)/k;
int ix = (int)fx;
int iy = (int)fy;
if (ix < W && ix >= 0 && iy < H && iy >= 0)
{
img_result.at<Vec3b>(iy,ix) = img_1.at<Vec3b>(i,j);
}
}
}
imshow( "barrel projection", img_1 );
imshow( "img_result", img_result);
waitKey(0);
return 0;
}

The result is still unsatisfactory. Evidently there is more to this step than simply applying a barrel transform; there must be quantitative parameters involved, or my transform may simply be written wrong. That is for the next round of study.
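One suspect is the forward mapping above: iterating over source pixels leaves holes wherever no source pixel lands on a given target pixel. The textbook remedy is backward mapping, i.e. iterate over the destination and sample the source. A sketch of the inverse cylindrical projection, with the focal length f in pixels left as a free parameter (an untested alternative, not a verified fix):
// Sketch: inverse (backward) cylindrical projection.
// For each destination pixel, compute the source pixel it comes from.
Mat cylindricalWarp(const Mat& src, float f)
{
Mat dst(src.size(), src.type(), Scalar::all(0));
int W = src.cols, H = src.rows;
float xc = W/2.0f, yc = H/2.0f;
for (int y = 0; y < H; y++)
{
for (int x = 0; x < W; x++)
{
float theta = (x - xc)/f; // angle around the cylinder
float xs = f*tan(theta) + xc; // source x
float ys = (y - yc)/cos(theta) + yc; // source y
int ix = cvRound(xs), iy = cvRound(ys);
if (ix >= 0 && ix < W && iy >= 0 && iy < H)
dst.at<Vec3b>(y, x) = src.at<Vec3b>(iy, ix);
}
}
return dst;
}
Calling cylindricalWarp(img_1, r) with the r computed above, or sweeping f around that value, should at least fill the target densely; bilinear sampling would further reduce the aliasing.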
[To be continued]