Parallel Programming with OpenMP: Accelerating an OpenCV Image Stitching Algorithm


OpenMP is a parallel programming solution for multiprocessor systems that offers a high-level abstraction over parallelism. By adding a few simple directives to a program, you can write efficient parallel code without worrying about the details of the underlying implementation, which greatly reduces the difficulty and complexity of parallel programming. Precisely because of this simplicity, however, OpenMP is not well suited to situations that require complex inter-thread synchronization and mutual exclusion.
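
As a taste of how little is required, here is a minimal standalone sketch (my own illustration, not part of the stitching program below) that parallelizes a loop with a single directive:

#include "omp.h"
#include <cstdio>

int main()
{
	double sum = 0.0;
	//One directive is enough: OpenMP creates the threads, splits the loop
	//iterations among them, and combines the per-thread partial sums
	//as requested by the reduction clause
#pragma omp parallel for reduction(+:sum)
	for (int i = 0; i < 1000000; i++)
	{
		sum += i * 0.5;
	}
	printf("sum = %f, threads available = %d\n", sum, omp_get_max_threads());
	return 0;
}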


The OpenCV image stitching algorithm based on SIFT or SURF features works by extracting and describing the feature points of two or more images, then matching the feature points across images and warping the images accordingly. Feature extraction and description is the most time-consuming part of the whole pipeline, and since each image is processed independently, it can be accelerated with OpenMP.


Here is the original SIFT stitching program, without OpenMP acceleration:

#include "highgui/highgui.hpp"    
#include "opencv2/nonfree/nonfree.hpp"    
#include "opencv2/legacy/legacy.hpp"   
#include "omp.h"

using namespace cv;

//Map a point in the source image to its corresponding location in the target image after the matrix transform  
Point2f getTransformPoint(const Point2f originalPoint, const Mat &transformMatrix);

int main(int argc, char *argv[])
{
	double startTime = omp_get_wtime();

	Mat image01 = imread("Test01.jpg");
	Mat image02 = imread("Test02.jpg");
	imshow("拼接圖像1", image01);
	imshow("拼接圖像2", image02);

	//Convert to grayscale  
	Mat image1, image2;
	cvtColor(image01, image1, CV_RGB2GRAY);
	cvtColor(image02, image2, CV_RGB2GRAY);

	//Extract feature points    
	SiftFeatureDetector siftDetector(800);  // retain the 800 strongest features  
	vector<KeyPoint> keyPoint1, keyPoint2;
	siftDetector.detect(image1, keyPoint1);
	siftDetector.detect(image2, keyPoint2);

	//Compute feature descriptors in preparation for the matching below    
	SiftDescriptorExtractor siftDescriptor;
	Mat imageDesc1, imageDesc2;
	siftDescriptor.compute(image1, keyPoint1, imageDesc1);
	siftDescriptor.compute(image2, keyPoint2, imageDesc2);

	double endTime = omp_get_wtime();
	std::cout << "Time without OpenMP acceleration: " << endTime - startTime << std::endl;
	//Match feature points and extract the best pairs     
	FlannBasedMatcher matcher;
	vector<DMatch> matchePoints;
	matcher.match(imageDesc1, imageDesc2, matchePoints, Mat());
	sort(matchePoints.begin(), matchePoints.end()); //sort matches by ascending distance    
	//take the top N best matches  
	vector<Point2f> imagePoints1, imagePoints2;
	for (int i = 0; i < 10; i++)
	{
		imagePoints1.push_back(keyPoint1[matchePoints[i].queryIdx].pt);
		imagePoints2.push_back(keyPoint2[matchePoints[i].trainIdx].pt);
	}

	//Compute the 3x3 homography that maps image 1 onto image 2  
	Mat homo = findHomography(imagePoints1, imagePoints2, CV_RANSAC);
	Mat adjustMat = (Mat_<double>(3, 3) << 1.0, 0, image01.cols, 0, 1.0, 0, 0, 0, 1.0);
	Mat adjustHomo = adjustMat*homo;

	//Locate the strongest matching point in the original image and in the transformed image, used to position the stitching seam  
	Point2f originalLinkPoint, targetLinkPoint, basedImagePoint;
	originalLinkPoint = keyPoint1[matchePoints[0].queryIdx].pt;
	targetLinkPoint = getTransformPoint(originalLinkPoint, adjustHomo);
	basedImagePoint = keyPoint2[matchePoints[0].trainIdx].pt;

	//Image registration  
	Mat imageTransform1;
	warpPerspective(image01, imageTransform1, adjustMat*homo, Size(image02.cols + image01.cols + 110, image02.rows));

	//Blend the overlap region to the left of the strongest match point so the seam transitions smoothly, without abrupt changes  
	Mat image1Overlap, image2Overlap; //overlapping parts of image 1 and image 2     
	image1Overlap = imageTransform1(Rect(Point(targetLinkPoint.x - basedImagePoint.x, 0), Point(targetLinkPoint.x, image02.rows)));
	image2Overlap = image02(Rect(0, 0, image1Overlap.cols, image1Overlap.rows));
	Mat image1ROICopy = image1Overlap.clone();  //copy of image 1's overlap region  
	for (int i = 0; i < image1Overlap.rows; i++)
	{
		for (int j = 0; j < image1Overlap.cols; j++)
		{
			double weight;
			weight = (double)j / image1Overlap.cols;  //blend weight varies with distance from the seam  
			image1Overlap.at<Vec3b>(i, j)[0] = (1 - weight)*image1ROICopy.at<Vec3b>(i, j)[0] + weight*image2Overlap.at<Vec3b>(i, j)[0];
			image1Overlap.at<Vec3b>(i, j)[1] = (1 - weight)*image1ROICopy.at<Vec3b>(i, j)[1] + weight*image2Overlap.at<Vec3b>(i, j)[1];
			image1Overlap.at<Vec3b>(i, j)[2] = (1 - weight)*image1ROICopy.at<Vec3b>(i, j)[2] + weight*image2Overlap.at<Vec3b>(i, j)[2];
		}
	}
	Mat ROIMat = image02(Rect(Point(image1Overlap.cols, 0), Point(image02.cols, image02.rows)));  //non-overlapping part of image 2  
	ROIMat.copyTo(Mat(imageTransform1, Rect(targetLinkPoint.x, 0, ROIMat.cols, image02.rows))); //append the non-overlapping part directly  
	namedWindow("拼接結果", 0);
	imshow("拼接結果", imageTransform1);
	imwrite("D:\\拼接結果.jpg", imageTransform1);
	waitKey();
	return 0;
}

//Map a point in the source image to its corresponding location in the target image after the matrix transform  
Point2f getTransformPoint(const Point2f originalPoint, const Mat &transformMatrix)
{
	Mat originalP, targetP;
	originalP = (Mat_<double>(3, 1) << originalPoint.x, originalPoint.y, 1.0);
	targetP = transformMatrix*originalP;
	float x = targetP.at<double>(0, 0) / targetP.at<double>(2, 0);
	float y = targetP.at<double>(1, 0) / targetP.at<double>(2, 0);
	return Point2f(x, y);
}
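
For reference, getTransformPoint simply applies the homography in homogeneous coordinates: the input point (x, y) is lifted to the column vector (x, y, 1)^T and multiplied by the 3x3 matrix to give (x', y', w)^T, and the returned pixel position is (x'/w, y'/w). The final division by w is the perspective normalization that makes the projective transform nonlinear in pixel coordinates.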


Image 1:

Image 2:

Stitching result:

On my machine, the version without OpenMP takes about 4.7 s on average.


Using OpenMP is also very easy. Visual Studio has built-in support: in the project, go to Properties -> Configuration Properties -> C/C++ -> Language -> Open MP Support and select Yes (this turns on the /openmp compiler switch; with GCC or Clang you would compile with -fopenmp):



After that, all you need to do is include the OpenMP header "omp.h" in the program:

#include "highgui/highgui.hpp"    
#include "opencv2/nonfree/nonfree.hpp"    
#include "opencv2/legacy/legacy.hpp"   
#include "omp.h"

using namespace cv;

//Map a point in the source image to its corresponding location in the target image after the matrix transform  
Point2f getTransformPoint(const Point2f originalPoint, const Mat &transformMatrix);

int main(int argc, char *argv[])
{
	double startTime = omp_get_wtime();

	Mat image01, image02;
	Mat image1, image2;
	vector<KeyPoint> keyPoint1, keyPoint2;
	Mat imageDesc1, imageDesc2;
	SiftFeatureDetector siftDetector(800);  // retain the 800 strongest features  
	SiftDescriptorExtractor siftDescriptor;
	//Use OpenMP's sections directive to run the two per-image pipelines on separate threads
	//(siftDetector and siftDescriptor are shared between the sections; their detect/compute
	//calls hold no per-call state here, but giving each section its own instance would be safer)
#pragma omp parallel sections  
	{
#pragma omp section  
		{
			image01 = imread("Test01.jpg");
			//Convert to grayscale 
			cvtColor(image01, image1, CV_RGB2GRAY);
			//Extract feature points  
			siftDetector.detect(image1, keyPoint1);
			//Compute feature descriptors in preparation for the matching below    
			siftDescriptor.compute(image1, keyPoint1, imageDesc1);
		}
#pragma omp section  
		{
			image02 = imread("Test02.jpg");
			cvtColor(image02, image2, CV_RGB2GRAY);
			siftDetector.detect(image2, keyPoint2);
			siftDescriptor.compute(image2, keyPoint2, imageDesc2);
		}
	}
	//HighGUI is not thread-safe, so the imshow calls are moved out of the parallel sections
	imshow("Image 1", image01);
	imshow("Image 2", image02);
	double endTime = omp_get_wtime();
	std::cout << "Time with OpenMP acceleration: " << endTime - startTime << std::endl;

	//Match feature points and extract the best pairs     
	FlannBasedMatcher matcher;
	vector<DMatch> matchePoints;
	matcher.match(imageDesc1, imageDesc2, matchePoints, Mat());
	sort(matchePoints.begin(), matchePoints.end()); //sort matches by ascending distance    
	//take the top N best matches  
	vector<Point2f> imagePoints1, imagePoints2;
	for (int i = 0; i < 10; i++)
	{
		imagePoints1.push_back(keyPoint1[matchePoints[i].queryIdx].pt);
		imagePoints2.push_back(keyPoint2[matchePoints[i].trainIdx].pt);
	}

	//Compute the 3x3 homography that maps image 1 onto image 2  
	Mat homo = findHomography(imagePoints1, imagePoints2, CV_RANSAC);
	Mat adjustMat = (Mat_<double>(3, 3) << 1.0, 0, image01.cols, 0, 1.0, 0, 0, 0, 1.0);
	Mat adjustHomo = adjustMat*homo;

	//Locate the strongest matching point in the original image and in the transformed image, used to position the stitching seam  
	Point2f originalLinkPoint, targetLinkPoint, basedImagePoint;
	originalLinkPoint = keyPoint1[matchePoints[0].queryIdx].pt;
	targetLinkPoint = getTransformPoint(originalLinkPoint, adjustHomo);
	basedImagePoint = keyPoint2[matchePoints[0].trainIdx].pt;

	//Image registration  
	Mat imageTransform1;
	warpPerspective(image01, imageTransform1, adjustMat*homo, Size(image02.cols + image01.cols + 110, image02.rows));

	//Blend the overlap region to the left of the strongest match point so the seam transitions smoothly, without abrupt changes  
	Mat image1Overlap, image2Overlap; //overlapping parts of image 1 and image 2     
	image1Overlap = imageTransform1(Rect(Point(targetLinkPoint.x - basedImagePoint.x, 0), Point(targetLinkPoint.x, image02.rows)));
	image2Overlap = image02(Rect(0, 0, image1Overlap.cols, image1Overlap.rows));
	Mat image1ROICopy = image1Overlap.clone();  //copy of image 1's overlap region 
	for (int i = 0; i < image1Overlap.rows; i++)
	{
		for (int j = 0; j < image1Overlap.cols; j++)
		{
			double weight;
			weight = (double)j / image1Overlap.cols;  //blend weight varies with distance from the seam  
			image1Overlap.at<Vec3b>(i, j)[0] = (1 - weight)*image1ROICopy.at<Vec3b>(i, j)[0] + weight*image2Overlap.at<Vec3b>(i, j)[0];
			image1Overlap.at<Vec3b>(i, j)[1] = (1 - weight)*image1ROICopy.at<Vec3b>(i, j)[1] + weight*image2Overlap.at<Vec3b>(i, j)[1];
			image1Overlap.at<Vec3b>(i, j)[2] = (1 - weight)*image1ROICopy.at<Vec3b>(i, j)[2] + weight*image2Overlap.at<Vec3b>(i, j)[2];
		}
	}
	Mat ROIMat = image02(Rect(Point(image1Overlap.cols, 0), Point(image02.cols, image02.rows)));  //non-overlapping part of image 2  
	ROIMat.copyTo(Mat(imageTransform1, Rect(targetLinkPoint.x, 0, ROIMat.cols, image02.rows))); //append the non-overlapping part directly  
	namedWindow("拼接結果", 0);
	imshow("拼接結果", imageTransform1);
	imwrite("D:\\拼接結果.jpg", imageTransform1);
	waitKey();
	return 0;
}

//Map a point in the source image to its corresponding location in the target image after the matrix transform  
Point2f getTransformPoint(const Point2f originalPoint, const Mat &transformMatrix)
{
	Mat originalP, targetP;
	originalP = (Mat_<double>(3, 1) << originalPoint.x, originalPoint.y, 1.0);
	targetP = transformMatrix*originalP;
	float x = targetP.at<double>(0, 0) / targetP.at<double>(2, 0);
	float y = targetP.at<double>(1, 0) / targetP.at<double>(2, 0);
	return Point2f(x, y);
}


In OpenMP, the for directive distributes the iterations of a loop across threads, while the sections directive distributes independent, non-iterative tasks; each #pragma omp section block is executed by its own thread.

In the program above, this amounts to two threads performing feature extraction and description for the two images at the same time. With OpenMP the average time drops to about 2.5 s, so the speed nearly doubles.
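
Since every pixel in the overlap region is computed independently, the weighted blending loop in the program above is also a natural candidate for the for directive. The following sketch is my own addition, not part of the original program, and has not been benchmarked; it replaces the double loop shown earlier:

	//Parallelize the weighted blending: iterations write disjoint pixels, so they do not conflict
#pragma omp parallel for
	for (int i = 0; i < image1Overlap.rows; i++)
	{
		for (int j = 0; j < image1Overlap.cols; j++)
		{
			double weight = (double)j / image1Overlap.cols;  //blend weight varies with distance from the seam
			for (int c = 0; c < 3; c++)
			{
				image1Overlap.at<Vec3b>(i, j)[c] = saturate_cast<uchar>(
					(1 - weight)*image1ROICopy.at<Vec3b>(i, j)[c] + weight*image2Overlap.at<Vec3b>(i, j)[c]);
			}
		}
	}

Note that Visual Studio implements OpenMP 2.0, which requires the parallelized loop variable to be a signed integer, as it already is here.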


