圖像處理之圖像拼接二


上次的圖像拼接效果還是不夠好,有鬼影。所以一直在想怎么解決。

才發現基於拉普拉斯金字塔的圖像融合是可以的。然后就找到原來還有最佳拼接縫一說。然后發現opencv高版本是帶這個的,但是怎么解決呢?

http://blog.csdn.net/wd1603926823/article/details/49536691

http://blog.csdn.net/hanshuning/article/details/41960401

http://blog.csdn.net/manji_lee/article/details/9002228

 

 

還沒解決

 

1 找最佳縫合線模板

2 調用拉普拉斯進行多分辨率融合

 

opencv自帶的stitch 貌似必須2.4版本以上才行,代碼:

 1 #include "stdafx.h"
 2 #include <iostream>
 3 #include <fstream>
 4 #include <string>
 5 #include "opencv2/opencv_modules.hpp"
 6 #include "opencv2/highgui/highgui.hpp"
 7 #include "opencv2/stitching/detail/autocalib.hpp"
 8 #include "opencv2/stitching/detail/blenders.hpp"
 9 #include "opencv2/stitching/detail/camera.hpp"
10 #include "opencv2/stitching/detail/exposure_compensate.hpp"
11 #include "opencv2/stitching/detail/matchers.hpp"
12 #include "opencv2/stitching/detail/motion_estimators.hpp"
13 #include "opencv2/stitching/detail/seam_finders.hpp"
14 #include "opencv2/stitching/detail/util.hpp"
15 #include "opencv2/stitching/detail/warpers.hpp"
16 #include "opencv2/stitching/warpers.hpp"
17 #include <opencv2/stitching/stitcher.hpp>
18 #include<time.h>
19 using namespace std;
20 using namespace cv;
21 using namespace cv::detail;
22 //定義參數
23 
24 bool try_use_gpu = false;
25 vector<Mat> imgs;
26 string result_name = "result.jpg";
27 
28 int main()
29 {
30     Mat img = imread("1.jpg");
31     imgs.push_back(img);
32     img = imread("2.jpg");
33     imgs.push_back(img);
34 
35     Mat pano;
36     Stitcher stitcher = Stitcher::createDefault(try_use_gpu);
37     Stitcher::Status status = stitcher.stitch(imgs, pano);
38 
39     if (status != Stitcher::OK)
40     {
41         cout << "Can't stitch images, error code = " << int(status) << endl;
42         return -1;
43     }
44 
45     imwrite(result_name, pano);
46     return 0;
47     system("pause");
48     return 0;
49 }

傻瓜式的,無法改寫的設置。

然后改成另外一個可以改寫的版本:

  1 #include "stdafx.h"
  2 #include <iostream>
  3 #include <fstream>
  4 #include <string>
  5 #include "opencv2/opencv_modules.hpp"
  6 #include "opencv2/highgui/highgui.hpp"
  7 #include "opencv2/stitching/detail/autocalib.hpp"
  8 #include "opencv2/stitching/detail/blenders.hpp"
  9 #include "opencv2/stitching/detail/camera.hpp"
 10 #include "opencv2/stitching/detail/exposure_compensate.hpp"
 11 #include "opencv2/stitching/detail/matchers.hpp"
 12 #include "opencv2/stitching/detail/motion_estimators.hpp"
 13 #include "opencv2/stitching/detail/seam_finders.hpp"
 14 #include "opencv2/stitching/detail/util.hpp"
 15 #include <opencv2/stitching/warpers.hpp>
 16 #include <opencv2/stitching/stitcher.hpp>
 17 #include<time.h>
 18 using namespace std;
 19 using namespace cv;
 20 using namespace cv::detail;
 21 //定義參數
 22 
 23 bool try_use_gpu = false;
 24 vector<Mat> imgs;
 25 string result_name = "result.jpg";
 26 
 27 int main()
 28 {
 29     clock_t start, finish;
 30     double totaltime;
 31     start = clock();
 32     int filenum = 2;
 33     char* fdir[] = { "1.jpg", "2.jpg", "3.jpg", "4.jpg", "5.jpg"};
 34     Mat img, pano;
 35     for (int i = 0; i < filenum; i++){
 36         img = imread(fdir[i]);
 37         imgs.push_back(img);
 38     }
 39     Stitcher stitcher = Stitcher::createDefault(try_use_gpu);
 40     stitcher.setRegistrationResol(0.6);//為了加速,我選0.1,默認是0.6,最大值1最慢,此方法用於特征點檢測階段,如果找不到特征點,調高吧
 41     //stitcher.setSeamEstimationResol(0.1);//默認是0.1
 42     //stitcher.setCompositingResol(-1);//默認是-1,用於特征點檢測階段,找不到特征點的話,改-1
 43     stitcher.setPanoConfidenceThresh(1);//默認是1,見過有設0.6和0.4的
 44     stitcher.setWaveCorrection(false);//默認是true,為加速選false,表示跳過WaveCorrection步驟
 45     //stitcher.setWaveCorrectKind(detail::WAVE_CORRECT_HORIZ);//還可以選detail::WAVE_CORRECT_VERT ,波段修正(wave correction)功能(水平方向/垂直方向修正)。因為setWaveCorrection設的false,此語句沒用
 46 
 47     //找特征點surf算法,此算法計算量大,但對剛體運動、縮放、環境影響等情況下較為穩定
 48     detail::SurfFeaturesFinder *featureFinder = new detail::SurfFeaturesFinder();
 49     stitcher.setFeaturesFinder(featureFinder);
 50 
 51     //找特征點ORB算法,但是發現草地這組圖,這個算法不能完成拼接
 52     //detail::OrbFeaturesFinder *featureFinder = new detail::OrbFeaturesFinder();
 53     //stitcher.setFeaturesFinder(featureFinder);
 54 
 55     //Features matcher which finds two best matches for each feature and leaves the best one only if the ratio between descriptor distances is greater than the threshold match_conf.
 56     detail::BestOf2NearestMatcher *matcher = new detail::BestOf2NearestMatcher(false, 0.5f/*=match_conf默認是0.65,我選0.8,選太大了就沒特征點啦,0.8都失敗了*/);
 57     stitcher.setFeaturesMatcher(matcher);
 58 
 59     // Rotation Estimation,It takes features of all images, pairwise matches between all images and estimates rotations of all cameras.
 60     //Implementation of the camera parameters refinement algorithm which minimizes sum of the distances between the rays passing through the camera center and a feature,這個耗時短
 61     stitcher.setBundleAdjuster(new detail::BundleAdjusterRay());
 62     //Implementation of the camera parameters refinement algorithm which minimizes sum of the reprojection error squares.
 63     //stitcher.setBundleAdjuster(new detail::BundleAdjusterReproj());
 64 
 65     //Seam Estimation
 66     //Minimum graph cut-based seam estimator
 67     //stitcher.setSeamFinder(new detail::GraphCutSeamFinder(detail::GraphCutSeamFinderBase::COST_COLOR));//默認就是這個
 68     //stitcher.setSeamFinder(new detail::GraphCutSeamFinder(detail::GraphCutSeamFinderBase::COST_COLOR_GRAD));//GraphCutSeamFinder的第二種形式
 69     //啥SeamFinder也不用,Stub seam estimator which does nothing.
 70     stitcher.setSeamFinder(new detail::NoSeamFinder);
 71     //Voronoi diagram-based seam estimator.
 72     //stitcher.setSeamFinder(new detail::VoronoiSeamFinder);
 73 
 74     //exposure compensators曝光補償
 75     //stitcher.setExposureCompensator(new detail::BlocksGainCompensator());//默認的就是這個
 76     //不要曝光補償
 77     stitcher.setExposureCompensator(new detail::NoExposureCompensator());
 78     //Exposure compensator which tries to remove exposure related artifacts by adjusting image intensities
 79     //stitcher.setExposureCompensator(new detail::detail::GainCompensator());
 80     //Exposure compensator which tries to remove exposure related artifacts by adjusting image block intensities  
 81     //stitcher.setExposureCompensator(new detail::detail::BlocksGainCompensator()); 
 82 
 83     //Image Blenders
 84     //Blender which uses multi-band blending algorithm 
 85     stitcher.setBlender(new detail::MultiBandBlender(try_use_gpu));//默認的是這個
 86     //Simple blender which mixes images at its borders
 87     //stitcher.setBlender(new detail::FeatherBlender());//這個簡單,耗時少
 88 
 89     //柱面?球面OR平面?默認為球面
 90     //PlaneWarper*  cw = new PlaneWarper();
 91     //SphericalWarper*  cw = new SphericalWarper();
 92     //CylindricalWarper*  cw = new CylindricalWarper();
 93     //stitcher.setWarper(cw);
 94 
 95     Stitcher::Status status = stitcher.estimateTransform(imgs);
 96     if (status != Stitcher::OK)
 97     {
 98         cout << "Can't stitch images, error code = " << int(status) << endl;
 99         return -1;
100     }
101     status = stitcher.composePanorama(pano);
102     if (status != Stitcher::OK)
103     {
104         cout << "Can't stitch images, error code = " << int(status) << endl;
105         return -1;
106     }
107     cout << "程序開始";
108     imwrite(result_name, pano);
109     finish = clock();
110     totaltime = (double)(finish - start) / CLOCKS_PER_SEC;
111     cout << "\n此程序的運行時間為" << totaltime << "秒!" << endl;
112     system("pause");
113     return 0;
114 }

可以設置一些,比如融合方式等。

但是我現在的問題是知道拼接量,怎么使用光照補償 曝光補償和多分辨率融合呢?

 還有一種方法就是分解opencv stitch過程


#include "stdafx.h"
#include <iostream>
#include <fstream>
#include <string>
#include "opencv2/opencv_modules.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/stitching/detail/autocalib.hpp"
#include "opencv2/stitching/detail/blenders.hpp"
#include "opencv2/stitching/detail/camera.hpp"
#include "opencv2/stitching/detail/exposure_compensate.hpp"
#include "opencv2/stitching/detail/matchers.hpp"
#include "opencv2/stitching/detail/motion_estimators.hpp"
#include "opencv2/stitching/detail/seam_finders.hpp"
#include "opencv2/stitching/detail/util.hpp"
#include "opencv2/stitching/detail/warpers.hpp"
#include "opencv2/stitching/warpers.hpp"
#include <time.h>
using namespace std;
using namespace cv;
using namespace cv::detail;

// Parameters
vector<string> img_names;   // input image file names
bool try_gpu = false;
double work_megapix = 1;    // registration resolution: images resized to ~work_megapix*10^6 px for matching
double seam_megapix = 0.1;  // seam estimation resolution
double compose_megapix = 0.6; // compositing resolution
float conf_thresh = 1.f;    // confidence that two images belong to the same panorama
WaveCorrectKind wave_correct = detail::WAVE_CORRECT_HORIZ; // wave correction, horizontal
int expos_comp_type = ExposureCompensator::GAIN_BLOCKS;    // exposure compensation method (default gain_blocks)
float match_conf = 0.65f;   // match confidence: nearest / second-nearest distance ratio (SURF default 0.65)
int blend_type = Blender::MULTI_BAND; // blending method (default multi-band)
float blend_strength = 5;   // blending strength, 0-100 (default 5)
string result_name = "result.jpg";    // output file name

// Decomposed OpenCV stitching pipeline (after samples/cpp/stitching_detailed.cpp):
// feature detection -> pairwise matching -> camera estimation -> bundle
// adjustment -> wave correction -> warping -> exposure compensation ->
// seam finding -> compositing / blending.
// Returns 0 on success, -1 on failure.
int main()
{
    clock_t start, finish;
    double totaltime;
    start = clock();

    int argc = 2; // number of images actually used
    // FIX: string literals are const — use const char*.
    const char* argv[] = { "1.jpg", "2.jpg", "3.jpg", "4.jpg", "5.jpg", "6.jpg", "7.jpg" };
    for (int i = 0; i < argc; ++i)
        img_names.push_back(argv[i]);
    int num_images = static_cast<int>(img_names.size());
    double work_scale = 1, seam_scale = 1, compose_scale = 1;

    // --- Feature detection (on work-scale copies) ---
    cout << "Finding features..." << endl;

    Ptr<FeaturesFinder> finder;
    finder = new SurfFeaturesFinder(); // SURF keypoint detection + descriptors

    Mat full_img, img;
    vector<ImageFeatures> features(num_images);
    vector<Mat> images(num_images);
    vector<Size> full_img_sizes(num_images);
    double seam_work_aspect = 1;

    for (int i = 0; i < num_images; ++i)
    {
        // FIX: read directly (the original did a pointless same-size resize
        // from full_img1 into full_img) and abort on an unreadable file.
        full_img = imread(img_names[i]);
        if (full_img.empty())
        {
            cout << "Can't read image " << img_names[i] << endl;
            return -1;
        }
        full_img_sizes[i] = full_img.size();

        // Scale so the image area used for matching is at most work_megapix*10^6.
        work_scale = min(1.0, sqrt(work_megapix * 1e6 / full_img.size().area()));
        resize(full_img, img, Size(), work_scale, work_scale);

        // Seam-estimation scale, and its ratio to the work scale.
        seam_scale = min(1.0, sqrt(seam_megapix * 1e6 / full_img.size().area()));
        seam_work_aspect = seam_scale / work_scale;

        // Detect keypoints, compute descriptors, tag with the image index.
        (*finder)(img, features[i]);
        features[i].img_idx = i;
        cout << "Features in image #" << i + 1 << ": " << features[i].keypoints.size() << endl;

        // Keep a seam-scale copy for seam estimation.
        resize(full_img, img, Size(), seam_scale, seam_scale);
        images[i] = img.clone();
    }

    finder->collectGarbage();
    full_img.release();
    img.release();

    // --- Pairwise matching ---
    cout << "Pairwise matching" << endl;

    // Nearest / second-nearest neighbour matching between every image pair.
    vector<MatchesInfo> pairwise_matches;
    BestOf2NearestMatcher matcher(try_gpu, match_conf);
    matcher(features, pairwise_matches);
    matcher.collectGarbage();

    // Keep only the images that confidently belong to the same panorama.
    vector<int> indices = leaveBiggestComponent(features, pairwise_matches, conf_thresh);
    vector<Mat> img_subset;
    vector<string> img_names_subset;
    vector<Size> full_img_sizes_subset;
    for (size_t i = 0; i < indices.size(); ++i)
    {
        img_names_subset.push_back(img_names[indices[i]]);
        img_subset.push_back(images[indices[i]]);
        full_img_sizes_subset.push_back(full_img_sizes[indices[i]]);
    }

    images = img_subset;
    img_names = img_names_subset;
    full_img_sizes = full_img_sizes_subset;

    // Still enough images left?
    num_images = static_cast<int>(img_names.size());
    if (num_images < 2)
    {
        cout << "Need more images" << endl;
        return -1;
    }

    // --- Camera estimation ---
    HomographyBasedEstimator estimator; // homography-based initial estimate
    vector<CameraParams> cameras;       // camera parameters per image
    estimator(features, pairwise_matches, cameras);

    for (size_t i = 0; i < cameras.size(); ++i)
    {
        Mat R;
        cameras[i].R.convertTo(R, CV_32F);
        cameras[i].R = R;
        cout << "Initial intrinsics #" << indices[i] + 1 << ":\n" << cameras[i].K() << endl;
    }

    // Bundle adjustment: jointly refine all camera parameters by minimizing
    // the distance between features and rays through the camera centre.
    Ptr<detail::BundleAdjusterBase> adjuster;
    adjuster = new detail::BundleAdjusterRay();
    adjuster->setConfThresh(conf_thresh);
    // Refinement mask selects which intrinsic entries get refined.
    Mat_<uchar> refine_mask = Mat::zeros(3, 3, CV_8U);
    refine_mask(0, 0) = 1;
    refine_mask(0, 1) = 1;
    refine_mask(0, 2) = 1;
    refine_mask(1, 1) = 1;
    refine_mask(1, 2) = 1;
    adjuster->setRefinementMask(refine_mask);
    (*adjuster)(features, pairwise_matches, cameras);

    // Use the median focal length of all cameras as the warp scale.
    vector<double> focals;
    for (size_t i = 0; i < cameras.size(); ++i)
    {
        cout << "Camera #" << indices[i] + 1 << ":\n" << cameras[i].K() << endl;
        focals.push_back(cameras[i].focal);
    }

    sort(focals.begin(), focals.end());
    float warped_image_scale;
    if (focals.size() % 2 == 1)
        warped_image_scale = static_cast<float>(focals[focals.size() / 2]);
    else
        warped_image_scale = static_cast<float>(focals[focals.size() / 2 - 1] + focals[focals.size() / 2]) * 0.5f;

    // Wave correction: straighten the panorama horizon.
    vector<Mat> rmats;
    for (size_t i = 0; i < cameras.size(); ++i)
        rmats.push_back(cameras[i].R);
    waveCorrect(rmats, wave_correct);
    for (size_t i = 0; i < cameras.size(); ++i)
        cameras[i].R = rmats[i];

    // --- Warping ---
    cout << "Warping images ... " << endl;
    vector<Point> corners(num_images);  // top-left corners in the unified coordinate frame
    vector<Mat> masks_warped(num_images);
    vector<Mat> images_warped(num_images);
    vector<Size> sizes(num_images);
    vector<Mat> masks(num_images);      // blending masks

    // Prepare full-coverage masks for each image.
    for (int i = 0; i < num_images; ++i)
    {
        masks[i].create(images[i].size(), CV_8U);
        masks[i].setTo(Scalar::all(255));
    }

    // Warp the seam-scale images and their masks onto a sphere.
    Ptr<WarperCreator> warper_creator;
    warper_creator = new cv::SphericalWarper();
    Ptr<RotationWarper> warper = warper_creator->create(static_cast<float>(warped_image_scale * seam_work_aspect));

    for (int i = 0; i < num_images; ++i)
    {
        Mat_<float> K;
        cameras[i].K().convertTo(K, CV_32F);
        // Intrinsics were estimated at work scale; rescale to seam scale.
        float swa = (float)seam_work_aspect;
        K(0, 0) *= swa; K(0, 2) *= swa;
        K(1, 1) *= swa; K(1, 2) *= swa;

        corners[i] = warper->warp(images[i], K, cameras[i].R, INTER_LINEAR, BORDER_REFLECT, images_warped[i]);
        sizes[i] = images_warped[i].size();

        warper->warp(masks[i], K, cameras[i].R, INTER_NEAREST, BORDER_CONSTANT, masks_warped[i]);
    }

    vector<Mat> images_warped_f(num_images);
    for (int i = 0; i < num_images; ++i)
        images_warped[i].convertTo(images_warped_f[i], CV_32F);

    // Exposure compensation (gain_blocks by default).
    Ptr<ExposureCompensator> compensator = ExposureCompensator::createDefault(expos_comp_type);
    compensator->feed(corners, images_warped, masks_warped);

    // Seam finding: minimum graph cut with colour cost.
    Ptr<SeamFinder> seam_finder;
    seam_finder = new detail::GraphCutSeamFinder(GraphCutSeamFinderBase::COST_COLOR);
    seam_finder->find(images_warped_f, corners, masks_warped);

    // Release memory no longer needed.
    images.clear();
    images_warped.clear();
    images_warped_f.clear();
    masks.clear();

    // --- Compositing ---
    cout << "Compositing..." << endl;

    Mat img_warped, img_warped_s;
    Mat dilated_mask, seam_mask, mask, mask_warped;
    Ptr<Blender> blender;

    double compose_work_aspect = 1;
    bool is_compose_scale_set = false; // FIX: compose-scale setup must run exactly once

    for (int img_idx = 0; img_idx < num_images; ++img_idx)
    {
        cout << "Compositing image #" << indices[img_idx] + 1 << endl;

        // Read the full-resolution image for compositing.
        full_img = imread(img_names[img_idx]);

        // FIX: the original recomputed warped_image_scale and multiplied the
        // camera intrinsics (focal, ppx, ppy) by compose_work_aspect on EVERY
        // loop iteration, compounding the scale once per image. Following
        // samples/cpp/stitching_detailed.cpp this block must run only once.
        if (!is_compose_scale_set)
        {
            compose_scale = min(1.0, sqrt(compose_megapix * 1e6 / full_img.size().area()));
            is_compose_scale_set = true;
            compose_work_aspect = compose_scale / work_scale;

            // Update the warp scale for compositing resolution.
            warped_image_scale *= static_cast<float>(compose_work_aspect);
            warper = warper_creator->create(warped_image_scale);

            // Rescale camera intrinsics and recompute corners and sizes.
            for (int i = 0; i < num_images; ++i)
            {
                cameras[i].focal *= compose_work_aspect;
                cameras[i].ppx *= compose_work_aspect;
                cameras[i].ppy *= compose_work_aspect;

                Size sz = full_img_sizes[i];
                if (std::abs(compose_scale - 1) > 1e-1)
                {
                    sz.width = cvRound(full_img_sizes[i].width * compose_scale);
                    sz.height = cvRound(full_img_sizes[i].height * compose_scale);
                }

                Mat K;
                cameras[i].K().convertTo(K, CV_32F);
                Rect roi = warper->warpRoi(sz, K, cameras[i].R);
                corners[i] = roi.tl();
                sizes[i] = roi.size();
            }
        }

        if (abs(compose_scale - 1) > 1e-1)
            resize(full_img, img, Size(), compose_scale, compose_scale);
        else
            img = full_img;
        full_img.release();
        Size img_size = img.size();

        Mat K;
        cameras[img_idx].K().convertTo(K, CV_32F);

        // Warp the current image.
        warper->warp(img, K, cameras[img_idx].R, INTER_LINEAR, BORDER_REFLECT, img_warped);

        // Warp the current image mask.
        mask.create(img_size, CV_8U);
        mask.setTo(Scalar::all(255));
        warper->warp(mask, K, cameras[img_idx].R, INTER_NEAREST, BORDER_CONSTANT, mask_warped);

        // Exposure compensation.
        compensator->apply(img_idx, corners[img_idx], img_warped, mask_warped);

        img_warped.convertTo(img_warped_s, CV_16S); // blender expects CV_16S input
        img_warped.release();
        img.release();
        mask.release();

        // Restrict the mask to the estimated seam (dilated to avoid gaps at
        // the lower seam-estimation resolution).
        dilate(masks_warped[img_idx], dilated_mask, Mat());
        resize(dilated_mask, seam_mask, mask_warped.size());
        mask_warped = seam_mask & mask_warped;

        // Lazily initialise the blender once corners/sizes are final.
        if (blender.empty())
        {
            blender = Blender::createDefault(blend_type, try_gpu);
            Size dst_sz = resultRoi(corners, sizes).size();
            float blend_width = sqrt(static_cast<float>(dst_sz.area())) * blend_strength / 100.f;
            if (blend_width < 1.f)
                blender = Blender::createDefault(Blender::NO, try_gpu);
            else
            {
                MultiBandBlender* mb = dynamic_cast<MultiBandBlender*>(static_cast<Blender*>(blender));
                mb->setNumBands(static_cast<int>(ceil(log(blend_width) / log(2.)) - 1.));
                cout << "Multi-band blender, number of bands: " << mb->numBands() << endl;
            }
            // The panorama size follows from the corners and image sizes.
            blender->prepare(corners, sizes);
        }

        // Feed the current image into the blender.
        blender->feed(img_warped_s, mask_warped, corners[img_idx]);
    }

    Mat result, result_mask;
    blender->blend(result, result_mask);

    imwrite(result_name, result);

    namedWindow("result", 0);
    imshow("result", result);
    finish = clock();
    totaltime = (double)(finish - start) / CLOCKS_PER_SEC;
    cout << "\n此程序的運行時間為" << totaltime << "秒!" << endl;
    waitKey(0);
    system("pause");
    return 0; // FIX: removed the duplicated, unreachable waitKey/system/return that followed
}

 

 

 

 

上面的方法現在存在兩個問題:

1 conner 左上角的點是怎么獲得的?

2 有了左上角的點,用圖割方法獲得最佳分割線,然后調用拉普拉斯金字塔能否實現完美拼接。

 

 

 


免責聲明!

本站轉載的文章為個人學習借鑒使用,本站對版權不負任何法律責任。如果侵犯了您的隱私權益,請聯系本站郵箱yoyou2525@163.com刪除。



 
粵ICP備18138465號   © 2018-2025 CODEPRJ.COM