(2) Feature point matching, and solving for the rotation matrix R and the translation vector t


 

The include directory contains the header file slamBase.h:

#pragma once
// Various headers
// C++ standard library
#include <fstream>
#include <vector>
using namespace std;

// OpenCV
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>

//PCL
#include <pcl/io/pcd_io.h>
#include <pcl/point_types.h>

// Type definitions
typedef pcl::PointXYZRGBA PointT;
typedef pcl::PointCloud<PointT> PointCloud;

// Camera intrinsics struct
struct CAMERA_INTRINSIC_PARAMETERS 
{ 
    double cx, cy, fx, fy, scale;
};

// Function interfaces
// image2PointCloud: convert an RGB image and a depth image into a point cloud
PointCloud::Ptr image2PointCloud( cv::Mat& rgb, cv::Mat& depth, CAMERA_INTRINSIC_PARAMETERS& camera );

// point2dTo3d: convert a single point from image coordinates to 3D space coordinates
// input: a 3D point Point3f (u, v, d)
cv::Point3f point2dTo3d( cv::Point3f& point, CAMERA_INTRINSIC_PARAMETERS& camera );

The header has three parts: the camera intrinsics struct, the conversion of an RGB image and a depth image into a point cloud, and the conversion of a 2D pixel point into a 3D space point (the function prototypes above).
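
A minimal usage sketch of this interface, with placeholder file names and intrinsic values (they are not the ones used later in this post):

#include <iostream>
#include "slamBase.h"
#include <pcl/io/pcd_io.h>

int main()
{
    // Hypothetical camera intrinsics; replace them with your own calibration
    CAMERA_INTRINSIC_PARAMETERS camera;
    camera.cx = 325.5; camera.cy = 253.5;
    camera.fx = 518.0; camera.fy = 519.0;
    camera.scale = 1000.0;

    // Read an RGB image and its registered depth image (-1 keeps the 16-bit depth values)
    cv::Mat rgb   = cv::imread( "./data/rgb.png" );
    cv::Mat depth = cv::imread( "./data/depth.png", -1 );

    // Convert a single pixel (u, v, d) into a 3D point
    cv::Point3f uvd( 320, 240, depth.ptr<ushort>(240)[320] );
    cv::Point3f xyz = point2dTo3d( uvd, camera );
    std::cout << "3D point: " << xyz << std::endl;

    // Convert the whole image pair into a point cloud and save it
    PointCloud::Ptr cloud = image2PointCloud( rgb, depth, camera );
    pcl::io::savePCDFile( "./data/cloud.pcd", *cloud );
    return 0;
}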

The source file slamBase.cpp in src:

#include "slamBase.h"

// Take rgb, depth, and the camera intrinsics, and return a point cloud
PointCloud::Ptr image2PointCloud( cv::Mat& rgb, cv::Mat& depth, CAMERA_INTRINSIC_PARAMETERS& camera )
{
    PointCloud::Ptr cloud ( new PointCloud );

    for (int m = 0; m < depth.rows; m++)
        for (int n = 0; n < depth.cols; n++)
        {
            // Get the value at (m, n) in the depth image
            ushort d = depth.ptr<ushort>(m)[n];
            // d may have no value; if so, skip this point
            if (d == 0)
                continue;
            // d has a value, so add a point to the cloud
            PointT p;

            // Compute the 3D coordinates of this point
            p.z = double(d) / camera.scale;
            p.x = (n - camera.cx) * p.z / camera.fx;
            p.y = (m - camera.cy) * p.z / camera.fy;

            // Get its color from the RGB image
            // rgb is a three-channel BGR image, so read the channels in this order
            p.b = rgb.ptr<uchar>(m)[n*3];
            p.g = rgb.ptr<uchar>(m)[n*3+1];
            p.r = rgb.ptr<uchar>(m)[n*3+2];

            // Add p to the point cloud
            cloud->points.push_back( p );
        }

    // Set the cloud's metadata and return it
    cloud->height = 1;
    cloud->width = cloud->points.size();
    cloud->is_dense = false;
    return cloud;
}

// Convert a pixel coordinate to a 3D point
cv::Point3f point2dTo3d( cv::Point3f& point, CAMERA_INTRINSIC_PARAMETERS& camera )
{
    cv::Point3f p; // 3D point
    p.z = double( point.z ) / camera.scale;         // point.z holds d
    p.x = ( point.x - camera.cx) * p.z / camera.fx; // point.x holds u
    p.y = ( point.y - camera.cy) * p.z / camera.fy; // point.y holds v
    return p;
}

The CMakeLists.txt in the same folder as slamBase.cpp:

# Add the PCL dependency
FIND_PACKAGE( PCL REQUIRED COMPONENTS common io )

list(REMOVE_ITEM PCL_LIBRARIES "vtkproj4") # use this in Ubuntu 16.04

# Add the OpenCV dependency
FIND_PACKAGE( OpenCV REQUIRED )
INCLUDE_DIRECTORIES( ${OpenCV_INCLUDE_DIRS} )

# Add include directories and library directories
ADD_DEFINITIONS( ${PCL_DEFINITIONS} )
INCLUDE_DIRECTORIES( ${PCL_INCLUDE_DIRS}  )
LINK_DIRECTORIES( ${PCL_LIBRARY_DIRS} )

# Compile slamBase.cpp into the slambase library and link it against the OpenCV and PCL libraries it uses
ADD_LIBRARY( slambase slamBase.cpp )   # implementation file slamBase.cpp
TARGET_LINK_LIBRARIES( slambase
    ${OpenCV_LIBS} 
    ${PCL_LIBRARIES} )

ADD_EXECUTABLE( detectFeatures detectFeatures.cpp ) # executable detectFeatures.cpp
TARGET_LINK_LIBRARIES( detectFeatures 
    slambase
    ${OpenCV_LIBS} 
    ${PCL_LIBRARIES} )

Library target: ADD_LIBRARY( slambase slamBase.cpp ) together with TARGET_LINK_LIBRARIES( slambase ${OpenCV_LIBS} ${PCL_LIBRARIES} ).

Executable target: ADD_EXECUTABLE( detectFeatures detectFeatures.cpp ) together with TARGET_LINK_LIBRARIES( detectFeatures slambase ${OpenCV_LIBS} ${PCL_LIBRARIES} ), which links against the library above. Its source, detectFeatures.cpp:

#include <iostream>
#include "slamBase.h"
using namespace std;

// OpenCV feature detection module
#include <opencv2/features2d/features2d.hpp>
// #include <opencv2/nonfree/nonfree.hpp> // use this if you want to use SIFT or SURF
#include <opencv2/calib3d/calib3d.hpp>

int main( int argc, char** argv )
{
    // Read two RGB images and two depth images from the data folder
    cv::Mat rgb1 = cv::imread( "./data/rgb20.png");
    cv::Mat rgb2 = cv::imread( "./data/rgb21.png");
    cv::Mat depth1 = cv::imread( "./data/depth20.png", -1);
    cv::Mat depth2 = cv::imread( "./data/depth21.png", -1);

    // Declare the feature detector and the descriptor extractor
    cv::Ptr<cv::FeatureDetector> detector;
    cv::Ptr<cv::DescriptorExtractor> descriptor;

    // Construct the extractors; both default to ORB here

    // If using SIFT or SURF, initialize the nonfree module first
    // cv::initModule_nonfree();
    // _detector = cv::FeatureDetector::create( "SIFT" );
    // _descriptor = cv::DescriptorExtractor::create( "SIFT" );
    
    detector = cv::ORB::create();
    descriptor = cv::ORB::create();

    vector< cv::KeyPoint > kp1, kp2; // keypoints
    detector->detect( rgb1, kp1 );   // detect the keypoints
    detector->detect( rgb2, kp2 );

    cout<<"Key points of two images: "<<kp1.size()<<", "<<kp2.size()<<endl;
    
    // Visualization: show the keypoints
    cv::Mat imgShow;
    cv::drawKeypoints( rgb1, kp1, imgShow, cv::Scalar::all(-1), cv::DrawMatchesFlags::DRAW_RICH_KEYPOINTS );
    cv::namedWindow("Keypoints",0);
    cv::imshow( "keypoints", imgShow );
    cv::imwrite( "./data/keypoints.png", imgShow );
    cv::waitKey(0); // pause and wait for a key press
   
    // Compute the descriptors
    cv::Mat desp1, desp2;
    descriptor->compute( rgb1, kp1, desp1 );
    descriptor->compute( rgb2, kp2, desp2 );

    // Match the descriptors
    vector< cv::DMatch > matches; 
    cv::BFMatcher matcher( cv::NORM_HAMMING ); // ORB descriptors are binary, so match with the Hamming distance
    matcher.match( desp1, desp2, matches );
    cout<<"Find total "<<matches.size()<<" matches."<<endl;

    // Visualization: show the matched features
    cv::Mat imgMatches;
    cv::drawMatches( rgb1, kp1, rgb2, kp2, matches, imgMatches );
    cv::namedWindow("matches",0);
    cv::imshow( "matches", imgMatches );
    cv::imwrite( "./data/matches.png", imgMatches );
    cv::waitKey( 0 );

    // Filter the matches, discarding those with too large a distance
    // The criterion used here is to discard matches whose distance is more than ten times the minimum distance
    vector< cv::DMatch > goodMatches;
    double minDis = 9999;
    for ( size_t i=0; i<matches.size(); i++ )
    {
        if ( matches[i].distance < minDis )
            minDis = matches[i].distance;
    }
    cout<<"min dis = "<<minDis<<endl;

    for ( size_t i=0; i<matches.size(); i++ )
    {
        if (matches[i].distance < 10*minDis)
            goodMatches.push_back( matches[i] );
    }

    // Show the good matches
    cout<<"good matches="<<goodMatches.size()<<endl;
    cv::drawMatches( rgb1, kp1, rgb2, kp2, goodMatches, imgMatches );
    cv::namedWindow("good matches",0);
    cv::imshow( "good matches", imgMatches );
    cv::imwrite( "./data/good_matches.png", imgMatches );
    cv::waitKey(0);

    // Compute the motion between the two frames
    // Key function: cv::solvePnPRansac()
    // Prepare the arguments needed to call it
    
    // 3D points in the first frame
    vector<cv::Point3f> pts_obj;
    // 2D image points in the second frame
    vector< cv::Point2f > pts_img;

    // Camera intrinsics, using the struct defined in slamBase.h
    CAMERA_INTRINSIC_PARAMETERS C;
    C.cx = 682.3;
    C.cy = 254.9;
    C.fx = 979.8;
    C.fy = 942.8;
    C.scale = 1000.0;

    for (size_t i=0; i<goodMatches.size(); i++)
    {
        // query is the first image, train is the second
        cv::Point2f p = kp1[goodMatches[i].queryIdx].pt;
        // Be careful when reading d: x points right and y points down, so y is the row and x is the column!
        ushort d = depth1.ptr<ushort>( int(p.y) )[ int(p.x) ];
        if (d == 0)
            continue;
        pts_img.push_back( cv::Point2f( kp2[goodMatches[i].trainIdx].pt ) );

        // Convert (u, v, d) into (x, y, z)
        cv::Point3f pt ( p.x, p.y, d ); // depth value / scale gives z
        cv::Point3f pd = point2dTo3d( pt, C );
        pts_obj.push_back( pd );
    }
    // Camera matrix data, built from the intrinsics struct
    double camera_matrix_data[3][3] = {
        {C.fx, 0, C.cx},
        {0, C.fy, C.cy},
        {0, 0, 1}
    };

    // Construct the camera matrix
    cv::Mat cameraMatrix( 3, 3, CV_64F, camera_matrix_data );
    cv::Mat rvec, tvec, inliers;
    // Solve PnP with RANSAC
    cv::solvePnPRansac( pts_obj, pts_img, cameraMatrix, cv::Mat(), rvec, tvec, false, 100, 1.0, 0.95, inliers, cv::SOLVEPNP_ITERATIVE );
    // The rotation vector, the translation vector, and (below) the rotation matrix
    cout<<"inliers: "<<inliers.rows<<endl;
    cout<<"rvec="<<rvec<<endl;
    cout<<"tvec="<<tvec<<endl;
    // Rotation matrix: convert the rotation vector with Rodrigues' formula
    cv::Mat R;
    cv::Rodrigues( rvec, R );
    cout<<"R_matrix="<<R<<endl;

    // Draw the inlier matches
    vector< cv::DMatch > matchesShow;
    for (int i = 0; i < inliers.rows; i++)
    {
        matchesShow.push_back( goodMatches[inliers.ptr<int>(i)[0]] );
    }
    cv::drawMatches( rgb1, kp1, rgb2, kp2, matchesShow, imgMatches );
    cv::namedWindow( "inlier matches", 0 );
    cv::imshow( "inlier matches", imgMatches );
    cv::imwrite( "./data/inliers.png", imgMatches );
    cv::waitKey( 0 );
    return 0;
}

The program uses the intrinsics struct, and cv::Rodrigues(), which converts the rotation vector into a rotation matrix.
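
As a side note, here is a minimal sketch of how R and tvec could be packed into a single 4x4 homogeneous transform. This step is not part of the program above; the helper name rvecTvecToTransform is just for this sketch, and it assumes tvec is a 3x1 CV_64F vector, as returned by solvePnPRansac, with the usual [R | t; 0 0 0 1] layout:

#include <opencv2/core/core.hpp>
#include <opencv2/calib3d/calib3d.hpp>

// Pack rvec/tvec from solvePnPRansac into a 4x4 homogeneous transform T = [R | t; 0 0 0 1]
cv::Mat rvecTvecToTransform( const cv::Mat& rvec, const cv::Mat& tvec )
{
    cv::Mat R;
    cv::Rodrigues( rvec, R );                   // 3x3 rotation matrix from the rotation vector

    cv::Mat T = cv::Mat::eye( 4, 4, CV_64F );   // start from the 4x4 identity
    R.copyTo( T( cv::Rect(0, 0, 3, 3) ) );      // top-left 3x3 block = R
    tvec.copyTo( T( cv::Rect(3, 0, 1, 3) ) );   // top-right 3x1 block = t
    return T;
}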

OpenCV uses a "Random Sample Consensus" (RANSAC) approach here (see https://en.wikipedia.org/wiki/RANSAC).

For the cv::solvePnPRansac() function, see https://blog.csdn.net/jay463261929/article/details/53818611
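
As a reading aid, here is the same call as in the program above with each argument annotated, assuming the OpenCV 3.x signature of solvePnPRansac (the numeric values are simply the ones used above):

    cv::solvePnPRansac(
        pts_obj,                  // 3D points, taken from the first frame
        pts_img,                  // corresponding 2D pixel points in the second frame
        cameraMatrix,             // 3x3 camera intrinsic matrix
        cv::Mat(),                // distortion coefficients (empty: assume no distortion)
        rvec, tvec,               // outputs: rotation vector and translation vector
        false,                    // useExtrinsicGuess: do not use rvec/tvec as an initial guess
        100,                      // iterationsCount: number of RANSAC iterations
        1.0,                      // reprojectionError: inlier threshold in pixels
        0.95,                     // confidence: desired probability that the result is correct
        inliers,                  // output: indices of the inlier correspondences
        cv::SOLVEPNP_ITERATIVE ); // flags: method used for the underlying PnP solve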

 

The CMakeLists.txt in the same folder as the src and include directories:

CMAKE_MINIMUM_REQUIRED( VERSION 2.8 )
PROJECT( slam )

SET(CMAKE_CXX_COMPILER "g++")
SET( CMAKE_BUILD_TYPE Debug  )
SET(EXECUTABLE_OUTPUT_PATH ${PROJECT_SOURCE_DIR}/bin) # output folder for executables
SET(LIBRARY_OUTPUT_PATH ${PROJECT_SOURCE_DIR}/lib) # output folder for compiled libraries

INCLUDE_DIRECTORIES( ${PROJECT_SOURCE_DIR}/include ) # header files
LINK_DIRECTORIES( ${PROJECT_SOURCE_DIR}/lib) # use the compiled library file libslamBase.a

ADD_SUBDIRECTORY( ${PROJECT_SOURCE_DIR}/src ) # the executable program

 

Note: when the lib folder already contains the compiled library file libslamBase.a, you can remove ADD_LIBRARY( slambase slamBase.cpp ) and TARGET_LINK_LIBRARIES( slambase ${OpenCV_LIBS} ${PCL_LIBRARIES} ) from the first CMakeLists.txt, because slamBase.cpp has already been compiled.

Also remove SET(LIBRARY_OUTPUT_PATH ${PROJECT_SOURCE_DIR}/lib) from the second CMakeLists.txt.

 

