(3) Visual Odometry


Let's first look at the slamBase.h header file under the include directory.

#pragma once

// Headers
// C++ standard library
#include <iostream>
#include <fstream>
#include <string>
#include <vector>
#include <map>
using namespace std;

// Eigen
#include <Eigen/Core>
#include <Eigen/Geometry>

// OpenCV
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/calib3d/calib3d.hpp>

// PCL
#include <pcl/io/pcd_io.h>
#include <pcl/point_types.h>
#include <pcl/common/transforms.h>
#include <pcl/visualization/cloud_viewer.h>
#include <pcl/filters/voxel_grid.h>                    // voxel grid filter, for downsampling
#include <pcl/filters/statistical_outlier_removal.h>   // statistical filter, for removing isolated points

// Type definitions
typedef pcl::PointXYZRGBA PointT;
typedef pcl::PointCloud<PointT> PointCloud;

// Camera intrinsics
struct CAMERA_INTRINSIC_PARAMETERS 
{ 
    double cx, cy, fx, fy, scale;
};

// Frame structure
struct FRAME
{
    cv::Mat rgb, depth;      // the frame's color image and depth image
    cv::Mat desp;            // feature descriptors
    vector<cv::KeyPoint> kp; // keypoints
};

// PnP result
struct RESULT_OF_PNP
{
    cv::Mat rvec, tvec;
    int inliers;
};

// Function interface
// image2PointCloud: convert an RGB image and a depth image into a point cloud
PointCloud::Ptr image2PointCloud( cv::Mat& rgb, cv::Mat& depth, CAMERA_INTRINSIC_PARAMETERS& camera );

// point2dTo3d: convert a single point from image coordinates to space coordinates
// input: a Point3f holding (u, v, d)
cv::Point3f point2dTo3d( cv::Point3f& point, CAMERA_INTRINSIC_PARAMETERS& camera );

// computeKeyPointsAndDesp: extract keypoints and feature descriptors in one go
// (the detector and descriptor type are hard-coded to ORB in the implementation)
void computeKeyPointsAndDesp( FRAME& frame );

// estimateMotion: compute the motion between two frames
// input: frame1, frame2, and the camera intrinsics
RESULT_OF_PNP estimateMotion( FRAME& frame1, FRAME& frame2, CAMERA_INTRINSIC_PARAMETERS& camera );

// cvMat2Eigen: convert OpenCV rotation and translation vectors into a transform of type Eigen::Isometry3d
Eigen::Isometry3d cvMat2Eigen( cv::Mat& rvec, cv::Mat& tvec );

// joinPointCloud: merge a new frame into an existing point cloud
PointCloud::Ptr joinPointCloud( PointCloud::Ptr original, FRAME& newFrame, Eigen::Isometry3d T, CAMERA_INTRINSIC_PARAMETERS& camera );

// Parameter reader
class ParameterReader
{

public:
    map<string, string> data;

public:
    ParameterReader( string filename="./parameters.txt" )
    {
        ifstream fin( filename.c_str() );
        if (!fin)
        {
            cerr<<"parameter file does not exist."<<endl;
            return;
        }
        string str;
        while ( getline( fin, str ) )
        {
            if (str.empty() || str[0] == '#')
            {
                // lines starting with '#' are comments
                continue;
            }
            string::size_type pos = str.find("=");
            if (pos == string::npos)
                continue;
            string key = str.substr( 0, pos );
            string value = str.substr( pos+1 );
            data[key] = value;
        }
    }
    string getData( string key )
    {
        map<string, string>::iterator iter = data.find(key);
        if (iter == data.end())
        {
            cerr<<"Parameter name "<<key<<" not found!"<<endl;
            return string("NOT_FOUND");
        }
        return iter->second;
    }

};

inline static CAMERA_INTRINSIC_PARAMETERS getDefaultCamera()
{
    ParameterReader pd;
    CAMERA_INTRINSIC_PARAMETERS camera;
    camera.fx = atof( pd.getData( "camera.fx" ).c_str());
    camera.fy = atof( pd.getData( "camera.fy" ).c_str());
    camera.cx = atof( pd.getData( "camera.cx" ).c_str());
    camera.cy = atof( pd.getData( "camera.cy" ).c_str());
    camera.scale = atof( pd.getData( "camera.scale" ).c_str() );
    return camera;
}

Reading the camera intrinsics from parameters.txt means the parameters are not hard-coded into the program: changing them only requires editing the parameter file, not recompiling.


getDefaultCamera() returns a CAMERA_INTRINSIC_PARAMETERS struct holding all the camera intrinsics, and relies on the ParameterReader class. ParameterReader parses the parameter file into data (on each line, the text left of '=' is the key and the text right of it is the value), and its member function getData() looks up the value for a given key, as in camera.fx = atof( pd.getData( "camera.fx" ).c_str() ).
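A minimal usage sketch of these two pieces together (assuming a parameters.txt in the working directory with the entries listed at the end of this post):

#include "slamBase.h"

int main()
{
    ParameterReader pd; // parses ./parameters.txt into the data map
    cout << "fx = " << pd.getData("camera.fx") << endl; // prints the raw string value, e.g. 979.8

    CAMERA_INTRINSIC_PARAMETERS camera = getDefaultCamera(); // all five intrinsics at once
    cout << "scale = " << camera.scale << endl;
    return 0;
}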

 

The functions declared in slamBase.h are implemented in slamBase.cpp:

#include "slamBase.h"

PointCloud::Ptr image2PointCloud( cv::Mat& rgb, cv::Mat& depth, CAMERA_INTRINSIC_PARAMETERS& camera )
{
    PointCloud::Ptr cloud ( new PointCloud );

    for (int m = 0; m < depth.rows; m+=2)
        for (int n=0; n < depth.cols; n+=2)
        {
            // 獲取深度圖中(m,n)處的值
            ushort d = depth.ptr<ushort>(m)[n];
            // d 可能沒有值,若如此,跳過此點
            if (d == 0 || d>=4096)
                continue;
            // d 存在值,則向點雲增加一個點
            PointT p;

            // 計算這個點的空間坐標
            p.z = double(d) / camera.scale;
            p.x = (n - camera.cx) * p.z / camera.fx;
            p.y = (m - camera.cy) * p.z / camera.fy;
            
            // 從rgb圖像中獲取它的顏色
            // rgb是三通道的BGR格式圖,所以按下面的順序獲取顏色
            p.b = rgb.ptr<uchar>(m)[n*3];
            p.g = rgb.ptr<uchar>(m)[n*3+1];
            p.r = rgb.ptr<uchar>(m)[n*3+2];

            // 把p加入到點雲中
            cloud->points.push_back( p );
        }
    // 設置並保存點雲
    cloud->height = 1;
    cloud->width = cloud->points.size();
    cloud->is_dense = false;

    return cloud;
}

cv::Point3f point2dTo3d( cv::Point3f& point, CAMERA_INTRINSIC_PARAMETERS& camera )
{
    cv::Point3f p; // the 3D point
    p.z = double( point.z ) / camera.scale;
    p.x = ( point.x - camera.cx) * p.z / camera.fx;
    p.y = ( point.y - camera.cy) * p.z / camera.fy;
    return p;
}
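A worked example with the intrinsics listed in parameters.txt (fx=979.8, fy=942.8, cx=682.3, cy=254.9, scale=1000): a pixel at (u, v) = (700, 300) with raw depth d = 1500 back-projects to

    z = 1500 / 1000 = 1.5 m
    x = (700 - 682.3) * 1.5 / 979.8 ≈ 0.027 m
    y = (300 - 254.9) * 1.5 / 942.8 ≈ 0.072 m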

// computeKeyPointsAndDesp: extract keypoints and feature descriptors in one go
void computeKeyPointsAndDesp( FRAME& frame )
{
    cv::Ptr<cv::FeatureDetector> _detector;        // keypoint detector
    cv::Ptr<cv::DescriptorExtractor> _descriptor;  // descriptor extractor

    _detector = cv::ORB::create();
    _descriptor = cv::ORB::create();

    _detector->detect( frame.rgb, frame.kp );                // frame.rgb -> keypoints frame.kp
    _descriptor->compute( frame.rgb, frame.kp, frame.desp ); // (frame.rgb, frame.kp) -> descriptors frame.desp

    return;
}

// estimateMotion: compute the motion between two frames
// input: frame1 and frame2
// output: rvec and tvec
RESULT_OF_PNP estimateMotion( FRAME& frame1, FRAME& frame2, CAMERA_INTRINSIC_PARAMETERS& camera )
{
    static ParameterReader pd;
    vector< cv::DMatch > matches;
    cv::BFMatcher matcher;
    matcher.match( frame1.desp, frame2.desp, matches );
   
    RESULT_OF_PNP result;
    vector< cv::DMatch > goodMatches;
    double minDis = 9999;
    double good_match_threshold = atof( pd.getData( "good_match_threshold" ).c_str() );
    // find the minimum match distance
    for ( size_t i=0; i<matches.size(); i++ )
    {
        if ( matches[i].distance < minDis )
            minDis = matches[i].distance;
    }
    
    cout<<"min dis = "<<minDis<<endl;
    if ( minDis < 10 ) 
        minDis = 10;

    // keep a match only if its distance is below good_match_threshold times the minimum
    for ( size_t i=0; i<matches.size(); i++ )
    {
        if (matches[i].distance < good_match_threshold*minDis )
            goodMatches.push_back( matches[i] );
    }

    cout<<"good matches: "<<goodMatches.size()<<endl;

    if (goodMatches.size() <= 5) // too few matches: skip the rest of the computation
    {
        result.inliers = -1;
        return result;
    }
    // 3D points from the first frame
    vector<cv::Point3f> pts_obj;
    // image (pixel) points from the second frame
    vector< cv::Point2f > pts_img;

    // build pts_obj and pts_img from the good matches
    for (size_t i=0; i<goodMatches.size(); i++)
    {
        // query is the first frame, train is the second
        cv::Point2f p = frame1.kp[goodMatches[i].queryIdx].pt;
        // be careful when fetching d: x points right and y points down, so y is the row and x is the column!
        ushort d = frame1.depth.ptr<ushort>( int(p.y) )[ int(p.x) ];
        if (d == 0 || d >= 4096)
            continue;
        pts_img.push_back( cv::Point2f( frame2.kp[goodMatches[i].trainIdx].pt ) );

        // convert (u, v, d) to (x, y, z)
        cv::Point3f pt ( p.x, p.y, d );
        cv::Point3f pd = point2dTo3d( pt, camera );
        pts_obj.push_back( pd );
    }

    if (pts_obj.size() == 0 || pts_img.size() == 0)
    {
        result.inliers = -1;
        return result;
    }

    // camera intrinsics as a plain array
    double camera_matrix_data[3][3] = {
        {camera.fx, 0, camera.cx},
        {0, camera.fy, camera.cy},
        {0, 0, 1}
    };

    // build the camera matrix
    cv::Mat cameraMatrix( 3, 3, CV_64F, camera_matrix_data );
    cv::Mat rvec, tvec, inliers; // rvec: rotation vector, tvec: translation vector
    // solve PnP with RANSAC
    cv::solvePnPRansac( pts_obj, pts_img, cameraMatrix, cv::Mat(), rvec, tvec, false, 100, 1.0, 0.95, inliers, cv::SOLVEPNP_ITERATIVE );

    result.rvec = rvec;
    result.tvec = tvec;
    result.inliers = inliers.rows;

    return result;
}


// cvMat2Eigen (R, t -> T): convert OpenCV rotation and translation vectors into a transform of type Eigen::Isometry3d
Eigen::Isometry3d cvMat2Eigen( cv::Mat& rvec, cv::Mat& tvec )
{
    cv::Mat R;
    cv::Rodrigues( rvec, R ); // rotation vector -> 3x3 rotation matrix
    Eigen::Matrix3d r;
    for ( int i=0; i<3; i++ )
        for ( int j=0; j<3; j++ ) 
            r(i,j) = R.at<double>(i,j);
  
    // combine the rotation matrix and translation vector into a transform matrix
    Eigen::Isometry3d T = Eigen::Isometry3d::Identity();

    Eigen::AngleAxisd angle(r);
    T = angle;
    T(0,3) = tvec.at<double>(0,0); 
    T(1,3) = tvec.at<double>(1,0); 
    T(2,3) = tvec.at<double>(2,0);
    return T;
}
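A small usage sketch (assuming the usual solvePnP convention, under which rvec/tvec map points from the first frame's camera coordinates into the second frame's):

// result is the RESULT_OF_PNP returned by estimateMotion( frame1, frame2, camera )
Eigen::Isometry3d T = cvMat2Eigen( result.rvec, result.tvec );
Eigen::Vector3d p1( 0.1, 0.0, 1.5 ); // a point in frame1's camera coordinates
Eigen::Vector3d p2 = T * p1;         // the same point expressed in frame2's coordinates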

// joinPointCloud
// input: the original cloud, the new frame, and its pose
// output: the cloud with the new frame merged in
PointCloud::Ptr joinPointCloud( PointCloud::Ptr original, FRAME& newFrame, Eigen::Isometry3d T, CAMERA_INTRINSIC_PARAMETERS& camera ) 
{
    PointCloud::Ptr newCloud = image2PointCloud( newFrame.rgb, newFrame.depth, camera ); // the new frame's cloud
  
    // depth filter and statistical removal
    PointCloud::Ptr df ( new PointCloud );
    pcl::StatisticalOutlierRemoval<PointT> statistical_filter; // newCloud -> df
    statistical_filter.setMeanK(50);
    statistical_filter.setStddevMulThresh(1.0);
    statistical_filter.setInputCloud(newCloud);
    statistical_filter.filter( *df );

    // merge the clouds
    PointCloud::Ptr output (new PointCloud());
    pcl::transformPointCloud( *original, *output, T.matrix() ); // transform the old cloud into the current frame's coordinate system
    *df += *output; // new cloud + previous cloud

    // after adding newCloud, downsample the merged cloud
    // voxel grid filter for downsampling
    static pcl::VoxelGrid<PointT> voxel;
    static ParameterReader pd; // parameter reader
    double gridsize = atof( pd.getData("voxel_grid").c_str() ); // resolution
    voxel.setLeafSize( gridsize, gridsize, gridsize ); // df -> tmp
    voxel.setInputCloud( df );
    PointCloud::Ptr tmp( new PointCloud() );
    voxel.filter( *tmp );
    return tmp;
}

image2PointCloud converts an RGB image and the corresponding depth image into a point cloud (taking the camera intrinsics struct as a parameter).

The line ushort d = depth.ptr<ushort>(m)[n]; reads the depth value at row m, column n of the depth image depth. Each PointT p carries a position (p.x, p.y, p.z) and an RGB color (p.b, p.g, p.r); every valid point p is pushed into the point cloud cloud.
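A minimal sketch of calling image2PointCloud directly (the file paths are placeholders following the directory layout in parameters.txt):

cv::Mat rgb   = cv::imread( "./data/rgb_png/1.png" );
cv::Mat depth = cv::imread( "./data/depth_png/1.png", -1 ); // -1 loads the 16-bit depth image unchanged
CAMERA_INTRINSIC_PARAMETERS camera = getDefaultCamera();
PointCloud::Ptr cloud = image2PointCloud( rgb, depth, camera );
pcl::io::savePCDFile( "./cloud.pcd", *cloud );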

Because the frame is passed by reference, the function does not need to return anything: computeKeyPointsAndDesp( FRAME& frame ) receives the whole FRAME struct (rgb, depth, desp, kp) and fills in frame.kp and frame.desp, both computed from frame.rgb (the color image).
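For example (a sketch; readFrame is defined in visualOdometry.cpp below):

ParameterReader pd;
FRAME f = readFrame( 1, pd ); // fills f.rgb and f.depth
computeKeyPointsAndDesp( f ); // fills f.kp and f.desp in place
cout << "keypoints: " << f.kp.size() << ", descriptors: " << f.desp.rows << endl;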


estimateMotion( FRAME& frame1, FRAME& frame2, CAMERA_INTRINSIC_PARAMETERS& camera ) returns a RESULT_OF_PNP struct, whose fields are the rotation vector rvec, the translation vector tvec, and the inlier count inliers. The core call is cv::solvePnPRansac(); for details see

https://blog.csdn.net/jay463261929/article/details/53818611

https://docs.opencv.org/master/d9/d0c/group__calib3d.html#ga50620f0e26e02caa2e9adc07b5fbf24e
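Reading the arguments of the call used above against the OpenCV signature:

cv::solvePnPRansac(
    pts_obj,       // 3D points in the first frame
    pts_img,       // their matched 2D pixel positions in the second frame
    cameraMatrix,  // 3x3 intrinsic matrix built from camera.fx/fy/cx/cy
    cv::Mat(),     // distortion coefficients (empty: assume no distortion)
    rvec, tvec,    // output rotation vector and translation vector
    false,         // useExtrinsicGuess: do not start from an initial pose estimate
    100,           // number of RANSAC iterations
    1.0,           // reprojection error threshold in pixels for counting an inlier
    0.95,          // desired confidence
    inliers,       // output indices of the inlier correspondences
    cv::SOLVEPNP_ITERATIVE ); // underlying PnP solver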

The most important part of the function is the loop that builds the 3D-2D correspondences:

 

    // build pts_img and pts_obj from the good matches
    for (size_t i=0; i<goodMatches.size(); i++)
    {
        // query is the first frame, train is the second
        cv::Point2f p = frame1.kp[goodMatches[i].queryIdx].pt; // pixel coordinates of the match in the first image
        // be careful when fetching d: x points right and y points down, so y is the row and x is the column!
        ushort d = frame1.depth.ptr<ushort>( int(p.y) )[ int(p.x) ]; // depth at those coordinates
        if (d == 0 || d >= 4096)
            continue;
        // convert (u, v, d) to (x, y, z)
        cv::Point3f pt ( p.x, p.y, d );
        cv::Point3f pd = point2dTo3d( pt, camera );
        pts_obj.push_back( pd );

        pts_img.push_back( cv::Point2f( frame2.kp[goodMatches[i].trainIdx].pt ) ); // pixel coordinates of the match in the second image

    }

This loop yields pts_obj, the 3D space coordinates of the matched features in the first image, and pts_img, the pixel coordinates of the corresponding features in the second image.

frame1.kp[goodMatches[i].queryIdx] (a feature in frame1) corresponds to frame2.kp[goodMatches[i].trainIdx] (a feature in frame2); the .pt member is the keypoint's pixel coordinate.
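The query/train naming comes from the matcher call matcher.match( frame1.desp, frame2.desp, matches ): frame1's descriptors are the query set and frame2's are the train set, so queryIdx indexes into frame1.kp and trainIdx into frame2.kp.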

Point cloud merging:


joinPointCloud uses two filters: a statistical outlier removal filter, which discards isolated noise points, and a voxel grid filter, which downsamples the cloud so that each voxel (a cube of the configured size) keeps at most one point. With voxel_grid=0.005 from parameters.txt, the leaf size is 5 mm.

The visual odometry program, visualOdometry.cpp:

#include <iostream>
#include <fstream>
#include <sstream>
using namespace std;

#include "slamBase.h"

// read one frame of data for a given index
FRAME readFrame( int index, ParameterReader& pd );
// measure the magnitude of the motion
double normofTransform( cv::Mat rvec, cv::Mat tvec );

int main( int argc, char** argv )
{
    ParameterReader pd;
    int startIndex  =   atoi( pd.getData( "start_index" ).c_str() );
    int endIndex    =   atoi( pd.getData( "end_index"   ).c_str() );

    // initialize
    cout<<"Initializing ..."<<endl;
    int currIndex = startIndex; // the current index
    FRAME lastFrame = readFrame( currIndex, pd ); // the previous frame
    // we always compare currFrame with lastFrame
    CAMERA_INTRINSIC_PARAMETERS camera = getDefaultCamera(); // camera intrinsics
    computeKeyPointsAndDesp( lastFrame );
    PointCloud::Ptr cloud = image2PointCloud( lastFrame.rgb, lastFrame.depth, camera );
    
    pcl::visualization::CloudViewer viewer("viewer");

    // whether to display the point cloud
    bool visualize = pd.getData("visualize_pointcloud")==string("yes");

    int min_inliers = atoi( pd.getData("min_inliers").c_str() );
    double max_norm = atof( pd.getData("max_norm").c_str() );

    for ( currIndex=startIndex+1; currIndex<endIndex; currIndex++ )
    {
        cout<<"Reading files "<<currIndex<<endl;
        FRAME currFrame = readFrame( currIndex, pd ); // read currFrame
        computeKeyPointsAndDesp( currFrame );
        // compare currFrame with lastFrame
        RESULT_OF_PNP result = estimateMotion( lastFrame, currFrame, camera );
        if ( result.inliers < min_inliers ) // not enough inliers: discard this frame
            continue;
        // check whether the motion is too large
        double norm = normofTransform(result.rvec, result.tvec);
        cout<<"norm = "<<norm<<endl;
        if ( norm >= max_norm )
        {
            cout<<"\033[41;36m move too much \033[0m "<<endl;
            continue;
        }
        Eigen::Isometry3d T = cvMat2Eigen( result.rvec, result.tvec );
        cout<<"T="<<T.matrix()<<endl;
        
        cloud = joinPointCloud( cloud, currFrame, T, camera );

        // Flip the cloud for display only: T1 = diag(1, -1, -1) is a 180-degree
        // rotation about the x axis, so the viewer shows the scene upright.
        PointCloud::Ptr Cloudreverse (new PointCloud());
        Eigen::Isometry3d T1 = Eigen::Isometry3d::Identity();
        T1(1,1) = -1;
        T1(2,2) = -1;
        pcl::transformPointCloud( *cloud, *Cloudreverse, T1.matrix() );

        if ( visualize == true )
            viewer.showCloud( Cloudreverse );

        lastFrame = currFrame;
    }

    pcl::io::savePCDFile( "data/result.pcd", *cloud );
    return 0;
}

FRAME readFrame( int index, ParameterReader& pd )
{
    FRAME f;
    string rgbDir   =   pd.getData("rgb_dir");
    string depthDir =   pd.getData("depth_dir");
    
    string rgbExt   =   pd.getData("rgb_extension");
    string depthExt =   pd.getData("depth_extension");

    stringstream ss;
    ss<<rgbDir<<index<<rgbExt;
    string filename;
    ss>>filename;
    f.rgb = cv::imread( filename );

    ss.clear();
    filename.clear();
    ss<<depthDir<<index<<depthExt;
    ss>>filename;

    f.depth = cv::imread( filename, -1 );
    return f;
}
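For example, with rgb_dir=./data/rgb_png/ and rgb_extension=.png from parameters.txt, index 2 produces the filename ./data/rgb_png/2.png. The depth image is read with flag -1 (cv::IMREAD_UNCHANGED), so the 16-bit depth values are preserved rather than being converted to 8-bit.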

double normofTransform( cv::Mat rvec, cv::Mat tvec )
{
    return fabs(min(cv::norm(rvec), 2*M_PI-cv::norm(rvec)))+ fabs(cv::norm(tvec));
}
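normofTransform treats the length of rvec as a rotation angle, taking the shorter way around the circle (hence min(‖rvec‖, 2π − ‖rvec‖)), and adds the length of the translation vector. For example, a rotation of 0.1 rad combined with a translation of 0.05 m gives norm = 0.1 + 0.05 = 0.15, which is below the max_norm=0.3 threshold, so the frame would be accepted.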

The parameter file (parameters.txt):

# feature type
detector=ORB
descriptor=ORB
# multiplier for filtering good matches
good_match_threshold=10

# camera intrinsics
camera.cx=682.3
camera.cy=254.9
camera.fx=979.8
camera.fy=942.8
camera.scale=1000.0

# part 5
# data settings
# start and end indices
start_index=1
end_index=300
# data directories
rgb_dir=./data/rgb_png/
rgb_extension=.png
depth_dir=./data/depth_png/
depth_extension=.png
# point cloud resolution (voxel grid leaf size, in meters)
voxel_grid=0.005
# visualize the point cloud in real time?
visualize_pointcloud=yes
# minimum number of good matches
min_good_match=10
# minimum number of inliers
min_inliers=5
# maximum motion norm
max_norm=0.3

 

Program flowchart: (figure omitted)

