Main content
1. Solving the triangulation problem
See P153-154 of the book.
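A minimal sketch of the derivation on those pages (my reconstruction; $x_1, x_2$ are the normalized coordinates of a matched feature in the two frames, $s_1, s_2$ their depths, and $x_2^\wedge$ the skew-symmetric matrix of $x_2$):

$$ s_2 x_2 = s_1 R x_1 + t $$

Left-multiplying both sides by $x_2^\wedge$ makes the left side vanish, since $x_2^\wedge x_2 = 0$:

$$ s_1 x_2^\wedge R x_1 + x_2^\wedge t = 0 $$

This is linear in $s_1$ and can be solved by least squares; $s_2$ then follows from the first equation.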
2. 討論
1) Triangulation relies on translation: only with translation does the triangle of epipolar geometry exist.
Pure rotation makes triangulation impossible (see the short derivation after this list).
2) Uncertainty of triangulation
At the same camera resolution, a larger translation makes the triangulated measurement more precise.
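Why pure rotation fails can be read off from the equation above: setting $t = 0$ leaves

$$ s_1 x_2^\wedge R x_1 = 0 $$

Under pure rotation $R x_1$ is parallel to $x_2$ for a true match, so $x_2^\wedge R x_1 = 0$ holds identically and the equation is satisfied by every value of $s_1$: the depth is unobservable.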
3. Improving triangulation precision:
1) Improve the precision of feature point extraction, for example by increasing the image resolution.
Drawback: increased computational cost.
2) Increase the translation.
Drawback: the appearance of the images changes noticeably, and the appearance change makes feature extraction and matching harder (the trade-off inherent in triangulation). A rough quantitative picture follows this list.
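The effect of the translation can be made concrete with the stereo-style depth relation (an illustrative approximation I am adding here, not taken from the code): with focal length $f$, baseline $b$ (the translation), and disparity $d_p$ measured with pixel error $\delta d_p$, the depth is $z = f b / d_p$, so

$$ \delta z \approx \left|\frac{\partial z}{\partial d_p}\right| \delta d_p = \frac{z^2}{f b}\,\delta d_p $$

For a fixed pixel error, a larger baseline $b$ yields a smaller depth error, while a higher resolution shrinks $\delta d_p$ itself — matching the two remedies above.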
4. Points to note in the code
1) Usage of the cv::triangulatePoints function (input parameters, form of the output, coordinate frames, etc.); see the minimal sketch after this list.
2) How the final result is verified: compute the normalized coordinates from the pixel coordinates, and the 3D coordinates from triangulation; then compute the residual between the two. (This is the core idea of the later 3D-2D methods: minimizing the reprojection error.)
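A minimal sketch isolating the cv::triangulatePoints interface (the second pose and the point values below are made up for illustration; the real usage is in the full code further down). The projection matrices are 3x4 float [R|t] blocks, the 2D inputs are normalized camera coordinates (K already removed, as pixel2cam does below), and the output is a 4xN float matrix of homogeneous coordinates in the first camera's frame:

#include <opencv2/core/core.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <vector>
using namespace cv;

int main()
{
    // First camera taken as the reference frame [I|0]; the second pose
    // [R|t] is a hypothetical small translation along x.
    Mat T1 = (Mat_<float>(3,4) << 1,0,0,0,     0,1,0,0,  0,0,1,0);
    Mat T2 = (Mat_<float>(3,4) << 1,0,0,-0.1f, 0,1,0,0,  0,0,1,0);

    // Matched points in NORMALIZED camera coordinates, one entry per match,
    // same order in both vectors. These values are consistent with a 3D
    // point (-0.45, -0.24, 3) in the first frame, so the recovered depth
    // should come out near 3.
    std::vector<Point2f> pts_1 { Point2f(-0.15f,   -0.08f) };
    std::vector<Point2f> pts_2 { Point2f(-0.1833f, -0.08f) };

    // Output: 4xN homogeneous coordinates, float.
    Mat pts_4d;
    triangulatePoints(T1, T2, pts_1, pts_2, pts_4d);

    // Divide by the 4th component to get the inhomogeneous 3D point.
    Mat x = pts_4d.col(0);
    x /= x.at<float>(3,0);
    Point3d p(x.at<float>(0,0), x.at<float>(1,0), x.at<float>(2,0));
    return 0;
}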
Reference links
Opencv學習(9)——triangulatePoints()
Code
#include <iostream>
#include <opencv2/core/core.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/calib3d/calib3d.hpp>
// #include "extra.h" // used in opencv2
using namespace std;
using namespace cv;

void find_feature_matches (
    const Mat& img_1, const Mat& img_2,
    std::vector<KeyPoint>& keypoints_1,
    std::vector<KeyPoint>& keypoints_2,
    std::vector< DMatch >& matches );

void pose_estimation_2d2d (
    const std::vector<KeyPoint>& keypoints_1,
    const std::vector<KeyPoint>& keypoints_2,
    const std::vector< DMatch >& matches,
    Mat& R, Mat& t );

void triangulation (
    const vector<KeyPoint>& keypoint_1,
    const vector<KeyPoint>& keypoint_2,
    const std::vector< DMatch >& matches,
    const Mat& R, const Mat& t,
    vector<Point3d>& points );

// Convert pixel coordinates to normalized camera coordinates
Point2f pixel2cam( const Point2d& p, const Mat& K );

int main ( int argc, char** argv )
{
    if ( argc != 3 )
    {
        cout<<"usage: triangulation img1 img2"<<endl;
        return 1;
    }
    //-- Read the images
    Mat img_1 = imread ( argv[1], CV_LOAD_IMAGE_COLOR );
    Mat img_2 = imread ( argv[2], CV_LOAD_IMAGE_COLOR );

    vector<KeyPoint> keypoints_1, keypoints_2;
    vector<DMatch> matches;
    find_feature_matches ( img_1, img_2, keypoints_1, keypoints_2, matches );
    cout<<"found "<<matches.size()<<" matched point pairs in total"<<endl;

    //-- Estimate the motion between the two images
    Mat R,t;
    pose_estimation_2d2d ( keypoints_1, keypoints_2, matches, R, t );

    //-- Triangulation
    vector<Point3d> points;
    triangulation( keypoints_1, keypoints_2, matches, R, t, points );

    //-- Verify the reprojection relation between the triangulated points and the feature points
    Mat K = ( Mat_<double> ( 3,3 ) << 520.9, 0, 325.1, 0, 521.0, 249.7, 0, 0, 1 );
    for ( int i=0; i<matches.size(); i++ )
    {
        Point2d pt1_cam = pixel2cam( keypoints_1[ matches[i].queryIdx ].pt, K );
        Point2d pt1_cam_3d( points[i].x/points[i].z, points[i].y/points[i].z );

        cout<<"point in the first camera frame: "<<pt1_cam<<endl;
        cout<<"point projected from 3D "<<pt1_cam_3d<<", d="<<points[i].z<<endl;

        // The second image
        Point2f pt2_cam = pixel2cam( keypoints_2[ matches[i].trainIdx ].pt, K );
        Mat pt2_trans = R*( Mat_<double>(3,1) << points[i].x, points[i].y, points[i].z ) + t;
        pt2_trans /= pt2_trans.at<double>(2,0);
        cout<<"point in the second camera frame: "<<pt2_cam<<endl;
        cout<<"point reprojected from second frame: "<<pt2_trans.t()<<endl;
        cout<<endl;
    }
    return 0;
}

void find_feature_matches ( const Mat& img_1, const Mat& img_2,
                            std::vector<KeyPoint>& keypoints_1,
                            std::vector<KeyPoint>& keypoints_2,
                            std::vector< DMatch >& matches )
{
    //-- Initialization
    Mat descriptors_1, descriptors_2;
    // used in OpenCV3
    Ptr<FeatureDetector> detector = ORB::create();
    Ptr<DescriptorExtractor> descriptor = ORB::create();
    // use this if you are in OpenCV2
    // Ptr<FeatureDetector> detector = FeatureDetector::create ( "ORB" );
    // Ptr<DescriptorExtractor> descriptor = DescriptorExtractor::create ( "ORB" );
    Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("BruteForce-Hamming");

    //-- Step 1: detect Oriented FAST corner locations
    detector->detect ( img_1, keypoints_1 );
    detector->detect ( img_2, keypoints_2 );

    //-- Step 2: compute BRIEF descriptors from the corner locations
    descriptor->compute ( img_1, keypoints_1, descriptors_1 );
    descriptor->compute ( img_2, keypoints_2, descriptors_2 );

    //-- Step 3: match the BRIEF descriptors of the two images using the Hamming distance
    vector<DMatch> match;
    // BFMatcher matcher ( NORM_HAMMING );
    matcher->match ( descriptors_1, descriptors_2, match );

    //-- Step 4: filter the matched point pairs
    double min_dist=10000, max_dist=0;

    // Find the minimum and maximum distances among all matches,
    // i.e. the distances of the most similar and the least similar pairs
    for ( int i = 0; i < descriptors_1.rows; i++ )
    {
        double dist = match[i].distance;
        if ( dist < min_dist ) min_dist = dist;
        if ( dist > max_dist ) max_dist = dist;
    }

    printf ( "-- Max dist : %f \n", max_dist );
    printf ( "-- Min dist : %f \n", min_dist );

    // A match is considered wrong when the descriptor distance exceeds twice
    // the minimum distance. Since the minimum distance can sometimes be very
    // small, use an empirical value of 30 as a lower bound.
    for ( int i = 0; i < descriptors_1.rows; i++ )
    {
        if ( match[i].distance <= max ( 2*min_dist, 30.0 ) )
        {
            matches.push_back ( match[i] );
        }
    }
}

void pose_estimation_2d2d (
    const std::vector<KeyPoint>& keypoints_1,
    const std::vector<KeyPoint>& keypoints_2,
    const std::vector< DMatch >& matches,
    Mat& R, Mat& t )
{
    // Camera intrinsics, TUM Freiburg2
    Mat K = ( Mat_<double> ( 3,3 ) << 520.9, 0, 325.1, 0, 521.0, 249.7, 0, 0, 1 );

    //-- Convert the matched points into vector<Point2f>
    vector<Point2f> points1;
    vector<Point2f> points2;

    for ( int i = 0; i < ( int ) matches.size(); i++ )
    {
        points1.push_back ( keypoints_1[matches[i].queryIdx].pt );
        points2.push_back ( keypoints_2[matches[i].trainIdx].pt );
    }

    //-- Compute the fundamental matrix
    Mat fundamental_matrix;
    fundamental_matrix = findFundamentalMat ( points1, points2, CV_FM_8POINT );
    cout<<"fundamental_matrix is "<<endl<< fundamental_matrix<<endl;

    //-- Compute the essential matrix
    Point2d principal_point ( 325.1, 249.7 );  // camera principal point, calibrated value from the TUM dataset
    int focal_length = 521;                    // camera focal length, calibrated value from the TUM dataset
    Mat essential_matrix;
    essential_matrix = findEssentialMat ( points1, points2, focal_length, principal_point );
    cout<<"essential_matrix is "<<endl<< essential_matrix<<endl;

    //-- Compute the homography matrix
    Mat homography_matrix;
    homography_matrix = findHomography ( points1, points2, RANSAC, 3 );
    cout<<"homography_matrix is "<<endl<<homography_matrix<<endl;

    //-- Recover the rotation and translation from the essential matrix
    recoverPose ( essential_matrix, points1, points2, R, t, focal_length, principal_point );
    cout<<"R is "<<endl<<R<<endl;
    cout<<"t is "<<endl<<t<<endl;
}

void triangulation (
    const vector< KeyPoint >& keypoint_1,
    const vector< KeyPoint >& keypoint_2,
    const std::vector< DMatch >& matches,
    const Mat& R, const Mat& t,
    vector< Point3d >& points )
{
    Mat T1 = (Mat_<float> (3,4) <<
        1,0,0,0,
        0,1,0,0,
        0,0,1,0);
    Mat T2 = (Mat_<float> (3,4) <<
        R.at<double>(0,0), R.at<double>(0,1), R.at<double>(0,2), t.at<double>(0,0),
        R.at<double>(1,0), R.at<double>(1,1), R.at<double>(1,2), t.at<double>(1,0),
        R.at<double>(2,0), R.at<double>(2,1), R.at<double>(2,2), t.at<double>(2,0)
    );

    Mat K = ( Mat_<double> ( 3,3 ) << 520.9, 0, 325.1, 0, 521.0, 249.7, 0, 0, 1 );
    vector<Point2f> pts_1, pts_2;
    for ( DMatch m:matches )
    {
        // Convert pixel coordinates to camera coordinates
        pts_1.push_back ( pixel2cam( keypoint_1[m.queryIdx].pt, K) );
        pts_2.push_back ( pixel2cam( keypoint_2[m.trainIdx].pt, K) );
    }

    Mat pts_4d;
    cv::triangulatePoints( T1, T2, pts_1, pts_2, pts_4d );

    // Convert to inhomogeneous coordinates
    for ( int i=0; i<pts_4d.cols; i++ )
    {
        Mat x = pts_4d.col(i);
        x /= x.at<float>(3,0); // normalize
        Point3d p (
            x.at<float>(0,0),
            x.at<float>(1,0),
            x.at<float>(2,0)
        );
        points.push_back( p );
    }
}

Point2f pixel2cam ( const Point2d& p, const Mat& K )
{
    return Point2f
    (
        ( p.x - K.at<double>(0,2) ) / K.at<double>(0,0),
        ( p.y - K.at<double>(1,2) ) / K.at<double>(1,1)
    );
}
Results and analysis
point in the first camera frame: [-0.151193, -0.0780827]
point projected from 3D [-0.151193, -0.0780893], d=9.31937
point in the second camera frame: [-0.179854, -0.0589785]
point reprojected from second frame: [-0.1798545644710269, -0.05897215312873306, 1]
The output shows, for a given pixel point:
In the first image: the normalized coordinates computed from the pixel coordinates, and the triangulated 3D point after normalization; the difference between the two is the residual.
In the second image: the normalized coordinates computed from the pixel coordinates, versus the triangulated point transformed into the second camera frame with the motion (R, t) and then normalized; subtracting them gives the residual.
The residuals are at the 0.000x level.
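To print the residual as an explicit number rather than reading it off by eye, a few lines could be added to the verification loop in main (a hypothetical addition, using the names from the code above; std::hypot needs <cmath>):

// Hypothetical addition inside the verification loop in main():
// explicit residual between the two normalized-coordinate estimates
// in the first frame.
Point2d residual1( pt1_cam.x - pt1_cam_3d.x,
                   pt1_cam.y - pt1_cam_3d.y );
cout << "residual in frame 1: " << residual1
     << ", norm = " << std::hypot(residual1.x, residual1.y) << endl;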