1 #include<opencv2/opencv.hpp> 2 using namespace cv; 3 #include<iostream> 4 using namespace std; 5 int main() 6 { 7 8 /*Mat img_object = imread("D://1.jpg", IMREAD_GRAYSCALE); 9 Mat img_scene = imread("d://2.jpg", IMREAD_GRAYSCALE);*/ 10 11 /*Mat img_object = imread("7.png", IMREAD_GRAYSCALE); 12 Mat img_scene = imread("8.png", IMREAD_GRAYSCALE);*/ 13 14 Mat img_object = imread("adam1.png", IMREAD_GRAYSCALE); 15 Mat img_scene = imread("adam2.png", IMREAD_GRAYSCALE); 16 17 if (img_object.empty() || img_scene.empty()) 18 { 19 cout << "Could not open or find the image!\n" << endl; 20 return -1; 21 } 22 //-- Step 1: Detect the keypoints using SURF Detector, compute the descriptors 23 int minHessian = 10000; // default: 400 24 Ptr<SIFT> detector = SIFT::create(minHessian); 25 std::vector<KeyPoint> keypoints_object, keypoints_scene; 26 Mat descriptors_object, descriptors_scene; 27 detector->detectAndCompute(img_object, noArray(), keypoints_object, descriptors_object); 28 detector->detectAndCompute(img_scene, noArray(), keypoints_scene, descriptors_scene); 29 30 //-- Step 2: Matching descriptor vectors with a FLANN based matcher 31 // Since SURF is a floating-point descriptor NORM_L2 is used 32 Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create(DescriptorMatcher::FLANNBASED); 33 std::vector< std::vector<DMatch> > knn_matches; 34 matcher->knnMatch(descriptors_object, descriptors_scene, knn_matches, 2); 35 36 //-- Filter matches using the Lowe's ratio test 37 const float ratio_thresh = 0.75f; 38 std::vector<DMatch> good_matches; 39 for (size_t i = 0; i < knn_matches.size(); i++) 40 { 41 if (knn_matches[i][0].distance < ratio_thresh * knn_matches[i][1].distance) 42 { 43 good_matches.push_back(knn_matches[i][0]); 44 } 45 } 46 47 //-- Draw matches 48 Mat img_matches; 49 drawMatches(img_object, keypoints_object, img_scene, keypoints_scene, good_matches, img_matches, Scalar::all(-1), 50 Scalar::all(-1), std::vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS); 51 
52 //-- Localize the object 53 std::vector<Point2f> obj; 54 std::vector<Point2f> scene; 55 56 for (size_t i = 0; i < good_matches.size(); i++) 57 { 58 //-- Get the keypoints from the good matches 59 obj.push_back(keypoints_object[good_matches[i].queryIdx].pt); 60 scene.push_back(keypoints_scene[good_matches[i].trainIdx].pt); 61 } 62 vector<uchar>inliers; 63 Mat H = findHomography(obj, scene, inliers, RANSAC); 64 65 66 //-- Draw matches with RANSAC 67 Mat img_matches_ransac; 68 std::vector<DMatch> good_matches_ransac; 69 for (size_t i = 0; i < inliers.size(); i++) 70 { 71 if (inliers[i]) 72 { 73 good_matches_ransac.push_back(good_matches[i]); 74 } 75 } 76 drawMatches(img_object, keypoints_object, img_scene, keypoints_scene, good_matches_ransac, img_matches_ransac, Scalar::all(-1), 77 Scalar::all(-1), std::vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS); 78 79 80 ////-- Get the corners from the image_1 ( the object to be "detected" ) 81 //std::vector<Point2f> obj_corners(4); 82 //obj_corners[0] = Point2f(0, 0); 83 //obj_corners[1] = Point2f((float)img_object.cols, 0); 84 //obj_corners[2] = Point2f((float)img_object.cols, (float)img_object.rows); 85 //obj_corners[3] = Point2f(0, (float)img_object.rows); 86 //std::vector<Point2f> scene_corners(4); 87 88 //perspectiveTransform(obj_corners, scene_corners, H); 89 90 ////-- Draw lines between the corners (the mapped object in the scene - image_2 ) 91 //line(img_matches, scene_corners[0] + Point2f((float)img_object.cols, 0), 92 // scene_corners[1] + Point2f((float)img_object.cols, 0), Scalar(0, 255, 0), 4); 93 //line(img_matches, scene_corners[1] + Point2f((float)img_object.cols, 0), 94 // scene_corners[2] + Point2f((float)img_object.cols, 0), Scalar(0, 255, 0), 4); 95 //line(img_matches, scene_corners[2] + Point2f((float)img_object.cols, 0), 96 // scene_corners[3] + Point2f((float)img_object.cols, 0), Scalar(0, 255, 0), 4); 97 //line(img_matches, scene_corners[3] + Point2f((float)img_object.cols, 0), 98 // 
scene_corners[0] + Point2f((float)img_object.cols, 0), Scalar(0, 255, 0), 4); 99 100 //-- Show detected matches 101 namedWindow("img_matches", WINDOW_NORMAL); 102 imshow("img_matches", img_matches); 103 104 namedWindow("img_matches_ransac", WINDOW_NORMAL); 105 imshow("img_matches_ransac", img_matches_ransac); 106 107 waitKey(0); 108 return 1; 109 }
// NOTE: Much faster than ASIFT and very accurate — almost no false matches
// observed — but the number of points is no match for ASIFT, which is not
// necessarily a good thing for camera pose estimation.
// ASIFT finds many points, and they are well spread out — welcome news for
// VSLAM. Although it is very time-consuming, that makes it a good fit for
// small-scale local reconstruction. TODO: test on low-texture images.