Environment
- ubuntu 19.04
- vscode 1.37.0
- opencv 3.4.7
- cmake 3.13.4
First, a nod to this expert's digital image processing outline: https://bitlecture.github.io/notes/%E6%95%B0%E5%AD%97%E5%9B%BE%E5%83%8F%E5%A4%84%E7%90%86/
Then start following Mao Xingyun's blog and running the demos to learn OpenCV. Actually, in terms of practicality, the following blog series might be even better? https://blog.csdn.net/morewindows/article/category/1291764
https://www.cnblogs.com/long5683/p/10094122.html
In fact, after studying for a little while you find that the vision used in RM (judging from this open-source project alone, at least) is not difficult:
https://blog.csdn.net/u010750137/article/details/91344986
https://blog.csdn.net/qq_31669419/article/details/53053321
It is rather last year's rune challenge that involved something close to machine learning, which leaves more room for research.
So let's begin the road of vision learning.
Reading and displaying files: https://blog.csdn.net/poem_qianmo/article/details/20537737
Loading an image:
Mat image = imread("Filename");
namedWindow("Windowname");
imshow("Windowname",image);
Note that the image has to be placed in the build folder; if imread fails to load it, the program errors out with:
error: (-215:Assertion failed) size.width>0 && size.height>0 in function 'imshow'
Reading a video:
VideoCapture cap;
cap.open("Filename");
Opening a camera:
cap.open(0);
Ways to check whether the read succeeded:
// method 1
if(!image.data){printf("failed to read image");}
// method 2
if(image.empty()){printf("failed to read image");}
Selecting a region of interest (ROI): https://blog.csdn.net/poem_qianmo/article/details/20911629
Mat imageROI;
// method 1
imageROI= srcImage4(Rect(200,250,logoImage.cols,logoImage.rows));
// method 2
imageROI= srcImage4(Range(250,250+logoImage.rows),Range(200,200+logoImage.cols));
Note that an ROI is just a new Mat header pointing into the parent image's data, so writing to the ROI modifies the original image.
Geometric image transforms should also matter quite a bit: https://blog.csdn.net/xiaowei_cqu/article/details/7616044
Linear blending of images: addWeighted blends two images directly.
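For reference, this is the documented per-pixel formula behind addWeighted (alpha, beta and gamma being its second, fourth and fifth arguments): $dst = src_1 \cdot \alpha + src_2 \cdot \beta + \gamma$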
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/opencv.hpp"
#include "iostream"
using namespace cv;
using namespace std;
int main()
{
double alphavalue = 0.5;
double betavalue;
Mat satori = imread("satori.jpg");
Mat name = imread("name.png");
if(satori.empty()){cout << "failed to read image satori" << endl;return -1;}
if(name.empty()){cout << "failed to read image name" << endl;return -1;}
betavalue = 1 - alphavalue;
// carve out an ROI on satori
Mat ROI = satori(Rect(0,0,name.cols,name.rows));
// blend name into the ROI region of satori
addWeighted(ROI,alphavalue,name,betavalue,0.,ROI);
namedWindow("混合效果");
imshow("混合效果",satori);
waitKey();
return 0;
}
Splitting/merging color channels: split()/merge()
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/opencv.hpp"
#include "iostream"
using namespace cv;
using namespace std;
int main()
{
Mat satori = imread("satori.jpg");
Mat name = imread("name.png",0);
vector<Mat>channels;
Mat blue_channel;
if(satori.empty()){cout << "failed to read image satori" << endl;return -1;}
if(name.empty()){cout << "failed to read image name" << endl;return -1;}
// split into individual color channels
split(satori,channels);
blue_channel = channels.at(0);// channel 0 is blue: OpenCV stores images as BGR
addWeighted(blue_channel(Rect(0,0,name.cols,name.rows)),1.0,name,0.5,0,blue_channel(Rect(0,0,name.cols,name.rows)));
// merge the channels back
merge(channels,satori);
namedWindow("混合效果");
imshow("混合效果",satori);
waitKey();
return 0;
}
From the channel perspective, you can strip away two of the channels and keep only one to synthesize a single-color image. OpenCV lets you specify the image type, e.g. CV_8UC1, i.e. unsigned 8-bit + 1 channel, so the operation here is quite simple: replace the other two channels with black (black meaning a gray value of 0) by handing them an all-black single-channel image.
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/opencv.hpp"
#include "iostream"
using namespace cv;
using namespace std;
int main()
{
Mat satori = imread("satori.jpg");
vector<Mat> channels(satori.channels());
vector<Mat> channels_mix(satori.channels());
Mat mixed;
if(satori.empty()){cout << "failed to read image satori" << endl;return -1;}
int w = satori.cols;
int h = satori.rows;
split(satori,channels);
Mat black;
black.create(h,w,CV_8UC1);
black = Scalar(0);
channels_mix[0] = channels[0];
channels_mix[1] = black;
channels_mix[2] = black;
merge(channels_mix,mixed);
imshow("mixed",mixed);
waitKey();
return 0;
}
To round off channels, ROI and linear blending, let's add drawing a rectangle: https://blog.csdn.net/wc781708249/article/details/78518447
Knowing how to use rectangle is enough; see the sketch below.
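A minimal sketch, assuming an already-loaded 3-channel Mat named image (the coordinates and colors are made-up values):
rectangle(image,Rect(200,250,100,80),Scalar(0,255,0),2);// green 100x80 box, top-left corner at (200,250), line thickness 2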
This one is contour finding, which also looks like a fun demo: https://www.cnblogs.com/skyfsm/p/6890863.html
Still running demos. Using Madoka as the demo image is a nice touch; as for the trackbar, it basically provides something like the online debugging you get in embedded development? https://blog.csdn.net/poem_qianmo/article/details/21479533
About the vector data type: https://www.cnblogs.com/mr-wid/archive/2013/01/22/2871105.html
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/opencv.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "iostream"
using namespace cv;
using namespace std;
Mat satori;
int threval = 160;
static void trace_bar(int,void*)
{
Mat image = threval > 128? (satori < threval) : (satori > threval);
vector<vector<Point>> contours;
vector<Vec4i> hierarchy;
findContours(image,contours,hierarchy,CV_RETR_CCOMP,CV_CHAIN_APPROX_SIMPLE);
Mat dst = Mat::zeros(satori.size(),CV_8UC3);
if(!contours.empty() && !hierarchy.empty())
{
for (int i = 0; i >=0; i=hierarchy[i][0])
{
Scalar color((rand()&255),(rand()&255),(rand()&255));
drawContours(dst,contours,i,color,CV_FILLED,8,hierarchy);
}
}
imshow("satori",dst);
}
int main()
{
satori = imread("satori.jpg",0);
if(satori.empty()){cout << "failed to read image satori" << endl;return -1;}
namedWindow("satori");
createTrackbar("treashould","satori",&threval,255,trace_bar);
trace_bar(threval,0);
waitKey();
return 0;
}
So in essence it's contour finding + coloring; pretty interesting.
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/opencv.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "iostream"
using namespace cv;
using namespace std;
int Contrast,Bright;
Mat srcImage,dstImage;
static void trace_bar(int,void *)
{
for (int i = 0; i < srcImage.cols; i++)
{
for (int j = 0; j < srcImage.rows; j++)
{
for (int k = 0; k < 3; k++)
{
dstImage.at<Vec3b>(j,i)[k] = saturate_cast<uchar>((Contrast*0.01)*srcImage.at<Vec3b>(j,i)[k] + Bright);
}
}
}
imshow("satori",dstImage);
};
int main()
{
srcImage = imread("satori.jpg");
if(srcImage.empty()){cout << "failed to read image satori" << endl;return -1;}
dstImage = Mat::zeros(srcImage.size(),srcImage.type());
Contrast = 80;
Bright = 80;
namedWindow("satori");
createTrackbar("contrast","satori",&Contrast,255,trace_bar);
createTrackbar("bright","satori",&Bright,255,trace_bar);
trace_bar(Contrast,0);
trace_bar(Bright,0);
waitKey();
return 0;
}
This demo mainly tried adjusting brightness and contrast per pixel; I hadn't expected it to come down to such a simple linear relationship. It also demonstrates operating on individual pixels: as we can already see, one way to represent an image is simply as the grid of values you address with Image.at.
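A minimal sketch of that pixel access, assuming a CV_8UC3 Mat named image and in-range indices j, i:
Vec3b pixel = image.at<Vec3b>(j,i);// row j, column i; channels are ordered B,G,R
image.at<Vec3b>(j,i)[2] = 255;// write the red channel of that pixel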
On to filtering: https://blog.csdn.net/poem_qianmo/article/details/22745559 https://blog.csdn.net/xiaowei_cqu/article/details/7785365
Box filtering: the boxFilter function; mean (neighborhood-average) filtering: the blur function; Gaussian filtering: the GaussianBlur function; median filtering: the medianBlur function; bilateral filtering: the bilateralFilter function
https://wenku.baidu.com/view/f55e1bc6f90f76c661371ac5.html 2D convolution is quite useful, including for the Sobel operator used later for edge detection.
https://blog.csdn.net/dang_boy/article/details/76150067
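Since the filters below are all convolutions under the hood, here is a minimal filter2D sketch with an assumed 3x3 sharpening kernel (srcImage/dstImage named as in the demo below):
Mat kernel = (Mat_<float>(3,3) << 0,-1,0, -1,5,-1, 0,-1,0);// classic sharpening kernel: center weight 5, cross weights -1
filter2D(srcImage,dstImage,srcImage.depth(),kernel);// slide the kernel over every pixel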
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/opencv.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "iostream"
using namespace cv;
using namespace std;
int main()
{
Mat srcImage = imread("satori.jpg");
if(srcImage.empty()){cout << "failed to read image satori" << endl;return -1;}
Mat dstImage1,dstImage2,dstImage3,dstImage4,dstImage5;
dstImage1 = srcImage.clone();
dstImage2 = srcImage.clone();
dstImage3 = srcImage.clone();
dstImage4 = srcImage.clone();
dstImage5 = srcImage.clone();
imshow("原圖",srcImage);
boxFilter(srcImage,dstImage1,-1,Size(5,5));
imshow("方框濾波",dstImage1);
blur(srcImage,dstImage2,Size(5,5));
imshow("均值濾波",dstImage2);
GaussianBlur(srcImage,dstImage3,Size(3,3),0,0);
imshow("高斯濾波",dstImage3);
medianBlur(srcImage,dstImage4,5);
imshow("中值濾波",dstImage4);
bilateralFilter(srcImage,dstImage5,25,25*2,25/2);
imshow("雙邊濾波",dstImage5);
waitKey();
destroyAllWindows();
return 0;
}
Just ran the simple filters. Size(w,h) sets the kernel size, and the kernel size affects how strong the blur is.
Then nonlinear filtering: median filtering and bilateral filtering.
The bilateral filter's effect is almost magical: it smoothed away the worn, aged-texture-like patterns in the original. Very slick (skin smoothing?).
Erosion and dilation
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/opencv.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "iostream"
using namespace cv;
using namespace std;
int Elem_Size = 3;
int value1;
Mat srcImage,dstImage;
static void tracebar(int,void*)
{
Mat element = getStructuringElement(MORPH_RECT,Size(2*Elem_Size+1,2*Elem_Size+1),Point(Elem_Size,Elem_Size));
if(value1 == 0)
{
erode(srcImage,dstImage,element);
}
else
{
dilate(srcImage,dstImage,element);
}
imshow("satori",dstImage);
}
int main()
{
srcImage = imread("satori.jpg");
if(srcImage.empty()){cout << "failed to read image satori" << endl;return -1;}
Mat element = getStructuringElement(MORPH_RECT,Size(2*Elem_Size+1,2*Elem_Size+1),Point(Elem_Size,Elem_Size));
erode(srcImage,dstImage,element);
imshow("satori",dstImage);
createTrackbar("腐蝕/膨脹","satori",&value1,1,tracebar);
createTrackbar("內核尺寸","satori",&Elem_Size,21,tracebar);
tracebar(value1,0);
tracebar(Elem_Size,0);
while(char(waitKey(1)) != 'q');
return 0;
}
Erosion grows the dark pixels; dilation grows the bright ones.
On top of these there are opening, closing, black-hat and so on... opening essentially separates weakly connected pixels, while closing fills in small cracks. https://blog.csdn.net/hanshanbuleng/article/details/80657148
Mat element = getStructuringElement(MORPH_RECT,Size(2*Elem_Size+1,2*Elem_Size+1),Point(Elem_Size,Elem_Size));
morphologyEx(srcImage,dstImage,MORPH_OPEN,element);
Just change the third parameter; for instance, closing would be:
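morphologyEx(srcImage,dstImage,MORPH_CLOSE,element);// the other standard choices are MORPH_GRADIENT, MORPH_TOPHAT and MORPH_BLACKHAT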
At last, the happy operator stage.
Before the specifics, let's look at the general steps of edge detection.
1) Filtering: edge detection algorithms are mainly based on the first and second derivatives of image intensity, but derivatives are usually sensitive to noise, so a filter is needed to improve an edge detector's behavior in the presence of noise. The common choice is Gaussian filtering: a set of normalized Gaussian kernels is generated from the discretized Gaussian function (see the article "高斯濾波原理及其編程離散化實現方法"), and each point of the image's gray matrix is replaced by a weighted sum over the kernel (concrete code below).
2) Enhancement: the basis for enhancing edges is determining how much the intensity changes in each point's neighborhood; enhancement makes points whose neighborhood intensity changes significantly stand out. In code this is usually done by computing the gradient magnitude.
3) Detection: after enhancement, many points in a neighborhood tend to have fairly large gradient values, and in a given application not all of them are the edge points we want, so some criterion is needed to accept or reject them. In practice the usual method is thresholding.
Edge detection must be one of the most-used algorithms in RM: armor-plate recognition presumably relies mostly on it, followed by solvePnP once the edges are found.
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/opencv.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "iostream"
using namespace cv;
using namespace std;
int Elem_Size = 1;
int value1;
Mat srcImage,dstImage;
int main()
{
srcImage = imread("satori.jpg");
if(srcImage.empty()){cout << "failed to read image satori" << endl;return -1;}
Canny(srcImage,dstImage,300,100);
imshow("satori",dstImage);
while (char(waitKey(1)) != 'q');
return 0;
}
Canny shows results very easily, and tuning the two thresholds suppresses noise.
The Sobel operator computes the gradient separately along the x and y directions, so compared with Canny it can be useful in relatively specific scenarios where the features run along the x/y direction.
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/opencv.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "iostream"
using namespace cv;
using namespace std;
int Elem_Size = 1;
int value1;
Mat srcImage,dstImage,dstImage2,dstImage3;
int main()
{
Mat satori;
satori = imread("satori.jpg");
if(satori.empty()){cout << "failed to read image satori" << endl;return -1;}
imshow("image",satori);
bilateralFilter(satori,srcImage,25,25*2,25/2);
cvtColor(srcImage,srcImage,CV_BGR2GRAY);// imread loads BGR, so CV_BGR2GRAY rather than CV_RGB2GRAY
Sobel(srcImage,dstImage,srcImage.depth(),1,0,3,1,0,BORDER_DEFAULT);// note: with an 8-bit ddepth, negative gradients are clipped to 0
Sobel(srcImage,dstImage2,srcImage.depth(),0,1,3,1,0,BORDER_DEFAULT);
imshow("satori",dstImage);
imshow("satori2",dstImage2);
addWeighted(dstImage,1,dstImage2,1,1,dstImage3);
imshow("satori3",dstImage3);
while (char(waitKey(1)) != 'q');
return 0;
}
Bilateral filtering followed by Sobel detection along x and y, then blending the two; the result is decent.
Then I tried Laplacian detection after bilateral filtering, and the result is even better, tsk tsk.
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/opencv.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "iostream"
using namespace cv;
using namespace std;
int Elem_Size = 1;
int value1;
Mat srcImage,dstImage,dstImage2,dstImage3;
int main()
{
Mat satori;
satori = imread("satori.jpg");
if(satori.empty()){cout << "failed to read image satori" << endl;return -1;}
imshow("image",satori);
bilateralFilter(satori,srcImage,25,25*2,25/2);
cvtColor(srcImage,srcImage,CV_BGR2GRAY);
Laplacian(srcImage,dstImage,srcImage.depth());
imshow("satori",dstImage);
while (char(waitKey(1)) != 'q');
return 0;
}
There is also Scharr, which can be seen as a further refinement of Sobel? Let's see how it does.
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/opencv.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "iostream"
using namespace cv;
using namespace std;
int Elem_Size = 1;
int value1;
Mat srcImage,dstImage,dstImage2,dstImage3;
int main()
{
Mat satori;
satori = imread("satori.jpg");
if(satori.empty()){cout << "failed to read image satori" << endl;return -1;}
bilateralFilter(satori,srcImage,25,25*2,25/2);
imshow("image",srcImage);
cvtColor(srcImage,srcImage,CV_BGR2GRAY);
Scharr(srcImage,dstImage,srcImage.depth(),1,0,1,0,BORDER_DEFAULT);
Scharr(srcImage,dstImage2,srcImage.depth(),0,1,1,0,BORDER_DEFAULT);
imshow("satori",dstImage);
imshow("satori2",dstImage2);
addWeighted(dstImage,1,dstImage2,1,1,dstImage3);
imshow("satori3",dstImage3);
while (char(waitKey(1)) != 'q');
return 0;
}
Result: I couldn't see any improvement; if anything it picked up more noise... maybe I just didn't keep tuning the parameters (or, looked at another way, it's more sensitive?)
Now we reach the happy resize stage, plus pyrUp and pyrDown, the pyramid upscaling/downscaling functions: https://blog.csdn.net/poem_qianmo/article/details/26157633
I don't think there's much to say: it's all about sampling or interpolating as sensibly as possible, and the Gaussian function shows its strength yet again. It really is a great function (I was already startled by how remarkable it is back when I first learned the central limit theorem).
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/opencv.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "iostream"
using namespace cv;
using namespace std;
int main()
{
Mat tmpImage,dstImage;
tmpImage = imread("satori.jpg");
if(tmpImage.empty()){cout << "failed to read image satori" << endl;return -1;}
dstImage = tmpImage;
while (1)
{
char key = waitKey(1);
switch (key)
{
case 'q':
return 0;
break;
case 'w':
resize(tmpImage,dstImage,Size(tmpImage.cols*2,tmpImage.rows*2));
break;
case 's':
resize(tmpImage,dstImage,Size(tmpImage.cols/2,tmpImage.rows/2));
break;
default:
break;
}
tmpImage = dstImage;
imshow("satori",dstImage);
}
}
Unsurprisingly, shrinking and then enlarging again wrecks the image.
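The pyramid functions would slot into the same demo in place of resize; a minimal sketch (pyrDown/pyrUp default to halving/doubling the size):
pyrDown(tmpImage,dstImage);// Gaussian-blur, then drop every other row and column
pyrUp(tmpImage,dstImage);// upsample to 2x and smooth; the detail removed by pyrDown does not come back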
Hough line/circle detection
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/opencv.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "iostream"
using namespace cv;
using namespace std;
int main()
{
Mat srcImage,dstImage,midImage;
srcImage = imread("test.jpg");
if(srcImage.empty()){cout << "failed to read image" << endl;return -1;}
Canny(srcImage,midImage,400,100,3);
cvtColor(midImage,dstImage,CV_GRAY2BGR);
vector<Vec2f> lines;
HoughLines(midImage,lines,1,CV_PI/180,150,0,0);
for (size_t i = 0; i < lines.size(); i++)
{
float rho = lines[i][0] , theta = lines[i][1];
Point pt1,pt2;
double a = cos(theta),b = sin(theta);
double x0 = a*rho,y0 = b*rho;
pt1.x = cvRound(x0 + 1000 * (-b));
pt1.y = cvRound(y0 + 1000 * (a));
pt2.x = cvRound(x0 - 1000 * (-b));
pt2.y = cvRound(y0 - 1000 * (a));
line(dstImage,pt1,pt2,Scalar(55,100,95),1,CV_AA);
}
imshow("dst",dstImage);
while(char(waitKey(1)) != 'q');
return 0;
}
This detection math makes my head spin. There's also the HoughLinesP detection method, which is interesting.
After experimenting, HoughLinesP seems the better detection method compared with HoughLines, because it can detect where a line segment starts and ends, and it has more practically useful tunable parameters (minimum segment length to keep, maximum gap to bridge, etc.); see the comment in the code below.
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/opencv.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "iostream"
using namespace cv;
using namespace std;
int main()
{
Mat srcImage,dstImage,midImage;
srcImage = imread("test.jpg");
if(srcImage.empty()){cout << "failed to read image" << endl;return -1;}
Canny(srcImage,midImage,400,100,3);
cvtColor(midImage,dstImage,CV_GRAY2BGR);
vector<Vec4i> lines;
HoughLinesP(midImage,lines,1,CV_PI/180,150,0,0);
for (size_t i = 0; i < lines.size(); i++)
{
Vec4i l = lines[i];
line(dstImage,Point(l[0],l[1]),Point(l[2],l[3]),Scalar(0,100,0),5,CV_AA);
}
imshow("dst",dstImage);
while(char(waitKey(1)) != 'q');
return 0;
}
There's also HoughCircles. RM probably has little use for circle detection, unless you want to detect projectiles (and who would bother detecting projectiles), but RC should find it useful? After all, several seasons' tasks involved throwing balls.
In HoughCircles, the fifth argument (minDist) sets the minimum distance between detected circle centres, which filters out overlapping detections, and the sixth and seventh set the thresholds; both are quite useful.
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/opencv.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "iostream"
using namespace cv;
using namespace std;
int main()
{
Mat srcImage,dstImage,midImage;
srcImage = imread("circle.jpeg");
if(srcImage.empty()){cout << "failed to read image" << endl;return -1;}
cvtColor(srcImage,midImage,CV_BGR2GRAY);
GaussianBlur(midImage,midImage,Size(3,3),1,1);
vector<Vec3f> circles;
HoughCircles(midImage,circles,CV_HOUGH_GRADIENT,1.5,20,300,100,0,0);
for (size_t i = 0; i < circles.size(); i++)
{
Point center(cvRound(circles[i][0]),cvRound(circles[i][1]));
int radius = cvRound(circles[i][2]);
circle(srcImage,center,radius,Scalar(0,100,0),3);
}
imshow("dst",srcImage);
while(char(waitKey(1)) != 'q');
return 0;
}
After some parameter tuning it detects the circles I wanted in the image fairly well.
Digging into the source shows that these Hough detectors are really wrappers over the legacy C implementations (the old Hough detection); I won't pick at the low-level details here, there's plenty of time for that.
On to the happy flood-fill algorithm.
Functionally, floodFill works just like Photoshop's magic wand; all in all a very important feature.
https://blog.csdn.net/poem_qianmo/article/details/28261997
The algorithm's principle is easy to understand: pick a point as the seed, use it as the starting point to compute the difference against the surrounding pixels, and the pixels within the threshold become the next batch of seeds.
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/opencv.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "iostream"
using namespace cv;
using namespace std;
int main()
{
Mat srcImage,dstImage,midImage;
srcImage = imread("satori.jpg");
if(srcImage.empty()){cout << "failed to read image" << endl;return -1;}
Rect ccomp;
floodFill(srcImage,Point(0,0),Scalar(0,0,0),&ccomp,Scalar(10,10,10),Scalar(10,10,10));
imshow("dst",srcImage);
while(char(waitKey(1)) != 'q');
return 0;
}
The last two Scalar parameters bound the selection thresholds; after tuning them the figure can be cut out fairly cleanly.
floodFill can also run in mask mode, which keeps the fill from flooding into nonzero pixels of the mask.
Finally, floodFill's last parameter is a 32-bit operand whose low 8 bits, middle 8 bits and high bits all carry meaning; it's quite involved, so I won't study it too closely here.
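Still, a minimal sketch of how those bit fields compose, reusing srcImage and ccomp from the demo above (the mask must be 2 pixels larger than the image in each dimension):
Mat mask = Mat::zeros(srcImage.rows+2,srcImage.cols+2,CV_8UC1);// nonzero mask pixels block the fill
int flags = 4 | (255 << 8) | FLOODFILL_FIXED_RANGE;// low 8 bits: connectivity; middle 8 bits: value written into the mask; high bits: option flags
floodFill(srcImage,mask,Point(0,0),Scalar(0,0,0),&ccomp,Scalar(10,10,10),Scalar(10,10,10),flags);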
Emmmm, on to corner detection.
Corner detection should be fairly important for both RC and RM; in RM, after recognizing the light bars you still have to mark the four corner keypoints before you can run the PnP solution.
Image feature types can be divided into three kinds:
- <1> edges
- <2> corners (interest keypoints)
- <3> blobs (interest regions)
In today's image processing field, corner detection algorithms fall into three categories:
- <1> corner detection based on grayscale images
- <2> corner detection based on binary images
- <3> corner detection based on contour curves
Corner detection is also a major application of gradient computation (you can see why just by thinking about it); the textbook Harris response below makes the connection concrete.
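For each pixel, Harris accumulates the gradient products over a window $w$ and scores the result:
$$M = \sum_{x,y} w(x,y) \begin{bmatrix} I_x^2 & I_x I_y \\ I_x I_y & I_y^2 \end{bmatrix}, \qquad R = \det(M) - k\,(\operatorname{trace}(M))^2$$
A large $R$ means the intensity changes strongly in both directions, i.e. a corner; $k$ is the last parameter passed to cornerHarris below.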
A companion to corner detection is a method with very high name recognition: binarization (thresholding), which comes in five flavors in total (how many ways are there to write the 茴 in 茴香豆, again?).
Let's try thresholding first.
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/opencv.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "iostream"
using namespace cv;
using namespace std;
int thresholdvalue;
Mat src_image,dst_image1,dst_image2,dst_image3,dst_image4,dst_image5;
static void tracebar(int,void*)
{
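// THRESH_BINARY is just one of the five types; the others are THRESH_BINARY_INV, THRESH_TRUNC,
// THRESH_TOZERO and THRESH_TOZERO_INV; swap the fifth argument to compare them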
threshold(src_image,dst_image1,thresholdvalue,255,THRESH_BINARY);
imshow("test",dst_image1);
}
int main()
{
src_image = imread("satori.jpg");
if(src_image.empty()){cout << "failed to read image" << endl;return -1;}
cvtColor(src_image,src_image,CV_BGR2GRAY);
namedWindow("test");
createTrackbar("threshold","test",&thresholdvalue,255,tracebar);
tracebar(thresholdvalue,0);
while(char(waitKey(1)) != 'q');
return 0;
}
I won't try every single variant; in any case the effect is immediate.
Worth mentioning: the image produced by cornerHarris only becomes visible after thresholding with a very small threshold.
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/opencv.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "iostream"
using namespace cv;
using namespace std;
int thresholdvalue;
Mat src_image,dst_image1,dst_image2,dst_image3,dst_image4,dst_image5;
Mat mid_image;
int main()
{
src_image = imread("satori.jpg");
if(src_image.empty()){cout << "failed to read image" << endl;return -1;}
cvtColor(src_image,src_image,CV_BGR2GRAY);
namedWindow("test");
cornerHarris(src_image,mid_image,5,3,0.01);
threshold(mid_image,dst_image1,0.0001,255,THRESH_BINARY);
imshow("test",dst_image1);
while(char(waitKey(1)) != 'q');
return 0;
}
So the values produced by Harris detection are scaled very small and don't fit the usual 0-255 gray-level convention; to actually use them you generally need further processing, such as thresholding or normalize.
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/opencv.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "iostream"
using namespace cv;
using namespace std;
int thresholdvalue;
Mat src_image,dst_image1,dst_image2,dst_image3,dst_image4,dst_image5;
Mat mid_image;
static void tracebar(int,void*)
{
threshold(dst_image1,dst_image2,thresholdvalue,255,THRESH_BINARY);
imshow("test",dst_image2);
}
int main()
{
src_image = imread("satori.jpg");
if(src_image.empty()){cout << "failed to read image" << endl;return -1;}
cvtColor(src_image,src_image,CV_BGR2GRAY);
namedWindow("test");
cornerHarris(src_image,mid_image,5,3,0.01);
normalize(mid_image,dst_image1,0,255,NORM_MINMAX,CV_32FC1,Mat());
createTrackbar("threshold","test",&thresholdvalue,255,tracebar);
tracebar(thresholdvalue,0);
while(char(waitKey(1)) != 'q');
return 0;
}
Now the trackbar is finally of some use.
Remapping and SURF feature detection; on the whole, SURF should be the fairly general-purpose feature detection algorithm.
https://www.cnblogs.com/dengxiaojun/p/5302778.html
Remapping is mainly done with the remap function, whose mathematical definition is
$$dst(x,y) = src(map_1(x,y),\ map_2(x,y))$$
where $map_1$ and $map_2$ are both passed into remap as parameters (note that the map type is not arbitrary: create them as CV_32FC1).
Here is a simple application: mirror-flipping an image.
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/opencv.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "iostream"
using namespace cv;
using namespace std;
int thresholdvalue;
Mat src_image,dst_image1,dst_image2;
Mat mid_image;
int main()
{
src_image = imread("satori.jpg");
if(src_image.empty()){cout << "failed to read image" << endl;return -1;}
Mat map_x,map_y;
map_x.create(src_image.size(),CV_32FC1);
map_y.create(src_image.size(),CV_32FC1);
for (size_t i = 0; i < src_image.cols; i++)
{
for (size_t j = 0; j < src_image.rows; j++)
{
map_x.at<float>(j,i) = static_cast<float>(i);
map_y.at<float>(j,i) = static_cast<float>(src_image.rows-j);
}
}
remap(src_image,dst_image1,map_x,map_y,CV_INTER_LINEAR,BORDER_CONSTANT,Scalar(0,0,0));
imshow("dst1",dst_image1);
while(char(waitKey(1)) != 'q');
return 0;
}
Nothing difficult here; just be a little careful with the per-pixel operations.
Then the main event: our dear SURF feature detection algorithm. In OpenCV, SURF is wrapped as a class with a pile of miscellaneous operations you can run.
Let's try drawKeypoints.
And then the fun begins:
https://blog.csdn.net/zhounanzhaode/article/details/50302385
So to use SURF I have to fiddle some more; genuinely annoying.
Installing directly via apt-get failed; adding the PPA errors out with:
does not have a Release file. N: Updating from such a repository can't be done securely, and is therefore disabled by default.
What did work was a procedure I pieced together myself:
Download this repository from GitHub:
https://github.com/opencv/opencv_contrib/tree/3.4
Then check out the tag matching your installed OpenCV version.
Copy the modules you need from opencv_contrib/modules/.... into opencv/modules/.
Then, in the CMakeLists under opencv/modules, modify
set(FIXED_ORDER_MODULES core imgproc imgcodecs videoio highgui video calib3d features2d objdetect dnn ml flann photo stitching xfeatures2d)
to add the new modules you need.
Then it's the same cmake, make, make install trio as the original install; if it succeeds you can include nonfree.hpp normally.
emmmmmm
Looking closer, since OpenCV 3 the SURF usage is completely different from OpenCV 2; apart from the drawKeypoints API, which survives, seemingly everything changed.
Grabbed a tutorial at random and it turned out to be RM-related again, hahahaha:
https://www.cnblogs.com/long5683/p/9692987.html
Turns out my earlier setup wasn't clean after all... compiling threw this:
error: (-213:The function/feature is not implemented) This algorithm is patented and is excluded in this configuration; Set OPENCV_ENABLE_NONFREE CMake option and rebuild the library in function 'create'
So I still have to tweak things at cmake time... ugh.
https://blog.csdn.net/zhoukehu_CSDN/article/details/83145026
Follow that blog, and remember to tick OPENCV_ENABLE_NONFREE in cmake-gui... sigh, this part is truly painful; another solid 20 minutes of make.
After re-making, everything is fine.
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/opencv.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/xfeatures2d/nonfree.hpp"
#include "iostream"
using namespace cv;
using namespace cv::xfeatures2d;
using namespace std;
int thresholdvalue;
Mat src_image,dst_image1,dst_image2;
Mat mid_image;
int main()
{
src_image = imread("satori.jpg");
if(src_image.empty()){cout << "failed to read image" << endl;return -1;}
int minHessian = 400;
Ptr<SURF> detector = SURF::create(minHessian);
vector<KeyPoint> keypoints;
detector->detect(src_image,keypoints,Mat());
drawKeypoints(src_image,keypoints,dst_image1,Scalar(0,0,0));
imshow("dst",dst_image1);
while(char(waitKey(1)) != 'q');
return 0;
}
That's about it for following the demos, for now.
What actually remains to learn: camera-related topics (intrinsic matrix calibration), the use of solvePnP, and so on, plus some useful features under the video module that I haven't looked into yet.
Next I'll dig into a few real projects, consolidating what I've already learned while building up practical experience.