好久沒有發OpenCV的博客了,最近想到了一個識別地圖輪廓的方案,就寫來試試。(識別中國的28個省份地圖輪廓,不考慮直轄市)
首先,我的基本思路是 用最小的矩形將地圖的輪廓圈出來,可以根據長方形的長寬比判斷,也可將其縮放至特定的大小,計算其輪廓上的像素個數來判斷。
缺點:用攝像頭讀取圖片時,使用這種方法會有一些誤差。
也可以ANN訓練識別,但是這樣做效率低。
step 1. 讀取圖片、處理圖像
// Step 1: load the image, convert it to grayscale and binarize it.
Mat src = imread("12.jpg");
Mat grayImage;
cvtColor(src, grayImage, CV_BGR2GRAY);
// Fixed threshold at 48: pixels above it become 255 (white).
threshold(grayImage, grayImage, 48, 255, CV_THRESH_BINARY);
// NOTE(review): as described below, without a cvWaitKey(0) at the end of the
// program the HighGUI window never refreshes and shows only a gray frame.
imshow("grayImage", grayImage);
問題來了,處理圖片后的grayImage根本無法顯示,結果為一張灰色的圖片。
最后發現,因為大意,程序的最后沒有加 cvWaitKey(0); 這句話,因此圖片無法顯示。
step 2. 尋找輪廓並畫出
#include <opencv2/opencv.hpp>
#include <iostream>

using namespace cv;
using namespace std;

// Step 2: erode + binarize the map image, find its external contours and draw
// the axis-aligned bounding rectangle of each contour over the original image.
int main()
{
    Mat src = imread("timg.jpg");
    Mat grayImage, dstImage;
    src.copyTo(dstImage);

    // Structuring-element (kernel) radius: the kernel is (2r+1) x (2r+1).
    int g_nStructElementSize = 1;
    Mat element = getStructuringElement(MORPH_RECT,
        Size(2 * g_nStructElementSize + 1, 2 * g_nStructElementSize + 1),
        Point(g_nStructElementSize, g_nStructElementSize));
    // Erode first so the thin border lines of the map survive binarization.
    erode(src, src, element);

    cvtColor(src, grayImage, CV_BGR2GRAY);
    threshold(grayImage, grayImage, 48, 255, CV_THRESH_BINARY);
    imshow("2dst", grayImage);

    vector<vector<Point> > contours;
    vector<Vec4i> hierarchy;
    findContours(grayImage, contours, hierarchy, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE);

    // BUGFIX: the original kept a fixed `Rect rect[10]` indexed by a counter
    // that was never incremented — every write hit index 0, and more than 10
    // contours would have overflowed the array. A per-contour local Rect is
    // all that is needed; the unused `Point2f pp[5][4]` is removed as well.
    for (vector<vector<Point> >::const_iterator It = contours.begin(); It != contours.end(); ++It) {
        Rect box = boundingRect(*It);
        // The four corners of the bounding rectangle, clockwise.
        Point2f vertex[4];
        vertex[0] = box.tl();                                      // top-left
        vertex[1] = Point2f((float)box.tl().x, (float)box.br().y); // bottom-left
        vertex[2] = box.br();                                      // bottom-right
        vertex[3] = Point2f((float)box.br().x, (float)box.tl().y); // top-right
        for (int j = 0; j < 4; j++)
            line(dstImage, vertex[j], vertex[(j + 1) % 4], Scalar(0, 0, 255), 1);
    }

    imshow("dst", dstImage);
    cvWaitKey(0);
    return 0;
}
結果發現根本找不到輪廓,最后發現原來是threshold函數參數設置錯誤,參數應如下:
threshold(grayImage, grayImage,48,255, THRESH_BINARY_INV);
同時由於地圖邊框線太細,應當先腐蝕圖像,再二值化:
int g_nStructElementSize = 1; //結構元素(內核矩陣)的尺寸 ///獲取自定義核 Mat element = getStructuringElement(MORPH_RECT, Size(2 * g_nStructElementSize + 1, 2 * g_nStructElementSize + 1), Point(g_nStructElementSize, g_nStructElementSize)); erode(src, src, element);
step 3. 收集地圖的數據后,用10個省的數據來檢測
最后在不斷的探索中,發現有兩個數據可以作為一個地圖的特征,即輪廓長寬比和輪廓面積與圖片的像素數之比。
最后的代碼如下:
#include <opencv2/opencv.hpp>
#include <iostream>
#include <math.h>

using namespace cv;
using namespace std;

Mat result;

// Maximum accepted difference when matching a measured feature value against
// a reference value.
const double cha = 0.02;

// Returns true when a and b differ by less than the accepted tolerance.
// BUGFIX: uses fabs — with <math.h> plus `using namespace std`, plain
// abs(a - b) can bind to int abs(int) on older toolchains, truncating any
// sub-1.0 difference to 0 and making every comparison succeed.
bool compare(double a, double b)
{
    return fabs(a - b) < cha;
}

// One reference province: expected bounding-box height/width ratio, expected
// contour-area / image-pixel ratio, and the name printed on a match.
struct ProvinceFeature {
    double rate1;       // bounding-box height / width
    double rate2;       // contour area / total image pixels
    const char* name;   // province name, printed verbatim
};

// Feature data collected from the 10 sample province maps.
static const ProvinceFeature kProvinces[] = {
    { (double)172 / 96,  0.171524, "陝西省" },
    { (double)172 / 143, 0.270173, "安徽省" },
    { (double)154 / 123, 0.230148, "福建省" },
    { (double)170 / 190, 0.132584, "甘肅省" },
    { (double)155 / 208, 0.200146, "廣東省" },
    { (double)129 / 180, 0.22718,  "廣西壯族自治區" },
    { (double)118 / 145, 0.219451, "貴州省" },
    { (double)77 / 96,   0.196616, "海南省" },
    { (double)162 / 119, 0.247134, "河北省" },
    { (double)125 / 135, 0.176323, "河南省" },
};

// Prints the name of the first province whose two features both match within
// tolerance and returns true; prints "無法檢測" and returns false otherwise.
// (Replaces the original ten-way if-chain with a data table.)
bool result_output(double rate1, double rate2)
{
    const int n = (int)(sizeof(kProvinces) / sizeof(kProvinces[0]));
    for (int i = 0; i < n; i++) {
        if (compare(rate1, kProvinces[i].rate1) && compare(rate2, kProvinces[i].rate2)) {
            cout << kProvinces[i].name << endl;
            return true;
        }
    }
    cout << "無法檢測" << endl;
    return false;
}

// Step 3: measure the two features of the largest external contour of the
// input map image and report which province (if any) it matches.
int main()
{
    Mat src = imread("1.jpg");
    Mat grayImage, dstImage;
    src.copyTo(dstImage);

    int g_nStructElementSize = 1;  // structuring-element radius
    Mat element = getStructuringElement(MORPH_RECT,
        Size(2 * g_nStructElementSize + 1, 2 * g_nStructElementSize + 1),
        Point(g_nStructElementSize, g_nStructElementSize));
    // Erode first so the thin map border survives binarization.
    erode(src, src, element);

    cvtColor(src, grayImage, CV_BGR2GRAY);
    blur(grayImage, grayImage, Size(3, 3));
    // Inverted threshold: the dark border lines become white foreground.
    threshold(grayImage, grayImage, 48, 255, THRESH_BINARY_INV);
    grayImage.copyTo(result);

    vector<vector<Point> > contours;
    vector<Vec4i> hierarchy;
    findContours(grayImage, contours, hierarchy,
                 CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE);

    // BUGFIX: guard against an empty contour list before touching contours[0].
    if (contours.empty()) {
        cout << "無法檢測" << endl;
        return -1;
    }

    // BUGFIX: the original passed (255, 255, 255) — the comma operator turns
    // that into the single scalar 255, i.e. Scalar(255) = blue, not white.
    drawContours(dstImage, contours, -1, Scalar(255, 255, 255));

    vector<Point> point = contours[0];
    Rect rect = boundingRect(point);

    // Draw the axis-aligned bounding box, clockwise from the top-left corner.
    Point2f vertex[4];
    vertex[0] = rect.tl();
    vertex[1] = Point2f((float)rect.tl().x, (float)rect.br().y);
    vertex[2] = rect.br();
    vertex[3] = Point2f((float)rect.br().x, (float)rect.tl().y);
    for (int j = 0; j < 4; j++)
        line(dstImage, vertex[j], vertex[(j + 1) % 4], Scalar(0, 0, 255), 1);

    int h = rect.height, w = rect.width;
    double rate = (double)h / w;  // feature 1: aspect ratio
    cout << "height:" << h << endl;
    cout << "width:" << w << endl;
    cout << "h / w:" << rate << endl;

    double area = contourArea(point, false);
    double sum = grayImage.cols * grayImage.rows;
    cout << "面積:" << area << endl;
    cout << "面積比:" << area / sum << endl;  // feature 2: area ratio

    imshow("show", dstImage);
    result_output(rate, area / sum);
    cvWaitKey(0);
    return 0;
}
最后發現一個問題,由於需要通過攝像頭檢測地圖,圖片可能會有一定角度的傾斜,因此應將Rect換成RotatedRect。
RotatedRect rect = minAreaRect(point); Point2f vertex[4]; rect.points(vertex); for (int j = 0; j < 4; j++) line(dstImage, vertex[j], vertex[(j + 1) % 4], Scalar(0, 0, 255), 1); int h = rect.size.height, w = rect.size.width;