一、opencv和Qt的環境搭建。
網上有很多資料,所以我也是依照網上資料進行,主要是對opencv庫的編譯以及Qt對opencv的使用。
需要下載所需版本的opencv的源碼,另外需要一個cmake工具。 我下載的opencv版本是3.2.0版本,通過cmake後再使用VS2013進行編譯得到最終的庫文件。
然后在Qt中就可以像一般的添加庫方式添加。
另外想說明的是,最好是將編譯得到的文件,放到一個指定目錄,將其中的頭文件和庫文件路徑添加到環境變量中,這樣會方便你的開發。
(要注意的是,如果你之後改變了這個文件的路徑,記得環境變量也要進行修改,不然Qt中程序會崩潰,而且不容易發現這個問題)
二、opencv的初步接觸
推薦大家去了解opencv的基礎數據類型。
以下是我學習時用到的兩個博主的網站:
另外推薦一個網站,但是都是英文需要慢慢閱讀。 opencv的api說明
三、練習代碼
我習慣的學習方法是,練習一些基礎功能、函數。並且在運行中記錄錯誤增加自己理解的備注,方便以后復查和看代碼時快速理解。
所以這里都是記錄的一些練習,有自己的一些注釋。
我的Qt的pro文件配置,以后不再重復:

# OpenCV 3.2.0 headers (built tree installed under $$PWD/install).
INCLUDEPATH += $$PWD/install/include \
    $$PWD/install/include/opencv \
    $$PWD/install/include/opencv2

# Link the debug libraries (suffix "d") in debug builds and the release
# libraries otherwise; mixing them makes imwrite() fail in debug builds.
CONFIG(debug, debug|release): {
    LIBS += -L$$PWD/install/x86/vc12/lib
    LIBS += -lopencv_calib3d320d \
        -lopencv_core320d \
        -lopencv_features2d320d \
        -lopencv_flann320d \
        -lopencv_highgui320d \
        -lopencv_imgcodecs320d \
        -lopencv_imgproc320d \
        -lopencv_ml320d \
        -lopencv_objdetect320d \
        -lopencv_photo320d \
        -lopencv_shape320d \
        -lopencv_stitching320d \
        -lopencv_superres320d \
        -lopencv_video320d \
        -lopencv_videoio320d \
        -lopencv_videostab320d
} else:CONFIG(release, debug|release): {
    LIBS += -L$$PWD/install/x86/vc12/lib
    LIBS += -lopencv_calib3d320 \
        -lopencv_core320 \
        -lopencv_features2d320 \
        -lopencv_flann320 \
        -lopencv_highgui320 \
        -lopencv_imgcodecs320 \
        -lopencv_imgproc320 \
        -lopencv_ml320 \
        -lopencv_objdetect320 \
        -lopencv_photo320 \
        -lopencv_shape320 \
        -lopencv_stitching320 \
        -lopencv_superres320 \
        -lopencv_video320 \
        -lopencv_videoio320 \
        -lopencv_videostab320
}
練習代碼:

#include "widget.h"
#include <QApplication>
#include <opencv2/opencv.hpp>
#include <stdio.h>
#include <QDebug>

// Make MSVC treat this file's narrow string literals as UTF-8 so the
// non-ASCII window titles below display correctly.
#if _MSC_VER >= 1600
#pragma execution_character_set("utf-8")
#endif

using namespace std;
using namespace cv;

// Fill a 4-channel (BGRA) Mat with a gradient test pattern:
//   channel 0 (blue)  : full intensity everywhere
//   channel 1 (green) : fades from left (max) to right (0)
//   channel 2 (red)   : fades from top (max) to bottom (0)
//   channel 3 (alpha) : 0 (fully transparent)
void createAlphaMat(Mat& mat)
{
    for (int i = 0; i < mat.rows; i++) {
        for (int j = 0; j < mat.cols; j++) {
            Vec4b& rgba = mat.at<Vec4b>(i, j);
            rgba[0] = UCHAR_MAX;
            rgba[1] = saturate_cast<uchar>(float(mat.cols - j) / ((float)mat.cols) * UCHAR_MAX);
            // BUGFIX: the vertical gradient must be based on the row count.
            // The original used (mat.cols - i), which is wrong whenever the
            // image is not square (here it is 480x640).
            rgba[2] = saturate_cast<uchar>(float(mat.rows - i) / ((float)mat.rows) * UCHAR_MAX);
            rgba[3] = 0;
        }
    }
}

// Practice playground: each experiment is kept inside a /** ... **/ (or
// /* ... */) comment block; uncomment exactly one section to try it out.
int main(int argc, char *argv[])
{
    QApplication a(argc, argv);
    //Widget w;
    //w.show();

    /** Show an image
    Mat image = imread("C:\\Users\\zhangenhao\\Desktop\\IMG_000000011.jpg");
    Mat im(4, 4, CV_8U, Scalar(101, 102, 103));
    cvNamedWindow("windows", CV_WINDOW_NORMAL);
    imshow("windows", image);
    // number of channels of the matrix
    qDebug() << "mat channels:" << image.channels();
    waitKey(0);
    */

    /** Draw an image with createAlphaMat() and save it
    Mat mat(480, 640, CV_8UC4);
    createAlphaMat(mat);
    namedWindow("image", CV_WINDOW_AUTOSIZE);
    imshow("image", mat);
    // NOTE(review): the save failed because compression_params[1] was set to
    // the flag value again; it must be a PNG compression LEVEL in 0..9,
    // e.g. compression_params[1] = 9;
    vector<int> compression_params;
    compression_params.resize(2);
    compression_params[0] = CV_IMWRITE_PNG_COMPRESSION;
    compression_params[1] = CV_IMWRITE_PNG_COMPRESSION;
    //compression_params.push_back(CV_IMWRITE_PNG_COMPRESSION);
    qDebug() << compression_params.size();
    imwrite("./genera.png", mat, compression_params);
    **/

    /** Box filter / mean blur / Gaussian blur
    namedWindow("原圖", CV_WINDOW_NORMAL);
    namedWindow("Box效果圖");
    namedWindow("Blur效果圖");
    namedWindow("Gauss效果圖");
    Mat srcImg = imread("C:\\Users\\zhangenhao\\Desktop\\IMG_000000011.jpg");
    imshow("原圖", srcImg);
    Mat Boxout;
    boxFilter(srcImg, Boxout, -1, Size(7, 7));
    imshow("Box效果圖", Boxout);
    Mat Blurout;
    blur(srcImg, Blurout, Size(7, 7));
    imshow("Blur效果圖", Blurout);
    Mat Gauss;
    GaussianBlur(srcImg, Gauss, Size(7, 7), 0, 0);
    imshow("Gauss效果圖", Gauss);
    **/

    /** Color-space conversion with cvtColor
    Mat image = imread("C:\\Users\\zhangenhao\\Desktop\\IMG_000000011.jpg");
    Mat ret;
    cvtColor(image, ret, CV_RGB2GRAY); // convert to grayscale
    cvNamedWindow("windows", CV_WINDOW_NORMAL);
    imshow("windows", ret);
    // number of channels of the matrix
    qDebug() << "mat channels:" << image.channels();
    cout << "mat :" << endl << image << endl;
    waitKey(0);
    **/

    /** Basic Mat arithmetic
    // unsigned char 0~255
    // char -128~127
    Mat A = (Mat_<unsigned char>(3, 3) << 1, 2, 3, 4, 5, 6, 7, 8, 9);
    cout << "A =" << endl << A << endl;
    cout << "A.t() =" << endl << A.t() << endl;
    Mat B = A.t();
    cout << "B =" << endl << B << endl;
    // assign one pixel at (row, column)
    B.at<unsigned char>(1, 2) = 10;
    cout << "after assigning value to B." << endl;
    cout << "A = " << endl << A << endl;
    cout << "B = " << endl << B << endl;
    Mat C = A.clone();
    // add 1 to every element (subtraction works the same way)
    C += 1;
    cout << "C += 1 ->" << endl << C << endl;
    C -= 1;
    // results saturate at the min/max of the element type
    C = A.clone();
    C += 250;
    cout << "C += 250 ->" << endl << C << endl;
    C = A.clone();
    C -= 250;
    cout << "C -= 250 ->" << endl << C << endl;
    C = A.clone();
    // multiply every element by 10
    C *= 10;
    cout << "C *= 10 ->" << endl << C << endl;
    C = A.clone();
    // element-wise matrix addition / subtraction
    C += A;
    cout << "C += A ->" << endl << C << endl;
    C -= A;
    cout << "C -= A ->" << endl << C << endl;
    Mat D;
    A.convertTo(D, CV_32FC1); // Only CV_32FC1, CV_32FC2, CV_64FC1 and CV_64FC2
                              // can be used for matrix multiplication
    cout << "D = " << endl << D << endl;
    D *= D;
    cout << "D *= ->" << endl << D << endl;
    Mat DD = A.clone();
    cout << "DD.type() = " << DD.type() << endl;
    cout << "DD = " << endl << DD << endl;
    DD.convertTo(DD, CV_64FC1);
    cout << "DD.type() = " << DD.type() << endl;
    cout << "DD = " << endl << DD << endl;
    DD *= 100;
    cout << "DD *= ->" << endl << DD << endl;
    DD.convertTo(DD, CV_8UC1);
    cout << "DD convert to CV_8UC1 ->" << endl << DD << endl;
    // type and depth should match: they encode the bits per pixel
    Mat E;
    //E = D + A; // error: runtime exception (mixed depths)
    add(A, D, E, cv::Mat(), CV_32SC1);
    cout << "E = " << endl << DD << endl; // NOTE(review): prints DD, probably meant E
    cout << "E.type() =" << endl << E.type() << endl;
    B = 250;
    add(B, D, E, cv::Mat(), CV_32SC1);
    cout << "B=250 ,E = B + D with CV_32SC1 ->" << endl << E << endl;
    cout << "E->depth: " << endl << E.depth() << endl;
    cv::add(B, D, E, cv::Mat(), CV_8UC1);
    std::cout << "B = 250, E = B + D with CV_8UC1 -> " << std::endl << E << std::endl << std::endl;
    cout << "E->depth: " << endl << E.depth() << endl;
    cout << "E->channels:" << endl << E.channels() << endl;
    // 7x7 matrix, type CV_32F, C3 means 3 channels.
    // Scalar initializes the channels: first = 1, second = 3, third = 5
    Mat M(7, 7, CV_32FC3, Scalar(1, 3, 5));
    cout << "M =" << endl << M << endl;
    // set one 3-channel pixel; loop over M's rows/columns to set them all
    Vec3f pix;
    pix[0] = 2;
    pix[1] = 5;
    pix[2] = 8;
    M.at<Vec3f>(0, 0) = pix;
    // set a single channel (0, 1 or 2) of one pixel
    M.at<Vec3f>(1, 1)[1] = 10;
    cout << "M =" << endl << M << endl;
    **/

    /* Linear blending of two images
    Mat srcImage1 = imread("./hehua.jpg");
    Mat srcImage2 = imread("./shuimu.jpg");
    Mat desImage;
    if (srcImage1.empty() || srcImage2.empty()) {
        cout << "圖像加載失敗" << endl;
        return -1;
    }
    double alpha = 0.2;
    double beta = 1.0 - alpha;
    namedWindow("線性混合", WINDOW_NORMAL);
    // src1, weight of src1, src2, weight of src2, gamma added to each sum, dst
    addWeighted(srcImage1, alpha, srcImage2, beta, 0.0, desImage);
    imshow("線性混合", desImage);
    namedWindow("img1", WINDOW_NORMAL);
    imshow("img1", srcImage1);
    namedWindow("img2", WINDOW_NORMAL);
    imshow("img2", srcImage2);
    waitKey(0);
    */

    /* Blending images of different sizes via a ROI
    Mat srcImage = imread("./maomi.jpg");
    Mat addImage = imread("./hehua.jpg");
    Mat maskImage = imread("./hehua.jpg", IMREAD_GRAYSCALE); // grayscale mask
    if (srcImage.empty() || addImage.empty()) {
        cout << "加載失敗。" << endl;
        return 0;
    }
    imshow("srcImage", srcImage);
    imshow("addImage", addImage);
    // ROI via Rect (this variant lets you weight the two images):
    // Rect(x, y, width, height) -- first the top-left coordinate, then the
    // rectangle size (width = columns, height = rows). imageROI is NOT a
    // copy of srcImage: it shares srcImage's pixel data, so modifying
    // imageROI also modifies srcImage (that is what "region of interest"
    // means here).
    //Mat imageROI = srcImage(Rect(50, 50, addImage.cols, addImage.rows));
    //addWeighted(imageROI, 0.7, addImage, 0.3, 0, imageROI);
    // ROI via Range (no weighting): first Range selects the rows to share,
    // the second Range selects the columns
    Mat imageROI = srcImage(Range(50, 50 + maskImage.rows), Range(50, 50 + maskImage.cols));
    // copyTo copies addImage's pixels into imageROI wherever maskImage is
    // non-zero; since imageROI shares data with srcImage, the corresponding
    // region of srcImage changes as well
    addImage.copyTo(imageROI, maskImage);
    imshow("hunhe", srcImage);
    */

    return a.exec();
}
其中每一段功能都以/** **/分隔,方便測試。
注:imwrite();這個接口,如果在pro中不分debug和release編譯,直接加載所有的庫文件,在debug的時候是會出錯的,保存不了圖片,release則沒問題。所以pro中區分了編譯加載。