In the first program combining NiTE 2 with OpenCV, the palm-center coordinate let me crop out a rough image of the hand, but that still felt unfinished. So today, building on OpenCV's commonly used image-processing functions for contours, convex hulls, and the like, I go one step further and obtain fingertip coordinates (admittedly a crude result; experts, please go easy on me~~~).
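Before the full listing, here is the core idea in isolation: a minimal sketch of my own (the function name and the binary-mask input are assumptions, not part of the program below) that finds the largest contour in a segmented hand mask, simplifies it with approxPolyDP, and collects the convex-hull vertices as fingertip candidates. The epsilon of 10 matches the value used in the full program.

// Minimal sketch: hull points of a binary hand mask as fingertip candidates.
// "handMask" is assumed to be an 8-bit binary image of the segmented hand.
#include <opencv2/opencv.hpp>
#include <vector>
using namespace cv;
using namespace std;

vector<Point> fingertipCandidates(const Mat& handMask)
{
    vector< vector<Point> > contours;
    Mat work = handMask.clone();   // findContours modifies its input, so work on a copy
    findContours(work, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);

    vector<Point> tips;
    if (contours.empty())
        return tips;

    // keep only the largest contour; smaller blobs are noise
    int best = 0;
    for (int i = 1; i < (int)contours.size(); i++)
        if (contourArea(contours[i]) > contourArea(contours[best]))
            best = i;

    // simplify the contour, then take its convex-hull vertices
    vector<Point> curve;
    approxPolyDP(Mat(contours[best]), curve, 10, true);

    vector<int> hull;
    convexHull(Mat(curve), hull, false, false);
    for (int j = 0; j < (int)hull.size(); j++)
        tips.push_back(curve[hull[j]]);   // hull vertices = candidate fingertips
    return tips;
}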
Without further ado, here is the full code:
// YeHandTrackerUsingOpenCV.cpp : defines the entry point for the console application.
//
#include "stdafx.h"
#include <iostream>
#include <algorithm>

// NiTE header
#include <NiTE.h>

// OpenCV headers
#include "opencv2/opencv.hpp"
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>

using namespace std;
using namespace cv;

const unsigned int XRES = 640;
const unsigned int YRES = 480;

const float DEPTH_SCALE_FACTOR = 255.f/4096.f;
const unsigned int BIN_THRESH_OFFSET = 5;
const unsigned int ROI_OFFSET = 70;
const unsigned int MEDIAN_BLUR_K = 5;
const double GRASPING_THRESH = 0.9;

// colors (BGR order)
const Scalar COLOR_BLUE        = Scalar(240, 40, 0);
const Scalar COLOR_DARK_GREEN  = Scalar(0, 128, 0);
const Scalar COLOR_LIGHT_GREEN = Scalar(0, 255, 0);
const Scalar COLOR_YELLOW      = Scalar(0, 128, 200);
const Scalar COLOR_RED         = Scalar(0, 0, 255);

// conversion from CvConvexityDefect
struct ConvexityDefect
{
    Point start;
    Point end;
    Point depth_point;
    float depth;
};

// Thanks to Jose Manuel Cabrera for part of this C++ wrapper function
void findConvexityDefects(vector<Point>& contour, vector<int>& hull, vector<ConvexityDefect>& convexDefects)
{
    if (hull.size() > 0 && contour.size() > 0)
    {
        // transform our vector<Point> into a CvSeq* of CvPoint
        CvMemStorage* contourStr = cvCreateMemStorage();
        CvSeq* contourPoints = cvCreateSeq(CV_SEQ_KIND_GENERIC|CV_32SC2, sizeof(CvSeq), sizeof(CvPoint), contourStr);
        for (int i = 0; i < (int)contour.size(); i++)
        {
            CvPoint cp = {contour[i].x, contour[i].y};
            cvSeqPush(contourPoints, &cp);
        }

        // do the same with the hull indices
        int count = (int)hull.size();
        int* hullK = (int*)malloc(count*sizeof(int));
        for (int i = 0; i < count; i++)
        {
            hullK[i] = hull.at(i);
        }
        CvMat hullMat = cvMat(1, count, CV_32SC1, hullK);

        // calculate convexity defects
        CvMemStorage* storage = cvCreateMemStorage(0);
        CvSeq* defects = cvConvexityDefects(contourPoints, &hullMat, storage);
        CvConvexityDefect* defectArray = (CvConvexityDefect*)malloc(sizeof(CvConvexityDefect)*defects->total);
        cvCvtSeqToArray(defects, defectArray, CV_WHOLE_SEQ);
        for (int i = 0; i < defects->total; i++)
        {
            ConvexityDefect def;
            def.start       = Point(defectArray[i].start->x, defectArray[i].start->y);
            def.end         = Point(defectArray[i].end->x, defectArray[i].end->y);
            def.depth_point = Point(defectArray[i].depth_point->x, defectArray[i].depth_point->y);
            def.depth       = defectArray[i].depth;
            convexDefects.push_back(def);
        }

        // release memory
        free(defectArray);
        free(hullK);
        cvReleaseMemStorage(&contourStr);
        cvReleaseMemStorage(&storage);
    }
}

int main(int argc, char** argv)
{
    // initialize NiTE
    nite::NiTE::initialize();

    // create the hand tracker
    nite::HandTracker* mHandTracker = new nite::HandTracker;
    mHandTracker->create();

    Mat depthShow(YRES, XRES, CV_8UC1);
    Mat handDebug;

    // ROI used to crop the hand out of the depth image
    Rect roi;
    roi.width  = ROI_OFFSET*2;
    roi.height = ROI_OFFSET*2;

    namedWindow("depthFrame", CV_WINDOW_AUTOSIZE);

    // frames from the stream are read into this HandTrackerFrameRef
    nite::HandTrackerFrameRef mHandFrame;

    // start gesture detection
    mHandTracker->startGestureDetection(nite::GESTURE_CLICK);

    int key = 0;
    while (key != 27 && key != 'q')
    {
        // read the next frame
        nite::Status rc = mHandTracker->readFrame(&mHandFrame);
        if (rc != nite::STATUS_OK)
        {
            cout << "GetNextData failed" << endl;
            return 0;
        }

        // wrap the depth data in an OpenCV Mat and scale it down to 8 bits for display
        const Mat depthRaw(mHandFrame.getDepthFrame().getHeight(),
                           mHandFrame.getDepthFrame().getWidth(),
                           CV_16UC1,
                           (void*)mHandFrame.getDepthFrame().getData());
        depthRaw.convertTo(depthShow, CV_8U, DEPTH_SCALE_FACTOR);

        // check how many gestures were recognized in this frame
        const nite::Array<nite::GestureData>& gestures = mHandFrame.getGestures();
        for (int i = 0; i < gestures.getSize(); ++i)
        {
            // once a gesture has been completed...
            if (gestures[i].isComplete())
            {
                // ...read off its position...
                const nite::Point3f& position = gestures[i].getCurrentPosition();
                cout << "Gesture " << gestures[i].getType() << " at "
                     << position.x << "," << position.y << "," << position.z << endl;

                // ...and start tracking the palm there, obtaining the hand's id.
                // nite::HandId is a typedef for short int; the underlying C call is
                // NITE_API NiteStatus niteStartHandTracking(NiteHandTrackerHandle, const NitePoint3f*, NiteHandId* pNewHandId);
                nite::HandId newId;
                mHandTracker->startHandTracking(gestures[i].getCurrentPosition(), &newId);
            }
        }

        // process the tracked hands
        const nite::Array<nite::HandData>& hands = mHandFrame.getHands();
        for (int i = 0; i < hands.getSize(); ++i)
        {
            const nite::HandData& user = hands[i];

            if (!user.isTracking())
            {
                cout << "Lost hand " << user.getId() << endl;
            }
            else
            {
                if (user.isNew())
                {
                    cout << "Found hand " << user.getId() << endl;
                }
                else
                {
                    // map the palm position into depth-image coordinates
                    float x, y;
                    mHandTracker->convertHandCoordinatesToDepth(hands[i].getPosition().x,
                                                                hands[i].getPosition().y,
                                                                hands[i].getPosition().z,
                                                                &x, &y);
                    float handDepth = hands[i].getPosition().z * DEPTH_SCALE_FACTOR;

                    // clamp the ROI to the image bounds so cropping stays valid near the frame edges
                    roi.x = std::max(0, std::min((int)x - (int)ROI_OFFSET, (int)XRES - roi.width));
                    roi.y = std::max(0, std::min((int)y - (int)ROI_OFFSET, (int)YRES - roi.height));

                    // crop the hand region out of the depth image
                    Mat handCpy(depthShow, roi);
                    Mat handMat = handCpy.clone();

                    // binary threshold around the palm depth
                    handMat = (handMat > (handDepth - BIN_THRESH_OFFSET)) & (handMat < (handDepth + BIN_THRESH_OFFSET));

                    // smooth out the noise
                    medianBlur(handMat, handMat, MEDIAN_BLUR_K);

                    // create debug image of thresholded hand and cvt to RGB so hints show in color
                    handDebug = handMat.clone();
                    cvtColor(handDebug, handDebug, CV_GRAY2RGB);

                    // extract the hand contours
                    vector< vector<Point> > contours;
                    findContours(handMat, contours, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);

                    for (int c = 0; c < (int)contours.size(); c++)
                    {
                        vector<Point> contour = contours[c];
                        Mat contourMat = Mat(contour);
                        double cArea = contourArea(contourMat);

                        if (cArea > 2000) // ignore small noise blobs
                        {
                            // centroid of the contour
                            Scalar center = mean(contourMat);
                            Point centerPoint = Point(center.val[0], center.val[1]);

                            // simplify the contour with the Douglas-Peucker algorithm
                            vector<Point> approxCurve;
                            approxPolyDP(contourMat, approxCurve, 10, true);

                            // draw the simplified contour
                            vector< vector<Point> > debugContourV;
                            debugContourV.push_back(approxCurve);
                            drawContours(handDebug, debugContourV, 0, COLOR_DARK_GREEN, 3);

                            // convex hull of the simplified contour
                            vector<int> hull;
                            convexHull(Mat(approxCurve), hull, false, false);

                            // draw the hull points (fingertip candidates)
                            for (int j = 0; j < (int)hull.size(); j++)
                            {
                                int index = hull[j];
                                circle(handDebug, approxCurve[index], 3, COLOR_YELLOW, 2);
                            }

                            // find and draw the convexity defects (the valleys between fingers)
                            vector<ConvexityDefect> convexDefects;
                            findConvexityDefects(approxCurve, hull, convexDefects);
                            for (int j = 0; j < (int)convexDefects.size(); j++)
                            {
                                circle(handDebug, convexDefects[j].depth_point, 3, COLOR_BLUE, 2);
                            }

                            // use contour, hull, and defect coordinates to decide whether the hand is open or grasping
                            vector<Point> hullPoints;
                            for (int k = 0; k < (int)hull.size(); k++)
                            {
                                hullPoints.push_back(approxCurve[hull[k]]);
                            }
                            double hullArea  = contourArea(Mat(hullPoints));
                            double curveArea = contourArea(Mat(approxCurve));
                            double handRatio = curveArea/hullArea;

                            if (handRatio > GRASPING_THRESH)
                                circle(handDebug, centerPoint, 5, COLOR_LIGHT_GREEN, 5);
                            else
                                circle(handDebug, centerPoint, 5, COLOR_RED, 5);

                            // show the result
                            imshow("hand", handDebug);
                        }
                    }
                }
            }
        }

        imshow("depthFrame", depthShow);
        key = waitKey(10);
    }

    // release the frame, the tracker, and the NiTE environment
    mHandFrame.release();
    mHandTracker->destroy();
    delete mHandTracker;
    nite::NiTE::shutdown();

    return 0;
}
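A side note on findConvexityDefects: the wrapper above drops down to the old C API because early OpenCV 2.x had no C++ interface for convexity defects. In OpenCV 2.4 and later, cv::convexityDefects works directly on vectors, so the wrapper can shrink to something like the sketch below (the function name is my own; it reuses the ConvexityDefect struct from the listing). Each output Vec4i holds the start index, end index, farthest-point index, and a fixed-point depth with 8 fractional bits.

// Sketch of a vector-based replacement for the C-API wrapper, assuming OpenCV 2.4+.
#include <opencv2/imgproc/imgproc.hpp>
#include <vector>
using namespace cv;
using namespace std;

void findConvexityDefects2(const vector<Point>& contour,
                           const vector<int>& hull,
                           vector<ConvexityDefect>& convexDefects)
{
    // convexityDefects needs a non-degenerate contour and hull
    if (contour.size() < 4 || hull.size() < 3)
        return;

    vector<Vec4i> defects;
    convexityDefects(contour, hull, defects);

    for (size_t i = 0; i < defects.size(); i++)
    {
        ConvexityDefect def;
        def.start       = contour[defects[i][0]]; // start point of the defect
        def.end         = contour[defects[i][1]]; // end point of the defect
        def.depth_point = contour[defects[i][2]]; // deepest point: a finger valley
        def.depth       = defects[i][3] / 256.0f; // fixed-point depth -> pixels
        convexDefects.push_back(def);
    }
}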
Running result:

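One detail of the result worth spelling out: the green/red circle at the palm center comes from the ratio curveArea/hullArea. An open hand has deep gaps between the fingers, so the outline covers much less area than its convex hull and the ratio falls well below 1; a closed fist is nearly convex, so the two areas almost coincide and the ratio approaches 1. GRASPING_THRESH = 0.9 separates the two cases. A toy demonstration with synthetic polygons (entirely my own example, not from the program above):

// Toy demo of the grasp ratio: contour area / convex hull area.
#include <opencv2/opencv.hpp>
#include <iostream>
#include <vector>
using namespace cv;
using namespace std;

static double graspRatio(const vector<Point>& contour)
{
    vector<Point> hullPts;
    convexHull(contour, hullPts);  // hull as points, so we can take its area
    return contourArea(contour) / contourArea(hullPts);
}

int main()
{
    // a plain square stands in for a closed fist: it is already convex
    Point fistArr[] = { Point(0,0), Point(100,0), Point(100,100), Point(0,100) };
    vector<Point> fist(fistArr, fistArr + 4);

    // a deep notch cut into the square stands in for the gap between spread fingers
    Point openArr[] = { Point(0,0), Point(20,0), Point(50,90), Point(80,0),
                        Point(100,0), Point(100,100), Point(0,100) };
    vector<Point> openHand(openArr, openArr + 7);

    cout << "fist ratio: " << graspRatio(fist) << endl;      // 1.0  -> grasping
    cout << "open ratio: " << graspRatio(openHand) << endl;  // 0.73 -> open hand
    return 0;
}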
For a beginner like me, every added line of code means a little more progress. I am recording my learning journey here; I hope the experts will offer plenty of pointers, and that fellow beginners will learn and exchange ideas with me~~~
