A Look at Displaying NiTE 2 Hand Tracking on the Color Image


Main contents:

I. The NiTE 2 Hand-Tracking Flow

  I realize I keep repeating the same code day after day, but I don't think that's a bad thing: with something new, it is only through repetition after repetition that you build up experience and cut down the odds of "rookie mistakes". So before we begin, let's run through the NiTE 2 hand-tracking flow once more. It consists of the following steps (a minimal sketch tying them together follows the list):

  1. Initialize the NiTE environment: nite::NiTE::initialize();

  2. Create the HandTracker: HandTracker mHandTracker; mHandTracker.create(&mDevice);

  3. Register the gestures to detect (GESTURE_WAVE, GESTURE_CLICK and GESTURE_HAND_RAISE): mHandTracker.startGestureDetection( GESTURE_WAVE ); and so on;

  4. Create and read a HandTracker frame: HandTrackerFrameRef mHandFrame;  mHandTracker.readFrame( &mHandFrame );

  5. Analyze the whole frame and collect the detected gestures: const nite::Array<GestureData>& aGestures = mHandFrame.getGestures();

  6. Using a detected gesture's position, start tracking that particular hand: const Point3f& rPos = rGesture.getCurrentPosition(); HandId mHandID;  mHandTracker.startHandTracking( rPos, &mHandID );

  7. Read back the hands currently being tracked: const nite::Array<HandData>& aHands = mHandFrame.getHands();

  8. Check whether a hand is actually in the tracking state, then do your own processing:

  if( rHand.isTracking() )
  {
      // Get the palm position
      const Point3f& rPos = rHand.getPosition();

      // ...
  }

  9. Destroy the tracker: mHandTracker.destroy();

  10. Finally, shut down the NiTE environment: nite::NiTE::shutdown();
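
  To tie these ten steps together, here is a minimal, console-only sketch of the flow (no OpenNI streams and no OpenCV display; the 300-frame cap is only there so the example terminates, and the isComplete() check is a small addition that avoids starting a tracker on a half-finished gesture):

#include <iostream>
#include <NiTE.h>

int main()
{
    // Steps 1-3: initialize NiTE, create the tracker, register a gesture
    nite::NiTE::initialize();
    nite::HandTracker mHandTracker;
    mHandTracker.create();                               // default device
    mHandTracker.startGestureDetection( nite::GESTURE_WAVE );

    for( int f = 0; f < 300; ++ f )                      // ~10 s at 30 fps
    {
        // Step 4: read one hand-tracker frame
        nite::HandTrackerFrameRef mHandFrame;
        if( mHandTracker.readFrame( &mHandFrame ) != nite::STATUS_OK )
            continue;

        // Steps 5-6: for every completed gesture, start tracking that hand
        const nite::Array<nite::GestureData>& aGestures = mHandFrame.getGestures();
        for( int i = 0; i < aGestures.getSize(); ++ i )
        {
            if( aGestures[i].isComplete() )
            {
                nite::HandId mHandID;
                mHandTracker.startHandTracking( aGestures[i].getCurrentPosition(), &mHandID );
            }
        }

        // Steps 7-8: read back every hand that is currently tracked
        const nite::Array<nite::HandData>& aHands = mHandFrame.getHands();
        for( int i = 0; i < aHands.getSize(); ++ i )
        {
            if( aHands[i].isTracking() )
            {
                const nite::Point3f& rPos = aHands[i].getPosition();
                std::cout << "hand " << aHands[i].getId() << " at ("
                          << rPos.x << ", " << rPos.y << ", " << rPos.z << ")" << std::endl;
            }
        }
    }

    // Steps 9-10: destroy the tracker, then shut down NiTE
    mHandTracker.destroy();
    nite::NiTE::shutdown();
    return 0;
}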

II. Code Demonstration

  In "A Look at the First Program Combining NiTE 2 with OpenCV" and "A Look at Combining NiTE 2 with OpenCV to Extract Fingertip Coordinates", all the processing of the tracked hand happened on the depth image. But what does the palm position reported by hand tracking look like on the color image? Does it land on the actual palm as accurately as it does on the depth image? To answer these questions, I follow the method from "A Look at Displaying Skeleton Coordinates on the Color Image": map the palm coordinates obtained through NiTE 2 hand tracking onto both the color image and the depth image, and compare the results. The explanation and code follow:
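Before the full listing, note that the mapping itself boils down to two calls, extracted from the program below: registration aligns the depth image to the color image at setup time, and convertHandCoordinatesToDepth() projects the 3D palm position (real-world millimeters) into 2D depth-pixel coordinates, which, thanks to the registration, are valid on the color image too:

// At setup time: align the depth image to the color image.
mDevice.setImageRegistrationMode( IMAGE_REGISTRATION_DEPTH_TO_COLOR );

// Per frame: project the 3D palm position into 2D depth-pixel coordinates...
cv::Point2f aPoint;
mHandTracker.convertHandCoordinatesToDepth( rPos.x, rPos.y, rPos.z, &aPoint.x, &aPoint.y );

// ...and draw the same 2D point on both images.
cv::circle( cImageBGR, aPoint, 3, cv::Scalar( 0, 0, 255 ), 4 );
cv::circle( mScaledDepth, aPoint, 3, cv::Scalar( 0, 0, 255 ), 4 );

The full program: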

#include "stdafx.h"
#include <iostream>

    // OpenCV headers
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>


#include <OpenNI.h>
#include <NiTE.h>

using namespace std;
using namespace openni;
using namespace nite;

int main( int argc, char **argv )
{
    // Initialize OpenNI
    OpenNI::initialize();

    // Open the Kinect device
    Device  mDevice;
    mDevice.open( ANY_DEVICE );

    // Create the depth stream
    VideoStream mDepthStream;
    mDepthStream.create( mDevice, SENSOR_DEPTH );

    // Configure the VideoMode
    VideoMode mDepthMode;
    mDepthMode.setResolution( 640, 480 );
    mDepthMode.setFps( 30 );
    mDepthMode.setPixelFormat( PIXEL_FORMAT_DEPTH_1_MM );
    mDepthStream.setVideoMode(mDepthMode);

    // Set up the color stream the same way
    VideoStream mColorStream;
    mColorStream.create( mDevice, SENSOR_COLOR );
    // Configure the VideoMode
    VideoMode mColorMode;
    mColorMode.setResolution( 640, 480 );
    mColorMode.setFps( 30 );
    mColorMode.setPixelFormat( PIXEL_FORMAT_RGB888 );
    mColorStream.setVideoMode( mColorMode);

    // Register the depth image to the color image
    mDevice.setImageRegistrationMode( IMAGE_REGISTRATION_DEPTH_TO_COLOR );

    // Initialize NiTE before requesting hand data
    NiTE::initialize();

    // Create the HandTracker
    HandTracker mHandTracker;
    mHandTracker.create(&mDevice);

    // Register the gestures to detect (GESTURE_WAVE, GESTURE_CLICK and GESTURE_HAND_RAISE)
    mHandTracker.startGestureDetection( GESTURE_WAVE );
    mHandTracker.startGestureDetection( GESTURE_CLICK );
    //mHandTracker.startGestureDetection( GESTURE_HAND_RAISE );

    // Smooth the reported hand positions (0 = no smoothing, 1 = heaviest smoothing)
    mHandTracker.setSmoothingFactor(0.1f);

    // Create the depth display window
    cv::namedWindow("Depth Image", CV_WINDOW_AUTOSIZE);

    // Create the color display window
    cv::namedWindow( "Hand Image",  CV_WINDOW_AUTOSIZE );

    // Everything is initialized; start the depth and color streams
    mDepthStream.start();
    mColorStream.start();

    // Get the maximum depth value (for scaling the display)
    int iMaxDepth = mDepthStream.getMaxPixelValue();

    while( true )
    {
        // cv::Mat used to display the color image
        cv::Mat cImageBGR;

        // Read a depth frame
        VideoFrameRef mDepthFrame;
        mDepthStream.readFrame(&mDepthFrame);

        // Read a color frame
        VideoFrameRef mColorFrame;
        mColorStream.readFrame( &mColorFrame );


        // Wrap the depth data in an OpenCV Mat
        const cv::Mat mImageDepth( mDepthFrame.getHeight(), mDepthFrame.getWidth(), CV_16UC1, (void*)mDepthFrame.getData());
        // Convert CV_16UC1 ==> CV_8U so the depth image shows up more clearly
        cv::Mat mScaledDepth;
        mImageDepth.convertTo( mScaledDepth, CV_8U, 255.0 / iMaxDepth );

        // Wrap the color data in an OpenCV Mat; note the format is CV_8UC3 (R, G, B)
        const cv::Mat mImageRGB( mColorFrame.getHeight(), mColorFrame.getWidth(),
            CV_8UC3, (void*)mColorFrame.getData() );

        // RGB ==> BGR
        cv::cvtColor( mImageRGB, cImageBGR, CV_RGB2BGR );

        // Read a hand-tracker frame
        HandTrackerFrameRef mHandFrame;
        mHandTracker.readFrame( &mHandFrame );

        // Analyze the frame and collect the detected gestures
        const nite::Array<GestureData>& aGestures = mHandFrame.getGestures();

        for( int i = 0; i < aGestures.getSize(); ++ i )
        {
            const GestureData& rGesture = aGestures[i];

            // The gesture data also carries the gesture's current position
            const Point3f& rPos = rGesture.getCurrentPosition();
            cout << " 手勢位置為: (" << rPos.x << ", " << rPos.y << ", " << rPos.z << ")" << endl;

            // A gesture was recognized; start tracking the hand
            HandId mHandID;
            mHandTracker.startHandTracking( rPos, &mHandID );
            cout << "確定手勢位置,開始手部跟蹤" << endl;

        }

        const nite::Array<HandData>& aHands = mHandFrame.getHands();
        
        for( int i = 0; i < aHands.getSize(); ++ i )
        {
            const HandData& rHand = aHands[i];
            
            if( rHand.isNew() )
                cout << " Start tracking";
            
            else if( rHand.isLost() )
                cout << " Lost";

            // Check whether the hand is being tracked
            if( rHand.isTracking() )
            {
                // Get the palm position
                const Point3f& rPos = rHand.getPosition();
                cout << " at " << rPos.x << ", " << rPos.y << ", " << rPos.z;

                cv::Point2f aPoint;
                mHandTracker.convertHandCoordinatesToDepth(rPos.x, rPos.y, rPos.z, &aPoint.x, &aPoint.y);
                // Draw the palm position on both the color and the depth image
                cv::circle( cImageBGR, aPoint, 3, cv::Scalar( 0, 0, 255 ), 4 );
                cv::circle( mScaledDepth, aPoint, 3, cv::Scalar(0, 0, 255), 4);

                // Draw a 200x200-pixel box around the hand on the color image
                // and the same box on the depth image
                cv::Point2f tlPoint( aPoint.x - 100, aPoint.y - 100 );
                cv::Point2f brPoint( aPoint.x + 100, aPoint.y + 100 );
                cv::rectangle( cImageBGR, tlPoint, brPoint, cv::Scalar( 255, 0, 0 ), 3 );
                cv::rectangle( mScaledDepth, tlPoint, brPoint, cv::Scalar( 255, 0, 0 ), 3 );
            }
        }

        // Show the images
        cv::imshow( "Depth Image", mScaledDepth );
        cv::imshow( "Hand Image", cImageBGR );

        // Press 'q' to exit the loop
        if( cv::waitKey( 1 ) == 'q' )
            break;
    }

    // Destroy the hand tracker first
    mHandTracker.destroy();

    // Destroy the color and depth streams
    mColorStream.destroy();
    mDepthStream.destroy();

    // Close the Kinect device
    mDevice.close();

    // Shut down NiTE and OpenNI
    NiTE::shutdown();
    OpenNI::shutdown();

    return 0;
}

The program's output is shown in the figure below:

Next, let's draw the hand's motion trail. The only new piece is the per-hand bookkeeping, sketched below; after that, straight to the code:
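
One trail of 2D points is kept per tracked hand, keyed by the hand's id: created when the hand appears, extended while it is tracked, and discarded when it is lost (rHand and rPos2D as in the listing below):

// One trail of 2D palm positions per hand, keyed by the NiTE hand id.
map< HandId, vector<cv::Point2f> > mapHandData;

// Per frame, per hand:
if( rHand.isNew() )                      // hand just acquired: start an empty trail
    mapHandData.insert( make_pair( rHand.getId(), vector<cv::Point2f>() ) );
if( rHand.isTracking() )                 // append the current 2D palm position
    mapHandData[ rHand.getId() ].push_back( rPos2D );
if( rHand.isLost() )                     // hand gone: drop its trail
    mapHandData.erase( rHand.getId() );

The full listing: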

#include <array>
#include <iostream>
#include <map>
#include <vector>

// OpenCV headers
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>

// OpenNI and NiTE headers
#include <OpenNI.h>
#include <NiTE.h>

using namespace std;
using namespace openni;
using namespace nite;

int main( int argc, char **argv )
{
    // Initialize OpenNI
    OpenNI::initialize();

    // Open the Kinect device
    Device  mDevice;
    mDevice.open( ANY_DEVICE );

    // Create the depth stream
    VideoStream mDepthStream;
    mDepthStream.create( mDevice, SENSOR_DEPTH );

    // Configure the VideoMode
    VideoMode mDepthMode;
    mDepthMode.setResolution( 640, 480 );
    mDepthMode.setFps( 30 );
    mDepthMode.setPixelFormat( PIXEL_FORMAT_DEPTH_1_MM );
    mDepthStream.setVideoMode(mDepthMode);

    // Set up the color stream the same way
    VideoStream mColorStream;
    mColorStream.create( mDevice, SENSOR_COLOR );
    // Configure the VideoMode
    VideoMode mColorMode;
    mColorMode.setResolution( 640, 480 );
    mColorMode.setFps( 30 );
    mColorMode.setPixelFormat( PIXEL_FORMAT_RGB888 );
    mColorStream.setVideoMode( mColorMode);

    // Register the depth image to the color image
    mDevice.setImageRegistrationMode( IMAGE_REGISTRATION_DEPTH_TO_COLOR );

    // Initialize NiTE
    if( NiTE::initialize() != nite::STATUS_OK )
    {
        cerr << "NiTE initial error" << endl;
        return -1;
    }

    // Create the HandTracker
    HandTracker mHandTracker;
    if( mHandTracker.create() != nite::STATUS_OK )
    {
        cerr << "Can't create user tracker" << endl;
        return -1;
    }
    // Register the gestures to detect (GESTURE_WAVE, GESTURE_CLICK and GESTURE_HAND_RAISE)
    mHandTracker.startGestureDetection( GESTURE_WAVE );
    mHandTracker.startGestureDetection( GESTURE_CLICK );
    //mHandTracker.startGestureDetection( GESTURE_HAND_RAISE );

    // Smooth the reported hand positions (0 = no smoothing, 1 = heaviest smoothing)
    mHandTracker.setSmoothingFactor(0.1f);


    // Create the depth display window
    cv::namedWindow("Depth Image", CV_WINDOW_AUTOSIZE);

    // Create the color display window
    cv::namedWindow( "Color Image",  CV_WINDOW_AUTOSIZE );

    // Storage for the drawn points: one trail of palm positions per
    // tracked hand, plus one hit list per gesture type
    map< HandId, vector<cv::Point2f> > mapHandData;
    vector<cv::Point2f> vWaveList;
    vector<cv::Point2f> vClickList;
    cv::Point2f ptSize( 3, 3 );

    array<cv::Scalar,8>    aHandColor;
    aHandColor[0] = cv::Scalar( 255, 0, 0 );
    aHandColor[1] = cv::Scalar( 0, 255, 0 );
    aHandColor[2] = cv::Scalar( 0, 0, 255 );
    aHandColor[3] = cv::Scalar( 255, 255, 0 );
    aHandColor[4] = cv::Scalar( 255, 0, 255 );
    aHandColor[5] = cv::Scalar( 0, 255, 255 );
    aHandColor[6] = cv::Scalar( 255, 255, 255 );
    aHandColor[7] = cv::Scalar( 0, 0, 0 );

    // Everything is initialized; start the depth and color streams
    mDepthStream.start();
    mColorStream.start();

    // Get the maximum depth value (for scaling the display)
    int iMaxDepth = mDepthStream.getMaxPixelValue();

    // Main loop
    while( true )
    {
        // cv::Mat used to display the color image
        cv::Mat cImageBGR;

        // Read a color frame
        VideoFrameRef mColorFrame;
        mColorStream.readFrame( &mColorFrame );

        // Wrap the color data in an OpenCV Mat; note the format is CV_8UC3 (R, G, B)
        const cv::Mat mImageRGB( mColorFrame.getHeight(), mColorFrame.getWidth(),
            CV_8UC3, (void*)mColorFrame.getData() );

        // RGB ==> BGR
        cv::cvtColor( mImageRGB, cImageBGR, CV_RGB2BGR );


        // Read a hand-tracker frame
        HandTrackerFrameRef mHandFrame;
        if( mHandTracker.readFrame( &mHandFrame ) == nite::STATUS_OK )
        {
            openni::VideoFrameRef mDepthFrame = mHandFrame.getDepthFrame();
            // Wrap the depth data (taken from the hand-tracker frame) in an OpenCV Mat
            const cv::Mat mImageDepth( mDepthFrame.getHeight(), mDepthFrame.getWidth(), CV_16UC1, (void*)mDepthFrame.getData() );
            // Convert CV_16UC1 ==> CV_8U so the depth image shows up more clearly
            cv::Mat mScaledDepth, mImageBGR;
            mImageDepth.convertTo( mScaledDepth, CV_8U, 255.0 / iMaxDepth );
            // Convert the grayscale depth image to BGR so colored points and trails can be drawn on it
            cv::cvtColor( mScaledDepth, mImageBGR, CV_GRAY2BGR );

            // Detect gestures
            const nite::Array<GestureData>& aGestures = mHandFrame.getGestures();
            for( int i = 0; i < aGestures.getSize(); ++ i )
            {
                const GestureData& rGesture = aGestures[i];
                const Point3f& rPos = rGesture.getCurrentPosition();
                cv::Point2f rPos2D;
                mHandTracker.convertHandCoordinatesToDepth( rPos.x, rPos.y, rPos.z, &rPos2D.x, &rPos2D.y );

                // Record where the gesture occurred
                switch( rGesture.getType() )
                {
                case GESTURE_WAVE:
                    vWaveList.push_back( rPos2D );
                    break;

                case GESTURE_CLICK:
                    vClickList.push_back( rPos2D );
                    break;
                }

                // Start tracking the hand
                HandId mHandID;
                if( mHandTracker.startHandTracking( rPos, &mHandID ) != nite::STATUS_OK )
                    cerr << "Can't track hand" << endl;
            }

            // Get the palm positions of the tracked hands
            const nite::Array<HandData>& aHands = mHandFrame.getHands();
            for( int i = 0; i < aHands.getSize(); ++ i )
            {
                const HandData& rHand = aHands[i];
                HandId uID = rHand.getId();

                if( rHand.isNew() )
                {
                    mapHandData.insert( make_pair( uID, vector<cv::Point2f>() ) );
                }

                if( rHand.isTracking() )
                {
                    // Map the palm position into the color and depth images
                    const Point3f& rPos = rHand.getPosition();
                    cv::Point2f rPos2D;
                    mHandTracker.convertHandCoordinatesToDepth( rPos.x, rPos.y, rPos.z, &rPos2D.x, &rPos2D.y );

                    mapHandData[uID].push_back( rPos2D );
                }

                if( rHand.isLost() )
                    mapHandData.erase( uID );
            }

            // Draw each hand's trail
            for( auto itHand = mapHandData.begin(); itHand != mapHandData.end(); ++ itHand )
            {
                const cv::Scalar& rColor = aHandColor[ itHand->first % aHandColor.size() ];
                const vector<cv::Point2f>& rPoints = itHand->second;

                for( size_t i = 1; i < rPoints.size(); ++ i )
                {
                    cv::line( mImageBGR, rPoints[i-1], rPoints[i], rColor, 2 );
                    cv::line( cImageBGR, rPoints[i-1], rPoints[i], rColor, 2 );
                }
            }

            // Mark where click gestures occurred
            for( auto itPt = vClickList.begin(); itPt != vClickList.end(); ++ itPt )
            {
                cv::circle( mImageBGR, *itPt, 5, cv::Scalar( 0, 0, 255 ), 2 );
                cv::circle( cImageBGR, *itPt, 5, cv::Scalar( 0, 0, 255 ), 2 );
            }

            // Mark where wave gestures occurred
            for( auto itPt = vWaveList.begin(); itPt != vWaveList.end(); ++ itPt )
            {
                cv::rectangle( mImageBGR, *itPt - ptSize, *itPt + ptSize, cv::Scalar( 0, 255, 0 ), 2 );
                cv::rectangle( cImageBGR, *itPt - ptSize, *itPt + ptSize, cv::Scalar( 0, 255, 0 ), 2 );
            }

            // Show the images
            cv::imshow( "Depth Image", mImageBGR );
            cv::imshow("Color Image", cImageBGR);

            mHandFrame.release();
        }
        else
        {
            cerr << "Can't get new frame" << endl;
        }
        // Press 'q' to exit the loop
        if( cv::waitKey( 1 ) == 'q' )
            break;
    }

    // Tear down: tracker, streams and device first, then both environments
    // (the original omitted the depth stream and device cleanup)
    mHandTracker.destroy();
    mColorStream.destroy();
    mDepthStream.destroy();
    mDevice.close();
    NiTE::shutdown();
    OpenNI::shutdown();

    return 0;
}
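
For reference, a build line along the following lines should work on Linux with OpenCV 2.x (the include and redist paths are placeholders for wherever your OpenNI 2 and NiTE 2 SDKs are installed; the stdafx.h include in the first listing shows the original was built in Visual Studio instead):

g++ main.cpp -o hand_trails \
    -I$OPENNI2_INCLUDE -I$NITE2_INCLUDE \
    -L$OPENNI2_REDIST -L$NITE2_REDIST \
    -lOpenNI2 -lNiTE2 `pkg-config --cflags --libs opencv`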

Output:

III. Summary

  One last note: this code was written by feel, with no encapsulation, optimization or refactoring; it is purely procedural, and there are certainly rough edges in the details that I will clean up later. It is rough work, so corrections and criticism are welcome.

