1. testRTSPClient Overview
testRTSPClient is a simple client example that illustrates the RTSP exchange in detail, and it introduces two concepts at the heart of an RTSP session: the Source and the Sink.
A Source produces data; a Sink consumes it.
testRTSPClient is very lean: other than receiving the data the server sends, it does nothing at all, which makes it easy to build our own projects on top of it.
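To make the Source/Sink relationship concrete, here is a minimal sketch (my illustration, not code from testRTSPClient; testRTSPClient's own DummySink simply discards what it receives, whereas the FileSink used here would write it to disk):

#include "liveMedia.hh"
#include "BasicUsageEnvironment.hh"

void afterPlaying(void* /*clientData*/) {
  // Called once the source has no more data to deliver.
}

void connectSourceToSink(UsageEnvironment& env, FramedSource* source) {
  // The sink consumes whatever the source produces; a FileSink writes it to disk.
  MediaSink* sink = FileSink::createNew(env, "received.bin");

  // startPlaying() starts the pull loop: the sink repeatedly requests
  // frames from the source (internally via getNextFrame()) until the
  // source closes, at which point afterPlaying() is invoked.
  sink->startPlaying(*source, afterPlaying, NULL);
}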
2. Building and Running testRTSPClient
Building and running under Linux is more convenient, but since my machine is too weak to run a virtual machine comfortably, I moved over to Windows instead.
Under Windows you only need this one file in the project to build. We use mediaServer as the server and testRTSPClient as the client.
Of course, an IP camera or any other physical device that speaks RTSP can also act as the server.


Start mediaServer first, then enter an RTSP URL built from the IP address that mediaServer prints into the command-line arguments of the testRTSPClient project, and launch testRTSPClient.
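A typical session looks like the following (the IP address and file name are placeholders; mediaServer prints its actual rtsp:// URL prefix when it starts, and serves media files from its working directory):

live555MediaServer.exe
    (prints something like: rtsp://192.168.1.10:8554/<filename>)
testRTSPClient.exe rtsp://192.168.1.10:8554/test.264

testRTSPClient accepts one or more RTSP URLs as arguments, one per stream.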


3. A Walkthrough of testRTSPClient's Core Code
1) Before reading the code it is worth skimming the overall framework. This blogger drew a flow chart of it: http://blog.csdn.net/smilestone_322/article/details/17297817. The heart of the client is the DummySink callback pair below, taken from testRTSPClient.cpp:
void DummySink::afterGettingFrame(unsigned frameSize, unsigned numTruncatedBytes,
                                  struct timeval presentationTime, unsigned /*durationInMicroseconds*/) {
  // We've just received a frame of data. (Optionally) print out information about it:
#ifdef DEBUG_PRINT_EACH_RECEIVED_FRAME
  if (fStreamId != NULL) envir() << "Stream \"" << fStreamId << "\"; ";
  envir() << fSubsession.mediumName() << "/" << fSubsession.codecName() << ":\tReceived " << frameSize << " bytes";
  if (numTruncatedBytes > 0) envir() << " (with " << numTruncatedBytes << " bytes truncated)";
  char uSecsStr[6+1]; // used to output the 'microseconds' part of the presentation time
  sprintf(uSecsStr, "%06u", (unsigned)presentationTime.tv_usec);
  envir() << ".\tPresentation time: " << (unsigned)presentationTime.tv_sec << "." << uSecsStr;
  if (fSubsession.rtpSource() != NULL && !fSubsession.rtpSource()->hasBeenSynchronizedUsingRTCP()) {
    envir() << "!"; // mark the debugging output to indicate that this presentation time is not RTCP-synchronized
  }
  envir() << "\n";
#endif

  // Then continue, to request the next frame of data:
  continuePlaying();
}

Boolean DummySink::continuePlaying() {
  if (fSource == NULL) return False; // sanity check (should not happen)

  // Request the next frame of data from our input source. "afterGettingFrame()" will get called later, when it arrives:
  fSource->getNextFrame(fReceiveBuffer, DUMMY_SINK_RECEIVE_BUFFER_SIZE,
                        afterGettingFrame, this,
                        onSourceClosure, this);
  return True;
}
2) A fellow developer, building on testRTSPClient, wrote the received data out to an .h264 file: http://blog.csdn.net/occupy8/article/details/36426821
void DummySink::afterGettingFrame(void* clientData, unsigned frameSize, unsigned numTruncatedBytes,
                                  struct timeval presentationTime, unsigned durationInMicroseconds) {
  DummySink* sink = (DummySink*)clientData;
  sink->afterGettingFrame(frameSize, numTruncatedBytes, presentationTime, durationInMicroseconds);
}

// If you don't want to see debugging output for each received frame, then comment out the following line:
#define DEBUG_PRINT_EACH_RECEIVED_FRAME 1

void DummySink::afterGettingFrame(unsigned frameSize, unsigned numTruncatedBytes,
                                  struct timeval presentationTime, unsigned /*durationInMicroseconds*/) {
  // We've just received a frame of data. (Optionally) print out information about it:
#ifdef DEBUG_PRINT_EACH_RECEIVED_FRAME
  if (fStreamId != NULL) envir() << "Stream \"" << fStreamId << "\"; ";
  envir() << fSubsession.mediumName() << "/" << fSubsession.codecName() << ":\tReceived " << frameSize << " bytes";
  if (numTruncatedBytes > 0) envir() << " (with " << numTruncatedBytes << " bytes truncated)";
  char uSecsStr[6+1]; // used to output the 'microseconds' part of the presentation time
  sprintf(uSecsStr, "%06u", (unsigned)presentationTime.tv_usec);
  envir() << ".\tPresentation time: " << (unsigned)presentationTime.tv_sec << "." << uSecsStr;
  if (fSubsession.rtpSource() != NULL && !fSubsession.rtpSource()->hasBeenSynchronizedUsingRTCP()) {
    envir() << "!"; // mark the debugging output to indicate that this presentation time is not RTCP-synchronized
  }
  envir() << "\n";
#endif

  // Save each received frame to a file:
  if (!strcmp(fSubsession.mediumName(), "video")) {
    if (firstFrame) {
      // For an H.264 stream, the SPS/PPS parameter sets arrive out of band
      // in the SDP description ("sprop-parameter-sets"); write them once,
      // before the first frame, each preceded by an Annex B start code:
      unsigned int num;
      SPropRecord* sps = parseSPropParameterSets(fSubsession.fmtp_spropparametersets(), num);
      unsigned char start_code[4] = {0x00, 0x00, 0x00, 0x01};
      FILE* fp = fopen("test.264", "a+b");
      if (fp != NULL) {
        for (unsigned int i = 0; i < num; ++i) { // typically num == 2: SPS, then PPS
          fwrite(start_code, 4, 1, fp);
          fwrite(sps[i].sPropBytes, sps[i].sPropLength, 1, fp);
        }
        fclose(fp);
        fp = NULL;
      }
      delete[] sps;
      firstFrame = False;
    }

    // Each NAL unit delivered over RTP lacks a start code; prepend one before writing:
    unsigned char start_code[4] = {0x00, 0x00, 0x00, 0x01};
    FILE* fp = fopen("test.264", "a+b");
    if (fp != NULL) {
      fwrite(start_code, 4, 1, fp);
      fwrite(fReceiveBuffer, frameSize, 1, fp);
      fclose(fp);
      fp = NULL;
    }
  }

  // Then continue, to request the next frame of data:
  continuePlaying();
}

Boolean DummySink::continuePlaying() {
  if (fSource == NULL) return False; // sanity check (should not happen)

  // Request the next frame of data from our input source. "afterGettingFrame()" will get called later, when it arrives:
  fSource->getNextFrame(fReceiveBuffer, DUMMY_SINK_RECEIVE_BUFFER_SIZE,
                        afterGettingFrame, this,
                        onSourceClosure, this);
  return True;
}
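A practical caveat from my own use (not from the original post): if the debug output ever shows numTruncatedBytes > 0, the frame did not fit into fReceiveBuffer, and the truncated data written to test.264 will corrupt the stream. The fix is to enlarge the receive-buffer constant defined near the top of testRTSPClient.cpp; the stock value can be too small for HD H.264 I-frames (the replacement size below is an assumption; tune it to your source):

// Stock definition in testRTSPClient.cpp:
// #define DUMMY_SINK_RECEIVE_BUFFER_SIZE 100000
// Roomier value for HD streams:
#define DUMMY_SINK_RECEIVE_BUFFER_SIZE 1000000

The resulting test.264 is a raw Annex B elementary stream; it can be sanity-checked with a player such as VLC or ffplay.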
3) testRTSPClient also supports receiving and recording several streams at once. Others online have already built on this; the mechanism it rests on is sketched below.
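Multi-stream support is in fact already built into testRTSPClient's structure: main() calls openURL() once per RTSP URL given on the command line, each stream gets its own RTSPClient and DummySink, and all of them share the single event loop. A condensed sketch of the stock main() (see testRTSPClient.cpp for the full version, including the usage message and the eventLoopWatchVariable declaration):

int main(int argc, char** argv) {
  // Begin by setting up our usage environment:
  TaskScheduler* scheduler = BasicTaskScheduler::createNew();
  UsageEnvironment* env = BasicUsageEnvironment::createNew(*scheduler);

  if (argc < 2) {
    *env << "Usage: " << argv[0] << " <rtsp-url-1> ... <rtsp-url-N>\n";
    return 1;
  }

  // Open a connection for each stream; they all run in one event loop:
  for (int i = 1; i <= argc - 1; ++i) {
    openURL(*env, argv[0], argv[i]);
  }

  // All subsequent activity (connecting, receiving frames) happens inside here:
  env->taskScheduler().doEventLoop(&eventLoopWatchVariable);

  return 0;
}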

