A week of work, and video streaming over the LAN finally works


Topic: real-time video capture and streaming on Windows

References: the materials cited below are ones I found online along the way; they all seemed solid and worth consulting.

I'm a little excited. A full week's work, and honestly, it wasn't easy.

I went from a coder who knew nothing about video to one who has now barely gotten through the door. Not a huge change, but enough to appreciate that every field is harder than it looks.

Back to business. Here is my overall approach, shown in the diagram below.

[Diagram: capture/encode thread → circular array → RTSP push thread → LAN clients]

I actually weighed several designs before settling on this one.

Approach 1:

The more robust option is on Linux: capture and encode to H.264 on one side (Linux can do this natively, which is far simpler than on Windows) and pass the data through a FIFO.

live555 then reads its end of the FIFO (really a named pipe). The benefit is clean communication between the two processes, with decent real-time behavior.
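As a sketch of what that Linux FIFO handoff might look like (the path and function name here are made up for illustration, not from the project):

    #include <sys/stat.h>
    #include <fcntl.h>
    #include <unistd.h>

    // Capture/encode side: create the FIFO and stream H.264 bytes into it.
    // live555 then opens the same path as if it were a growing .264 file.
    int open_fifo_writer(void)
    {
        const char *path = "/tmp/cam.h264"; // illustrative path
        mkfifo(path, 0666);                 // no-op if it already exists
        return open(path, O_WRONLY);        // blocks until the reader connects
    }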

On Windows you can likewise use inter-process communication; there are plenty of mechanisms, which I won't cover here.

But there's a simpler route: do it all with threads in one process. A second thread feeds a circular array, which keeps the pipeline real-time. See the sketch below.
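To make the handoff concrete, here is a minimal sketch of that circular-array scheme. StreamData, szbuffer, recvcount and ARRAYLENGTH live in global.h in the real project; the sizes and the two helper functions are my own illustration:

    #include <string.h>

    #define ARRAYLENGTH  16     // assumed slot count; the real value is in global.h
    #define MAXFRAMESIZE 100000 // assumed per-frame capacity

    typedef struct {
        unsigned char str[MAXFRAMESIZE]; // one encoded H.264 frame
        int len;                         // number of valid bytes in str
    } StreamData;

    StreamData szbuffer[ARRAYLENGTH];
    volatile int recvcount = 0; // next slot the producer will write

    // Producer (capture/encode thread): store a frame, then advance the index.
    void push_frame(const unsigned char *data, int size)
    {
        memcpy(szbuffer[recvcount].str, data, size);
        szbuffer[recvcount].len = size;
        recvcount = (recvcount + 1) % ARRAYLENGTH;
    }

    // Consumer (RTSP thread): read the most recently completed slot.
    int pop_latest(unsigned char *out)
    {
        int idx = (recvcount + ARRAYLENGTH - 1) % ARRAYLENGTH;
        memcpy(out, szbuffer[idx].str, szbuffer[idx].len);
        return szbuffer[idx].len;
    }

Note there is no locking: the scheme relies on the consumer always reading the slot just behind the write index, which is exactly how the code later in this post uses it.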

Approach 2:

Capture with OpenCV (which conveniently allows image processing on the frames), encode the video to H.264 with libx264, then push it to an RTSP server.

The snag was that I didn't know libx264 well enough, and that eventually made me drop this approach.
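For the record, the libx264 path would have looked roughly like the sketch below. I never got it working, so treat the parameter choices as assumptions rather than a tested recipe:

    #include <x264.h>

    // Open an encoder for w x h YUV420P input at the given frame rate.
    x264_t *open_encoder(int w, int h, int fps)
    {
        x264_param_t param;
        x264_param_default_preset(&param, "veryfast", "zerolatency");
        param.i_width   = w;
        param.i_height  = h;
        param.i_fps_num = fps;
        param.i_fps_den = 1;
        param.i_csp     = X264_CSP_I420; // same YUV420P layout FFmpeg uses
        x264_param_apply_profile(&param, "baseline");
        return x264_encoder_open(&param);
    }

    // Encode one frame; on success the NAL units sit contiguously starting at
    // nals[0].p_payload, ready to be handed to the RTSP side.
    int encode_frame(x264_t *enc, x264_picture_t *pic_in)
    {
        x264_picture_t pic_out;
        x264_nal_t *nals;
        int nnal;
        int size = x264_encoder_encode(enc, &nals, &nnal, pic_in, &pic_out);
        return size; // total encoded bytes, 0 if the frame was buffered, < 0 on error
    }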

Then I came across an article by Lei Xiaohua (雷神) that finally made up my mind: ffmpeg + live555 it is.

 

How it's implemented:

Key points:

The program consists of three threads: one that captures and encodes, one that pushes the encoded data out as an RTSP H.264 stream, and the main thread. Data passes between threads through the circular array.

The overall structure follows the diagram above. Let's go through the pieces one by one:

  • Capture-and-encode thread:

Based on two of Lei Xiaohua's posts:

http://blog.csdn.net/leixiaohua1020/article/details/39759623 - Simplest FFmpeg memory-I/O example: an in-memory transcoder

http://blog.csdn.net/leixiaohua1020/article/details/39759163 - Simplest FFmpeg memory-I/O example: an in-memory player

 

  • Thread that pushes the encoded data out as RTSP/H.264:

Based on posts by two other bloggers:

http://bubuko.com/infodetail-272265.html - Sending a live H.264 stream over RTSP with live555 on Windows

http://blog.csdn.net/xiejiashu/article/details/8269873 - Using live555 to capture and forward local video, with source code

 

The key piece of work is rewriting the doGetNextFrame() code.
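For orientation, the subclass would be declared roughly as below. The member names match the Source code at the end of this post; everything else is an assumption, since the real declaration lives in H264FramedLiveSource.hh:

    #include "FramedSource.hh"

    class H264FramedLiveSource : public FramedSource {
    public:
        static H264FramedLiveSource* createNew(UsageEnvironment& env,
                                               int* datasize,
                                               unsigned char* databuf,
                                               bool* dosent);
    protected:
        H264FramedLiveSource(UsageEnvironment& env,
                             int* datasize, unsigned char* databuf, bool* dosent);
        // The one override that matters: copy one encoded frame into fTo.
        virtual void doGetNextFrame();
    private:
        int* Framed_datasize;          // length of the shared data area
        unsigned char* Framed_databuf; // pointer to the shared data area
        bool* Framed_dosent;           // send flag owned by the RTSP thread
        int bufsizel;                  // size of the frame currently being delivered
    };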

The full code follows:

#include "global.h"
#include "FFmpegReadCamera.h"
#include "H264LiveVideoServerMediaSubssion.hh"
#include "H264FramedLiveSource.hh"
#include "liveMedia.hh"
#include "BasicUsageEnvironment.hh"

#define BUFSIZE 10000

DWORD WINAPI ChildFunc1(LPVOID); // capture + encode thread
DWORD WINAPI ChildFunc2(LPVOID); // RTSP push thread

// Print the RTSP URL clients should use for this stream.
static void announceStream(RTSPServer* rtspServer, ServerMediaSession* sms, char const* streamName)
{
   char* url = rtspServer->rtspURL(sms);
   UsageEnvironment& env = rtspServer->envir();
   env << streamName << "\n";
   env << "Play this stream using the URL \"" << url << "\"\n";
   delete[] url;
}

int main(int argc, char** argv)
{
   // Clear the circular array shared by the two worker threads.
   for (int i = 0; i < ARRAYLENGTH; ++i)
   {
      memset(&szbuffer[i], 0, sizeof(StreamData));
   }

   CreateThread(NULL, 0, ChildFunc1, 0, 0, NULL);
   Sleep(3000); // give the encoder a head start so the array holds frames
   CreateThread(NULL, 0, ChildFunc2, 0, 0, NULL);
   while (1)
   {
      Sleep(1);
   }
   return 0;
}

DWORD WINAPI ChildFunc1(LPVOID p)
{
   int ret;
   AVFormatContext *ofmt_ctx = NULL;
   AVStream *out_stream;
   AVStream *in_stream;
   AVCodecContext *enc_ctx;
   AVCodecContext *dec_ctx;
   AVCodec *encoder;
   enum AVMediaType type;
   fp_write = fopen("test.h264", "wb+"); // dump of the encoded stream, for offline testing
   unsigned int stream_index;
   AVPacket enc_pkt;
   int enc_got_frame;

   AVFormatContext *pFormatCtx;
   int i, videoindex;
   AVCodecContext *pCodecCtx;
   AVCodec *pCodec;

   av_register_all();

   avformat_network_init();
   pFormatCtx = avformat_alloc_context();

   // Output is a raw H.264 elementary stream.
   avformat_alloc_output_context2(&ofmt_ctx, NULL, "h264", NULL);

   // Register all capture devices.
   avdevice_register_all();
   // List the available DirectShow devices.
   show_dshow_device();
   // Show the options (camera parameters) of one device.
   show_dshow_device_option();
   // Show VFW options.
   show_vfw_device();
   // Windows
#ifdef _WIN32
#if USE_DSHOW
   AVInputFormat *ifmt = av_find_input_format("dshow");
   // Set your own video device's name here.
   if (avformat_open_input(&pFormatCtx, "video=Integrated Webcam", ifmt, NULL) != 0) {
      printf("Couldn't open input stream.\n");
      return -1;
   }
#else
   AVInputFormat *ifmt = av_find_input_format("vfwcap");
   if (avformat_open_input(&pFormatCtx, "0", ifmt, NULL) != 0) {
      printf("Couldn't open input stream.\n");
      return -1;
   }
#endif
#endif
   // Linux
#ifdef linux
   AVInputFormat *ifmt = av_find_input_format("video4linux2");
   if (avformat_open_input(&pFormatCtx, "/dev/video0", ifmt, NULL) != 0) {
      printf("Couldn't open input stream.\n");
      return -1;
   }
#endif

   if (avformat_find_stream_info(pFormatCtx, NULL) < 0)
   {
      printf("Couldn't find stream information.\n");
      return -1;
   }
   videoindex = -1;
   for (i = 0; i < pFormatCtx->nb_streams; i++)
      if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
      {
         videoindex = i;
         break;
      }
   if (videoindex == -1)
   {
      printf("Couldn't find a video stream.\n");
      return -1;
   }

   pCodecCtx = pFormatCtx->streams[videoindex]->codec;
   pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
   if (pCodec == NULL)
   {
      printf("Codec not found.\n");
      return -1;
   }
   if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0)
   {
      printf("Could not open codec.\n");
      return -1;
   }

   AVFrame *pFrame, *pFrameYUV;
   pFrame = avcodec_alloc_frame();
   pFrameYUV = avcodec_alloc_frame();
   int length = avpicture_get_size(PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height);
   uint8_t *out_buffer = (uint8_t *)av_malloc(length);

   // Custom I/O: muxed output goes to write_buffer() instead of a file.
   AVIOContext *avio_out = avio_alloc_context(out_buffer, length, 0, NULL, NULL, write_buffer, NULL);
   if (avio_out == NULL)
   {
      printf("Failed to allocate the I/O context.\n");
      return -1;
   }

   ofmt_ctx->pb = avio_out;
   ofmt_ctx->flags = AVFMT_FLAG_CUSTOM_IO;
   for (int i = 0; i < 1; i++)
   {
      out_stream = avformat_new_stream(ofmt_ctx, NULL);
      if (!out_stream)
      {
         av_log(NULL, AV_LOG_ERROR, "failed allocating output stream");
         return AVERROR_UNKNOWN;
      }
      in_stream = pFormatCtx->streams[i];
      dec_ctx = in_stream->codec;
      enc_ctx = out_stream->codec;
      // Configure the H.264 encoder.
      if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
      {
         encoder = avcodec_find_encoder(AV_CODEC_ID_H264);
         enc_ctx->height = dec_ctx->height;
         enc_ctx->width = dec_ctx->width;
         enc_ctx->sample_aspect_ratio = dec_ctx->sample_aspect_ratio;
         enc_ctx->pix_fmt = encoder->pix_fmts[0];
         enc_ctx->time_base = dec_ctx->time_base;
         enc_ctx->me_range = 16;
         enc_ctx->max_qdiff = 4;
         enc_ctx->qmin = 10; // qmin/qmax trade image quality against bitrate
         enc_ctx->qmax = 51;
         enc_ctx->qcompress = 0.6;
         enc_ctx->refs = 3;
         enc_ctx->bit_rate = 500000;

         //enc_ctx->time_base.num = 1;
         //enc_ctx->time_base.den = 25;
         //enc_ctx->gop_size = 10;
         //enc_ctx->bit_rate = 3000000;

         ret = avcodec_open2(enc_ctx, encoder, NULL);
         if (ret < 0)
         {
            av_log(NULL, AV_LOG_ERROR, "Cannot open video encoder for stream #%u\n", i);
            return ret;
         }
         //av_opt_set(enc_ctx->priv_data, "tune", "zerolatency", 0);
      }
      else if (dec_ctx->codec_type == AVMEDIA_TYPE_UNKNOWN)
      {
         av_log(NULL, AV_LOG_FATAL, "Elementary stream #%d is of unknown type, cannot proceed\n", i);
         return AVERROR_INVALIDDATA;
      }
      else
      {
         // If this stream only needs to be remuxed.
         ret = avcodec_copy_context(ofmt_ctx->streams[i]->codec, pFormatCtx->streams[i]->codec);
         if (ret < 0)
         {
            av_log(NULL, AV_LOG_ERROR, "Copying stream context failed\n");
            return ret;
         }
      }
      if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
      {
         enc_ctx->flags |= CODEC_FLAG_GLOBAL_HEADER;
      }
   }
   // Init the muxer, write the output stream header.
   ret = avformat_write_header(ofmt_ctx, NULL);
   if (ret < 0)
   {
      av_log(NULL, AV_LOG_ERROR, "Error occurred when writing the output header\n");
      return ret;
   }

   i = 0;
   // pCodecCtx is the decoder context; bind pFrameYUV's planes to out_buffer.
   avpicture_fill((AVPicture *)pFrameYUV, out_buffer, PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height);
   //SDL----------------------------
   if (SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {
      printf("Could not initialize SDL - %s\n", SDL_GetError());
      return -1;
   }
   int screen_w = 0, screen_h = 0;
   SDL_Surface *screen;
   screen_w = pCodecCtx->width;
   screen_h = pCodecCtx->height;
   screen = SDL_SetVideoMode(screen_w, screen_h, 0, 0);

   if (!screen) {
      printf("SDL: could not set video mode - exiting:%s\n", SDL_GetError());
      return -1;
   }
   SDL_Overlay *bmp;
   bmp = SDL_CreateYUVOverlay(pCodecCtx->width, pCodecCtx->height, SDL_YV12_OVERLAY, screen);
   SDL_Rect rect;
   //SDL End------------------------
   int got_picture;

   AVPacket *packet = (AVPacket *)av_malloc(sizeof(AVPacket));
   //Output Information-----------------------------
   printf("File Information---------------------\n");
   av_dump_format(pFormatCtx, 0, NULL, 0);
   printf("-------------------------------------------------\n");

#if OUTPUT_YUV420P
   FILE *fp_yuv = fopen("output.yuv", "wb+");
#endif

   struct SwsContext *img_convert_ctx;
   img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height, PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);
   //------------------------------
   // Read all packets.
   while (av_read_frame(pFormatCtx, packet) >= 0)
   {
      if (packet->stream_index == videoindex)
      {
         type = pFormatCtx->streams[packet->stream_index]->codec->codec_type;
         stream_index = packet->stream_index; // assumes the video stream is #0, so input and output indices line up
         av_log(NULL, AV_LOG_DEBUG, "Demuxer gave frame of stream_index %u\n", stream_index);

         if (!pFrame)
         {
            ret = AVERROR(ENOMEM);
            break;
         }
         packet->dts = av_rescale_q_rnd(packet->dts,
            pFormatCtx->streams[stream_index]->time_base,
            pFormatCtx->streams[stream_index]->codec->time_base,
            (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
         packet->pts = av_rescale_q_rnd(packet->pts,
            pFormatCtx->streams[stream_index]->time_base,
            pFormatCtx->streams[stream_index]->codec->time_base,
            (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
         ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, packet);
         printf("Decode 1 Packet\tsize:%d\tpts:%lld\n", packet->size, (long long)packet->pts);
         if (ret < 0)
         {
            printf("Decode Error.\n");
            av_frame_free(&pFrame);
            return -1;
         }
         if (got_picture)
         {
            // Convert the captured frame (RGB/YUYV/...) to YUV420P.
            sws_scale(img_convert_ctx, (const uint8_t* const*)pFrame->data, pFrame->linesize, 0, pCodecCtx->height, pFrameYUV->data, pFrameYUV->linesize);
            // pFrameYUV, now in YUV420P, is what gets encoded below.
            pFrameYUV->width = pFrame->width;
            pFrameYUV->height = pFrame->height;

            /*pFrameYUV->pts = av_frame_get_best_effort_timestamp(pFrameYUV);
            pFrameYUV->pict_type = AV_PICTURE_TYPE_NONE;*/

            av_init_packet(&enc_pkt);
            enc_pkt.data = out_buffer;
            enc_pkt.size = length;

            // The encoder requires YUV420P input; anything else fails.
            // avcodec_encode_video2() returns 0 on success and a negative value on failure.
            ret = avcodec_encode_video2(ofmt_ctx->streams[stream_index]->codec, &enc_pkt, pFrameYUV, &enc_got_frame);
            printf("Encode 1 Packet\tsize:%d\tpts:%lld\n", enc_pkt.size, (long long)enc_pkt.pts);
            if (ret < 0)
            {
               printf("encode failed\n");
               return -1;
            }
            if (ret == 0 && enc_got_frame)
            {
               // Dump the encoded H.264 to file, handy for offline testing.
               fwrite(enc_pkt.data, enc_pkt.size, 1, fp_write);
               // Hand the frame to the RTSP thread through the circular array.
               memcpy(szbuffer[recvcount].str, enc_pkt.data, enc_pkt.size);
               szbuffer[recvcount].len = enc_pkt.size;
               recvcount = (recvcount + 1) % ARRAYLENGTH;

               /* prepare packet for muxing */
               enc_pkt.stream_index = stream_index;
               enc_pkt.dts = av_rescale_q_rnd(enc_pkt.dts,
                  ofmt_ctx->streams[stream_index]->codec->time_base,
                  ofmt_ctx->streams[stream_index]->time_base,
                  (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
               enc_pkt.pts = av_rescale_q_rnd(enc_pkt.pts,
                  ofmt_ctx->streams[stream_index]->codec->time_base,
                  ofmt_ctx->streams[stream_index]->time_base,
                  (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
               enc_pkt.duration = av_rescale_q(enc_pkt.duration,
                  ofmt_ctx->streams[stream_index]->codec->time_base,
                  ofmt_ctx->streams[stream_index]->time_base);
               av_log(NULL, AV_LOG_INFO, "Muxing frame %d\n", i);
               /* mux encoded frame */
               ret = av_write_frame(ofmt_ctx, &enc_pkt);
               if (ret < 0)
               {
                  printf("muxing failed\n");
                  return -1;
               }
            }

#if OUTPUT_YUV420P
            int y_size = pCodecCtx->width * pCodecCtx->height;
            fwrite(pFrameYUV->data[0], 1, y_size, fp_yuv);     //Y
            fwrite(pFrameYUV->data[1], 1, y_size / 4, fp_yuv); //U
            fwrite(pFrameYUV->data[2], 1, y_size / 4, fp_yuv); //V
#endif
            // Local preview: YV12 overlays store planes as Y,V,U, hence the swap.
            SDL_LockYUVOverlay(bmp);
            bmp->pixels[0] = pFrameYUV->data[0];
            bmp->pixels[2] = pFrameYUV->data[1];
            bmp->pixels[1] = pFrameYUV->data[2];
            bmp->pitches[0] = pFrameYUV->linesize[0];
            bmp->pitches[2] = pFrameYUV->linesize[1];
            bmp->pitches[1] = pFrameYUV->linesize[2];
            SDL_UnlockYUVOverlay(bmp);
            rect.x = 0;
            rect.y = 0;
            rect.w = screen_w;
            rect.h = screen_h;
            SDL_DisplayYUVOverlay(bmp, &rect);
            //Delay 40ms
            SDL_Delay(40);
         }
      }
      av_free_packet(packet);
   }

   /* flush encoders */
   for (i = 0; i < 1; i++) {
      ret = flush_encoder(ofmt_ctx, i);
      if (ret < 0) {
         av_log(NULL, AV_LOG_ERROR, "Flushing encoder failed\n");
         return -1;
      }
   }
   av_write_trailer(ofmt_ctx);

   sws_freeContext(img_convert_ctx);

#if OUTPUT_YUV420P
   fclose(fp_yuv);
#endif

   SDL_Quit();

   av_free(out_buffer);
   av_free(pFrameYUV);
   avcodec_close(pCodecCtx);
   avformat_close_input(&pFormatCtx);
   //fcloseall();
   return 0;
}

DWORD WINAPI ChildFunc2(LPVOID p)
{
   // Set up the live555 usage environment.
   UsageEnvironment* env;
   Boolean reuseFirstSource = False; // if True, later clients see the same stream as the first one; otherwise each new client restarts playback
   TaskScheduler* scheduler = BasicTaskScheduler::createNew();
   env = BasicUsageEnvironment::createNew(*scheduler);

   // Create the RTSP server.
   UserAuthenticationDatabase* authDB = NULL;
   RTSPServer* rtspServer = RTSPServer::createNew(*env, 8554, authDB);
   if (rtspServer == NULL) {
      *env << "Failed to create RTSP server: " << env->getResultMsg() << "\n";
      exit(1);
   }
   char const* descriptionString = "Session streamed by \"testOnDemandRTSPServer\"";

   // State shared with the live source.
   int datasize;           // length of the data area
   unsigned char* databuf; // pointer to the data area
   databuf = (unsigned char*)malloc(100000);
   bool dosent;            // send flag: true keeps RTSP sending, false stops

   // The original live555 demo copies ~1 MB from a file here to simulate a live
   // feed; we seed databuf from the circular array instead. In a real two-thread
   // setup this access should be protected by a lock. The per-frame copying
   // actually happens in H264FramedLiveSource; these variables are simply
   // passed down to it.
   datasize = szbuffer[(recvcount + ARRAYLENGTH - 1) % ARRAYLENGTH].len;
   for (int i = 0; i < datasize; ++i)
   {
      databuf[i] = szbuffer[(recvcount + ARRAYLENGTH - 1) % ARRAYLENGTH].str[i];
   }
   dosent = true;
   //fclose(pf);

   // Everything above matches the live555 demo apart from the live-feed setup;
   // below, addSubsession() gets our own subsession class instead of the stock one.
   char const* streamName = "h264test";
   ServerMediaSession* sms = ServerMediaSession::createNew(*env, streamName, streamName, descriptionString);
   sms->addSubsession(H264LiveVideoServerMediaSubssion::createNew(*env, reuseFirstSource, &datasize, databuf, &dosent)); // our own H264LiveVideoServerMediaSubssion
   rtspServer->addServerMediaSession(sms);
   announceStream(rtspServer, sms, streamName); // print the URL clients should use

   env->taskScheduler().doEventLoop(); // event loop: blocks waiting for connections (nothing below runs until it returns)

   free(databuf);
   return 0;
}
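One piece the listing assumes but does not show is the write_buffer() callback handed to avio_alloc_context(); it is where the muxer delivers its output bytes. Its real body lives in FFmpegReadCamera.h, so the following is only a minimal guess that dumps the muxed stream to a file:

    #include <stdio.h>
    #include <stdint.h>

    // Minimal custom-I/O write callback for avio_alloc_context().
    static int write_buffer(void *opaque, uint8_t *buf, int buf_size)
    {
        static FILE *fp = NULL; // hypothetical sink; the real project may do more here
        if (fp == NULL)
            fp = fopen("mux_dump.h264", "wb");
        if (fp != NULL)
            fwrite(buf, 1, buf_size, fp);
        return buf_size; // report everything consumed so the muxer keeps going
    }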

Core code of the Source class:

void H264FramedLiveSource::doGetNextFrame()
{
   if (*Framed_dosent == true)
   {
      // Grab the most recently completed frame from the circular array.
      bufsizel = szbuffer[(recvcount + ARRAYLENGTH - 1) % ARRAYLENGTH].len;
      // live555 caps each delivery at fMaxSize bytes; truncate if the frame is larger.
      if (bufsizel > fMaxSize)
      {
         fFrameSize = fMaxSize;
      }
      else
      {
         fFrameSize = bufsizel;
      }
      memcpy(fTo, szbuffer[(recvcount + ARRAYLENGTH - 1) % ARRAYLENGTH].str, fFrameSize);
   }
   // Schedule afterGetting() with a zero delay, i.e. deliver the frame immediately.
   nextTask() = envir().taskScheduler().scheduleDelayedTask(0, (TaskFunc*)FramedSource::afterGetting, this);
   return;
}
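The subsession class referenced in ChildFunc2 ships with the project, so I won't reproduce it here, but based on xiejiashu's post its key override probably looks something like this sketch (the member names are my assumptions):

    // Plug the live source into live555's on-demand framework.
    FramedSource* H264LiveVideoServerMediaSubssion::createNewStreamSource(
            unsigned /*clientSessionId*/, unsigned& estBitrate)
    {
        estBitrate = 500; // kbps; matches the encoder's 500000 bit/s setting above
        // Wrap our source in the framer live555 requires for H.264 payloads.
        return H264VideoStreamFramer::createNew(
            envir(),
            H264FramedLiveSource::createNew(envir(), Framed_datasize,
                                            Framed_databuf, Framed_dosent));
    }

To test, point any RTSP-capable player at the URL that announceStream() prints, e.g. ffplay rtsp://<server-ip>:8554/h264test, or open the same URL in VLC.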

Code download link:

C:\Users\DELL\Desktop\ffmpeg開發\files (leaving it here for now; I'll move it somewhere proper later.)

