只要開始時初始化一次,結束後釋放就好,中間可以循環轉碼。
// Pattern: initialize once, convert frames in a loop, release at the end.
AVFrame *m_pFrameRGB,*m_pFrameYUV;          // frame headers for the RGB output and YUV input
uint8_t *m_rgbBuffer,*m_yuvBuffer;          // pixel buffers backing the two frames
struct SwsContext *m_img_convert_ctx;       // libswscale conversion context (YUV420P -> RGB32)

// Allocate the two frames, the two buffers, and one conversion context.
// NOTE(review): reads nwidth/nheight without declaring them — presumably
// class members or globals set before init() is called; confirm at call site.
void init()
{
    // Allocate a frame header for each image
    m_pFrameYUV = av_frame_alloc();
    m_pFrameRGB = av_frame_alloc();
    // nwidth/nheight are the incoming resolution; if the resolution can
    // change at runtime, size these buffers for the maximum expected frame.
    int numBytes = avpicture_get_size(AV_PIX_FMT_RGB32, nwidth,nheight);
    m_rgbBuffer = (uint8_t *) av_malloc(numBytes * sizeof(uint8_t));
    int yuvSize = nwidth * nheight * 3 /2;  // YUV420P: 1.5 bytes per pixel
    m_yuvBuffer = (uint8_t *)av_malloc(yuvSize);
    // Beware of leaking sws contexts: sws_getContext must be called only
    // once (here, in init); release it with sws_freeContext in release()
    // or the destructor.
    // Set up the image conversion context (YUV420P -> RGB32).
    m_img_convert_ctx = sws_getContext(nwidth, nheight, AV_PIX_FMT_YUV420P, \
        nwidth, nheight, AV_PIX_FMT_RGB32, SWS_BICUBIC, NULL, NULL, NULL);
}

// Convert one decoded YUV420P frame (pbuff_in) to RGB32 and hand a copy
// of the result to the UI layer.
void play(char* pbuff_in,int nwidth,int nheight)
{
    // avpicture_fill only wires plane pointers/strides into the existing
    // buffers — no pixel data is copied here.
    avpicture_fill((AVPicture *) m_pFrameRGB, m_rgbBuffer, AV_PIX_FMT_RGB32,nwidth, nheight);
    avpicture_fill((AVPicture *) m_pFrameYUV, (uint8_t *)pbuff_in, AV_PIX_FMT_YUV420P, nwidth, nheight);
    // Convert the decompressed YUV420P image to an RGB image.
    sws_scale(m_img_convert_ctx, (uint8_t const * const *) m_pFrameYUV->data, m_pFrameYUV->linesize, 0, nheight, m_pFrameRGB->data, m_pFrameRGB->linesize);
    // Wrap the RGB data in a QImage (shares m_rgbBuffer, still no copy)...
    QImage tmpImg((uchar *)m_rgbBuffer,nwidth,nheight,QImage::Format_RGB32);
    // ...then deep-copy it for display, since m_rgbBuffer is overwritten
    // by the next frame.
    // NOTE(review): nWindowIndex and m_mapImage are not declared in this
    // snippet — presumably members of the enclosing widget; verify.
    m_mapImage[nWindowIndex] = tmpImg.copy();
}

// Free everything allocated in init().
void release()
{
    av_frame_free(&m_pFrameYUV);
    av_frame_free(&m_pFrameRGB);
    av_free(m_rgbBuffer);
    av_free(m_yuvBuffer);
    sws_freeContext(m_img_convert_ctx);
}
bool YV12ToBGR24_FFmpeg(unsigned char* pYUV,unsigned char* pBGR24,int width,int height) { if (width < 1 || height < 1 || pYUV == NULL || pBGR24 == NULL) return false; //int srcNumBytes,dstNumBytes; //uint8_t *pSrc,*pDst; AVPicture pFrameYUV,pFrameBGR; //pFrameYUV = avpicture_alloc(); //srcNumBytes = avpicture_get_size(PIX_FMT_YUV420P,width,height); //pSrc = (uint8_t *)malloc(sizeof(uint8_t) * srcNumBytes); avpicture_fill(&pFrameYUV,pYUV,PIX_FMT_YUV420P,width,height); //U,V互換 uint8_t * ptmp=pFrameYUV.data[1]; pFrameYUV.data[1]=pFrameYUV.data[2]; pFrameYUV.data [2]=ptmp; //pFrameBGR = avcodec_alloc_frame(); //dstNumBytes = avpicture_get_size(PIX_FMT_BGR24,width,height); //pDst = (uint8_t *)malloc(sizeof(uint8_t) * dstNumBytes); avpicture_fill(&pFrameBGR,pBGR24,PIX_FMT_BGR24,width,height); struct SwsContext* imgCtx = NULL; imgCtx = sws_getContext(width,height,PIX_FMT_YUV420P,width,height,PIX_FMT_BGR24,SWS_BILINEAR,0,0,0); if (imgCtx != NULL){ sws_scale(imgCtx,pFrameYUV.data,pFrameYUV.linesize,0,height,pFrameBGR.data,pFrameBGR.linesize); if(imgCtx){ sws_freeContext(imgCtx); imgCtx = NULL; } return true; } else{ sws_freeContext(imgCtx); imgCtx = NULL; return false; } }
另一種方法:
