This post decodes an AVI file with FFmpeg and displays it from a Qt application on the HiSilicon Hi3519. The FFmpeg and Qt configuration was covered in the previous posts and is not repeated here.
Decoding
Decoding runs in a separate thread; the code is as follows:
void VideoPlayer::run()
{
    AVFormatContext *fmt_ctx = NULL;
    AVCodecContext *dec_ctx = NULL;
    AVFrame *pf = av_frame_alloc();    // holds the decoded frame
    AVFrame *pfc = av_frame_alloc();   // holds the RGB frame used for display
    int video_stream_index;
    int width, height;

    av_register_all();
    video_stream_index = getStream(&fmt_ctx, &dec_ctx, "./source_file/test.avi");
    if (video_stream_index < 0)        // no video stream found or the file could not be opened
        return;
    decoder(&fmt_ctx, &dec_ctx, video_stream_index, pf, pfc, &width, &height);
}
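The original post does not show the class declaration. As a rough sketch, assuming VideoPlayer derives from QThread (which the run()/emit pattern above suggests), the header might look roughly like the following; everything beyond the names already used above is an assumption:

#include <QThread>
#include <QImage>
extern "C" {
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
}

// Hypothetical declaration, reconstructed from the calls used in this post
class VideoPlayer : public QThread
{
    Q_OBJECT
public:
    explicit VideoPlayer(QObject *parent = NULL) : QThread(parent) {}

signals:
    void sig_GetOneFrame(QImage image);   // emitted once per decoded frame

protected:
    void run();                           // decoding loop, runs in its own thread

private:
    int getStream(AVFormatContext **fmt_ctx, AVCodecContext **dec_ctx, char *file_name);
    int decoder(AVFormatContext **fmt_ctx, AVCodecContext **dec_ctx, int video_stream_index,
                AVFrame *pFrame, AVFrame *pFrameColor, int *width, int *height);
};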
int VideoPlayer::decoder(AVFormatContext** fmt_ctx, AVCodecContext** dec_ctx,
    int video_stream_index, AVFrame *pFrame, AVFrame *pFrameColor, int* width, int* height)
{
    AVPacket packet;
    int frameFinished;
    uint8_t *buffer;
    int numBytes;
    struct SwsContext *img_convert_ctx = NULL;

    // Report the frame dimensions back to the caller
    *width = (*dec_ctx)->width;
    *height = (*dec_ctx)->height;

    // Allocate the buffer backing the RGB frame used for display
    numBytes = avpicture_get_size(AV_PIX_FMT_RGB24, (*dec_ctx)->width, (*dec_ctx)->height);
    buffer = (uint8_t*)av_malloc(numBytes);
    avpicture_fill((AVPicture *)pFrameColor, buffer, AV_PIX_FMT_RGB24, (*dec_ctx)->width, (*dec_ctx)->height);

    // Read packets from the container and decode those belonging to the video stream
    while (av_read_frame(*fmt_ctx, &packet) >= 0) {
        if (packet.stream_index == video_stream_index) {
            avcodec_decode_video2(*dec_ctx, pFrame, &frameFinished, &packet);
            if (frameFinished)
            {
                // Convert the decoded frame (typically YUV) to RGB24;
                // the cached context is created once and reused on later calls
                img_convert_ctx = sws_getCachedContext(img_convert_ctx, (*dec_ctx)->width,
                                                       (*dec_ctx)->height, (*dec_ctx)->pix_fmt,
                                                       (*dec_ctx)->width, (*dec_ctx)->height,
                                                       AV_PIX_FMT_RGB24, SWS_BICUBIC,
                                                       NULL, NULL, NULL);
                if (!img_convert_ctx) {
                    fprintf(stderr, "Cannot initialize sws conversion context\n");
                    exit(1);
                }
                sws_scale(img_convert_ctx, (const uint8_t* const*)pFrame->data,
                          pFrame->linesize, 0, (*dec_ctx)->height, pFrameColor->data,
                          pFrameColor->linesize);

                // Wrap the RGB buffer in a QImage, deep-copy it, and notify the GUI thread
                QImage tmpImg((uchar *)buffer, (*dec_ctx)->width, (*dec_ctx)->height, QImage::Format_RGB888);
                QImage image = tmpImg.copy();
                emit sig_GetOneFrame(image);
            }
        }
        av_free_packet(&packet);
    }
    printf("finished\n");

    // Release conversion and decoding resources
    sws_freeContext(img_convert_ctx);
    av_free(buffer);
    av_frame_free(&pFrameColor);
    av_frame_free(&pFrame);
    avcodec_close(*dec_ctx);
    avformat_close_input(fmt_ctx);
    return 0;
}
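The decode loop above uses the older FFmpeg calls (avcodec_decode_video2(), avpicture_get_size(), avpicture_fill(), av_free_packet()), which match the FFmpeg build from the previous post but are deprecated in newer releases. For comparison only, here is a sketch of the same decode step written against the post-3.1 send/receive API; it is not the code that runs on the board, and the helper name decode_packet is made up for illustration:

extern "C" {
#include <libavcodec/avcodec.h>
}

// Sketch of the per-packet decode step with the newer FFmpeg API
static int decode_packet(AVCodecContext *dec_ctx, AVPacket *packet, AVFrame *frame)
{
    int ret = avcodec_send_packet(dec_ctx, packet);   // feed one packet to the decoder
    if (ret < 0)
        return ret;
    while (ret >= 0) {
        ret = avcodec_receive_frame(dec_ctx, frame);  // drain all frames produced by this packet
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return 0;
        if (ret < 0)
            return ret;
        // frame now holds one decoded picture: convert it with sws_scale()
        // and emit sig_GetOneFrame() exactly as in decoder() above
    }
    return 0;
}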
int VideoPlayer::getStream(AVFormatContext **fmt_ctx, AVCodecContext **dec_ctx, char* file_name)
{
    int video_stream_index = -1;
    int ret;

    // Open the container and read the stream information
    if ((ret = avformat_open_input(fmt_ctx, file_name, NULL, NULL)) < 0) {
        av_log(NULL, AV_LOG_ERROR, "failed to open input file.\n");
        return ret;
    }
    if ((ret = avformat_find_stream_info(*fmt_ctx, NULL)) < 0) {
        av_log(NULL, AV_LOG_ERROR, "failed to find stream information.\n");
        avformat_close_input(fmt_ctx);
        return ret;
    }

    // Locate the first video stream and open a decoder for it
    for (unsigned int i = 0; i < (*fmt_ctx)->nb_streams; i++) {
        if ((*fmt_ctx)->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            video_stream_index = i;
            *dec_ctx = (*fmt_ctx)->streams[i]->codec;

            AVCodec *cod = avcodec_find_decoder((*dec_ctx)->codec_id);
            if (!cod) {
                av_log(NULL, AV_LOG_ERROR, "failed to find decoder.\n");
                avformat_close_input(fmt_ctx);
                return -1;
            }
            if (avcodec_open2(*dec_ctx, cod, NULL) != 0) {
                av_log(NULL, AV_LOG_ERROR, "failed to open codec.\n");
                avformat_close_input(fmt_ctx);
                return -1;
            }
            break;
        }
    }

    // Still -1 here means no video stream was found
    return video_stream_index;
}
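As a side note, the manual loop over nb_streams can also be replaced by av_find_best_stream(), which selects a video stream and returns a matching decoder in one call. A brief sketch under the same old-API assumptions (the helper name find_video_stream is made up for illustration):

extern "C" {
#include <libavformat/avformat.h>
}

// Hypothetical alternative to the loop in getStream(), using av_find_best_stream()
static int find_video_stream(AVFormatContext *fmt_ctx, AVCodecContext **dec_ctx)
{
    AVCodec *dec = NULL;
    // returns the index of the "best" video stream and the matching decoder
    int index = av_find_best_stream(fmt_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, &dec, 0);
    if (index < 0)
        return index;
    *dec_ctx = fmt_ctx->streams[index]->codec;   // old-API codec context, as above
    if (avcodec_open2(*dec_ctx, dec, NULL) < 0)
        return -1;
    return index;
}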
After decoding, the data is converted to RGB and then wrapped in a QImage; each time a frame has been decoded, a signal is emitted to update the displayed image:
QImage tmpImg((uchar *)buffer,(*dec_ctx)->width,(*dec_ctx)->height,QImage::Format_RGB888);
QImage image = tmpImg.copy();
emit sig_GetOneFrame(image);
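Note that this QImage constructor assumes each scanline in buffer is 32-bit aligned; for tightly packed RGB24 data that only holds when the width is a multiple of 4 (true for common widths such as 1280 or 1920). If other widths are possible, a variant that passes FFmpeg's stride explicitly is safer; a small sketch:

// Sketch: pass FFmpeg's stride so non-4-aligned widths are handled correctly
QImage tmpImg((uchar *)buffer, (*dec_ctx)->width, (*dec_ctx)->height,
              pFrameColor->linesize[0], QImage::Format_RGB888);
QImage image = tmpImg.copy();          // deep copy before handing off to the GUI thread
emit sig_GetOneFrame(image);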
Display
The code is as follows:
void MainWindow::paintEvent(QPaintEvent *event)
{
    QPainter painter(this);
    painter.setBrush(Qt::black);
    painter.drawRect(0, 0, this->width(), this->height()); // paint the background black first
    if (mImage.size().width() <= 0) return;                // nothing decoded yet

    // Scale the frame to the window while keeping its aspect ratio, then centre it
    QImage img = mImage.scaled(this->size(), Qt::KeepAspectRatio);
    int x = (this->width() - img.width()) / 2;
    int y = (this->height() - img.height()) / 2;
    painter.drawImage(QPoint(x, y), img);                  // draw the frame
}
void MainWindow::slotGetOneFrame(QImage img)
{
    mImage = img;   // keep the latest frame
    update();       // schedule a repaint
}
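The post does not show where the player thread is created or how the signal is connected to the slot. A minimal sketch of the wiring, assuming a VideoPlayer *mPlayer member in MainWindow (both the member name and the construction site are assumptions); since sig_GetOneFrame() is emitted from the decoding thread, Qt delivers it as a queued connection and the slot runs safely on the GUI thread:

// Hypothetical wiring in the MainWindow constructor
MainWindow::MainWindow(QWidget *parent) : QMainWindow(parent)
{
    mPlayer = new VideoPlayer(this);                 // assumed member: VideoPlayer *mPlayer
    connect(mPlayer, SIGNAL(sig_GetOneFrame(QImage)),
            this, SLOT(slotGetOneFrame(QImage)));    // cross-thread: delivered as a queued connection
    mPlayer->start();                                // QThread::start() eventually calls run()
}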
Summary:
After compilation the program runs successfully on the board, but playback is noticeably slow. The exact cause was not investigated further, because hardware decoding was adopted later and the software-decode approach was dropped. Reference code available.