下面兩種方式是直接翻譯過來的,還有問題,比如指針的使用和值的傳入。考慮C#和C++的差異,還是要抱着懷疑的態度去看待,不一定是對的。
H264視頻解碼網絡流:
using FFmpeg.AutoGen;
using RTForwardServer;
using System;
using System.Collections.Generic;
using System.Drawing;
using System.Drawing.Imaging;
using System.Linq;
using System.Net.Sockets;
using System.Runtime.InteropServices;
using System.Text;
using System.Threading.Tasks;
using System.Windows.Forms;
/// <summary>
/// http://blog.csdn.net/jammg/article/details/52750241
/// </summary>
namespace AVParser.Parser
{
public unsafe class H264Parser
{
    /// <summary>
    /// Target control that displays each decoded video frame.
    /// </summary>
    public PictureBox ShowPictureBox { set; get; }

    /// <summary>
    /// Decodes a raw H.264 elementary stream received over <paramref name="socket"/>
    /// and renders each decoded frame into <see cref="ShowPictureBox"/> as RGB24.
    /// Returns when the remote side closes the connection or a fatal decode error occurs.
    /// </summary>
    /// <param name="socket">Connected socket delivering the raw H.264 byte stream.</param>
    public unsafe void Parser(Socket socket)
    {
        AVCodecContext* pCodecCtx = null;
        AVCodecParserContext* pCodecParserCtx = null;
        AVCodec* pCodec = null;
        AVFrame* pFrame = null;          // decoded frame (YUV)
        AVPacket packet;                 // parsed H.264 packet
        SwsContext* pSwsCtx = null;
        AVCodecID codec_id = AVCodecID.AV_CODEC_ID_H264;
        int ret;

        /* Locate the H.264 decoder and allocate its context (not opened yet). */
        pCodec = ffmpeg.avcodec_find_decoder(codec_id);
        pCodecCtx = ffmpeg.avcodec_alloc_context3(pCodec);

        /* Parser splits the continuous byte stream into complete packets. */
        pCodecParserCtx = ffmpeg.av_parser_init((int)codec_id);
        if (null == pCodecParserCtx)
        {
            ffmpeg.avcodec_free_context(&pCodecCtx);
            return; // parser unavailable — abort
        }

        /* We feed partial (truncated) frames to the decoder when it supports that.
           BUG FIX: the original tested 'capabilities > 0 && CODEC_CAP_TRUNCATED > 0',
           which is true for ANY capable codec; a bitwise AND is required. */
        if ((pCodec->capabilities & ffmpeg.CODEC_CAP_TRUNCATED) != 0)
            pCodecCtx->flags |= ffmpeg.CODEC_FLAG_TRUNCATED;

        /* Open the decoder. */
        ret = ffmpeg.avcodec_open2(pCodecCtx, pCodec, null);
        if (ret < 0)
        {
            ffmpeg.avcodec_free_context(&pCodecCtx);
            ffmpeg.av_parser_close(pCodecParserCtx);
            return; // decoder could not be opened — abort
        }

        pFrame = ffmpeg.av_frame_alloc();
        ffmpeg.av_init_packet(&packet);
        packet.size = 0;
        packet.data = null;

        const int in_buffer_size = 4096;
        byte[] in_buffer = new byte[in_buffer_size];
        int cur_size;
        int got;
        bool is_first_time = true;

        // RGB24 conversion target, allocated once when the first frame arrives
        // (width/height are only known after decoding begins).
        var dstData = new byte_ptrArray4();
        var dstLinesize = new int_array4();
        IntPtr convertedFrameBufferPtr = IntPtr.Zero;

        try
        {
            while (true)
            {
                cur_size = socket.Receive(in_buffer, in_buffer_size, SocketFlags.None);
                if (cur_size == 0)
                    break; // remote side closed the connection

                /* BUG FIX: the original av_malloc'ed an UNINITIALIZED native buffer and
                   parsed that garbage (leaking the allocation every iteration). Pin the
                   managed receive buffer and parse the bytes actually received. */
                fixed (byte* in_ptr = in_buffer)
                {
                    byte* cur_ptr = in_ptr;
                    while (cur_size > 0)
                    {
                        /* av_parser_parse2 returns the number of input bytes consumed;
                           packet.size becomes non-zero once a full packet is assembled. */
                        int len = ffmpeg.av_parser_parse2(pCodecParserCtx, pCodecCtx,
                            &packet.data, &packet.size, cur_ptr, cur_size,
                            ffmpeg.AV_NOPTS_VALUE, ffmpeg.AV_NOPTS_VALUE, ffmpeg.AV_NOPTS_VALUE);
                        cur_ptr += len;
                        cur_size -= len;

                        if (packet.size == 0)
                            continue; // parser needs more bytes for a complete packet

                        ret = ffmpeg.avcodec_decode_video2(pCodecCtx, pFrame, &got, &packet);
                        if (ret < 0)
                        {
                            return; // fatal decode error — cleanup runs in finally
                        }
                        if (got > 0)
                        {
                            if (is_first_time)
                            {
                                // One-time allocation of the YUV -> RGB24 converter and buffer.
                                pSwsCtx = ffmpeg.sws_getContext(
                                    pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt,
                                    pCodecCtx->width, pCodecCtx->height, AVPixelFormat.AV_PIX_FMT_RGB24,
                                    ffmpeg.SWS_BICUBIC, null, null, null);
                                int bufSize = ffmpeg.av_image_get_buffer_size(
                                    AVPixelFormat.AV_PIX_FMT_RGB24,
                                    pCodecCtx->width, pCodecCtx->height, 1);
                                convertedFrameBufferPtr = Marshal.AllocHGlobal(bufSize);
                                ffmpeg.av_image_fill_arrays(ref dstData, ref dstLinesize,
                                    (byte*)convertedFrameBufferPtr, AVPixelFormat.AV_PIX_FMT_RGB24,
                                    pCodecCtx->width, pCodecCtx->height, 1);
                                is_first_time = false;
                            }

                            /* BUG FIX: scale directly into the buffer the Bitmap wraps.
                               The original scaled into 'picture' but displayed a second,
                               never-written buffer, and leaked one AllocHGlobal per frame. */
                            ffmpeg.sws_scale(pSwsCtx, pFrame->data, pFrame->linesize,
                                0, pCodecCtx->height, dstData, dstLinesize);

                            var bitmap = new Bitmap(pCodecCtx->width, pCodecCtx->height,
                                dstLinesize[0], PixelFormat.Format24bppRgb, convertedFrameBufferPtr);

                            /* BUG FIX: the original disposed the PictureBox CONTROL after
                               every frame; dispose the previous frame's Image instead. */
                            ShowPictureBox.Image?.Dispose();
                            ShowPictureBox.Image = bitmap;
                        }
                    }
                }
            }
        }
        finally
        {
            // Release native resources on every exit path (break or mid-loop return).
            ShowPictureBox.Image?.Dispose();
            ShowPictureBox.Image = null; // drop the last Bitmap before freeing its memory
            ffmpeg.av_free_packet(&packet);
            ffmpeg.av_frame_free(&pFrame);
            ffmpeg.sws_freeContext(pSwsCtx);
            ffmpeg.avcodec_free_context(&pCodecCtx);
            ffmpeg.av_parser_close(pCodecParserCtx);
            if (convertedFrameBufferPtr != IntPtr.Zero)
                Marshal.FreeHGlobal(convertedFrameBufferPtr);
        }
    }
}
}
AAC音頻解碼網絡流:
using FFmpeg.AutoGen;
using RTForwardServer;
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Media;
using System.Net.Sockets;
using System.Runtime.InteropServices;
using System.Text;
using System.Threading.Tasks;
using System.Windows.Forms;
/// <summary>
/// http://blog.csdn.net/jammg/article/details/52750241
/// </summary>
namespace AVParser.Parser
{
public unsafe class ACCParser
{
    /// <summary>
    /// Decodes a raw AAC elementary stream received over <paramref name="socket"/>,
    /// interleaves the planar channel data into a packed buffer, and hands it to
    /// <see cref="SoundPlayer"/>. Returns when the remote side closes the connection
    /// or a fatal decode error occurs.
    /// </summary>
    /// <param name="socket">Connected socket delivering the raw AAC byte stream.</param>
    public unsafe void Parser(Socket socket)
    {
        AVCodecContext* pCodecCtx = null;
        AVCodecParserContext* pCodecParserCtx = null;
        AVCodec* pCodec = null;
        AVFrame* pFrame = null;
        AVPacket packet;
        AVCodecID codec_id = AVCodecID.AV_CODEC_ID_AAC;
        int ret;

        /* Locate the AAC decoder and allocate its context (not opened yet). */
        pCodec = ffmpeg.avcodec_find_decoder(codec_id);
        pCodecCtx = ffmpeg.avcodec_alloc_context3(pCodec);

        /* Parser splits the continuous byte stream into complete packets. */
        pCodecParserCtx = ffmpeg.av_parser_init((int)codec_id);
        if (null == pCodecParserCtx)
        {
            /* BUG FIX: the original called Application.Exit() and then FELL THROUGH,
               dereferencing the null parser further down. Stop here instead. */
            ffmpeg.avcodec_free_context(&pCodecCtx);
            return;
        }

        /* BUG FIX: bitwise capability test; the original compared with '>' which is
           true for ANY codec with capabilities. */
        if ((pCodec->capabilities & ffmpeg.CODEC_CAP_TRUNCATED) != 0)
            pCodecCtx->flags |= ffmpeg.CODEC_FLAG_TRUNCATED;

        /* Open the decoder. */
        ret = ffmpeg.avcodec_open2(pCodecCtx, pCodec, null);
        if (ret < 0)
        {
            ffmpeg.avcodec_free_context(&pCodecCtx);
            ffmpeg.av_parser_close(pCodecParserCtx);
            return; // decoder could not be opened — abort
        }

        pFrame = ffmpeg.av_frame_alloc();
        ffmpeg.av_init_packet(&packet);
        packet.size = 0;
        packet.data = null;

        // Holds one frame of packed PCM (L/R interleaved) for playback;
        // sized on the first decoded frame.
        int out_buf_size = 0;
        byte* out_buf = null;

        const int in_buffer_size = 4096;
        byte[] in_buffer = new byte[in_buffer_size];
        int cur_size;
        int got;
        bool is_first_time = true;

        try
        {
            while (true)
            {
                cur_size = socket.Receive(in_buffer, in_buffer.Length, SocketFlags.None);
                if (cur_size == 0)
                    break; // remote side closed the connection

                /* BUG FIX: the original av_malloc'ed an UNINITIALIZED native buffer and
                   parsed that garbage (leaking the allocation every iteration). Pin the
                   managed receive buffer and parse the bytes actually received. */
                fixed (byte* in_ptr = in_buffer)
                {
                    byte* cur_ptr = in_ptr;
                    while (cur_size > 0)
                    {
                        /* av_parser_parse2 returns the number of input bytes consumed. */
                        int len = ffmpeg.av_parser_parse2(pCodecParserCtx, pCodecCtx,
                            &packet.data, &packet.size, cur_ptr, cur_size,
                            ffmpeg.AV_NOPTS_VALUE, ffmpeg.AV_NOPTS_VALUE, ffmpeg.AV_NOPTS_VALUE);
                        cur_ptr += len;
                        cur_size -= len;

                        if (packet.size == 0)
                            continue; // parser needs more bytes for a complete packet

                        ret = ffmpeg.avcodec_decode_audio4(pCodecCtx, pFrame, &got, &packet);
                        if (ret < 0)
                        {
                            return; // fatal decode error — cleanup runs in finally
                        }
                        if (got > 0)
                        {
                            if (is_first_time)
                            {
                                // One-time allocation of the packed output buffer.
                                out_buf_size = ffmpeg.av_samples_get_buffer_size(
                                    null,
                                    pCodecCtx->channels,
                                    pFrame->nb_samples, // samples per channel in this frame
                                    pCodecCtx->sample_fmt,
                                    1);
                                out_buf = (byte*)ffmpeg.av_malloc((ulong)out_buf_size);
                                if (out_buf == null)
                                {
                                    return; // allocation failed — cleanup runs in finally
                                }
                                is_first_time = false;
                            }

                            // Interleave the planar sample data into out_buf.
                            // NOTE(review): this assumes stereo AV_SAMPLE_FMT_FLTP and copies
                            // the raw 32-bit float bytes verbatim (matching the original);
                            // proper playback needs a float -> int16 conversion, and a mono
                            // stream would make extended_data[1] invalid — confirm the format.
                            UInt32* l = (UInt32*)pFrame->extended_data[0];
                            UInt32* r = (UInt32*)pFrame->extended_data[1];
                            for (int i = 0, j = 0; i < out_buf_size; i += 8, j++)
                            {
                                out_buf[i] = (byte)(r[j] & 0xff);
                                out_buf[i + 1] = (byte)(r[j] >> 8 & 0xff);
                                out_buf[i + 2] = (byte)(r[j] >> 16 & 0xff);
                                out_buf[i + 3] = (byte)(r[j] >> 24 & 0xff);
                                out_buf[i + 4] = (byte)(l[j] & 0xff);
                                out_buf[i + 5] = (byte)(l[j] >> 8 & 0xff);
                                out_buf[i + 6] = (byte)(l[j] >> 16 & 0xff);
                                out_buf[i + 7] = (byte)(l[j] >> 24 & 0xff);
                            }

                            // byte* -> byte[] in one native copy (the original looped per byte).
                            byte[] bytes = new byte[out_buf_size];
                            Marshal.Copy((IntPtr)out_buf, bytes, 0, out_buf_size);

                            // NOTE(review): SoundPlayer expects a complete WAV (RIFF) stream;
                            // headerless PCM like this will not play correctly. Kept to
                            // preserve the original flow — consider prepending a WAV header
                            // or using a PCM-capable audio API.
                            MemoryStream ms = new MemoryStream(bytes);
                            SoundPlayer sp = new SoundPlayer(ms);
                            sp.Play();
                        }
                    }
                }
            }
        }
        finally
        {
            // Release native resources on every exit path (break or mid-loop return).
            ffmpeg.av_free_packet(&packet);
            ffmpeg.av_frame_free(&pFrame);
            ffmpeg.avcodec_free_context(&pCodecCtx);
            ffmpeg.av_parser_close(pCodecParserCtx);
            if (out_buf != null)
                ffmpeg.av_free(out_buf); // BUG FIX: the original leaked the PCM buffer
        }
    }
}
}
