NuPlayer播放源碼分析之DecoderBase分析
[時間:2017-02] [狀態:Open]
[關鍵詞:android,nuplayer,開源播放器,播放框架,DecoderBase,MediaCodec]
0 導讀
DecoderBase是AHandler的一個子類,主要功能是負責解碼,按照MediaPlayer的框架,一般是調用MediaCodec完成解碼,功能類似FFmpeg的libavcodec。其主要功能是解碼器初始化、解碼以及和其他模塊的交互,比如Source、Renderer等。
NuPlayer::Decoder是DecoderBase的子類。
本文是我的NuPlayer播放框架的第六篇。
1 NuPlayer中關於DecoderBase的調用
NuPlayer的成員變量中有兩個DecoderBase指針,如下:
sp<DecoderBase> mVideoDecoder;
sp<DecoderBase> mAudioDecoder;
這里以視頻解碼器mVideoDecoder
說明相關調用邏輯。
// case kWhatSetVideoSurface:
mVideoDecoder->setVideoSurface(surface);
// case kWhatConfigPlayback:
if (mVideoDecoder != NULL) {
float rate = getFrameRate();
if (rate > 0) {
sp<AMessage> params = new AMessage();
params->setFloat("operating-rate", rate * mPlaybackSettings.mSpeed);
mVideoDecoder->setParameters(params);
}
}
// 很多地方調用
instantiateDecoder(false, &mVideoDecoder);
// case kWhatVideoNotify: -- case kWhatAudioNotify:
mVideoDecoder.clear();
// NuPlayer::onStart()
mVideoDecoder->setRenderer(mRenderer);
mVideoDecoder->getStats();
mVideoDecoder->signalResume(needNotify);
這里最主要的初始化位於instantiateDecoder中,其簡化版的實現如下(刪除音頻和字幕相關初始化參數):
// Creates and initializes the decoder for one stream (only the video path is
// shown; the article notes audio/subtitle setup was removed from this excerpt).
// Returns OK on success, or the error embedded in the Source's format message.
status_t NuPlayer::instantiateDecoder(
bool audio, sp<DecoderBase> *decoder, bool checkAudioModeChange) {
// Fetch the stream's (still compressed) format from the Source.
sp<AMessage> format = mSource->getFormat(audio);
if (format == NULL) {
return UNKNOWN_ERROR;
} else {
status_t err;
// The Source may report an error code inside the format message itself.
if (format->findInt32("err", &err) && err) {
return err;
}
}
format->setInt32("priority", 0 /* realtime */);
{
// "generation" lets NuPlayer ignore notifications coming from a stale
// decoder after this one has been torn down and a new one created.
sp<AMessage> notify = new AMessage(kWhatVideoNotify, this);
++mVideoDecoderGeneration;
notify->setInt32("generation", mVideoDecoderGeneration);
*decoder = new Decoder(
notify, mSource, mPID, mRenderer, mSurface, mCCDecoder);
}
// init() registers the decoder handler on its looper; configure() posts
// the format asynchronously (see DecoderBase::configure below).
(*decoder)->init();
(*decoder)->configure(format);
return OK;
}
2 NuPlayer::DecoderBase類分析
DecoderBase類聲明如下:
// Base class for NuPlayer's decoders: an AHandler that turns every public
// call (configure/setParameters/flush/...) into a message posted to its own
// looper, dispatched in onMessageReceived() to the pure-virtual onXXX() hooks
// that the concrete Decoder / DecoderPassThrough subclasses implement.
// NOTE(review): "&notify" / "&params" below were HTML-mangled in the original
// text ("¬ify" / "¶ms"); restored here.
struct NuPlayer::DecoderBase : public AHandler {
DecoderBase(const sp<AMessage> &notify);
void configure(const sp<AMessage> &format);
void init();
void setParameters(const sp<AMessage> &params);
// Synchronous call to ensure decoder will not request or send out data.
void pause();
void setRenderer(const sp<Renderer> &renderer);
virtual status_t setVideoSurface(const sp<Surface> &) { return INVALID_OPERATION; }
status_t getInputBuffers(Vector<sp<ABuffer> > *dstBuffers) const;
void signalFlush();
void signalResume(bool notifyComplete);
void initiateShutdown();
virtual sp<AMessage> getStats() const;
protected:
virtual ~DecoderBase();
virtual void onMessageReceived(const sp<AMessage> &msg);
// Hooks run on the decoder looper; implemented by subclasses.
virtual void onConfigure(const sp<AMessage> &format) = 0;
virtual void onSetParameters(const sp<AMessage> &params) = 0;
virtual void onSetRenderer(const sp<Renderer> &renderer) = 0;
virtual void onGetInputBuffers(Vector<sp<ABuffer> > *dstBuffers) = 0;
virtual void onResume(bool notifyComplete) = 0;
virtual void onFlush() = 0;
virtual void onShutdown(bool notifyComplete) = 0;
// Drives the input-fetch loop; reposts itself while doRequestBuffers()
// says more data is wanted.
void onRequestInputBuffers();
virtual bool doRequestBuffers() = 0;
virtual void handleError(int32_t err);
sp<AMessage> mNotify; // notifications back to NuPlayer
int32_t mBufferGeneration; // bumped on flush/shutdown to invalidate stale replies
bool mPaused;
sp<AMessage> mStats;
private:
sp<ALooper> mDecoderLooper;
bool mRequestInputBuffersPending;
DISALLOW_EVIL_CONSTRUCTORS(DecoderBase);
};
從聲明來看,DecoderBase主要是基於AHandler-ALoop搭建一個解碼器框架和消息循環泵。將對public接口的調用直接轉移到onXXX的調用上。
下面以configure接口調用為例簡單說明下DecoderBase中的調用邏輯。其實現代碼如下:
// Asynchronous: packages the format into a kWhatConfigure message and posts
// it to the decoder's looper; onConfigure() in the subclass does the work.
void NuPlayer::DecoderBase::configure(const sp<AMessage> &format) {
sp<AMessage> msg = new AMessage(kWhatConfigure, this);
msg->setMessage("format", format);
msg->post();
}
這個函數直接發送了kWhatConfigure消息,對應的消息處理如下:
void NuPlayer::DecoderBase::onMessageReceived(const sp<AMessage> &msg) {
switch (msg->what()) {
case kWhatConfigure:
{
sp<AMessage> format;
CHECK(msg->findMessage("format", &format));
onConfigure(format);
break;
}
// ...
這就直接調用到DecoderBase::onConfigure函數上, 注意這是一個純虛函數,沒有實現的。
其他對外接口的實現邏輯和configure非常類似。
在NuPlayer中DecoderBase有兩個子類:Decoder和DecoderPassThrough(從NuPlayer調用來看,這個類僅跟音頻解碼有關)。
我們還是重點關注一個類NuPlayer::Decoder。
3 NuPlayer::Decoder接口及主要成員分析
這個類內部是真正調用MediaCodec實現解碼的類,這里僅關注主要的對外及protected接口,和核心的成員變量。代碼如下:
// Concrete decoder: wraps a MediaCodec instance and implements the onXXX()
// hooks declared by DecoderBase.
// NOTE(review): "&notify" / "&params" below were HTML-mangled in the original
// text ("¬ify" / "¶ms"); restored here.
struct NuPlayer::Decoder : public DecoderBase {
Decoder(const sp<AMessage> &notify,
const sp<Source> &source, pid_t pid,
const sp<Renderer> &renderer = NULL, const sp<Surface> &surface = NULL,
const sp<CCDecoder> &ccDecoder = NULL);
virtual sp<AMessage> getStats() const;
// sets the output surface of video decoders.
virtual status_t setVideoSurface(const sp<Surface> &surface);
protected:
virtual ~Decoder();
virtual void onMessageReceived(const sp<AMessage> &msg);
virtual void onConfigure(const sp<AMessage> &format);
virtual void onSetParameters(const sp<AMessage> &params);
virtual void onSetRenderer(const sp<Renderer> &renderer);
virtual void onGetInputBuffers(Vector<sp<ABuffer> > *dstBuffers);
virtual void onResume(bool notifyComplete);
virtual void onFlush();
virtual void onShutdown(bool notifyComplete);
virtual bool doRequestBuffers();
private:
sp<Surface> mSurface;
sp<Source> mSource;
sp<Renderer> mRenderer;
sp<AMessage> mInputFormat;
sp<AMessage> mOutputFormat;
sp<MediaCodec> mCodec; // the actual decoder
sp<ALooper> mCodecLooper;
List<sp<AMessage> > mPendingInputMessages;
Vector<sp<ABuffer> > mInputBuffers; // input (compressed) buffers
Vector<sp<ABuffer> > mOutputBuffers; // output (decoded) buffers
Vector<bool> mInputBufferIsDequeued;
Vector<MediaBuffer *> mMediaBuffers;
Vector<size_t> mDequeuedInputBuffers;
DISALLOW_EVIL_CONSTRUCTORS(Decoder);
};
接口多數是繼承自DecoderBase的public和protected成員函數,這里最主要的成員是mCodec,接下來的流程梳理也是圍繞這一點展開。
4 DecoderBase/Decoder實現解析
構造函數和析構函數
從代碼來看這兩個函數主要是創建和銷毀Looper,同時釋放MediaCodec相關資源,代碼如下:
// Constructor: creates the dedicated looper that MediaCodec will run on.
// NOTE(review): "&notify" was HTML-mangled ("¬ify") and the init-list had a
// stray trailing comma after mRenderer(renderer) in the original text — both
// fixed. The excerpt also omits several member initializers (e.g. mCCDecoder,
// mPid) present in the full AOSP source — TODO confirm against AOSP.
NuPlayer::Decoder::Decoder(
const sp<AMessage> &notify,
const sp<Source> &source,
pid_t pid,
const sp<Renderer> &renderer,
const sp<Surface> &surface,
const sp<CCDecoder> &ccDecoder)
: DecoderBase(notify),
mSurface(surface),
mSource(source),
mRenderer(renderer)
{
mCodecLooper = new ALooper;
mCodecLooper->setName("NPDecoder-CL");
// Start the codec looper at audio priority.
mCodecLooper->start(false, false, ANDROID_PRIORITY_AUDIO);
}
// Releases every held MediaBuffer and resets all input-buffer bookkeeping
// (dequeued flags, pending messages) to match the current input-buffer count.
void NuPlayer::Decoder::releaseAndResetMediaBuffers() {
for (size_t i = 0; i < mMediaBuffers.size(); i++) {
if (mMediaBuffers[i] != NULL) {
mMediaBuffers[i]->release();
mMediaBuffers.editItemAt(i) = NULL;
}
}
// Resize to track mInputBuffers one-to-one, all slots cleared.
mMediaBuffers.resize(mInputBuffers.size());
for (size_t i = 0; i < mMediaBuffers.size(); i++) {
mMediaBuffers.editItemAt(i) = NULL;
}
mInputBufferIsDequeued.clear();
mInputBufferIsDequeued.resize(mInputBuffers.size());
for (size_t i = 0; i < mInputBufferIsDequeued.size(); i++) {
mInputBufferIsDequeued.editItemAt(i) = false;
}
mPendingInputMessages.clear();
mDequeuedInputBuffers.clear();
// -1 disables the "skip rendering until" gate used after seeks.
mSkipRenderingUntilMediaTimeUs = -1;
}
// Releases the MediaCodec and any MediaBuffers still held.
// NOTE(review): mCodec is dereferenced without a null check — this assumes the
// codec was always created before destruction; TODO confirm against full AOSP
// source (onShutdown() may already have nulled it).
NuPlayer::Decoder::~Decoder() {
mCodec->release();
releaseAndResetMediaBuffers();
}
init()和configure()實現
init()實現比較簡單,就是把Looper和Handler關聯起來,代碼如下:
// Registers this handler on its dedicated looper so posted messages
// (kWhatConfigure etc.) reach onMessageReceived().
void NuPlayer::DecoderBase::init() {
mDecoderLooper->registerHandler(this);
}
configure()的最終實現是在onConfigure中,代碼如下:
// Creates, configures and starts the MediaCodec for the given stream format.
// Runs on the decoder looper in response to kWhatConfigure.
void NuPlayer::Decoder::onConfigure(const sp<AMessage> &format) {
CHECK(mCodec == NULL);
AString mime;
CHECK(format->findString("mime", &mime)); // the concrete audio/video type is required
mIsAudio = !strncasecmp("audio/", mime.c_str(), 6);
mIsVideoAVC = !strcasecmp(MEDIA_MIMETYPE_VIDEO_AVC, mime.c_str());
mComponentName = mime;
mComponentName.append(" decoder");
// Create the MediaCodec matching the mime type.
mCodec = MediaCodec::CreateByType(
mCodecLooper, mime.c_str(), false /* encoder */, NULL /* err */, mPid);
int32_t secure = 0;
if (format->findInt32("secure", &secure) && secure != 0) {
if (mCodec != NULL) {
// Secure content: re-create the codec by its ".secure" component name.
mCodec->getName(&mComponentName);
mComponentName.append(".secure");
mCodec->release();
mCodec = MediaCodec::CreateByComponentName(
mCodecLooper, mComponentName.c_str(), NULL /* err */, mPid);
}
}
if (mCodec == NULL) {
handleError(UNKNOWN_ERROR);
return;
}
mIsSecure = secure;
mCodec->getName(&mComponentName);
status_t err;
if (mSurface != NULL) {
// disconnect from surface as MediaCodec will reconnect
err = native_window_api_disconnect(
mSurface.get(), NATIVE_WINDOW_API_MEDIA);
// We treat this as a warning, as this is a preparatory step.
// Codec will try to connect to the surface, which is where
// any error signaling will occur.
ALOGW_IF(err != OK, "failed to disconnect from surface: %d", err);
}
err = mCodec->configure(
format, mSurface, NULL /* crypto */, 0 /* flags */);
if (err != OK) {
ALOGE("Failed to configure %s decoder (err=%d)", mComponentName.c_str(), err);
mCodec->release();
mCodec.clear();
handleError(err);
return;
}
rememberCodecSpecificData(format);
// the following should work in configured state
CHECK_EQ((status_t)OK, mCodec->getOutputFormat(&mOutputFormat));
CHECK_EQ((status_t)OK, mCodec->getInputFormat(&mInputFormat));
mStats->setString("mime", mime.c_str());
mStats->setString("component-name", mComponentName.c_str());
if (!mIsAudio) {
int32_t width, height;
if (mOutputFormat->findInt32("width", &width)
&& mOutputFormat->findInt32("height", &height)) {
mStats->setInt32("width", width);
mStats->setInt32("height", height);
}
}
// Route all MediaCodec callbacks back into this handler as kWhatCodecNotify.
sp<AMessage> reply = new AMessage(kWhatCodecNotify, this);
mCodec->setCallback(reply);
err = mCodec->start();
if (err != OK) {
ALOGE("Failed to start %s decoder (err=%d)", mComponentName.c_str(), err);
mCodec->release();
mCodec.clear();
handleError(err);
return;
}
releaseAndResetMediaBuffers();
mPaused = false;
mResumePending = false;
}
setParameters、setRenderer、setVideoSurface
setParameters實現相對簡單,直接將參數傳遞給MediaCodec,代碼如下:
// Forwards runtime parameters (e.g. "operating-rate") straight to MediaCodec.
// NOTE(review): "&params" was HTML-mangled ("¶ms") in the original text; fixed.
void NuPlayer::Decoder::onSetParameters(const sp<AMessage> &params) {
if (mCodec == NULL) {
ALOGW("onSetParameters called before codec is created.");
return;
}
mCodec->setParameters(params);
}
setRenderer接口主要的目的是啟動解碼流程,內部實現事件輪詢。代碼如下:
// Installs the renderer; the first time a non-null renderer arrives it also
// kicks off the input-request loop (i.e. starts the decode pipeline).
void NuPlayer::Decoder::onSetRenderer(const sp<Renderer> &renderer) {
bool hadNoRenderer = (mRenderer == NULL);
mRenderer = renderer;
if (hadNoRenderer && mRenderer != NULL) {
// this means that the widevine legacy source is ready
onRequestInputBuffers();
}
}
// Drives the input-fetch loop: while the subclass wants more data, repost
// kWhatRequestInputBuffers (with a 10ms delay) so fetching continues.
void NuPlayer::DecoderBase::onRequestInputBuffers() {
if (mRequestInputBuffersPending) {
return;
}
// doRequestBuffers() return true if we should request more data
if (doRequestBuffers()) {
mRequestInputBuffersPending = true;
sp<AMessage> msg = new AMessage(kWhatRequestInputBuffers, this);
msg->post(10 * 1000ll); // reposts the same message, forming the fetch loop
}
}
// 代碼來自NuPlayer::DecoderBase::onMessageReceived函數
case kWhatRequestInputBuffers:
{
mRequestInputBuffersPending = false;
onRequestInputBuffers();
break;
}
setVideoSurface的實現如下:
// Synchronously switches the output surface: posts kWhatSetVideoSurface and
// blocks until the handler replies with an "err" status.
status_t NuPlayer::Decoder::setVideoSurface(const sp<Surface> &surface) {
sp<AMessage> msg = new AMessage(kWhatSetVideoSurface, this);
msg->setObject("surface", surface);
sp<AMessage> response;
status_t err = msg->postAndAwaitResponse(&response);
if (err == OK && response != NULL) {
CHECK(response->findInt32("err", &err));
}
return err;
}
對應的消息處理代碼如下,其中調用了native_window相關的代碼:
case kWhatSetVideoSurface:
{
sp<AReplyToken> replyID;
CHECK(msg->senderAwaitsResponse(&replyID));
sp<RefBase> obj;
CHECK(msg->findObject("surface", &obj));
sp<Surface> surface = static_cast<Surface *>(obj.get()); // non-null
int32_t err = INVALID_OPERATION;
// NOTE: in practice mSurface is always non-null, but checking here for completeness
if (mCodec != NULL && mSurface != NULL) {
// TODO: once AwesomePlayer is removed, remove this automatic connecting
// to the surface by MediaPlayerService.
// at this point MediaPlayerService::client has already connected to the
// surface, which MediaCodec does not expect
err = native_window_api_disconnect(surface.get(), NATIVE_WINDOW_API_MEDIA);
if (err == OK) {
err = mCodec->setSurface(surface);
if (err == OK) {
// reconnect to the old surface as MPS::Client will expect to
// be able to disconnect from it.
(void)native_window_api_connect(mSurface.get(), NATIVE_WINDOW_API_MEDIA);
mSurface = surface;
}
}
if (err != OK) {
// reconnect to the new surface on error as MPS::Client will expect to
// be able to disconnect from it.
(void)native_window_api_connect(surface.get(), NATIVE_WINDOW_API_MEDIA);
}
}
sp<AMessage> response = new AMessage;
response->setInt32("err", err);
response->postReply(replyID);
break;
}
signalFlush、signalResume、initiateShutdown
signalFlush實現代碼如下,主要調用Renderer和MediaCodec的flush接口:
// Handles signalFlush(): performs the flush, then notifies NuPlayer
// that the flush completed.
void NuPlayer::Decoder::onFlush() {
doFlush(true);
sp<AMessage> notify = mNotify->dup();
notify->setInt32("what", kWhatFlushCompleted);
notify->post();
}
// Flushes the CC decoder, the renderer and MediaCodec, then resets all
// buffer bookkeeping. Bumping mBufferGeneration invalidates in-flight
// kWhatRenderBuffer replies from before the flush.
void NuPlayer::Decoder::doFlush(bool notifyComplete) {
if (mCCDecoder != NULL) {
mCCDecoder->flush();
}
if (mRenderer != NULL) {
mRenderer->flush(mIsAudio, notifyComplete);
mRenderer->signalTimeDiscontinuity();
}
status_t err = OK;
if (mCodec != NULL) {
err = mCodec->flush();
// Codec-specific data must be resubmitted after a MediaCodec flush.
mCSDsToSubmit = mCSDsForCurrentFormat; // copy operator
++mBufferGeneration;
}
if (err != OK) {
ALOGE("failed to flush %s (err=%d)", mComponentName.c_str(), err);
handleError(err);
// finish with posting kWhatFlushCompleted.
// we attempt to release the buffers even if flush fails.
}
releaseAndResetMediaBuffers();
mPaused = true;
}
signalResume相對簡單,直接調用MediaCodec接口,代碼如下:
// Resumes decoding after a flush/pause by restarting MediaCodec.
// NOTE(review): mCodec is dereferenced without a null check here; presumably
// resume is only signaled on a configured decoder — TODO confirm.
void NuPlayer::Decoder::onResume(bool notifyComplete) {
mPaused = false;
if (notifyComplete) {
// Completion is reported once the first output frame arrives
// (see notifyResumeCompleteIfNecessary in handleAnOutputBuffer).
mResumePending = true;
}
mCodec->start();
}
initiateShutdown主要是關閉解碼器,實現如下,
// Releases the MediaCodec, reconnects the surface for MediaPlayerService,
// and optionally notifies NuPlayer that shutdown completed.
void NuPlayer::Decoder::onShutdown(bool notifyComplete) {
status_t err = OK;
if (mCodec != NULL) {
err = mCodec->release();
mCodec = NULL;
++mBufferGeneration; // invalidate any in-flight buffer replies
if (mSurface != NULL) {
// reconnect to surface as MediaCodec disconnected from it
// NOTE(review): 'error' is assigned but never checked or logged in
// this excerpt (the full AOSP source logs it via ALOGW_IF).
status_t error = native_window_api_connect(mSurface.get(), NATIVE_WINDOW_API_MEDIA);
}
mComponentName = "decoder";
}
releaseAndResetMediaBuffers();
if (err != OK) {
handleError(err);
// finish with posting kWhatShutdownCompleted.
}
if (notifyComplete) {
sp<AMessage> notify = mNotify->dup();
notify->setInt32("what", kWhatShutdownCompleted);
notify->post();
mPaused = true;
}
}
getInputBuffers、getStats
getInputBuffers實現直接調用MediaCodec的接口,如下:
// Exposes the codec's input buffers for the legacy Widevine path.
void NuPlayer::Decoder::onGetInputBuffers(
Vector<sp<ABuffer> > *dstBuffers) {
CHECK_EQ((status_t)OK, mCodec->getWidevineLegacyBuffers(dstBuffers));
}
getStats實現如下,獲取解碼幀數、輸入輸出的丟幀數目等:
// Snapshot of decode statistics: total frames plus input/output drop counts.
sp<AMessage> NuPlayer::Decoder::getStats() const {
mStats->setInt64("frames-total", mNumFramesTotal);
mStats->setInt64("frames-dropped-input", mNumInputFramesDropped);
mStats->setInt64("frames-dropped-output", mNumOutputFramesDropped);
return mStats;
}
5 Decoder解碼流程分析
前面是以接口為界進行代碼功能分析,實際上最為主要的解碼流程並沒在這里。
實際解碼過程中,無外乎獲取輸入的壓縮數據,MediaCodec解碼,返回解碼之后的數據,渲染。
在Part 4中介紹onConfigure函數實現時有下面代碼:
mCodec->setCallback(reply);
這里就是將MediaCodec的回調消息發送到當前類Decoder的消息隊列中,然后在onMessageReceived中處理。
實際的解碼開始是從setRenderer開始,Part 4中對這個函數的實現也有介紹,循環解碼的邏輯來在於onRequestInputBuffers函數,其中會調用doRequestBuffers,其實現如下:
// returns true if we should request more data
bool NuPlayer::Decoder::doRequestBuffers() {
// mRenderer is only NULL if we have a legacy widevine source that
// is not yet ready. In this case we must not fetch input.
if (isDiscontinuityPending() || mRenderer == NULL) {
return false;
}
status_t err = OK;
// Fill every input-buffer slot the codec has handed back to us.
while (err == OK && !mDequeuedInputBuffers.empty()) {
size_t bufferIx = *mDequeuedInputBuffers.begin();
sp<AMessage> msg = new AMessage();
msg->setSize("buffer-ix", bufferIx);
err = fetchInputData(msg); // pull one access unit from the Source
if (err != OK && err != ERROR_END_OF_STREAM) {
// if EOS, need to queue EOS buffer
break;
}
mDequeuedInputBuffers.erase(mDequeuedInputBuffers.begin());
if (!mPendingInputMessages.empty()
|| !onInputBufferFetched(msg)) {
mPendingInputMessages.push_back(msg); // queue data that couldn't be submitted yet
}
}
// -EWOULDBLOCK means the Source has no data right now; keep polling as
// long as it can still feed more TS data.
return err == -EWOULDBLOCK
&& mSource->feedMoreTSData() == OK;
}
如果doRequestBuffers返回true的話,onRequestInputBuffers會循環發送kWhatRequestInputBuffers消息,驅動正常解碼邏輯。
下面是關於MediaCodec返回消息的處理邏輯,代碼主要集中在onMessageReceived中,如下:
case kWhatCodecNotify:
{
int32_t cbID;
CHECK(msg->findInt32("callbackID", &cbID));
if (mPaused) {
break;
}
switch (cbID) {
case MediaCodec::CB_INPUT_AVAILABLE:
{// 可以填充輸入數據了
int32_t index;
CHECK(msg->findInt32("index", &index));
handleAnInputBuffer(index);
break;
}
case MediaCodec::CB_OUTPUT_AVAILABLE:
{// 解碼成功的消息,輸出的數據在這里
int32_t index;
size_t offset;
size_t size;
int64_t timeUs;
int32_t flags;
CHECK(msg->findInt32("index", &index));
CHECK(msg->findSize("offset", &offset));
CHECK(msg->findSize("size", &size));
CHECK(msg->findInt64("timeUs", &timeUs));
CHECK(msg->findInt32("flags", &flags));
handleAnOutputBuffer(index, offset, size, timeUs, flags);
break;
}
case MediaCodec::CB_OUTPUT_FORMAT_CHANGED:
{ // 通知輸出格式變化
sp<AMessage> format;
CHECK(msg->findMessage("format", &format));
handleOutputFormatChange(format);
break;
}
case MediaCodec::CB_ERROR:
{// 發生未知的錯誤,需要處理下
status_t err;
CHECK(msg->findInt32("err", &err));
handleError(err);
break;
}
default:
{// 其他消息不作處理
TRESPASS();
break;
}
}
break;
}
先看如何向MediaCodec輸入數據。代碼如下:
// Handles MediaCodec::CB_INPUT_AVAILABLE: records the newly available input
// slot, resubmits pending CSD, drains queued input messages, and asks the
// Source for more data.
bool NuPlayer::Decoder::handleAnInputBuffer(size_t index) {
if (isDiscontinuityPending()) {
return false;
}
sp<ABuffer> buffer;
mCodec->getInputBuffer(index, &buffer); // fetch the codec's input buffer for this slot
if (buffer == NULL) {
handleError(UNKNOWN_ERROR);
return false;
}
// Grow the bookkeeping vectors if the codec reports a new, higher index.
if (index >= mInputBuffers.size()) {
for (size_t i = mInputBuffers.size(); i <= index; ++i) {
mInputBuffers.add();
mMediaBuffers.add();
mInputBufferIsDequeued.add();
mMediaBuffers.editItemAt(i) = NULL;
mInputBufferIsDequeued.editItemAt(i) = false;
}
}
mInputBuffers.editItemAt(index) = buffer;
//CHECK_LT(bufferIx, mInputBuffers.size());
if (mMediaBuffers[index] != NULL) {
mMediaBuffers[index]->release();
mMediaBuffers.editItemAt(index) = NULL;
}
mInputBufferIsDequeued.editItemAt(index) = true;
// Codec-specific data (e.g. SPS/PPS) must go in before normal input.
if (!mCSDsToSubmit.isEmpty()) {
sp<AMessage> msg = new AMessage();
msg->setSize("buffer-ix", index);
sp<ABuffer> buffer = mCSDsToSubmit.itemAt(0);
ALOGI("[%s] resubmitting CSD", mComponentName.c_str());
msg->setBuffer("buffer", buffer);
mCSDsToSubmit.removeAt(0);
if (!onInputBufferFetched(msg)) {
handleError(UNKNOWN_ERROR);
return false;
}
return true;
}
// Drain input messages queued while no slot was free.
while (!mPendingInputMessages.empty()) {
sp<AMessage> msg = *mPendingInputMessages.begin();
if (!onInputBufferFetched(msg)) { // this is where the data is actually submitted
break;
}
mPendingInputMessages.erase(mPendingInputMessages.begin());
}
// A pending message may already have consumed this slot.
if (!mInputBufferIsDequeued.editItemAt(index)) {
return true;
}
mDequeuedInputBuffers.push_back(index);
onRequestInputBuffers();
return true;
}
那么看看onInputBufferFetched的實現,這里會操作MediaCodec:
// Copies one fetched access unit (or an EOS marker) into the codec buffer
// identified by "buffer-ix" and queues it into MediaCodec. Returns false if
// the buffer could not be submitted (caller re-queues the message).
// NOTE(review): this excerpt references 'mediaBuffer' without a visible
// declaration — the full AOSP source extracts it from the message earlier;
// part of that code was elided here.
bool NuPlayer::Decoder::onInputBufferFetched(const sp<AMessage> &msg) {
size_t bufferIx;
CHECK(msg->findSize("buffer-ix", &bufferIx));
CHECK_LT(bufferIx, mInputBuffers.size());
sp<ABuffer> codecBuffer = mInputBuffers[bufferIx];
sp<ABuffer> buffer;
bool hasBuffer = msg->findBuffer("buffer", &buffer);
if (buffer == NULL /* includes !hasBuffer */) {
int32_t streamErr = ERROR_END_OF_STREAM;
CHECK(msg->findInt32("err", &streamErr) || !hasBuffer);
CHECK(streamErr != OK);
// attempt to queue EOS: signal end-of-stream to MediaCodec
status_t err = mCodec->queueInputBuffer(
bufferIx, 0, 0, 0, MediaCodec::BUFFER_FLAG_EOS);
if (err == OK) {
mInputBufferIsDequeued.editItemAt(bufferIx) = false;
} else if (streamErr == ERROR_END_OF_STREAM) {
streamErr = err;
// err will not be ERROR_END_OF_STREAM
}
if (streamErr != ERROR_END_OF_STREAM) {
handleError(streamErr);
}
} else {
sp<AMessage> extra;
// A seek may attach a "resume-at" time: skip rendering until then.
if (buffer->meta()->findMessage("extra", &extra) && extra != NULL) {
int64_t resumeAtMediaTimeUs;
if (extra->findInt64("resume-at-mediaTimeUs", &resumeAtMediaTimeUs)) {
mSkipRenderingUntilMediaTimeUs = resumeAtMediaTimeUs;
}
}
int64_t timeUs = 0;
uint32_t flags = 0;
CHECK(buffer->meta()->findInt64("timeUs", &timeUs));
int32_t eos, csd;
// we do not expect SYNCFRAME for decoder
if (buffer->meta()->findInt32("eos", &eos) && eos) {
flags |= MediaCodec::BUFFER_FLAG_EOS;
} else if (buffer->meta()->findInt32("csd", &csd) && csd) {
flags |= MediaCodec::BUFFER_FLAG_CODECCONFIG;
}
// copy into codec buffer
if (buffer != codecBuffer) {
if (buffer->size() > codecBuffer->capacity()) {
handleError(ERROR_BUFFER_TOO_SMALL);
mDequeuedInputBuffers.push_back(bufferIx);
return false;
}
codecBuffer->setRange(0, buffer->size());
memcpy(codecBuffer->data(), buffer->data(), buffer->size()); // the actual data copy
}
// hand the buffer to the decoder
status_t err = mCodec->queueInputBuffer(
bufferIx,
codecBuffer->offset(),
codecBuffer->size(),
timeUs,
flags);
if (err != OK) {
if (mediaBuffer != NULL) {
mediaBuffer->release();
}
handleError(err);
} else {
mInputBufferIsDequeued.editItemAt(bufferIx) = false;
if (mediaBuffer != NULL) {
CHECK(mMediaBuffers[bufferIx] == NULL);
mMediaBuffers.editItemAt(bufferIx) = mediaBuffer;
}
}
}
return true;
}
這樣解碼前的數據准備就完成了。
解碼后的數據處理函數是handleAnOutputBuffer,響應MediaCodec::CB_OUTPUT_AVAILABLE消息,代碼如下:
// Handles MediaCodec::CB_OUTPUT_AVAILABLE: records the decoded buffer and
// hands it to the Renderer together with a kWhatRenderBuffer reply message
// that will trigger render-and-release at display time.
bool NuPlayer::Decoder::handleAnOutputBuffer(size_t index, size_t offset,
size_t size, int64_t timeUs, int32_t flags) {
sp<ABuffer> buffer;
mCodec->getOutputBuffer(index, &buffer);
// Grow the output bookkeeping if the codec reports a new, higher index.
if (index >= mOutputBuffers.size()) {
for (size_t i = mOutputBuffers.size(); i <= index; ++i) {
mOutputBuffers.add();
}
}
mOutputBuffers.editItemAt(index) = buffer;
buffer->setRange(offset, size);
buffer->meta()->clear();
buffer->meta()->setInt64("timeUs", timeUs);
bool eos = flags & MediaCodec::BUFFER_FLAG_EOS;
// we do not expect CODECCONFIG or SYNCFRAME for decoder
// "generation" lets onRenderBuffer drop replies that predate a flush.
sp<AMessage> reply = new AMessage(kWhatRenderBuffer, this);
reply->setSize("buffer-ix", index);
reply->setInt32("generation", mBufferGeneration);
if (eos) {
buffer->meta()->setInt32("eos", true);
reply->setInt32("eos", true);
} else if (mSkipRenderingUntilMediaTimeUs >= 0) {
// After a seek: drop frames earlier than the resume time.
if (timeUs < mSkipRenderingUntilMediaTimeUs) {
reply->post();
return true;
}
mSkipRenderingUntilMediaTimeUs = -1;
}
mNumFramesTotal += !mIsAudio; // count decoded video frames only
// wait until 1st frame comes out to signal resume complete
notifyResumeCompleteIfNecessary();
if (mRenderer != NULL) {
// send the buffer to renderer; it will post 'reply' when it's time to render
mRenderer->queueBuffer(mIsAudio, buffer, reply);
if (eos && !isDiscontinuityPending()) {
mRenderer->queueEOS(mIsAudio, ERROR_END_OF_STREAM);
}
}
return true;
}
還有最后一個消息MediaCodec::CB_OUTPUT_FORMAT_CHANGED,處理函數是handleOutputFormatChange,代碼如下:
// Handles MediaCodec::CB_OUTPUT_FORMAT_CHANGED. For video: record the new
// size and notify NuPlayer. For audio: reopen the AudioSink with flags
// derived from the new format.
void NuPlayer::Decoder::handleOutputFormatChange(const sp<AMessage> &format) {
if (!mIsAudio) {
int32_t width, height;
if (format->findInt32("width", &width)
&& format->findInt32("height", &height)) {
mStats->setInt32("width", width);
mStats->setInt32("height", height);
}
sp<AMessage> notify = mNotify->dup();
notify->setInt32("what", kWhatVideoSizeChanged);
notify->setMessage("format", format);
notify->post(); // tell NuPlayer the video resolution changed
} else if (mRenderer != NULL) { // audio: reconfigure the AudioSink
uint32_t flags;
int64_t durationUs;
bool hasVideo = (mSource->getFormat(false /* audio */) != NULL);
// Deep buffering is used for long audio-only content to save power.
if (getAudioDeepBufferSetting() // override regardless of source duration
|| (!hasVideo
&& mSource->getDuration(&durationUs) == OK
&& durationUs > AUDIO_SINK_MIN_DEEP_BUFFER_DURATION_US)) {
flags = AUDIO_OUTPUT_FLAG_DEEP_BUFFER;
} else {
flags = AUDIO_OUTPUT_FLAG_NONE;
}
status_t err = mRenderer->openAudioSink(
format, false /* offloadOnly */, hasVideo, flags, NULL /* isOffloaed */);
if (err != OK) {
handleError(err);
}
}
}
還剩最后一部分,顯示是如何處理的。那就是handleAnOutputBuffer發送的一個kWhatRenderBuffer消息的處理,代碼如下:
case kWhatRenderBuffer:
{
if (!isStaleReply(msg)) {
onRenderBuffer(msg);
}
break;
}
// Handles kWhatRenderBuffer (posted back by the Renderer at display time):
// either renders-and-releases the output buffer at the given timestamp or
// just releases it (counted as a dropped frame for video).
// NOTE(review): "&timestampNs" was HTML-mangled ("×tampNs") in the
// original text; restored here.
void NuPlayer::Decoder::onRenderBuffer(const sp<AMessage> &msg) {
status_t err;
int32_t render;
size_t bufferIx;
int32_t eos;
CHECK(msg->findSize("buffer-ix", &bufferIx));
if (!mIsAudio) {
int64_t timeUs;
sp<ABuffer> buffer = mOutputBuffers[bufferIx];
buffer->meta()->findInt64("timeUs", &timeUs);
}
// Render (display) or drop-and-release the buffer.
if (msg->findInt32("render", &render) && render) {
int64_t timestampNs;
CHECK(msg->findInt64("timestampNs", &timestampNs));
err = mCodec->renderOutputBufferAndRelease(bufferIx, timestampNs);
} else {
mNumOutputFramesDropped += !mIsAudio;
err = mCodec->releaseOutputBuffer(bufferIx);
}
if (err != OK) {
handleError(err);
}
if (msg->findInt32("eos", &eos) && eos && isDiscontinuityPending()) {
finishHandleDiscontinuity(true /* flushOnTimeChange */);
}
}
至此整個流程分析完畢。
6 總結
回顧下,本文主要介紹了NuPlayer::DecoderBase及其子類NuPlayer::Decoder的實現代碼,從demuxer之后的數據,到通過MediaCodec解碼,得到原始數據,之后渲染。
作為后續學習NuPlayer代碼的參考部分。