本節來看一下NuPlayer Source中的GenericSource,GenericSource主要是用來播放本地視頻的,接下來着重來看以下5個方法:
prepare,start,pause,seek,dequeueAccessUnit
相關代碼位置(本文分析的GenericSource代碼):
http://aospxref.com/android-12.0.0_r3/xref/frameworks/av/media/libmediaplayerservice/nuplayer/GenericSource.cpp
(prepare過程中創建DataSource的相關代碼參見 http://aospxref.com/android-12.0.0_r3/xref/frameworks/av/media/libdatasource/DataSourceFactory.cpp)
a. prepare

prepare的過程中做了以下幾件事情(這邊的代碼比較簡單,順着看就行所以就不貼代碼了):
1. 根據setDataSource過程中傳進來的uri來創建DataSource,由於GenericSource一般用來播放本地視頻,所以會創建一個FileSource(這里的dataSource實現了最基本的讀寫文件的接口)
2. 利用創建的DataSource來讀取文件,使用media.extractor服務來選擇並創建一個合適的MediaExtractor(media.extractor服務后面可能會來記錄一下它的工作原理)
3. 利用MediaExtractor來獲取文件的metadata,以及各個track的metadata(后面用於創建以及初始化decoder),調用getTrack方法從MediaExtractor中獲取IMediaSource,audio和video track均擁有自己的IMediaSource,IMediaSource實現了demux功能
4. 為音頻和視頻分別創建一個AnotherPacketSource作為數據容器,與IMediaSource一起封裝成為Track對象,之后的函數調用就是操作音頻和視頻的Track
status_t NuPlayer::GenericSource::initFromDataSource() { sp<IMediaExtractor> extractor; // ...... // 創建MediaExtractor extractor = MediaExtractorFactory::Create(dataSource, NULL); // 獲取文件的metadata sp<MetaData> fileMeta = extractor->getMetaData(); // 獲取track數量 size_t numtracks = extractor->countTracks(); // ...... // 獲取文件的時長 if (mFileMeta != NULL) { int64_t duration; if (mFileMeta->findInt64(kKeyDuration, &duration)) { mDurationUs = duration; } } for (size_t i = 0; i < numtracks; ++i) { // 獲取MediaSource sp<IMediaSource> track = extractor->getTrack(i); if (track == NULL) { continue; } sp<MetaData> meta = extractor->getTrackMetaData(i); if (meta == NULL) { ALOGE("no metadata for track %zu", i); return UNKNOWN_ERROR; } const char *mime; CHECK(meta->findCString(kKeyMIMEType, &mime)); // 構建Track if (!strncasecmp(mime, "audio/", 6)) { if (mAudioTrack.mSource == NULL) { mAudioTrack.mIndex = i; mAudioTrack.mSource = track; // 為track構建數據容器AnotherPacketSource mAudioTrack.mPackets = new AnotherPacketSource(mAudioTrack.mSource->getFormat()); if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_VORBIS)) { mAudioIsVorbis = true; } else { mAudioIsVorbis = false; } mMimes.add(String8(mime)); } } else if (!strncasecmp(mime, "video/", 6)) { if (mVideoTrack.mSource == NULL) { mVideoTrack.mIndex = i; mVideoTrack.mSource = track; mVideoTrack.mPackets = new AnotherPacketSource(mVideoTrack.mSource->getFormat()); // video always at the beginning mMimes.insertAt(String8(mime), 0); } } mSources.push(track); return UNKNOWN_ERROR; } // 獲取加密視頻的信息 (void)checkDrmInfo(); // 這里會算視頻的biterate,先忽略 mBitrate = totalBitrate; return OK; }
b. start
NuPlayer的start方法會同步調用Source的start方法,這時候就開始讀取數據了。
調用postReadBuffer發送兩個消息,最后會調用到readBuffer方法當中做數據的讀取
void NuPlayer::GenericSource::start() { // ...... if (mAudioTrack.mSource != NULL) { postReadBuffer(MEDIA_TRACK_TYPE_AUDIO); } if (mVideoTrack.mSource != NULL) { postReadBuffer(MEDIA_TRACK_TYPE_VIDEO); } mStarted = true; }
readBuffer看起來比較長,但是並不是很復雜:
1、根據trackType獲取對應的Track
2、根據actualTimeUs判斷是否需要seek,如需要則構建ReadOptions
3、調用IMediaSource的read或者readMultiple方法讀取數據
4、將讀到的數據加入到AnotherPacketSource
// Pull up to maxBuffers access units from the track's IMediaSource and queue
// them into its AnotherPacketSource.
// @param trackType    which track to read (audio/video/subtitle/timedtext)
// @param seekTimeUs   >= 0 requests a seek to that position before reading
// @param mode         seek mode (only meaningful when seeking)
// @param actualTimeUs if non-null, receives the timestamp actually reached
// @param formatChange whether a track (re)selection preceded this read
void NuPlayer::GenericSource::readBuffer(
        media_track_type trackType, int64_t seekTimeUs, MediaPlayerSeekMode mode,
        int64_t *actualTimeUs, bool formatChange) {
    Track *track;
    size_t maxBuffers = 1;
    // Map the track type onto the matching Track and its batch size.
    switch (trackType) {
        case MEDIA_TRACK_TYPE_VIDEO:
            track = &mVideoTrack;
            maxBuffers = 8;  // too large of a number may influence seeks
            break;
        case MEDIA_TRACK_TYPE_AUDIO:
            track = &mAudioTrack;
            maxBuffers = 64;
            break;
        case MEDIA_TRACK_TYPE_SUBTITLE:
            track = &mSubtitleTrack;
            break;
        case MEDIA_TRACK_TYPE_TIMEDTEXT:
            track = &mTimedTextTrack;
            break;
        default:
            TRESPASS();
    }

    if (track->mSource == NULL) {
        return;
    }

    if (actualTimeUs) {
        *actualTimeUs = seekTimeUs;
    }

    // seekTimeUs >= 0 means a seek happened: encode it in the ReadOptions
    // that are handed down to the IMediaSource read call.
    MediaSource::ReadOptions options;

    bool seeking = false;
    if (seekTimeUs >= 0) {
        options.setSeekTo(seekTimeUs, mode);
        seeking = true;
    }

    // Each pass tries to fill maxBuffers buffers (audio 64, video 8). Two read
    // paths exist: IMediaSource::read() one buffer at a time, or
    // readMultiple() fetching several per call; either way the loop keeps
    // going until maxBuffers are read or an error/EOS occurs.
    const bool couldReadMultiple = (track->mSource->supportReadMultiple());

    if (couldReadMultiple) {
        options.setNonBlocking();
    }

    // Snapshot the generation so a track change that happens while mLock is
    // dropped around the blocking read can be detected below.
    int32_t generation = getDataGeneration(trackType);
    for (size_t numBuffers = 0; numBuffers < maxBuffers; ) {
        Vector<MediaBufferBase *> mediaBuffers;
        status_t err = NO_ERROR;

        sp<IMediaSource> source = track->mSource;
        mLock.unlock();  // IMediaSource::read* must not run under mLock
        if (couldReadMultiple) {
            err = source->readMultiple(
                    &mediaBuffers, maxBuffers - numBuffers, &options);
        } else {
            MediaBufferBase *mbuf = NULL;
            err = source->read(&mbuf, &options);
            if (err == OK && mbuf != NULL) {
                mediaBuffers.push_back(mbuf);
            }
        }
        mLock.lock();

        options.clearNonPersistent();

        size_t id = 0;
        size_t count = mediaBuffers.size();

        // in case track has been changed since we don't have lock for some time.
        if (generation != getDataGeneration(trackType)) {
            for (; id < count; ++id) {
                mediaBuffers[id]->release();
            }
            break;
        }

        for (; id < count; ++id) {
            int64_t timeUs;
            MediaBufferBase *mbuf = mediaBuffers[id];
            // Record the media position of the audio/video data just read.
            if (!mbuf->meta_data().findInt64(kKeyTime, &timeUs)) {
                mbuf->meta_data().dumpToLog();
                track->mPackets->signalEOS(ERROR_MALFORMED);
                break;
            }
            if (trackType == MEDIA_TRACK_TYPE_AUDIO) {
                mAudioTimeUs = timeUs;
            } else if (trackType == MEDIA_TRACK_TYPE_VIDEO) {
                mVideoTimeUs = timeUs;
            }

            // After a seek, flush AnotherPacketSource and queue a
            // discontinuity marker ahead of the fresh data.
            queueDiscontinuityIfNeeded(seeking, formatChange, trackType, track);

            sp<ABuffer> buffer = mediaBufferToABuffer(mbuf, trackType);
            if (numBuffers == 0 && actualTimeUs != nullptr) {
                *actualTimeUs = timeUs;
            }
            if (seeking && buffer != nullptr) {
                sp<AMessage> meta = buffer->meta();
                if (meta != nullptr && mode == MediaPlayerSeekMode::SEEK_CLOSEST
                        && seekTimeUs > timeUs) {
                    // SEEK_CLOSEST decodes from an earlier position; tell the
                    // consumer to resume rendering at seekTimeUs.
                    sp<AMessage> extra = new AMessage;
                    extra->setInt64("resume-at-mediaTimeUs", seekTimeUs);
                    meta->setMessage("extra", extra);
                }
            }

            // Push the converted buffer into AnotherPacketSource.
            track->mPackets->queueAccessUnit(buffer);
            formatChange = false;
            seeking = false;
            ++numBuffers;
        }
        if (id < count) {
            // Error, some mediaBuffer doesn't have kKeyTime.
            for (; id < count; ++id) {
                // Release remaining staged buffers so they can be reused
                // for the next read.
                mediaBuffers[id]->release();
            }
            break;
        }

        if (err == WOULD_BLOCK) {
            break;
        } else if (err == INFO_FORMAT_CHANGED) {
#if 0
            track->mPackets->queueDiscontinuity(
                    ATSParser::DISCONTINUITY_FORMATCHANGE,
                    NULL,
                    false /* discard */);
#endif
        } else if (err != OK) {
            // Any other read error is treated as end of stream.
            queueDiscontinuityIfNeeded(seeking, formatChange, trackType, track);
            track->mPackets->signalEOS(err);
            break;
        }
    }

    // Streaming case (presumably network playback, continuously filling the
    // cache): keep reading until the prepared/resume watermark is reached,
    // then notify the player; otherwise re-post another read.
    if (mIsStreaming
            && (trackType == MEDIA_TRACK_TYPE_VIDEO || trackType == MEDIA_TRACK_TYPE_AUDIO)) {
        status_t finalResult;
        int64_t durationUs = track->mPackets->getBufferedDurationUs(&finalResult);

        // TODO: maxRebufferingMarkMs could be larger than
        // mBufferingSettings.mResumePlaybackMarkMs
        int64_t markUs = (mPreparing ? mBufferingSettings.mInitialMarkMs
                : mBufferingSettings.mResumePlaybackMarkMs) * 1000LL;
        if (finalResult == ERROR_END_OF_STREAM || durationUs >= markUs) {
            if (mPreparing || mSentPauseOnBuffering) {
                // Check the opposite track too before declaring buffering done.
                Track *counterTrack =
                        (trackType == MEDIA_TRACK_TYPE_VIDEO ? &mAudioTrack : &mVideoTrack);
                if (counterTrack->mSource != NULL) {
                    durationUs = counterTrack->mPackets->getBufferedDurationUs(&finalResult);
                }
                if (finalResult == ERROR_END_OF_STREAM || durationUs >= markUs) {
                    if (mPreparing) {
                        notifyPrepared();
                        mPreparing = false;
                    } else {
                        sendCacheStats();
                        mSentPauseOnBuffering = false;
                        sp<AMessage> notify = dupNotify();
                        notify->setInt32("what", kWhatResumeOnBufferingEnd);
                        notify->post();
                    }
                }
            }
            return;
        }

        // Re-post ourselves to keep the read loop going.
        postReadBuffer(trackType);
    }
}
接下來看看queueDiscontinuityIfNeeded,這個方法很簡單,其實就是調用了AnotherPacketSource的queueDiscontinuity方法。這個在后面的博文中會簡單介紹工作原理
void NuPlayer::GenericSource::queueDiscontinuityIfNeeded( bool seeking, bool formatChange, media_track_type trackType, Track *track) { // formatChange && seeking: track whose source is changed during selection // formatChange && !seeking: track whose source is not changed during selection // !formatChange: normal seek if ((seeking || formatChange) && (trackType == MEDIA_TRACK_TYPE_AUDIO || trackType == MEDIA_TRACK_TYPE_VIDEO)) { ATSParser::DiscontinuityType type = (formatChange && seeking) ? ATSParser::DISCONTINUITY_FORMATCHANGE : ATSParser::DISCONTINUITY_NONE; track->mPackets->queueDiscontinuity(type, NULL /* extra */, true /* discard */); } }
c. seek
有了前面的底子,seek方法就很簡單了,NuPlayer調用seekTo方法之后,會調用到readBuffer方法做數據讀取
status_t NuPlayer::GenericSource::seekTo(int64_t seekTimeUs, MediaPlayerSeekMode mode) { ALOGV("seekTo: %lld, %d", (long long)seekTimeUs, mode); sp<AMessage> msg = new AMessage(kWhatSeek, this); msg->setInt64("seekTimeUs", seekTimeUs); msg->setInt32("mode", mode); // Need to call readBuffer on |mLooper| to ensure the calls to // IMediaSource::read* are serialized. Note that IMediaSource::read* // is called without |mLock| acquired and MediaSource is not thread safe. sp<AMessage> response; status_t err = msg->postAndAwaitResponse(&response); if (err == OK && response != NULL) { CHECK(response->findInt32("err", &err)); } return err; } status_t NuPlayer::GenericSource::doSeek(int64_t seekTimeUs, MediaPlayerSeekMode mode) { if (mVideoTrack.mSource != NULL) { ++mVideoDataGeneration; int64_t actualTimeUs; readBuffer(MEDIA_TRACK_TYPE_VIDEO, seekTimeUs, mode, &actualTimeUs); if (mode != MediaPlayerSeekMode::SEEK_CLOSEST) { seekTimeUs = std::max<int64_t>(0, actualTimeUs); } mVideoLastDequeueTimeUs = actualTimeUs; } if (mAudioTrack.mSource != NULL) { ++mAudioDataGeneration; readBuffer(MEDIA_TRACK_TYPE_AUDIO, seekTimeUs, MediaPlayerSeekMode::SEEK_CLOSEST); mAudioLastDequeueTimeUs = seekTimeUs; } if (mSubtitleTrack.mSource != NULL) { mSubtitleTrack.mPackets->clear(); mFetchSubtitleDataGeneration++; } if (mTimedTextTrack.mSource != NULL) { mTimedTextTrack.mPackets->clear(); mFetchTimedTextDataGeneration++; } ++mPollBufferingGeneration; schedulePollBuffering(); return OK; }
d. pause
上層調用pause之后,NuPlayer相應的也會調用GenericSource的pause方法,這個方法很簡單,直接置mStarted為false。
void NuPlayer::GenericSource::pause() { Mutex::Autolock _l(mLock); mStarted = false; }
e. dequeueAccessUnit
NuPlayerDecoder會調用這個方法來從Source中獲取讀到的數據,這是個比較重要的方法。
1、讀取時會先去判斷當前播放器的狀態,如果是pause或者是stop(mStarted為false),並且DRM已經釋放(mIsDrmReleased為true),則會停止本次數據的讀取,返回-EWOULDBLOCK。
2、接着判斷數據池中的數據是否足夠,如果不夠則讀取數據
3、從數據池中出隊列一個數據
4、再次判斷數據池中的數據是否足夠,如果不夠則讀取數據
// Called by NuPlayerDecoder to fetch one demuxed access unit.
// 1. Refuse when the player is stopped/paused AND DRM has been released.
// 2. If the packet queue is empty, post a read and report -EWOULDBLOCK.
// 3. Otherwise dequeue one buffer from AnotherPacketSource.
// 4. Re-trigger reads when the queue is running low.
// @param audio      true for the audio track, false for video
// @param accessUnit out: the dequeued buffer
// @return OK, -EWOULDBLOCK when no data is ready yet, or a final error/EOS
status_t NuPlayer::GenericSource::dequeueAccessUnit(
        bool audio, sp<ABuffer> *accessUnit) {
    Mutex::Autolock _l(mLock);
    // If has gone through stop/releaseDrm sequence, we no longer send down any buffer b/c
    // the codec's crypto object has gone away (b/37960096).
    // Note: This will be unnecessary when stop() changes behavior and releases codec (b/35248283).
    if (!mStarted && mIsDrmReleased) {
        return -EWOULDBLOCK;
    }

    Track *track = audio ? &mAudioTrack : &mVideoTrack;

    if (track->mSource == NULL) {
        return -EWOULDBLOCK;
    }

    status_t finalResult;
    // First check whether AnotherPacketSource holds any data; if not, post a
    // read via postReadBuffer and tell the caller to retry later.
    if (!track->mPackets->hasBufferAvailable(&finalResult)) {
        if (finalResult == OK) {
            postReadBuffer(
                    audio ? MEDIA_TRACK_TYPE_AUDIO : MEDIA_TRACK_TYPE_VIDEO);
            return -EWOULDBLOCK;
        }
        return finalResult;
    }

    // Dequeue one buffer from AnotherPacketSource.
    status_t result = track->mPackets->dequeueAccessUnit(accessUnit);

    // start pulling in more buffers if cache is running low
    // so that decoder has less chance of being starved
    if (!mIsStreaming) {
        // Local playback: refill as soon as fewer than 2 buffers remain.
        if (track->mPackets->getAvailableBufferCount(&finalResult) < 2) {
            postReadBuffer(audio? MEDIA_TRACK_TYPE_AUDIO : MEDIA_TRACK_TYPE_VIDEO);
        }
    } else {
        int64_t durationUs = track->mPackets->getBufferedDurationUs(&finalResult);
        // TODO: maxRebufferingMarkMs could be larger than
        // mBufferingSettings.mResumePlaybackMarkMs
        int64_t restartBufferingMarkUs =
                mBufferingSettings.mResumePlaybackMarkMs * 1000LL / 2;
        if (finalResult == OK) {
            // Streaming: refill when buffered duration drops below half the
            // resume-playback watermark.
            if (durationUs < restartBufferingMarkUs) {
                postReadBuffer(audio? MEDIA_TRACK_TYPE_AUDIO : MEDIA_TRACK_TYPE_VIDEO);
            }
            if (track->mPackets->getAvailableBufferCount(&finalResult) < 2
                    && !mSentPauseOnBuffering && !mPreparing) {
                // Underrun: resume the cache fetcher and notify NuPlayer to
                // pause for rebuffering.
                mCachedSource->resumeFetchingIfNecessary();
                sendCacheStats();
                mSentPauseOnBuffering = true;
                sp<AMessage> notify = dupNotify();
                notify->setInt32("what", kWhatPauseOnBufferingStart);
                notify->post();
            }
        }
    }

    if (result != OK) {
        // Dequeue failed (e.g. discontinuity/EOS): reset subtitle and timed
        // text state so they re-sync after the next successful dequeue.
        if (mSubtitleTrack.mSource != NULL) {
            mSubtitleTrack.mPackets->clear();
            mFetchSubtitleDataGeneration++;
        }
        if (mTimedTextTrack.mSource != NULL) {
            mTimedTextTrack.mPackets->clear();
            mFetchTimedTextDataGeneration++;
        }
        return result;
    }

    int64_t timeUs;
    status_t eosResult; // ignored
    CHECK((*accessUnit)->meta()->findInt64("timeUs", &timeUs));
    // Remember the timestamp of the last dequeued unit per track.
    if (audio) {
        mAudioLastDequeueTimeUs = timeUs;
    } else {
        mVideoLastDequeueTimeUs = timeUs;
    }

    // Keep subtitle / timed-text tracks fed relative to the A/V position.
    if (mSubtitleTrack.mSource != NULL
            && !mSubtitleTrack.mPackets->hasBufferAvailable(&eosResult)) {
        sp<AMessage> msg = new AMessage(kWhatFetchSubtitleData, this);
        msg->setInt64("timeUs", timeUs);
        msg->setInt32("generation", mFetchSubtitleDataGeneration);
        msg->post();
    }

    if (mTimedTextTrack.mSource != NULL
            && !mTimedTextTrack.mPackets->hasBufferAvailable(&eosResult)) {
        sp<AMessage> msg = new AMessage(kWhatFetchTimedTextData, this);
        msg->setInt64("timeUs", timeUs);
        msg->setInt32("generation", mFetchTimedTextDataGeneration);
        msg->post();
    }

    return result;
}
到這里GenericSource的主要工作原理就學習完成了。
