音樂播放可以使用MediaPlayer.java或者AudioTrack.java
這里我們討論MediaPlayer.java播放音樂從上至下的過程。
------------------------------------
android播放音樂java層的流程:
MediaPlayer mMediaPlayer = new MediaPlayer( );
mMediaPlayer.setDataSource(mContext, mUri);
mMediaPlayer.setAudioStreamType(AudioManager.STREAM_MUSIC); //not necessary
mMediaPlayer.prepareAsync();
mMediaPlayer.start();
-------------------------------------
Media相關的類圖
類之間的關系
MediaPlayer [mPlayer = MediaPlayerService.create() { return MediaPlayerService::Client } ]
MediaPlayerService::Client[mPlayer = StagefrightPlayer[mPlayer=AwesomePlayer[mAudioPlayer[mAudioSink extends AudioOutput]]]]
//MediaPlayer.java public class MediaPlayer implements SubtitleController.Listener { static { System.loadLibrary("media_jni");//加載庫 native_init(); //JNI初始化相關的東西 } private final static String TAG = "MediaPlayer"; // Name of the remote interface for the media player. Must be kept // in sync with the 2nd parameter of the IMPLEMENT_META_INTERFACE // macro invocation in IMediaPlayer.cpp private final static String IMEDIA_PLAYER = "android.media.IMediaPlayer"; private long mNativeContext; // accessed by native methods private long mNativeSurfaceTexture; // accessed by native methods private int mListenerContext; // accessed by native methods private SurfaceHolder mSurfaceHolder; private EventHandler mEventHandler; private PowerManager.WakeLock mWakeLock = null; private boolean mScreenOnWhilePlaying; private boolean mStayAwake; private final IAppOpsService mAppOps; private int mStreamType = AudioManager.USE_DEFAULT_STREAM_TYPE; private int mUsage = -1; /** * Default constructor. Consider using one of the create() methods for * synchronously instantiating a MediaPlayer from a Uri or resource. * <p>When done with the MediaPlayer, you should call {@link #release()}, * to free the resources. If not released, too many MediaPlayer instances may * result in an exception.</p> */ public MediaPlayer() { Looper looper; if ((looper = Looper.myLooper()) != null) { mEventHandler = new EventHandler(this, looper); } else if ((looper = Looper.getMainLooper()) != null) { mEventHandler = new EventHandler(this, looper); } else { mEventHandler = null; } mTimeProvider = new TimeProvider(this); mOutOfBandSubtitleTracks = new Vector<SubtitleTrack>(); mOpenSubtitleSources = new Vector<InputStream>(); mInbandSubtitleTracks = new SubtitleTrack[0]; IBinder b = ServiceManager.getService(Context.APP_OPS_SERVICE); mAppOps = IAppOpsService.Stub.asInterface(b); /* Native setup requires a weak reference to our object. * It's easier to create it here than in C++. 
*/ native_setup(new WeakReference<MediaPlayer>(this)); } private void setDataSource(String path, String[] keys, String[] values) throws IOException, IllegalArgumentException, SecurityException, IllegalStateException { final Uri uri = Uri.parse(path); final String scheme = uri.getScheme(); if ("file".equals(scheme)) { path = uri.getPath(); } else if (scheme != null) { // handle non-file sources nativeSetDataSource( MediaHTTPService.createHttpServiceBinderIfNecessary(path), path, keys, values); return; } final File file = new File(path); if (file.exists()) { FileInputStream is = new FileInputStream(file); FileDescriptor fd = is.getFD(); //文件描述符FileDescriptor是這么獲取的 setDataSource(fd); is.close(); } else { throw new IOException("setDataSource failed."); } } public void setDataSource(FileDescriptor fd, long offset, long length) throws IOException, IllegalArgumentException, IllegalStateException { //MediaPlayer.java往JNI層傳的最終是文件描述符FileDescriptor _setDataSource(fd, offset, length); }
//JNI層方法 private native void _setDataSource(FileDescriptor fd, long offset, long length) throws IOException, IllegalArgumentException, IllegalStateException;
private native final void native_setup(Object mediaplayer_this); }
context: 用來保存創建的mediaplayer.
post_event:用來將JNI層的事件回調給JAVA層。
實現:MediaPlayer.java中實現了 postEventFromNative()函數,發消息給MediaPlayer.java中的線程;JNI層中,獲取 postEventFromNative()函數指針,賦給post_event。JNI層就可以通過post_event來將事件回調給java層。
因此,可以看出 JNI層有一個MediaPlayer.cpp對象,它接收java層的MediaPlayer.java對象傳遞過來的信息。
JNI層更像是一個接力手,將java層MediaPlayer對象的事情交給C++層的MediaPlayer對象(這個對象對應於java層的MediaPlayer對象,但不是同一個對象)來處理
frameworks/base/media/jni/android_media_MediaPlayer.cpp struct fields_t { jfieldID context; jfieldID surface_texture; jmethodID post_event; jmethodID proxyConfigGetHost; jmethodID proxyConfigGetPort; jmethodID proxyConfigGetExclusionList; }; static fields_t fields; //JNI層保存的JNI層的一些值 這是個全局變量
static void
android_media_MediaPlayer_native_setup(JNIEnv *env, jobject thiz, jobject weak_this)
{
ALOGV("native_setup");
sp<MediaPlayer> mp = new MediaPlayer(); //創建JNI層MediaPlayer對象
if (mp == NULL) {
jniThrowException(env, "java/lang/RuntimeException", "Out of memory");
return;
}
// create new listener and give it to MediaPlayer
sp<JNIMediaPlayerListener> listener = new JNIMediaPlayerListener(env, thiz, weak_this);
mp->setListener(listener);
// Stow our new C++ MediaPlayer in an opaque field in the Java object.
setMediaPlayer(env, thiz, mp);
} //播放或設置時通過該函數來獲得JNI層的MediaPlayer(對應java層的MediaPlayer) 對應的字段為context //JNIEnv每個線程獨享的JNI環境,負責函數接口成員尋找 thiz指java層調用這個函數當前對象 static sp<MediaPlayer> getMediaPlayer(JNIEnv* env, jobject thiz) { Mutex::Autolock l(sLock); MediaPlayer* const p = (MediaPlayer*)env->GetLongField(thiz, fields.context); return sp<MediaPlayer>(p); } //保存C++層聲明的MediaPlayer對象,留一個引用,方便以后用。 通過fields.context來保存 static sp<MediaPlayer> setMediaPlayer(JNIEnv* env, jobject thiz, const sp<MediaPlayer>& player) { Mutex::Autolock l(sLock); sp<MediaPlayer> old = (MediaPlayer*)env->GetLongField(thiz, fields.context); if (player.get()) { player->incStrong((void*)setMediaPlayer); } if (old != 0) { old->decStrong((void*)setMediaPlayer); } env->SetLongField(thiz, fields.context, (jlong)player.get()); return old; } static void android_media_MediaPlayer_setDataSourceFD(JNIEnv *env, jobject thiz, jobject fileDescriptor, jlong offset, jlong length) { sp<MediaPlayer> mp = getMediaPlayer(env, thiz); //可以看到 每次都是將存儲到JNI層的mp取出來 if (mp == NULL ) { jniThrowException(env, "java/lang/IllegalStateException", NULL); return; } if (fileDescriptor == NULL) { jniThrowException(env, "java/lang/IllegalArgumentException", NULL); return; } int fd = jniGetFDFromFileDescriptor(env, fileDescriptor); ALOGV("setDataSourceFD: fd %d", fd); process_media_player_call( env, thiz, mp->setDataSource(fd, offset, length), "java/io/IOException", "setDataSourceFD failed." ); }
然后到了C++層
//frameworks/av/media/libmedia/mediaplayer.cpp status_t MediaPlayer::setDataSource(int fd, int64_t offset, int64_t length) { ALOGV("setDataSource(%d, %" PRId64 ", %" PRId64 ")", fd, offset, length); status_t err = UNKNOWN_ERROR; const sp<IMediaPlayerService>& service(getMediaPlayerService()); if (service != 0) {
//創建一個MediaPlayerService:client sp<IMediaPlayer> player(service->create(this, mAudioSessionId)); if ((NO_ERROR != doSetRetransmitEndpoint(player)) || (NO_ERROR != player->setDataSource(fd, offset, length))) { player.clear(); } err = attachNewPlayer(player);//保存這個MediaPlayerService:client } return err; }
//frameworks/av/media/libmediaplayerservice/MediaPlayerService.cpp
//service->create(this, mAudioSessionId)
// Runs inside the MediaPlayerService process (the Bn/server side): creates a
// Client (itself a BnMediaPlayer) for this caller and records a weak reference
// to it in the service's client list.
sp<IMediaPlayer> MediaPlayerService::create(const sp<IMediaPlayerClient>& client, int audioSessionId)
{
    pid_t pid = IPCThreadState::self()->getCallingPid();
    int32_t connId = android_atomic_inc(&mNextConnId);
    // register a new Client for this connection
    sp<Client> c = new Client( this, pid, connId, client, audioSessionId, IPCThreadState::self()->getCallingUid());
    ALOGV("Create new client(%d) from pid %d, uid %d, ", connId, pid, IPCThreadState::self()->getCallingUid());
    wp<Client> w = c;
    {
        Mutex::Autolock lock(mLock);
        // add it to the service's client array
        mClients.add(w);
    }
    return c;
}
//player->setDataSource(fd, offset, length)
// Server-side setDataSource: validates the fd with fstat, clamps offset/length
// to the file size, selects a player type for the content, creates that player
// (setDataSource_pre) and hands it the data source (setDataSource_post).
status_t MediaPlayerService::Client::setDataSource(int fd, int64_t offset, int64_t length) {
    ALOGV("setDataSource fd=%d, offset=%lld, length=%lld", fd, offset, length);
    struct stat sb;
    int ret = fstat(fd, &sb);
    if (ret != 0) {
        ALOGE("fstat(%d) failed: %d, %s", fd, ret, strerror(errno));
        return UNKNOWN_ERROR;
    }
    ALOGV("st_dev = %llu", sb.st_dev);
    ALOGV("st_mode = %u", sb.st_mode);
    ALOGV("st_uid = %lu", static_cast<unsigned long>(sb.st_uid));
    ALOGV("st_gid = %lu", static_cast<unsigned long>(sb.st_gid));
    ALOGV("st_size = %llu", sb.st_size);
    // offset beyond the end of the file is an error
    if (offset >= sb.st_size) {
        ALOGE("offset error");
        ::close(fd);
        return UNKNOWN_ERROR;
    }
    // clamp length so that [offset, offset+length) stays within the file
    if (offset + length > sb.st_size) {
        length = sb.st_size - offset;
        ALOGV("calculated length = %lld", length);
    }
    // MediaPlayerFactory::getPlayerType picks a suitable player for this file.
    // It inspects the file content to score the registered factories; the
    // candidates are STAGEFRIGHT_PLAYER, NU_PLAYER, SONIVOX_PLAYER, TEST_PLAYER,
    // with STAGEFRIGHT_PLAYER as the default.
    player_type playerType = MediaPlayerFactory::getPlayerType(this, fd, offset, length);
    sp<MediaPlayerBase> p = setDataSource_pre(playerType);
    // now set data source
    setDataSource_post(p, p->setDataSource(fd, offset, length));
    return mStatus;
}
拿 sp<IMediaPlayerService> &service (本質是Bp)創建了一個MediaPlayerService::Client,也就是sp<IMediaPlayer> player,給最上層(Apk)用。
每個player對應服務端(MediaPlayerService,Bn端)中的一個MediaPlayerService::Client,服務端中可能同時存在很多MediaPlayerService::Client
class MediaPlayerService : public BnMediaPlayerService { class Client : public BnMediaPlayer {} } class BnMediaPlayer: public BnInterface<IMediaPlayer> {}
sp<IMediaPlayer> player實際上是MediaPlayerService::Client對象,而不是MediaPlayer對象,千萬別搞錯了
frameworks/av/media/libmedia/IMediaDeathNotifier.cpp namespace android { // client singleton for binder interface to services Mutex IMediaDeathNotifier::sServiceLock; sp<IMediaPlayerService> IMediaDeathNotifier::sMediaPlayerService; //android系統中的全局變量保存Bp端引用 sp<IMediaDeathNotifier::DeathNotifier> IMediaDeathNotifier::sDeathNotifier; SortedVector< wp<IMediaDeathNotifier> > IMediaDeathNotifier::sObitRecipients; // establish binder interface to MediaPlayerService /*static*/const sp<IMediaPlayerService>& IMediaDeathNotifier::getMediaPlayerService() { ALOGV("getMediaPlayerService"); Mutex::Autolock _l(sServiceLock); if (sMediaPlayerService == 0) { sp<IServiceManager> sm = defaultServiceManager(); //handle為0的Bp ServiceManager sp<IBinder> binder; do { //取MediaPlayerService的Bp端,看到沒 是這樣去取的:服務可能沒起來,所以可能要等 binder = sm->getService(String16("media.player")); if (binder != 0) { break; } ALOGW("Media player service not published, waiting..."); usleep(500000); // 0.5 s } while (true); if (sDeathNotifier == NULL) { sDeathNotifier = new DeathNotifier(); } binder->linkToDeath(sDeathNotifier); sMediaPlayerService = interface_cast<IMediaPlayerService>(binder); //跟ServiceManager如此相似,其實就是建立了個包含binder的IMediaPlayerService對象 } ALOGE_IF(sMediaPlayerService == 0, "no media player service!?"); return sMediaPlayerService; } }
sp<MediaPlayerBase> p = setDataSource_pre(playerType);
//MediaPlayerService.cpp sp<MediaPlayerBase> MediaPlayerService::Client::setDataSource_pre( player_type playerType) { ALOGV("player type = %d", playerType); // create the right type of player sp<MediaPlayerBase> p = createPlayer(playerType); if (!p->hardwareOutput()) { mAudioOutput = new AudioOutput(mAudioSessionId, IPCThreadState::self()->getCallingUid(), mPid, mAudioAttributes); static_cast<MediaPlayerInterface*>(p.get())->setAudioSink(mAudioOutput); } return p; } //依據播放類型獲得播放器 sp<MediaPlayerBase> MediaPlayerService::Client::createPlayer(player_type playerType) { // determine if we have the right player type sp<MediaPlayerBase> p = mPlayer; //當前播放類型不是想要的則重新創建一個播放器 if ((p != NULL) && (p->playerType() != playerType)) { ALOGV("delete player"); p.clear(); } if (p == NULL) { p = MediaPlayerFactory::createPlayer(playerType, this, notify); } return p; }
// AwesomePlayer keeps the AudioSink (the service's AudioOutput) installed by
// MediaPlayerService::Client; it is later handed to the AudioPlayer it creates.
void AwesomePlayer::setAudioSink( const sp<MediaPlayerBase::AudioSink> &audioSink) {
    mAudioSink = audioSink;
}
當創建AudioPlayer時,將這個mAudioSink傳給了AudioPlayer
mAudioPlayer = new AudioPlayer(mAudioSink, flags, this);
//frameworks/av/media/libmediaplayerservice/MediaPlayerFactory.cpp sp<MediaPlayerBase> MediaPlayerFactory::createPlayer(player_type playerType, void* cookie, notify_callback_f notifyFunc) { sp<MediaPlayerBase> p; IFactory* factory; status_t init_result; Mutex::Autolock lock_(&sLock); if (sFactoryMap.indexOfKey(playerType) < 0) { ALOGE("Failed to create player object of type %d, no registered" " factory", playerType); return p; }
//顯然是使用的是工廠模式中的工廠方法模式。依據播放類型獲得對應的播放器工廠對象 factory = sFactoryMap.valueFor(playerType);//依據播放類型選擇對應的播放器工廠對象,目前有3種類型 CHECK(NULL != factory); //使用該播放工廠對象來獲得一個播放器 p = factory->createPlayer();
if (init_result == NO_ERROR) { p->setNotifyCallback(cookie, notifyFunc); } else { ALOGE("Failed to create player object of type %d, initCheck failed" " (res = %d)", playerType, init_result); p.clear(); } return p; } class StagefrightPlayerFactory : public MediaPlayerFactory::IFactory { public: virtual float scoreFactory(const sp<IMediaPlayer>& /*client*/, int fd, int64_t offset, int64_t /*length*/, float /*curScore*/) { if (getDefaultPlayerType() == STAGEFRIGHT_PLAYER) { char buf[20]; lseek(fd, offset, SEEK_SET); read(fd, buf, sizeof(buf)); lseek(fd, offset, SEEK_SET); uint32_t ident = *((uint32_t*)buf); // Ogg vorbis? if (ident == 0x5367674f) // 'OggS' return 1.0; } return 0.0; } virtual sp<MediaPlayerBase> createPlayer() { ALOGV(" create StagefrightPlayer"); return new StagefrightPlayer();//第一種 } }; class NuPlayerFactory : public MediaPlayerFactory::IFactory { //.............. if (!strncasecmp("http://", url, 7) || !strncasecmp("https://", url, 8) || !strncasecmp("file://", url, 7)) { size_t len = strlen(url); if (len >= 5 && !strcasecmp(".m3u8", &url[len - 5])) { return kOurScore; } if (strstr(url,"m3u8")) { return kOurScore; } if ((len >= 4 && !strcasecmp(".sdp", &url[len - 4])) || strstr(url, ".sdp?")) { return kOurScore; } } if (!strncasecmp("rtsp://", url, 7)) { return kOurScore; } virtual sp<MediaPlayerBase> createPlayer() { ALOGV(" create NuPlayer"); return new NuPlayerDriver; //第二種 } }; class SonivoxPlayerFactory : public MediaPlayerFactory::IFactory { static const char* const FILE_EXTS[] = { ".mid", ".midi", ".smf", ".xmf", ".mxmf", ".imy", ".rtttl", ".rtx", ".ota" }; //.............. virtual sp<MediaPlayerBase> createPlayer() { ALOGV(" create MidiFile"); return new MidiFile(); //第三種 } };
StagefrightPlayer: 默認播放器,本地文件基本都使用其播放
NuPlayerDriver:主要用於播放網絡視頻,http https rtsp等
SonivoxPlayer:用於播放midi等類型的音樂
// Player implementations that MediaPlayerFactory can select.
enum player_type {
    PV_PLAYER = 1,
    SONIVOX_PLAYER = 2,
    STAGEFRIGHT_PLAYER = 3,
    NU_PLAYER = 4,
    // Test players are available only in the 'test' and 'eng' builds.
    // The shared library with the test player is passed as an
    // argument to the 'test:' url in the setDataSource call.
    TEST_PLAYER = 5,
};
//frameworks/av/media/libmediaplayerservice/StagefrightPlayer.cpp StagefrightPlayer::StagefrightPlayer() : mPlayer(new AwesomePlayer) { mPlayer->setListener(this); } status_t StagefrightPlayer::setDataSource(int fd, int64_t offset, int64_t length) { ALOGV("setDataSource(%d, %lld, %lld)", fd, offset, length); return mPlayer->setDataSource(dup(fd), offset, length); }
p->setDataSource(fd, offset, length)
//frameworks/av/media/libstagefright/AwesomePlayer.cpp
// Wraps the fd in a FileSource and forwards to the locked variant.
status_t AwesomePlayer::setDataSource(int fd, int64_t offset, int64_t length) {
    reset_l();
    sp<DataSource> dataSource = new FileSource(fd, offset, length);
    status_t err = dataSource->initCheck();
    mFileSource = dataSource;
    return setDataSource_l(dataSource);
}

status_t AwesomePlayer::setDataSource_l( const sp<DataSource> &dataSource) {
    // MediaExtractor::Create sniffs the content to detect the container type
    // and creates the matching extractor (demuxer) — e.g. a WAVExtractor for
    // WAV, an MP3Extractor for mp3. Note: the extractor parses/demuxes the
    // container; actual decoding happens later via OMXCodec.
    sp<MediaExtractor> extractor = MediaExtractor::Create(dataSource);
    return setDataSource_l(extractor);
}
然后依據extractor做A/V分離
status_t AwesomePlayer::setDataSource_l(const sp<MediaExtractor> &extractor) { // Attempt to approximate overall stream bitrate by summing all // tracks' individual bitrates, if not all of them advertise bitrate, // we have to fail. int64_t totalBitRate = 0; mExtractor = extractor; for (size_t i = 0; i < extractor->countTracks(); ++i) { sp<MetaData> meta = extractor->getTrackMetaData(i); int32_t bitrate; if (!meta->findInt32(kKeyBitRate, &bitrate)) { const char *mime; CHECK(meta->findCString(kKeyMIMEType, &mime)); ALOGV("track of type '%s' does not publish bitrate", mime); totalBitRate = -1; break; } totalBitRate += bitrate; } sp<MetaData> fileMeta = mExtractor->getMetaData(); if (fileMeta != NULL) { int64_t duration; if (fileMeta->findInt64(kKeyDuration, &duration)) { mDurationUs = duration; } } mBitrate = totalBitRate; ALOGV("mBitrate = %lld bits/sec", mBitrate); { Mutex::Autolock autoLock(mStatsLock); mStats.mBitrate = mBitrate; mStats.mTracks.clear(); mStats.mAudioTrackIndex = -1; mStats.mVideoTrackIndex = -1; } bool haveAudio = false; bool haveVideo = false; for (size_t i = 0; i < extractor->countTracks(); ++i) { sp<MetaData> meta = extractor->getTrackMetaData(i); const char *_mime; CHECK(meta->findCString(kKeyMIMEType, &_mime)); String8 mime = String8(_mime); if (!haveVideo && !strncasecmp(mime.string(), "video/", 6)) { setVideoSource(extractor->getTrack(i)); haveVideo = true; // Set the presentation/display size int32_t displayWidth, displayHeight; bool success = meta->findInt32(kKeyDisplayWidth, &displayWidth); if (success) { success = meta->findInt32(kKeyDisplayHeight, &displayHeight); } if (success) { mDisplayWidth = displayWidth; mDisplayHeight = displayHeight; } { Mutex::Autolock autoLock(mStatsLock); mStats.mVideoTrackIndex = mStats.mTracks.size(); mStats.mTracks.push(); TrackStat *stat = &mStats.mTracks.editItemAt(mStats.mVideoTrackIndex); stat->mMIME = mime.string(); } } else if (!haveAudio && !strncasecmp(mime.string(), "audio/", 6)) { 
setAudioSource(extractor->getTrack(i)); haveAudio = true; mActiveAudioTrackIndex = i; { Mutex::Autolock autoLock(mStatsLock); mStats.mAudioTrackIndex = mStats.mTracks.size(); mStats.mTracks.push(); TrackStat *stat = &mStats.mTracks.editItemAt(mStats.mAudioTrackIndex); stat->mMIME = mime.string(); } if (!strcasecmp(mime.string(), MEDIA_MIMETYPE_AUDIO_VORBIS)) { // Only do this for vorbis audio, none of the other audio // formats even support this ringtone specific hack and // retrieving the metadata on some extractors may turn out // to be very expensive. sp<MetaData> fileMeta = extractor->getMetaData(); int32_t loop; if (fileMeta != NULL && fileMeta->findInt32(kKeyAutoLoop, &loop) && loop != 0) { modifyFlags(AUTO_LOOPING, SET); } } } else if (!strcasecmp(mime.string(), MEDIA_MIMETYPE_TEXT_3GPP)) { addTextSource_l(i, extractor->getTrack(i)); } } if (!haveAudio && !haveVideo) { if (mWVMExtractor != NULL) { return mWVMExtractor->getError(); } else { return UNKNOWN_ERROR; } } mExtractorFlags = extractor->flags(); return OK; }
// now set data source
setDataSource_post(p, p->setDataSource(fd, offset, length));
//MediaPlayerService.cpp
// Records the result of the player's setDataSource; on success the Client
// keeps the player in mPlayer for all subsequent calls.
void MediaPlayerService::Client::setDataSource_post(const sp<MediaPlayerBase>& p, status_t status)
{
    mStatus = status;
    if (mStatus != OK) {
        ALOGE(" error: %d", mStatus); // data-source setup failed; nothing more to do
        return;
    }
    if (mStatus == OK) {
        mPlayer = p; // the Client keeps this player
    }
}
err = attachNewPlayer(player);
status_t MediaPlayer::attachNewPlayer(const sp<IMediaPlayer>& player) { //........................ mPlayer = player;//保存起來 }
Note:好多類中都有mPlayer這個成員,千萬別搞混淆
小結下:
//依據文件來獲得播放類型
player_type playerType = MediaPlayerFactory::getPlayerType(this, fd, offset, length);
//第一步:_pre()創建播放器 sp<MediaPlayerBase> p = setDataSource_pre(playerType); //第二步:依據dataSource創建對應的解碼器
p->setDataSource(fd, offset, length)
//第三步:post()將播放器保存在client中 setDataSource_post(p, p->setDataSource(fd, offset, length));
//將這個MediaPlayerService::Client保存在MediaPlayer中
err = attachNewPlayer(player);
至此 MediaPlayer.setDataSource()流程走完
------------------------------------------------------------------------------------------------------------------------------------------------
mMediaPlayer.prepareAsync();
// frameworks/base/media/java/android/media/MediaPlayer.java
public native void prepareAsync() throws IllegalStateException;

// JNI layer (android_media_MediaPlayer.cpp):
static void android_media_MediaPlayer_prepareAsync(JNIEnv *env, jobject thiz){ mp->prepareAsync() }

// C++ layer: MediaPlayer::prepareAsync()=>MediaPlayer::prepareAsync_l();
// Only legal once a data source is set (INITIALIZED) or after stop(); pushes
// the cached stream type / audio attributes down, then asks the service-side
// player to prepare asynchronously.
status_t MediaPlayer::prepareAsync_l() {
    if ( (mPlayer != 0) && ( mCurrentState & ( MEDIA_PLAYER_INITIALIZED | MEDIA_PLAYER_STOPPED) ) ) {
        mPlayer->setAudioStreamType(mStreamType);
        if (mAudioAttributesParcel != NULL) {
            mPlayer->setParameter(KEY_PARAMETER_AUDIO_ATTRIBUTES, *mAudioAttributesParcel);
        }
        mCurrentState = MEDIA_PLAYER_PREPARING;
        return mPlayer->prepareAsync();
    }
    return INVALID_OPERATION;
}
//MediaPlayerService.cpp status_t MediaPlayerService::Client::setAudioStreamType(audio_stream_type_t type) { ALOGV("[%d] setAudioStreamType(%d)", mConnId, type); mAudioOutput->setAudioStreamType(type); return NO_ERROR; }
//frameworks/av/media/libmediaplayerservice/MediaPlayerService.h void setAudioStreamType(audio_stream_type_t streamType) { mStreamType = streamType; } sp<MediaPlayerBase> getPlayer() const { return mPlayer; }
// prepareAsync call chain: Client -> StagefrightPlayer -> AwesomePlayer.
status_t MediaPlayerService::Client::prepareAsync()
{
    sp<MediaPlayerBase> p = getPlayer(); // Maybe StagefrightPlayer object
    status_t ret = p->prepareAsync();
    return ret;
}

status_t StagefrightPlayer::prepareAsync() {
    return mPlayer->prepareAsync();
}

status_t AwesomePlayer::prepareAsync() {
    mIsAsyncPrepare = true;
    return prepareAsync_l();
}

status_t AwesomePlayer::prepareAsync_l() {
    if (!mQueueStarted) {
        mQueue.start(); // start the event-queue thread that keeps dispatching events
        mQueueStarted = true;
    }
    modifyFlags(PREPARING, SET);
    mAsyncPrepareEvent = new AwesomeEvent(this, &AwesomePlayer::onPrepareAsyncEvent);
    // Post the first event; when it fires, AwesomePlayer::onPrepareAsyncEvent
    // runs and performs the actual (asynchronous) preparation work.
    mQueue.postEvent(mAsyncPrepareEvent);
    return OK;
}
首先mQueue.start()。mQueue是TimedEventQueue類型的實例,TimedEventQueue::start函數則創建一個線程, 該線程不斷地從mQueue里取出相應的event,並調用相應的event的fire函數來執行相應的處理。之前,我們已經 知道在AwesomePlayer的構造函數里已經創建了很多TimedEventQueue::Event實例。
//frameworks/av/media/libstagefright/include/TimedEventQueue.h struct TimedEventQueue { struct Event : public RefBase { virtual void fire(TimedEventQueue *queue, int64_t now_us) = 0; friend class TimedEventQueue; }; // Start executing the event loop. void start(); // Stop executing the event loop, if flush is false, any pending // events are discarded, otherwise the queue will stop (and this call // return) once all pending events have been handled. void stop(bool flush = false); // Posts an event to the front of the queue (after all events that // have previously been posted to the front but before timed events). event_id postEvent(const sp<Event> &event); pthread_t mThread; List<QueueItem> mQueue; } void TimedEventQueue::start() { if (mRunning) { return; } mStopped = false; pthread_attr_t attr; pthread_attr_init(&attr); pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE); pthread_create(&mThread, &attr, ThreadWrapper, this); pthread_attr_destroy(&attr); mRunning = true; } void TimedEventQueue::stop(bool flush) { if (!mRunning) { return; } if (flush) { postEventToBack(new StopEvent); } else { postTimedEvent(new StopEvent, INT64_MIN); } void *dummy; pthread_join(mThread, &dummy); // some events may be left in the queue if we did not flush and the wake lock // must be released. releaseWakeLock_l(true /*force*/); mQueue.clear(); mRunning = false; }
void AwesomePlayer::onPrepareAsyncEvent() { beginPrepareAsync_l(); } void AwesomePlayer::beginPrepareAsync_l() { if (mUri.size() > 0) { status_t err = finishSetDataSource_l(); if (err != OK) { abortPrepare(err); return; } } if (mVideoTrack != NULL && mVideoSource == NULL) { status_t err = initVideoDecoder(); } if (mAudioTrack != NULL && mAudioSource == NULL) { status_t err = initAudioDecoder(); } modifyFlags(PREPARING_CONNECTED, SET); if (isStreamingHTTP()) { postBufferingEvent_l(); } else { finishAsyncPrepare_l(); } } void AwesomePlayer::finishAsyncPrepare_l() { if (mIsAsyncPrepare) { if (mVideoSource == NULL) { notifyListener_l(MEDIA_SET_VIDEO_SIZE, 0, 0); } else { notifyVideoSize_l(); } notifyListener_l(MEDIA_PREPARED); } mPrepareResult = OK; modifyFlags((PREPARING|PREPARE_CANCELLED|PREPARING_CONNECTED), CLEAR); modifyFlags(PREPARED, SET); mAsyncPrepareEvent = NULL; mPreparedCondition.broadcast(); if (mAudioTearDown) { if (mPrepareResult == OK) { if (mExtractorFlags & MediaExtractor::CAN_SEEK) { seekTo_l(mAudioTearDownPosition); } if (mAudioTearDownWasPlaying) { modifyFlags(CACHE_UNDERRUN, CLEAR); play_l(); } } mAudioTearDown = false; } }
status_t AwesomePlayer::initAudioDecoder() { sp<MetaData> meta = mAudioTrack->getFormat(); const char *mime; CHECK(meta->findCString(kKeyMIMEType, &mime)); // Check whether there is a hardware codec for this stream // This doesn't guarantee that the hardware has a free stream // but it avoids us attempting to open (and re-open) an offload // stream to hardware that doesn't have the necessary codec audio_stream_type_t streamType = AUDIO_STREAM_MUSIC; if (mAudioSink != NULL) { streamType = mAudioSink->getAudioStreamType(); } mOffloadAudio = canOffloadStream(meta, (mVideoSource != NULL), isStreamingHTTP(), streamType); if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_RAW)) { ALOGV("createAudioPlayer: bypass OMX (raw)"); mAudioSource = mAudioTrack; } else { // If offloading we still create a OMX decoder as a fall-back // but we don't start it mOmxSource = OMXCodec::Create( mClient.interface(), mAudioTrack->getFormat(), false, // createEncoder mAudioTrack); if (mOffloadAudio) { ALOGV("createAudioPlayer: bypass OMX (offload)"); mAudioSource = mAudioTrack; } else { mAudioSource = mOmxSource; } } if (mAudioSource != NULL) { int64_t durationUs; if (mAudioTrack->getFormat()->findInt64(kKeyDuration, &durationUs)) { Mutex::Autolock autoLock(mMiscStateLock); if (mDurationUs < 0 || durationUs > mDurationUs) { mDurationUs = durationUs; } } status_t err = mAudioSource->start(); if (err != OK) { mAudioSource.clear(); mOmxSource.clear(); return err; } } else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_QCELP)) { // For legacy reasons we're simply going to ignore the absence // of an audio decoder for QCELP instead of aborting playback // altogether. return OK; } if (mAudioSource != NULL) { Mutex::Autolock autoLock(mStatsLock); TrackStat *stat = &mStats.mTracks.editItemAt(mStats.mAudioTrackIndex); const char *component; if (!mAudioSource->getFormat() ->findCString(kKeyDecoderComponent, &component)) { component = "none"; } stat->mDecoderName = component; } return mAudioSource != NULL ? 
OK : UNKNOWN_ERROR; }
--------------------------------------------------------------------------------------------------------------
mMediaPlayer.start();
// frameworks/base/media/java/android/media/MediaPlayer.java
// Java layer: start() simply calls the native _start().
public void start() throws IllegalStateException {
    _start();
}

private native void _start() throws IllegalStateException;

// JNI layer:
static void android_media_MediaPlayer_start(JNIEnv *env, jobject thiz)
{
    sp<MediaPlayer> mp = getMediaPlayer(env, thiz);
    process_media_player_call( env, thiz, mp->start(), NULL, NULL );
}

// C++ layer: applies the cached looping/volume/effect settings, then asks the
// service-side player (MediaPlayerService::Client) to start.
status_t MediaPlayer::start()
{
    mPlayer->setLooping(mLoop);
    mPlayer->setVolume(mLeftVolume, mRightVolume);
    mPlayer->setAuxEffectSendLevel(mSendLevel);
    mCurrentState = MEDIA_PLAYER_STARTED;
    ret = mPlayer->start(); // ask the MediaPlayerService::Client to start playback
    return ret;
}

status_t MediaPlayerService::Client::start()
{
    sp<MediaPlayerBase> p = getPlayer(); // fetch the player, e.g. a StagefrightPlayer
    p->setLooping(mLoop);
    return p->start();
}

status_t StagefrightPlayer::start() {
    return mPlayer->play(); // AwesomePlayer->play();
}

status_t AwesomePlayer::play() {
    modifyFlags(CACHE_UNDERRUN, CLEAR);
    return play_l();
}

status_t AwesomePlayer::play_l() {
    modifyFlags(SEEK_PREVIEW, CLEAR);
    if (mFlags & PLAYING) {
        return OK;
    }
    if (!(mFlags & PREPARED)) {
        // Prepare synchronously if prepareAsync() has not completed yet;
        // in the normal flow above, preparation has already happened.
        status_t err = prepare_l();
        if (err != OK) {
            return err;
        }
    }
    modifyFlags(PLAYING, SET);
    modifyFlags(FIRST_FRAME, SET);
    if (mAudioSource != NULL) {
        if (mAudioPlayer == NULL) {
            createAudioPlayer_l();
        }
        if (mVideoSource == NULL) {
            // We don't want to post an error notification at this point,
            // the error returned from MediaPlayer::start() will suffice.
            status_t err = startAudioPlayer_l( false /* sendErrorNotification */);
        }
    }
    if (mFlags & AT_EOS) {
        // Legacy behaviour, if a stream finishes playing and then
        // is started again, we play from the start...
        seekTo_l(0);
    }
    return OK;
}
// Creates the AudioPlayer, wiring in the AudioSink that the service installed
// earlier via setAudioSink(), and makes it the playback clock (time source).
void AwesomePlayer::createAudioPlayer_l() {
    uint32_t flags = 0;
    int64_t cachedDurationUs;
    bool eos;
    if (mOffloadAudio) {
        flags |= AudioPlayer::USE_OFFLOAD;
    } else if (mVideoSource == NULL
            && (mDurationUs > AUDIO_SINK_MIN_DEEP_BUFFER_DURATION_US ||
                (getCachedDuration_l(&cachedDurationUs, &eos) &&
                 cachedDurationUs > AUDIO_SINK_MIN_DEEP_BUFFER_DURATION_US))) {
        // Audio-only content that is (or is cached) long enough: allow deep buffering.
        flags |= AudioPlayer::ALLOW_DEEP_BUFFERING;
    }
    if (isStreamingHTTP()) {
        flags |= AudioPlayer::IS_STREAMING;
    }
    if (mVideoSource != NULL) {
        flags |= AudioPlayer::HAS_VIDEO;
    }
    mAudioPlayer = new AudioPlayer(mAudioSink, flags, this);
    mAudioPlayer->setSource(mAudioSource);
    mTimeSource = mAudioPlayer;
    // If there was a seek request before we ever started,
    // honor the request now.
    // Make sure to do this before starting the audio player
    // to avoid a race condition.
    seekAudioIfNecessary_l();
}
class AudioPlayer : public TimeSource {}
status_t AwesomePlayer::startAudioPlayer_l(bool sendErrorNotification) { if (mOffloadAudio) { mQueue.cancelEvent(mAudioTearDownEvent->eventID()); mAudioTearDownEventPending = false; } if (!(mFlags & AUDIOPLAYER_STARTED)) { bool wasSeeking = mAudioPlayer->isSeeking(); // We've already started the MediaSource in order to enable // the prefetcher to read its data. err = mAudioPlayer->start( true /* sourceAlreadyStarted */); modifyFlags(AUDIOPLAYER_STARTED, SET); if (wasSeeking) { CHECK(!mAudioPlayer->isSeeking()); // We will have finished the seek while starting the audio player. postAudioSeekComplete(); } else { notifyIfMediaStarted_l(); } } else { err = mAudioPlayer->resume(); } if (err == OK) { modifyFlags(AUDIO_RUNNING, SET); mWatchForAudioEOS = true; } return err; }
status_t AudioPlayer::start(bool sourceAlreadyStarted) { status_t err; if (!sourceAlreadyStarted) { err = mSource->start(); } // We allow an optional INFO_FORMAT_CHANGED at the very beginning // of playback, if there is one, getFormat below will retrieve the // updated format, if there isn't, we'll stash away the valid buffer // of data to be used on the first audio callback. CHECK(mFirstBuffer == NULL); mFirstBufferResult = mSource->read(&mFirstBuffer, &options);//調用的是OMXCodec::read(),讀取audio decoder數據 if (mFirstBufferResult == INFO_FORMAT_CHANGED) { ALOGV("INFO_FORMAT_CHANGED!!!"); CHECK(mFirstBuffer == NULL); mFirstBufferResult = OK; mIsFirstBuffer = false; } else { mIsFirstBuffer = true; } sp<MetaData> format = mSource->getFormat(); const char *mime; bool success = format->findCString(kKeyMIMEType, &mime); CHECK(success); CHECK(useOffload() || !strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_RAW)); int32_t numChannels, channelMask; success = format->findInt32(kKeyChannelCount, &numChannels); audio_format_t audioFormat = AUDIO_FORMAT_PCM_16_BIT; if (useOffload()) { if (mapMimeToAudioFormat(audioFormat, mime) != OK) { ALOGE("Couldn't map mime type \"%s\" to a valid AudioSystem::audio_format", mime); audioFormat = AUDIO_FORMAT_INVALID; } else { ALOGV("Mime type \"%s\" mapped to audio_format 0x%x", mime, audioFormat); } int32_t aacaot = -1; if ((audioFormat == AUDIO_FORMAT_AAC) && format->findInt32(kKeyAACAOT, &aacaot)) { // Redefine AAC format corrosponding to aac profile mapAACProfileToAudioFormat(audioFormat,(OMX_AUDIO_AACPROFILETYPE) aacaot); } } int avgBitRate = -1; format->findInt32(kKeyBitRate, &avgBitRate); if (mAudioSink.get() != NULL) {//一般這里=true 不管是offload還是raw數據播放 uint32_t flags = AUDIO_OUTPUT_FLAG_NONE; audio_offload_info_t offloadInfo = AUDIO_INFO_INITIALIZER; if (allowDeepBuffering()) { flags |= AUDIO_OUTPUT_FLAG_DEEP_BUFFER; } if (useOffload()) { flags |= AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD; int64_t durationUs; if (format->findInt64(kKeyDuration, 
&durationUs)) { offloadInfo.duration_us = durationUs; } else { offloadInfo.duration_us = -1; } offloadInfo.sample_rate = mSampleRate; offloadInfo.channel_mask = channelMask; offloadInfo.format = audioFormat; offloadInfo.stream_type = AUDIO_STREAM_MUSIC; offloadInfo.bit_rate = avgBitRate; offloadInfo.has_video = ((mCreateFlags & HAS_VIDEO) != 0); offloadInfo.is_streaming = ((mCreateFlags & IS_STREAMING) != 0); } status_t err = mAudioSink->open( mSampleRate, numChannels, channelMask, audioFormat, DEFAULT_AUDIOSINK_BUFFERCOUNT, &AudioPlayer::AudioSinkCallback, this, (audio_output_flags_t)flags, useOffload() ? &offloadInfo : NULL); if (err == OK) { mLatencyUs = (int64_t)mAudioSink->latency() * 1000; mFrameSize = mAudioSink->frameSize(); if (useOffload()) { // If the playback is offloaded to h/w we pass the // HAL some metadata information // We don't want to do this for PCM because it will be going // through the AudioFlinger mixer before reaching the hardware sendMetaDataToHal(mAudioSink, format); } err = mAudioSink->start();//開始播放=>mTrack->start()=>playbackThread->addTrack_l(this)=>AudioSystem::startOutput() // do not alter behavior for non offloaded tracks: ignore start status. if (!useOffload()) { err = OK; } } if (err != OK) { if (mFirstBuffer != NULL) { mFirstBuffer->release(); mFirstBuffer = NULL; } if (!sourceAlreadyStarted) { mSource->stop(); } return err; } } else { // playing to an AudioTrack, set up mask if necessary audio_channel_mask_t audioMask = channelMask == CHANNEL_MASK_USE_CHANNEL_ORDER ? 
audio_channel_out_mask_from_count(numChannels) : channelMask; if (0 == audioMask) { return BAD_VALUE; } mAudioTrack = new AudioTrack( AUDIO_STREAM_MUSIC, mSampleRate, AUDIO_FORMAT_PCM_16_BIT, audioMask, 0 /*frameCount*/, AUDIO_OUTPUT_FLAG_NONE, &AudioCallback, this, 0 /*notificationFrames*/); if ((err = mAudioTrack->initCheck()) != OK) { mAudioTrack.clear(); if (mFirstBuffer != NULL) { mFirstBuffer->release(); mFirstBuffer = NULL; } if (!sourceAlreadyStarted) { mSource->stop(); } return err; } mLatencyUs = (int64_t)mAudioTrack->latency() * 1000; mFrameSize = mAudioTrack->frameSize(); mAudioTrack->start(); } mStarted = true; mPlaying = true; mPinnedTimeUs = -1ll; return OK; }
// MediaPlayerService is the media server's binder service; it implements the
// IMediaPlayerService interface via the BnMediaPlayerService stub.
class MediaPlayerService : public BnMediaPlayerService {
// AudioOutput is the AudioSink implementation that MediaPlayerService hands to
// the native player engine; per the open()/start() excerpt below it wraps an
// AudioTrack internally.
class AudioOutput : public MediaPlayerBase::AudioSink{}
}
// Opens the sink: (after elided setup, shown as "....") creates the AudioTrack
// that actually renders PCM to AudioFlinger. The callback path is
// CallbackWrapper/newcbd rather than the caller's cb directly.
// NOTE(review): body is abridged in this excerpt — the elided portion is not
// shown here; consult the full AOSP source before relying on details.
status_t MediaPlayerService::AudioOutput::open(
        uint32_t sampleRate, int channelCount, audio_channel_mask_t channelMask,
        audio_format_t format, int bufferCount,
        AudioCallback cb, void *cookie, audio_output_flags_t flags) {
    ..........................
    audioTrack = new AudioTrack(mStreamType, sampleRate, format,channelMask,
            0, flags,CallbackWrapper,newcbd, 0,mSessionId);
}

// Starts playback on the underlying AudioTrack: applies the cached volume and
// aux-effect send level, then calls mTrack->start().
void MediaPlayerService::AudioOutput::start() {
    ALOGV("start");
    if (mCallbackData != NULL) {
        // finish any pending track switch before starting
        mCallbackData->endTrackSwitch();
    }
    if (mTrack) {
        mTrack->setVolume(mLeftVolume, mRightVolume);
        mTrack->setAuxEffectSendLevel(mSendLevel);
        mTrack->start();
    }
}
AwesomePlayer:
// AwesomePlayer: the Stagefright playback engine (per the class diagram in the
// header, it is held by StagefrightPlayer inside MediaPlayerService::Client).
struct AwesomePlayer {
    status_t play();
    status_t pause();
    void setAudioSink(const sp<MediaPlayerBase::AudioSink> &audioSink);

    sp<MediaPlayerBase::AudioSink> mAudioSink; // AudioPlayer also holds a reference to this sink
    sp<DataSource> mFileSource;                // result of setDataSource(); i.e. the dataSource
    sp<MediaExtractor> mExtractor;             // extractor created from mFileSource
    sp<MediaSource> mVideoTrack;
    sp<MediaSource> mVideoSource;
    sp<MediaSource> mAudioTrack;               // member obtained from mExtractor
    sp<MediaSource> mAudioSource;              // member obtained from mExtractor

    // AudioPlayer is a subclass of TimeSource; normally mTimeSource = mAudioPlayer
    TimeSource *mTimeSource;
    AudioPlayer *mAudioPlayer;
}
// AudioPlayer drives audio decode/render for AwesomePlayer. It is itself a
// TimeSource (playback position clocks A/V sync). Output goes either through
// the provided AudioSink (mAudioSink) or directly through an AudioTrack.
class AudioPlayer : public TimeSource {
    AudioPlayer(const sp<MediaPlayerBase::AudioSink> &audioSink,
                uint32_t flags = 0,
                AwesomePlayer *audioObserver = NULL);

    void setSource(const sp<MediaSource> &source);
    status_t start(bool sourceAlreadyStarted = false);
    void pause(bool playPendingSamples = false);
    status_t resume();
    status_t seekTo(int64_t time_us);
    size_t fillBuffer(void *data, size_t size);
    void reset();

    // Flag accessors: behavior is selected at construction time via mCreateFlags.
    bool allowDeepBuffering() const { return (mCreateFlags & ALLOW_DEEP_BUFFERING) != 0; }
    bool useOffload() const { return (mCreateFlags & USE_OFFLOAD) != 0; }

    // Data-pull callback used on the direct-AudioTrack path.
    static void AudioCallback(int event, void *user, void *info);
    // There are two callback entry points: the instance-level AudioCallback
    // and the static AudioSinkCallback used when playing through mAudioSink.
    void AudioCallback(int event, void *info);
    static size_t AudioSinkCallback(
            MediaPlayerBase::AudioSink *audioSink,
            void *data, size_t size, void *me,
            MediaPlayerBase::AudioSink::cb_event_t event);

    sp<MediaSource> mSource;                   // decoded audio source to pull from
    sp<AudioTrack> mAudioTrack;                // used when no AudioSink is provided
    MediaBuffer *mInputBuffer;
    int mSampleRate;
    int64_t mLatencyUs;
    size_t mFrameSize;
    bool mStarted;
    bool mIsFirstBuffer;
    status_t mFirstBufferResult;
    MediaBuffer *mFirstBuffer;
    sp<MediaPlayerBase::AudioSink> mAudioSink; // sink handed down from AwesomePlayer
    AwesomePlayer *mObserver;                  // observer notified of audio events
};
圖片出處:http://blog.chinaunix.net/uid-7318785-id-3323948.html