MediaPlayer


音乐播放可以使用MediaPlayer.java或者AudioTrack.java

这里我们讨论MediaPlayer.java播放音乐从上至下的过程。

------------------------------------

android播放音乐java层的流程:

MediaPlayer  mMediaPlayer = new MediaPlayer( );
mMediaPlayer.setDataSource(mContext, mUri);
mMediaPlayer.setAudioStreamType(AudioManager.STREAM_MUSIC); //not necessary
mMediaPlayer.prepareAsync();
mMediaPlayer.start();

-------------------------------------

Media相关的类图

类之间的关系

MediaPlayer [mPlayer = MediaPlayerService.create() { return MediaPlayerService::Client } ]


MediaPlayerService::Client[mPlayer = StagefrightPlayer[mPlayer=AwesomePlayer[mAudioPlayer[mAudioSink extends AudioOutput]]]]

//MediaPlayer.java

public class MediaPlayer implements SubtitleController.Listener {
    static {
        System.loadLibrary("media_jni"); // load the native JNI library
        native_init(); // one-time JNI setup (caches field/method IDs on the native side)
    }

    private final static String TAG = "MediaPlayer";
    // Name of the remote interface for the media player. Must be kept
    // in sync with the 2nd parameter of the IMPLEMENT_META_INTERFACE
    // macro invocation in IMediaPlayer.cpp
    private final static String IMEDIA_PLAYER = "android.media.IMediaPlayer";

    private long mNativeContext; // accessed by native methods (holds the C++ MediaPlayer pointer)
    private long mNativeSurfaceTexture;  // accessed by native methods
    private int mListenerContext; // accessed by native methods
    private SurfaceHolder mSurfaceHolder;
    private EventHandler mEventHandler;
    private PowerManager.WakeLock mWakeLock = null;
    private boolean mScreenOnWhilePlaying;
    private boolean mStayAwake;
    private final IAppOpsService mAppOps;
    private int mStreamType = AudioManager.USE_DEFAULT_STREAM_TYPE;
    private int mUsage = -1;

    /**
     * Default constructor. Consider using one of the create() methods for
     * synchronously instantiating a MediaPlayer from a Uri or resource.
     * <p>When done with the MediaPlayer, you should call  {@link #release()},
     * to free the resources. If not released, too many MediaPlayer instances may
     * result in an exception.</p>
     */
    public MediaPlayer() {

        // Events posted from native code are delivered on this thread's
        // Looper when it has one, otherwise on the main Looper.
        Looper looper;
        if ((looper = Looper.myLooper()) != null) {
            mEventHandler = new EventHandler(this, looper);
        } else if ((looper = Looper.getMainLooper()) != null) {
            mEventHandler = new EventHandler(this, looper);
        } else {
            mEventHandler = null;
        }

        mTimeProvider = new TimeProvider(this);
        mOutOfBandSubtitleTracks = new Vector<SubtitleTrack>();
        mOpenSubtitleSources = new Vector<InputStream>();
        mInbandSubtitleTracks = new SubtitleTrack[0];
        IBinder b = ServiceManager.getService(Context.APP_OPS_SERVICE);
        mAppOps = IAppOpsService.Stub.asInterface(b);

        /* Native setup requires a weak reference to our object.
         * It's easier to create it here than in C++. The weak ref lets the
         * native layer post events back without pinning this Java object.
         */
        native_setup(new WeakReference<MediaPlayer>(this));
    }

    /**
     * Resolves a path-style source: file:// and plain paths are opened and
     * passed down as a FileDescriptor; any other scheme (http, rtsp, ...)
     * is forwarded to native code together with an optional HTTP service.
     */
    private void setDataSource(String path, String[] keys, String[] values)
            throws IOException, IllegalArgumentException, SecurityException, IllegalStateException {
        final Uri uri = Uri.parse(path);
        final String scheme = uri.getScheme();
        if ("file".equals(scheme)) {
            path = uri.getPath();
        } else if (scheme != null) {
            // handle non-file sources
            nativeSetDataSource(
                MediaHTTPService.createHttpServiceBinderIfNecessary(path),
                path,
                keys,
                values);
            return;
        }

        final File file = new File(path);
        if (file.exists()) {
            FileInputStream is = new FileInputStream(file);
            FileDescriptor fd = is.getFD(); // this is how the FileDescriptor is obtained
            setDataSource(fd);
            // NOTE(review): the stream is only closed on the success path; if
            // setDataSource(fd) throws, `is` leaks (no try/finally in this excerpt).
            is.close();
        } else {
            throw new IOException("setDataSource failed.");
        }
    }

    public void setDataSource(FileDescriptor fd, long offset, long length)
            throws IOException, IllegalArgumentException, IllegalStateException {
        // What MediaPlayer.java ultimately hands to the JNI layer is a
        // FileDescriptor (plus offset/length into that file).
        _setDataSource(fd, offset, length);
    }

    // JNI entry point: forwards the FileDescriptor to the native MediaPlayer.
    private native void _setDataSource(FileDescriptor fd, long offset, long length)
            throws IOException, IllegalArgumentException, IllegalStateException;

    private native final void native_setup(Object mediaplayer_this);
}

context:  用来保存创建的mediaplayer.

post_event:用来将JNI层的事件回调给JAVA层。

实现:MediaPlayer.java中实现了 postEventFromNative() 静态方法,它把事件转发给 MediaPlayer 的 EventHandler 所在线程;JNI 层在 native_init() 时取得 postEventFromNative() 的 jmethodID 并保存到 post_event,之后 JNI 层就可以通过 post_event 把事件回调给 Java 层。

因此,可以看出 JNI层有一个MediaPlayer.cpp对象,它接收java层的MediaPlayer.java对象传递过来的信息。

JNI层更像是一个接力手,将java层MediaPlayer对象的事情交给C++层的MediaPlayer对象(这个对象对应于java层的MediaPlayer对象,但不是同一个对象)来处理

 

frameworks/base/media/jni/android_media_MediaPlayer.cpp

// JNI IDs resolved once during native_init() and cached here, so每个 JNI
// entry point does not need to look them up again.
struct fields_t {
    jfieldID    context;          // MediaPlayer.mNativeContext: stores the native MediaPlayer*
    jfieldID    surface_texture;  // MediaPlayer.mNativeSurfaceTexture

    jmethodID   post_event;       // MediaPlayer.postEventFromNative(): native -> Java event path

    jmethodID   proxyConfigGetHost;
    jmethodID   proxyConfigGetPort;
    jmethodID   proxyConfigGetExclusionList;
};

static fields_t fields;  // global: cached IDs shared by all JNI entry points in this file

// Called from the Java MediaPlayer constructor (native_setup): creates the
// JNI-layer C++ MediaPlayer that shadows the Java object, installs a listener
// that routes native events back to Java, and stores the pointer in the Java
// object's mNativeContext field.
static void
android_media_MediaPlayer_native_setup(JNIEnv *env, jobject thiz, jobject weak_this)
{
    ALOGV("native_setup");
    sp<MediaPlayer> mp = new MediaPlayer();   // create the JNI-layer MediaPlayer object
    if (mp == NULL) {
        jniThrowException(env, "java/lang/RuntimeException", "Out of memory");
        return;
    }

    // create new listener and give it to MediaPlayer
    // (JNIMediaPlayerListener calls back into Java via fields.post_event)
    sp<JNIMediaPlayerListener> listener = new JNIMediaPlayerListener(env, thiz, weak_this);
    mp->setListener(listener);

    // Stow our new C++ MediaPlayer in an opaque field in the Java object.
    setMediaPlayer(env, thiz, mp);
}
//播放或设置时通过该函数来获得JNI层的MediaPlayer(对应java层的MediaPlayer) 对应的字段为context //JNIEnv每个线程独享的JNI环境,负责函数接口成员寻找 thiz指java层调用这个函数当前对象 static sp<MediaPlayer> getMediaPlayer(JNIEnv* env, jobject thiz) { Mutex::Autolock l(sLock); MediaPlayer* const p = (MediaPlayer*)env->GetLongField(thiz, fields.context); return sp<MediaPlayer>(p); } //保存C++层声明的MediaPlayer对象,留一个引用,方便以后用。 通过fields.context来保存 static sp<MediaPlayer> setMediaPlayer(JNIEnv* env, jobject thiz, const sp<MediaPlayer>& player) { Mutex::Autolock l(sLock); sp<MediaPlayer> old = (MediaPlayer*)env->GetLongField(thiz, fields.context); if (player.get()) { player->incStrong((void*)setMediaPlayer); } if (old != 0) { old->decStrong((void*)setMediaPlayer); } env->SetLongField(thiz, fields.context, (jlong)player.get()); return old; } static void android_media_MediaPlayer_setDataSourceFD(JNIEnv *env, jobject thiz, jobject fileDescriptor, jlong offset, jlong length) { sp<MediaPlayer> mp = getMediaPlayer(env, thiz); //可以看到 每次都是将存储到JNI层的mp取出来 if (mp == NULL ) { jniThrowException(env, "java/lang/IllegalStateException", NULL); return; } if (fileDescriptor == NULL) { jniThrowException(env, "java/lang/IllegalArgumentException", NULL); return; } int fd = jniGetFDFromFileDescriptor(env, fileDescriptor); ALOGV("setDataSourceFD: fd %d", fd); process_media_player_call( env, thiz, mp->setDataSource(fd, offset, length), "java/io/IOException", "setDataSourceFD failed." ); }

然后到了C++层

//frameworks/av/media/libmedia/mediaplayer.cpp
// C++ client-side MediaPlayer: obtains the media.player service proxy, asks
// it to create a per-connection MediaPlayerService::Client, hands it the fd,
// and remembers the resulting IMediaPlayer proxy in mPlayer.
status_t MediaPlayer::setDataSource(int fd, int64_t offset, int64_t length)
{
    ALOGV("setDataSource(%d, %" PRId64 ", %" PRId64 ")", fd, offset, length);
    status_t err = UNKNOWN_ERROR;
    const sp<IMediaPlayerService>& service(getMediaPlayerService());
    if (service != 0) {
        // Create a MediaPlayerService::Client on the service side; `player`
        // is our Binder proxy (IMediaPlayer) to it.
        sp<IMediaPlayer> player(service->create(this, mAudioSessionId));
        if ((NO_ERROR != doSetRetransmitEndpoint(player)) ||
            (NO_ERROR != player->setDataSource(fd, offset, length))) {
            player.clear();
        }
        err = attachNewPlayer(player);  // keep this MediaPlayerService::Client proxy
    }
    return err;
}

//frameworks/av/media/libmediaplayerservice/MediaPlayerService.cpp
//service->create(this, mAudioSessionId)
// Service-side entry for service->create(this, mAudioSessionId): builds one
// MediaPlayerService::Client per Java-side MediaPlayer and records it.
sp<IMediaPlayer> MediaPlayerService::create(const sp<IMediaPlayerClient>& client, int audioSessionId)
{
    pid_t pid = IPCThreadState::self()->getCallingPid();
    int32_t connId = android_atomic_inc(&mNextConnId);  // unique connection id
    // Register a new Client inside the service for this caller.
    sp<Client> c = new Client(
            this, pid, connId, client, audioSessionId,
            IPCThreadState::self()->getCallingUid());
    ALOGV("Create new client(%d) from pid %d, uid %d, ", connId, pid,
          IPCThreadState::self()->getCallingUid());
    wp<Client> w = c;
    {
        Mutex::Autolock lock(mLock);
        mClients.add(w);  // track it (weakly) in the service's client list
    }
    return c;
}

//player->setDataSource(fd, offset, length)
// Service-side implementation of player->setDataSource(fd, offset, length):
// validates the fd, sniffs a player type for the content, creates the player
// and feeds it the data source.
status_t MediaPlayerService::Client::setDataSource(int fd, int64_t offset, int64_t length) {
    ALOGV("setDataSource fd=%d, offset=%lld, length=%lld", fd, offset, length);
    struct stat sb;
    int ret = fstat(fd, &sb);
    if (ret != 0) {
        ALOGE("fstat(%d) failed: %d, %s", fd, ret, strerror(errno));
        return UNKNOWN_ERROR;
    }

    ALOGV("st_dev  = %llu", sb.st_dev);
    ALOGV("st_mode = %u", sb.st_mode);
    ALOGV("st_uid  = %lu", static_cast<unsigned long>(sb.st_uid));
    ALOGV("st_gid  = %lu", static_cast<unsigned long>(sb.st_gid));
    ALOGV("st_size = %llu", sb.st_size);

    if (offset >= sb.st_size) {
        ALOGE("offset error");
        ::close(fd);
        return UNKNOWN_ERROR;
    }
    // Clamp the requested length to what the file actually contains.
    if (offset + length > sb.st_size) {
        length = sb.st_size - offset;
        ALOGV("calculated length = %lld", length);
    }

    // MediaPlayerFactory::getPlayerType() reads the file content to choose
    // among STAGEFRIGHT_PLAYER, NU_PLAYER, SONIVOX_PLAYER, TEST_PLAYER
    // (STAGEFRIGHT_PLAYER is the default).
    player_type playerType = MediaPlayerFactory::getPlayerType(this, fd, offset, length);
    // Step 1: create the player (and its AudioOutput sink) for that type.
    sp<MediaPlayerBase> p = setDataSource_pre(playerType);
    // now set data source
    // Steps 2+3: hand the source to the player, then record player + status.
    setDataSource_post(p, p->setDataSource(fd, offset, length));
    return mStatus;
}

拿 sp<IMediaPlayerService> &service(本质是 Bp 代理)创建了一个 MediaPlayerService::Client,也就是 sp<IMediaPlayer> player,给最上层(Apk)使用。

每个 player(Bp 代理)对应服务端的一个 MediaPlayerService::Client;服务端中可能同时存在很多 MediaPlayerService::Client。

class MediaPlayerService : public BnMediaPlayerService {
   class Client : public BnMediaPlayer {}
}

class BnMediaPlayer: public BnInterface<IMediaPlayer> {}

sp<IMediaPlayer> player 实际上指向的是 MediaPlayerService::Client 对象,而不是 MediaPlayer 对象,千万别搞错了

frameworks/av/media/libmedia/IMediaDeathNotifier.cpp
namespace android {
// client singleton for binder interface to services
Mutex IMediaDeathNotifier::sServiceLock;
// Process-wide cached proxy (Bp) to the remote "media.player" service.
sp<IMediaPlayerService> IMediaDeathNotifier::sMediaPlayerService;
sp<IMediaDeathNotifier::DeathNotifier> IMediaDeathNotifier::sDeathNotifier;
SortedVector< wp<IMediaDeathNotifier> > IMediaDeathNotifier::sObitRecipients;

// establish binder interface to MediaPlayerService
/*static*/const sp<IMediaPlayerService>&
IMediaDeathNotifier::getMediaPlayerService()
{
    ALOGV("getMediaPlayerService");
    Mutex::Autolock _l(sServiceLock);
    if (sMediaPlayerService == 0) {
        sp<IServiceManager> sm = defaultServiceManager();   // the ServiceManager proxy (Binder handle 0)
        sp<IBinder> binder;
        // Poll for the "media.player" service: it may not be published yet,
        // so retry every 0.5 s until it appears.
        do {
            binder = sm->getService(String16("media.player"));  
            if (binder != 0) {
                break;
            }
            ALOGW("Media player service not published, waiting...");
            usleep(500000); // 0.5 s
        } while (true);

        if (sDeathNotifier == NULL) {
            sDeathNotifier = new DeathNotifier();
        }
        binder->linkToDeath(sDeathNotifier);  // be notified if the service process dies
        // Same pattern as obtaining the ServiceManager itself: wrap the raw
        // binder in an IMediaPlayerService proxy object.
        sMediaPlayerService = interface_cast<IMediaPlayerService>(binder);
    }
    ALOGE_IF(sMediaPlayerService == 0, "no media player service!?");
    return sMediaPlayerService;
}

}

     sp<MediaPlayerBase> p = setDataSource_pre(playerType);

//MediaPlayerService.cpp

// Step 1 of Client::setDataSource(): instantiate the player chosen by the
// factory and, unless the player drives audio hardware itself, give it an
// AudioOutput sink (AwesomePlayer later hands this sink to its AudioPlayer).
sp<MediaPlayerBase> MediaPlayerService::Client::setDataSource_pre(
        player_type playerType) {
    ALOGV("player type = %d", playerType);

    // create the right type of player
    sp<MediaPlayerBase> p = createPlayer(playerType);
    if (!p->hardwareOutput()) {
        // Software output path: route the player's audio through AudioOutput.
       mAudioOutput = new AudioOutput(mAudioSessionId, IPCThreadState::self()->getCallingUid(),
                mPid, mAudioAttributes);
        static_cast<MediaPlayerInterface*>(p.get())->setAudioSink(mAudioOutput);
    }

    return p;
}

// Return a player of the requested type, reusing the current one when it
// already matches.
sp<MediaPlayerBase> MediaPlayerService::Client::createPlayer(player_type playerType) {
    // determine if we have the right player type
    sp<MediaPlayerBase> p = mPlayer;
    // Cached player is of the wrong type: drop it and build a fresh one.
    if ((p != NULL) && (p->playerType() != playerType)) {
        ALOGV("delete player");
        p.clear();
    }
    if (p == NULL) {
        p = MediaPlayerFactory::createPlayer(playerType, this, notify);
    }

    return p;
}

 

// AwesomePlayer side: remember the sink installed by setDataSource_pre();
// it is forwarded to the AudioPlayer when one is created.
void AwesomePlayer::setAudioSink(
        const sp<MediaPlayerBase::AudioSink> &audioSink) {
    mAudioSink = audioSink;
}

 当创建AudioPlayer时,将这个mAudioSink传给了AudioPlayer

mAudioPlayer = new AudioPlayer(mAudioSink, flags, this);

//frameworks/av/media/libmediaplayerservice/MediaPlayerFactory.cpp
sp<MediaPlayerBase> MediaPlayerFactory::createPlayer(player_type playerType, void* cookie, notify_callback_f notifyFunc) {
    sp<MediaPlayerBase> p;
    IFactory* factory;
    status_t init_result;
    Mutex::Autolock lock_(&sLock);

    if (sFactoryMap.indexOfKey(playerType) < 0) {
        ALOGE("Failed to create player object of type %d, no registered"
              " factory", playerType);
        return p;
    }

//显然是使用的是工厂模式中的工厂方法模式。依据播放类型获得对应的播放器工厂对象 factory
= sFactoryMap.valueFor(playerType);//依据播放类型选择对应的播放器工厂对象,目前有3种类型 CHECK(NULL != factory); //使用该播放工厂对象来获得一个播放器 p = factory->createPlayer();
   if (init_result == NO_ERROR) { p->setNotifyCallback(cookie, notifyFunc); } else { ALOGE("Failed to create player object of type %d, initCheck failed" " (res = %d)", playerType, init_result); p.clear(); } return p; } class StagefrightPlayerFactory : public MediaPlayerFactory::IFactory { public: virtual float scoreFactory(const sp<IMediaPlayer>& /*client*/, int fd, int64_t offset, int64_t /*length*/, float /*curScore*/) { if (getDefaultPlayerType() == STAGEFRIGHT_PLAYER) { char buf[20]; lseek(fd, offset, SEEK_SET); read(fd, buf, sizeof(buf)); lseek(fd, offset, SEEK_SET); uint32_t ident = *((uint32_t*)buf); // Ogg vorbis? if (ident == 0x5367674f) // 'OggS' return 1.0; } return 0.0; } virtual sp<MediaPlayerBase> createPlayer() { ALOGV(" create StagefrightPlayer"); return new StagefrightPlayer();//第一种 } }; class NuPlayerFactory : public MediaPlayerFactory::IFactory { //.............. if (!strncasecmp("http://", url, 7) || !strncasecmp("https://", url, 8) || !strncasecmp("file://", url, 7)) { size_t len = strlen(url); if (len >= 5 && !strcasecmp(".m3u8", &url[len - 5])) { return kOurScore; } if (strstr(url,"m3u8")) { return kOurScore; } if ((len >= 4 && !strcasecmp(".sdp", &url[len - 4])) || strstr(url, ".sdp?")) { return kOurScore; } } if (!strncasecmp("rtsp://", url, 7)) { return kOurScore; } virtual sp<MediaPlayerBase> createPlayer() { ALOGV(" create NuPlayer"); return new NuPlayerDriver; //第二种 } }; class SonivoxPlayerFactory : public MediaPlayerFactory::IFactory { static const char* const FILE_EXTS[] = { ".mid", ".midi", ".smf", ".xmf", ".mxmf", ".imy", ".rtttl", ".rtx", ".ota" }; //.............. virtual sp<MediaPlayerBase> createPlayer() { ALOGV(" create MidiFile"); return new MidiFile(); //第三种 } };

StagefrightPlayer: 默认播放器,本地文件基本都使用其播放

NuPlayerDriver:主要用于播放网络视频,http https rtsp等

SonivoxPlayer:用于播放midi等类型的音乐

// The player implementations a factory can be registered for.
enum player_type {
    PV_PLAYER = 1,           // presumably the legacy PacketVideo engine — not used in this excerpt
    SONIVOX_PLAYER = 2,      // MIDI and related formats
    STAGEFRIGHT_PLAYER = 3,  // default local-playback engine
    NU_PLAYER = 4,           // streaming sources (HLS/RTSP/...)
    // Test players are available only in the 'test' and 'eng' builds.
    // The shared library with the test player is passed as an
    // argument to the 'test:' url in the setDataSource call.
    TEST_PLAYER = 5,
};

 

//frameworks/av/media/libmediaplayerservice/StagefrightPlayer.cpp
// StagefrightPlayer is a thin adapter: all real work is delegated to the
// AwesomePlayer it owns.
StagefrightPlayer::StagefrightPlayer()
    : mPlayer(new AwesomePlayer) {
    mPlayer->setListener(this);  // route AwesomePlayer events back through us
}

status_t StagefrightPlayer::setDataSource(int fd, int64_t offset, int64_t length) {
    ALOGV("setDataSource(%d, %lld, %lld)", fd, offset, length);
    // dup(fd): hand AwesomePlayer its own copy of the descriptor.
    return mPlayer->setDataSource(dup(fd), offset, length);
}

p->setDataSource(fd, offset, length)

//frameworks/av/media/libstagefright/AwesomePlayer.cpp
status_t AwesomePlayer::setDataSource(int fd, int64_t offset, int64_t length) {
    reset_l();  // drop any previous source/state first

    sp<DataSource> dataSource = new FileSource(fd, offset, length);

    status_t err = dataSource->initCheck();
    // NOTE(review): the excerpt ignores `err`; the full source bails out on
    // failure before continuing.

    mFileSource = dataSource;
    return setDataSource_l(dataSource);
}

status_t AwesomePlayer::setDataSource_l(const sp<DataSource> &dataSource) {
    // MediaExtractor::Create() sniffs the content type and instantiates the
    // matching extractor (e.g. WAVExtractor for WAV, MP3Extractor for MP3).
    sp<MediaExtractor> extractor = MediaExtractor::Create(dataSource);
    return setDataSource_l(extractor);  // continue with A/V track setup
}

  然后依据extractor做A/V分离

 

// A/V demux setup: walk the extractor's tracks, estimate the overall
// bitrate, and wire up the first video track, the first audio track, and
// any 3GPP timed-text (subtitle) tracks.
status_t AwesomePlayer::setDataSource_l(const sp<MediaExtractor> &extractor) {
    // Attempt to approximate overall stream bitrate by summing all
    // tracks' individual bitrates, if not all of them advertise bitrate,
    // we have to fail.

    int64_t totalBitRate = 0;

    mExtractor = extractor;
    for (size_t i = 0; i < extractor->countTracks(); ++i) {
        sp<MetaData> meta = extractor->getTrackMetaData(i);

        int32_t bitrate;
        if (!meta->findInt32(kKeyBitRate, &bitrate)) {
            const char *mime;
            CHECK(meta->findCString(kKeyMIMEType, &mime));
            ALOGV("track of type '%s' does not publish bitrate", mime);

            totalBitRate = -1;  // -1 == overall bitrate unknown
            break;
        }

        totalBitRate += bitrate;
    }

    // File-level duration, if the container publishes one.
    sp<MetaData> fileMeta = mExtractor->getMetaData();
    if (fileMeta != NULL) {
        int64_t duration;
        if (fileMeta->findInt64(kKeyDuration, &duration)) {
            mDurationUs = duration;
        }
    }

    mBitrate = totalBitRate;

    ALOGV("mBitrate = %lld bits/sec", mBitrate);

    {
        Mutex::Autolock autoLock(mStatsLock);
        mStats.mBitrate = mBitrate;
        mStats.mTracks.clear();
        mStats.mAudioTrackIndex = -1;
        mStats.mVideoTrackIndex = -1;
    }

    // Select the FIRST video track and the FIRST audio track; further tracks
    // of the same kind are skipped here.
    bool haveAudio = false;
    bool haveVideo = false;
    for (size_t i = 0; i < extractor->countTracks(); ++i) {
        sp<MetaData> meta = extractor->getTrackMetaData(i);

        const char *_mime;
        CHECK(meta->findCString(kKeyMIMEType, &_mime));

        String8 mime = String8(_mime);

        if (!haveVideo && !strncasecmp(mime.string(), "video/", 6)) {
            setVideoSource(extractor->getTrack(i));
            haveVideo = true;

            // Set the presentation/display size
            int32_t displayWidth, displayHeight;
            bool success = meta->findInt32(kKeyDisplayWidth, &displayWidth);
            if (success) {
                success = meta->findInt32(kKeyDisplayHeight, &displayHeight);
            }
            if (success) {
                mDisplayWidth = displayWidth;
                mDisplayHeight = displayHeight;
            }

            {
                Mutex::Autolock autoLock(mStatsLock);
                mStats.mVideoTrackIndex = mStats.mTracks.size();
                mStats.mTracks.push();
                TrackStat *stat =
                    &mStats.mTracks.editItemAt(mStats.mVideoTrackIndex);
                stat->mMIME = mime.string();
            }
        } else if (!haveAudio && !strncasecmp(mime.string(), "audio/", 6)) {
            setAudioSource(extractor->getTrack(i));
            haveAudio = true;
            mActiveAudioTrackIndex = i;

            {
                Mutex::Autolock autoLock(mStatsLock);
                mStats.mAudioTrackIndex = mStats.mTracks.size();
                mStats.mTracks.push();
                TrackStat *stat =
                    &mStats.mTracks.editItemAt(mStats.mAudioTrackIndex);
                stat->mMIME = mime.string();
            }

            if (!strcasecmp(mime.string(), MEDIA_MIMETYPE_AUDIO_VORBIS)) {
                // Only do this for vorbis audio, none of the other audio
                // formats even support this ringtone specific hack and
                // retrieving the metadata on some extractors may turn out
                // to be very expensive.
                sp<MetaData> fileMeta = extractor->getMetaData();
                int32_t loop;
                if (fileMeta != NULL
                        && fileMeta->findInt32(kKeyAutoLoop, &loop) && loop != 0) {
                    modifyFlags(AUTO_LOOPING, SET);
                }
            }
        } else if (!strcasecmp(mime.string(), MEDIA_MIMETYPE_TEXT_3GPP)) {
            // 3GPP timed-text (subtitle) track.
            addTextSource_l(i, extractor->getTrack(i));
        }
    }

    if (!haveAudio && !haveVideo) {
        if (mWVMExtractor != NULL) {
            return mWVMExtractor->getError();
        } else {
            return UNKNOWN_ERROR;  // nothing playable in this container
        }
    }

    mExtractorFlags = extractor->flags();

    return OK;
}

 

 

  // now set data source
    setDataSource_post(p, p->setDataSource(fd, offset, length));

//MediaPlayerService.cpp
void MediaPlayerService::Client::setDataSource_post(const sp<MediaPlayerBase>& p, status_t status) {
    mStatus = status;
    if (mStatus != OK) {
        ALOGE("  error: %d", mStatus);//如果设置解码器失败,那就歇菜了。 return;
    }if (mStatus == OK) {
        mPlayer = p;   //client保存该播放器
    }
}

 err = attachNewPlayer(player);

// Back in the client process (mediaplayer.cpp): remember the Binder proxy
// to our MediaPlayerService::Client.
status_t MediaPlayer::attachNewPlayer(const sp<IMediaPlayer>& player) {
        //........................
        // (excerpt: the full source also validates state and returns a status)
        mPlayer = player;  // keep the remote player proxy
}

Note:好多类中都有mPlayer这个成员,千万别搞混淆

小结下:
//依据文件来获得播放类型
player_type playerType = MediaPlayerFactory::getPlayerType(this, fd, offset, length);
//第一步:_pre()创建播放器
sp<MediaPlayerBase> p = setDataSource_pre(playerType);
//第二步:依据dataSource创建对应的解码器
p->setDataSource(fd, offset, length)
//第三步:post()将播放器保存在client中 setDataSource_post(p, p->setDataSource(fd, offset, length));
//将这个MediaPlayerService::Client(的Bp代理)保存在MediaPlayer中
err = attachNewPlayer(player);

至此 MediaPlayer.setDataSource()流程走完

------------------------------------------------------------------------------------------------------------------------------------------------

mMediaPlayer.prepareAsync();
frameworks/base/media/java/android/media/MediaPlayer.java
public native void prepareAsync() throws IllegalStateException;

// JNI side (abridged: the full function first fetches the native player via
// getMediaPlayer(env, thiz) and wraps the call in process_media_player_call()).
static void android_media_MediaPlayer_prepareAsync(JNIEnv *env, jobject thiz){
    mp->prepareAsync()
}

MediaPlayer::prepareAsync()=>MediaPlayer::prepareAsync_l();
// Called (with the lock held) from MediaPlayer::prepareAsync().
status_t MediaPlayer::prepareAsync_l() {
    // Only legal right after setDataSource() (INITIALIZED) or after stop().
    if ( (mPlayer != 0) && ( mCurrentState & ( MEDIA_PLAYER_INITIALIZED | MEDIA_PLAYER_STOPPED) ) ) {
        mPlayer->setAudioStreamType(mStreamType);  // push the stream type chosen on the Java side
        if (mAudioAttributesParcel != NULL) {
            mPlayer->setParameter(KEY_PARAMETER_AUDIO_ATTRIBUTES, *mAudioAttributesParcel);
        }
        mCurrentState = MEDIA_PLAYER_PREPARING;
        return mPlayer->prepareAsync();  // forward to MediaPlayerService::Client
    }
    return INVALID_OPERATION;
}

 

//MediaPlayerService.cpp
status_t MediaPlayerService::Client::setAudioStreamType(audio_stream_type_t type) {
    ALOGV("[%d] setAudioStreamType(%d)", mConnId, type);
    // NOTE(review): assumes mAudioOutput was created in setDataSource_pre();
    // confirm the full source guards against a null sink.
    mAudioOutput->setAudioStreamType(type);
    return NO_ERROR;
}

 

//frameworks/av/media/libmediaplayerservice/MediaPlayerService.h
// AudioOutput just records the stream type here; it takes effect when the
// underlying audio track is opened.
void setAudioStreamType(audio_stream_type_t streamType) {
    mStreamType = streamType;
}

// Client helper: the MediaPlayerBase created in setDataSource_pre().
sp<MediaPlayerBase> getPlayer() const {
    return mPlayer;
}

 

// Forward prepareAsync to the underlying player stored by setDataSource_post()
// (e.g. a StagefrightPlayer).
status_t MediaPlayerService::Client::prepareAsync() {
    return getPlayer()->prepareAsync();
}

// Thin shim: delegate to the owned AwesomePlayer.
status_t StagefrightPlayer::prepareAsync() {
    return mPlayer->prepareAsync();
}

status_t AwesomePlayer::prepareAsync() {
    mIsAsyncPrepare = true;  // remember we came via the async path (affects notifications later)
    return prepareAsync_l();
}

status_t AwesomePlayer::prepareAsync_l() {
    if (!mQueueStarted) {
        // Start the TimedEventQueue worker thread that continuously drains
        // and fires queued events.
        mQueue.start();
        mQueueStarted = true;
    }

    modifyFlags(PREPARING, SET);
    // Post the first event. When it fires, the queue thread calls
    // AwesomePlayer::onPrepareAsyncEvent, which does the actual
    // (potentially slow) preparation work.
    mAsyncPrepareEvent = new AwesomeEvent(this, &AwesomePlayer::onPrepareAsyncEvent);
    mQueue.postEvent(mAsyncPrepareEvent);
    return OK;
}

 首先mQueue.start()。mQueue是TimedEventQueue类型的实例,TimedEventQueue::start函数则创建一个线程, 该线程不断地从mQueue里取出相应的event,并调用相应的event的fire函数来执行相应的处理。之前,我们已经 知道在AwesomePlayer的构造函数里已经创建了很多TimedEventQueue::Event实例。

//frameworks/av/media/libstagefright/include/TimedEventQueue.h
// (abridged header) A queue of timed events drained by a dedicated thread;
// each event's fire() runs on that thread when the event comes due.
struct TimedEventQueue {
    struct Event : public RefBase {
        // Invoked on the queue thread when the event is due.
        virtual void fire(TimedEventQueue *queue, int64_t now_us) = 0;

        friend class TimedEventQueue;
    };

    // Start executing the event loop.
    void start();

    // Stop executing the event loop, if flush is false, any pending
    // events are discarded, otherwise the queue will stop (and this call
    // return) once all pending events have been handled.
    void stop(bool flush = false);

    // Posts an event to the front of the queue (after all events that
    // have previously been posted to the front but before timed events).
    event_id postEvent(const sp<Event> &event);

    pthread_t mThread;       // worker thread created by start()
    List<QueueItem> mQueue;  // pending events
}


// Spin up the event-loop worker thread (no-op if already running).
void TimedEventQueue::start() {
    if (mRunning) {
        return;
    }

    mStopped = false;

    pthread_attr_t attr;
    pthread_attr_init(&attr);
    // Joinable on purpose: stop() pthread_join()s this thread.
    pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);

    pthread_create(&mThread, &attr, ThreadWrapper, this);

    pthread_attr_destroy(&attr);

    mRunning = true;
}

// Shut the worker thread down. flush==true lets pending events run first
// (StopEvent goes to the back); otherwise the StopEvent is scheduled at
// INT64_MIN so it fires before everything and pending events are discarded.
void TimedEventQueue::stop(bool flush) {
    if (!mRunning) {
        return;
    }

    if (flush) {
        postEventToBack(new StopEvent);
    } else {
        postTimedEvent(new StopEvent, INT64_MIN);
    }

    void *dummy;
    pthread_join(mThread, &dummy);

    // some events may be left in the queue if we did not flush and the wake lock
    // must be released.
    releaseWakeLock_l(true /*force*/);
    mQueue.clear();

    mRunning = false;
}

 

// Fired on the event-queue thread for the initial prepare event.
void AwesomePlayer::onPrepareAsyncEvent() {
    beginPrepareAsync_l();
}

void AwesomePlayer::beginPrepareAsync_l() {
    // URL-based sources (http/rtsp/...) still need their DataSource built.
    if (mUri.size() > 0) {
        status_t err = finishSetDataSource_l();

        if (err != OK) {
            abortPrepare(err);
            return;
        }
    }

    if (mVideoTrack != NULL && mVideoSource == NULL) {
        status_t err = initVideoDecoder();
        // NOTE(review): `err` is ignored in this excerpt; the full source aborts on failure.
    }

    if (mAudioTrack != NULL && mAudioSource == NULL) {
        status_t err = initAudioDecoder();
        // NOTE(review): `err` is ignored in this excerpt; the full source aborts on failure.
    }

    modifyFlags(PREPARING_CONNECTED, SET);

    if (isStreamingHTTP()) {
        // Network sources buffer first; prepare completes from the buffering event.
        postBufferingEvent_l();
    } else {
        finishAsyncPrepare_l();
    }
}

// Final stage of async prepare: publish the video size, send MEDIA_PREPARED
// up to the app, flip the flags to PREPARED, and — if we were re-preparing
// after an audio tear-down — seek back and optionally resume playback.
void AwesomePlayer::finishAsyncPrepare_l() {
    if (mIsAsyncPrepare) {
        if (mVideoSource == NULL) {
            // Audio-only source: report a 0x0 video size.
            notifyListener_l(MEDIA_SET_VIDEO_SIZE, 0, 0);
        } else {
            notifyVideoSize_l();
        }

        notifyListener_l(MEDIA_PREPARED);  // ends up in the app's onPrepared callback
    }

    mPrepareResult = OK;
    modifyFlags((PREPARING|PREPARE_CANCELLED|PREPARING_CONNECTED), CLEAR);
    modifyFlags(PREPARED, SET);
    mAsyncPrepareEvent = NULL;
    mPreparedCondition.broadcast();  // wake anyone blocked in a synchronous prepare()

    if (mAudioTearDown) {
        // Restore position and play state saved when the audio sink was torn down.
        if (mPrepareResult == OK) {
            if (mExtractorFlags & MediaExtractor::CAN_SEEK) {
                seekTo_l(mAudioTearDownPosition);
            }

            if (mAudioTearDownWasPlaying) {
                modifyFlags(CACHE_UNDERRUN, CLEAR);
                play_l();
            }
        }
        mAudioTearDown = false;
    }
}

 

 

// Select and start the audio source: raw PCM bypasses OMX entirely; for
// offloadable streams an OMX decoder is still created as a fall-back but
// the track itself is used as the source.
status_t AwesomePlayer::initAudioDecoder() {
    sp<MetaData> meta = mAudioTrack->getFormat();

    const char *mime;
    CHECK(meta->findCString(kKeyMIMEType, &mime));
    // Check whether there is a hardware codec for this stream
    // This doesn't guarantee that the hardware has a free stream
    // but it avoids us attempting to open (and re-open) an offload
    // stream to hardware that doesn't have the necessary codec
    audio_stream_type_t streamType = AUDIO_STREAM_MUSIC;
    if (mAudioSink != NULL) {
        streamType = mAudioSink->getAudioStreamType();
    }

    mOffloadAudio = canOffloadStream(meta, (mVideoSource != NULL),
                                     isStreamingHTTP(), streamType);

    if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_RAW)) {
        ALOGV("createAudioPlayer: bypass OMX (raw)");
        mAudioSource = mAudioTrack;  // PCM needs no decoder
    } else {
        // If offloading we still create a OMX decoder as a fall-back
        // but we don't start it
        mOmxSource = OMXCodec::Create(
                mClient.interface(), mAudioTrack->getFormat(),
                false, // createEncoder
                mAudioTrack);

        if (mOffloadAudio) {
            ALOGV("createAudioPlayer: bypass OMX (offload)");
            mAudioSource = mAudioTrack;
        } else {
            mAudioSource = mOmxSource;
        }
    }

    if (mAudioSource != NULL) {
        // Prefer the audio track's own duration when longer than the
        // file-level one.
        int64_t durationUs;
        if (mAudioTrack->getFormat()->findInt64(kKeyDuration, &durationUs)) {
            Mutex::Autolock autoLock(mMiscStateLock);
            if (mDurationUs < 0 || durationUs > mDurationUs) {
                mDurationUs = durationUs;
            }
        }

        status_t err = mAudioSource->start();

        if (err != OK) {
            mAudioSource.clear();
            mOmxSource.clear();
            return err;
        }
    } else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_QCELP)) {
        // For legacy reasons we're simply going to ignore the absence
        // of an audio decoder for QCELP instead of aborting playback
        // altogether.
        return OK;
    }

    if (mAudioSource != NULL) {
        // Record the decoder component name for playback statistics.
        Mutex::Autolock autoLock(mStatsLock);
        TrackStat *stat = &mStats.mTracks.editItemAt(mStats.mAudioTrackIndex);
        const char *component;
        if (!mAudioSource->getFormat()
                ->findCString(kKeyDecoderComponent, &component)) {
            component = "none";
        }

        stat->mDecoderName = component;
    }

    return mAudioSource != NULL ? OK : UNKNOWN_ERROR;
}

 --------------------------------------------------------------------------------------------------------------

mMediaPlayer.start();

/frameworks/base/media/java/android/media/MediaPlayer.java
public  void start() throws IllegalStateException {
    _start();  // all real work happens in native code
}

private native void _start() throws IllegalStateException;

// JNI side: fetch the native player stored in the Java object and start it;
// process_media_player_call() turns a bad status into a Java exception.
static void android_media_MediaPlayer_start(JNIEnv *env, jobject thiz) {
   sp<MediaPlayer> mp = getMediaPlayer(env, thiz);
   process_media_player_call( env, thiz, mp->start(), NULL, NULL );
}

// (abridged) Push looping/volume/effect settings down to the remote
// MediaPlayerService::Client, then ask it to start playback.
status_t MediaPlayer::start() {
    status_t ret = NO_ERROR;  // FIX: `ret` was used without a declaration in this excerpt

    mPlayer->setLooping(mLoop);
    mPlayer->setVolume(mLeftVolume, mRightVolume);
    mPlayer->setAuxEffectSendLevel(mSendLevel);
    mCurrentState = MEDIA_PLAYER_STARTED;
    ret = mPlayer->start();  // ask the MediaPlayerService::Client to start playing

    return ret;
}

// Service side: start the concrete player held by this Client.
status_t MediaPlayerService::Client::start() {
    sp<MediaPlayerBase> p = getPlayer();  // e.g. a StagefrightPlayer
    p->setLooping(mLoop);
    return p->start();
}

status_t StagefrightPlayer::start() {
    return mPlayer->play();  // delegates to AwesomePlayer::play()
}

status_t AwesomePlayer::play() {
    modifyFlags(CACHE_UNDERRUN, CLEAR);
    return play_l();
}

status_t AwesomePlayer::play_l() {
    modifyFlags(SEEK_PREVIEW, CLEAR);

    if (mFlags & PLAYING) {
        return OK;  // already playing — nothing to do
    }

    if (!(mFlags & PREPARED)) {
        // Prepare on demand (and start the event queue if it is not running);
        // in the normal flow prepareAsync() has already done this.
        status_t err = prepare_l();
        if (err != OK) {
            return err;
        }
    }

    modifyFlags(PLAYING, SET);
    modifyFlags(FIRST_FRAME, SET);

    if (mAudioSource != NULL) {
        if (mAudioPlayer == NULL) {
            createAudioPlayer_l();  // lazily build the AudioPlayer around mAudioSink
        }

        if (mVideoSource == NULL) {
            // We don't want to post an error notification at this point,
            // the error returned from MediaPlayer::start() will suffice.
            status_t err = startAudioPlayer_l(
                    false /* sendErrorNotification */);
        }
    }

    if (mFlags & AT_EOS) {
        // Legacy behaviour, if a stream finishes playing and then
        // is started again, we play from the start...
        seekTo_l(0);
    }

    return OK;
}

 

// Builds the AudioPlayer that will render decoded audio. Chooses output
// flags (offload / deep-buffer / streaming / has-video) from the current
// source configuration, then installs the new player as the time source.
// NOTE(review): in the original paste the `new AudioPlayer(...)` statement
// was mashed onto the closing brace of the preceding if; reformatted.
void AwesomePlayer::createAudioPlayer_l() {
    uint32_t flags = 0;
    int64_t cachedDurationUs;
    bool eos;

    if (mOffloadAudio) {
        flags |= AudioPlayer::USE_OFFLOAD;
    } else if (mVideoSource == NULL
            && (mDurationUs > AUDIO_SINK_MIN_DEEP_BUFFER_DURATION_US ||
            (getCachedDuration_l(&cachedDurationUs, &eos) &&
            cachedDurationUs > AUDIO_SINK_MIN_DEEP_BUFFER_DURATION_US))) {
        // Long audio-only content may use the low-power deep buffer path.
        flags |= AudioPlayer::ALLOW_DEEP_BUFFERING;
    }

    if (isStreamingHTTP()) {
        flags |= AudioPlayer::IS_STREAMING;
    }

    if (mVideoSource != NULL) {
        flags |= AudioPlayer::HAS_VIDEO;
    }

    mAudioPlayer = new AudioPlayer(mAudioSink, flags, this);
    mAudioPlayer->setSource(mAudioSource);
    // The AudioPlayer doubles as the playback clock for A/V sync.
    mTimeSource = mAudioPlayer;

    // If there was a seek request before we ever started,
    // honor the request now.
    // Make sure to do this before starting the audio player
    // to avoid a race condition.
    seekAudioIfNecessary_l();
}

class AudioPlayer : public TimeSource {}

// Starts (or resumes) the AudioPlayer. On first start it relies on the
// MediaSource already being started by the prefetcher; afterwards it only
// resumes. Sets AUDIO_RUNNING and arms EOS watching on success.
status_t AwesomePlayer::startAudioPlayer_l(bool sendErrorNotification) {
    // NOTE(review): `err` was used without a declaration in the original
    // excerpt; declared here since both branches below assign it.
    status_t err;

    if (mOffloadAudio) {
        // A pending audio tear-down is obsolete once we are starting again.
        mQueue.cancelEvent(mAudioTearDownEvent->eventID());
        mAudioTearDownEventPending = false;
    }

    if (!(mFlags & AUDIOPLAYER_STARTED)) {
        bool wasSeeking = mAudioPlayer->isSeeking();

        // We've already started the MediaSource in order to enable
        // the prefetcher to read its data.
        err = mAudioPlayer->start(
                true /* sourceAlreadyStarted */);

        modifyFlags(AUDIOPLAYER_STARTED, SET);

        if (wasSeeking) {
            CHECK(!mAudioPlayer->isSeeking());
            // We will have finished the seek while starting the audio player.
            postAudioSeekComplete();
        } else {
            notifyIfMediaStarted_l();
        }
    } else {
        err = mAudioPlayer->resume();
    }

    if (err == OK) {
        modifyFlags(AUDIO_RUNNING, SET);
        mWatchForAudioEOS = true;
    }

    return err;
}

 

// AudioPlayer::start(): pulls the first decoded buffer from the source,
// inspects the output format, then opens and starts either the
// service-provided AudioSink (normal case) or a locally created AudioTrack.
// Fixes over the pasted excerpt: `options` was used undeclared at the first
// read; `channelMask` was read without ever being assigned (the
// kKeyChannelMask lookup from AOSP was dropped); two mangled comment/code
// lines reformatted; Chinese comments translated.
status_t AudioPlayer::start(bool sourceAlreadyStarted) {
    status_t err;
    if (!sourceAlreadyStarted) {
        err = mSource->start();
    }

    // We allow an optional INFO_FORMAT_CHANGED at the very beginning
    // of playback, if there is one, getFormat below will retrieve the
    // updated format, if there isn't, we'll stash away the valid buffer
    // of data to be used on the first audio callback.
    CHECK(mFirstBuffer == NULL);

    MediaSource::ReadOptions options;  // NOTE(review): missing in the original excerpt
    // This ends up in OMXCodec::read(), fetching decoded audio data.
    mFirstBufferResult = mSource->read(&mFirstBuffer, &options);
    if (mFirstBufferResult == INFO_FORMAT_CHANGED) {
        ALOGV("INFO_FORMAT_CHANGED!!!");
        CHECK(mFirstBuffer == NULL);
        mFirstBufferResult = OK;
        mIsFirstBuffer = false;
    } else {
        mIsFirstBuffer = true;
    }

    sp<MetaData> format = mSource->getFormat();
    const char *mime;
    bool success = format->findCString(kKeyMIMEType, &mime);
    CHECK(success);
    // Unless offloading compressed audio, the decoder must hand us raw PCM.
    CHECK(useOffload() || !strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_RAW));

    int32_t numChannels, channelMask;
    success = format->findInt32(kKeyChannelCount, &numChannels);
    if (!format->findInt32(kKeyChannelMask, &channelMask)) {
        // Restored from AOSP: without this lookup, channelMask is read
        // uninitialized below.
        ALOGW("source format didn't specify channel mask, using channel order");
        channelMask = CHANNEL_MASK_USE_CHANNEL_ORDER;
    }
    audio_format_t audioFormat = AUDIO_FORMAT_PCM_16_BIT;

    if (useOffload()) {
        if (mapMimeToAudioFormat(audioFormat, mime) != OK) {
            ALOGE("Couldn't map mime type \"%s\" to a valid AudioSystem::audio_format", mime);
            audioFormat = AUDIO_FORMAT_INVALID;
        } else {
            ALOGV("Mime type \"%s\" mapped to audio_format 0x%x", mime, audioFormat);
        }

        int32_t aacaot = -1;
        if ((audioFormat == AUDIO_FORMAT_AAC) && format->findInt32(kKeyAACAOT, &aacaot)) {
            // Redefine AAC format corrosponding to aac profile
            mapAACProfileToAudioFormat(audioFormat,(OMX_AUDIO_AACPROFILETYPE) aacaot);
        }
    }

    int avgBitRate = -1;
    format->findInt32(kKeyBitRate, &avgBitRate);

    if (mAudioSink.get() != NULL) {
        // Normal case — true for both offload and raw-PCM playback.
        uint32_t flags = AUDIO_OUTPUT_FLAG_NONE;
        audio_offload_info_t offloadInfo = AUDIO_INFO_INITIALIZER;

        if (allowDeepBuffering()) {
            flags |= AUDIO_OUTPUT_FLAG_DEEP_BUFFER;
        }
        if (useOffload()) {
            flags |= AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD;

            int64_t durationUs;
            if (format->findInt64(kKeyDuration, &durationUs)) {
                offloadInfo.duration_us = durationUs;
            } else {
                offloadInfo.duration_us = -1;
            }

            // Describe the compressed stream to the HAL.
            offloadInfo.sample_rate = mSampleRate;
            offloadInfo.channel_mask = channelMask;
            offloadInfo.format = audioFormat;
            offloadInfo.stream_type = AUDIO_STREAM_MUSIC;
            offloadInfo.bit_rate = avgBitRate;
            offloadInfo.has_video = ((mCreateFlags & HAS_VIDEO) != 0);
            offloadInfo.is_streaming = ((mCreateFlags & IS_STREAMING) != 0);
        }

        status_t err = mAudioSink->open(
                mSampleRate, numChannels, channelMask, audioFormat,
                DEFAULT_AUDIOSINK_BUFFERCOUNT,
                &AudioPlayer::AudioSinkCallback,
                this,
                (audio_output_flags_t)flags,
                useOffload() ? &offloadInfo : NULL);

        if (err == OK) {
            mLatencyUs = (int64_t)mAudioSink->latency() * 1000;
            mFrameSize = mAudioSink->frameSize();

            if (useOffload()) {
                // If the playback is offloaded to h/w we pass the
                // HAL some metadata information
                // We don't want to do this for PCM because it will be going
                // through the AudioFlinger mixer before reaching the hardware
                sendMetaDataToHal(mAudioSink, format);
            }

            // Start rendering: => mTrack->start()
            //   => playbackThread->addTrack_l(this)
            //   => AudioSystem::startOutput()
            err = mAudioSink->start();
            // do not alter behavior for non offloaded tracks: ignore start status.
            if (!useOffload()) {
                err = OK;
            }
        }

        if (err != OK) {
            // Unwind: drop the stashed first buffer and stop the source if
            // we were the ones who started it.
            if (mFirstBuffer != NULL) {
                mFirstBuffer->release();
                mFirstBuffer = NULL;
            }

            if (!sourceAlreadyStarted) {
                mSource->stop();
            }

            return err;
        }

    } else {
        // playing to an AudioTrack, set up mask if necessary
        audio_channel_mask_t audioMask = channelMask == CHANNEL_MASK_USE_CHANNEL_ORDER ?
                audio_channel_out_mask_from_count(numChannels) : channelMask;
        if (0 == audioMask) {
            return BAD_VALUE;
        }

        mAudioTrack = new AudioTrack(
                AUDIO_STREAM_MUSIC, mSampleRate, AUDIO_FORMAT_PCM_16_BIT, audioMask,
                0 /*frameCount*/, AUDIO_OUTPUT_FLAG_NONE, &AudioCallback, this,
                0 /*notificationFrames*/);

        if ((err = mAudioTrack->initCheck()) != OK) {
            mAudioTrack.clear();

            if (mFirstBuffer != NULL) {
                mFirstBuffer->release();
                mFirstBuffer = NULL;
            }

            if (!sourceAlreadyStarted) {
                mSource->stop();
            }

            return err;
        }

        mLatencyUs = (int64_t)mAudioTrack->latency() * 1000;
        mFrameSize = mAudioTrack->frameSize();
        mAudioTrack->start();
    }

    mStarted = true;
    mPlaying = true;
    mPinnedTimeUs = -1ll;

    return OK;
}

 class MediaPlayerService : public BnMediaPlayerService {
     class AudioOutput : public MediaPlayerBase::AudioSink{}
}

// AudioOutput::open(): creates the actual AudioTrack that feeds data into
// AudioFlinger. (Body heavily elided in this excerpt — the "..." line below
// stands in for parameter validation and format handling.)
status_t MediaPlayerService::AudioOutput::open(
        uint32_t sampleRate, int channelCount, audio_channel_mask_t channelMask,
        audio_format_t format, int bufferCount,
        AudioCallback cb, void *cookie,
        audio_output_flags_t flags) {  
   ..........................
   audioTrack = new AudioTrack(mStreamType, sampleRate, format,channelMask,
                    0, flags,CallbackWrapper,newcbd, 0,mSessionId);                             
}

// AudioOutput::start(): finishes any in-progress track switch, re-applies
// the cached volume / aux-effect level, then starts the AudioTrack.
void MediaPlayerService::AudioOutput::start() {
    ALOGV("start");

    if (mCallbackData != NULL) {
        mCallbackData->endTrackSwitch();
    }

    // Nothing to do until open() has created the track.
    if (mTrack == NULL) {
        return;
    }

    mTrack->setVolume(mLeftVolume, mRightVolume);
    mTrack->setAuxEffectSendLevel(mSendLevel);
    mTrack->start();
}

AwesomePlayer — key data members (excerpt)

struct AwesomePlayer {
    status_t play();
    status_t pause();
    void setAudioSink(const sp<MediaPlayerBase::AudioSink> &audioSink);

sp<MediaPlayerBase::AudioSink> mAudioSink; //AudioPlayer中存有这个对象 sp<DataSource> mFileSource; //setDataSource()的结果 其实也就是dataSource sp<MediaExtractor> mExtractor;      //依据mFileSource创建的解码器
sp
<MediaSource> mVideoTrack; sp<MediaSource> mVideoSource; sp<MediaSource> mAudioTrack; //属于mExtractor的成员 sp<MediaSource> mAudioSource; //属于mExtractor的成员
    //AudioPlayer是TimeSource的子类, 一般mTimeSource = mAudioPlayer
    TimeSource *mTimeSource; AudioPlayer
*mAudioPlayer; }

 

class AudioPlayer : public TimeSource {
    AudioPlayer(const sp<MediaPlayerBase::AudioSink> &audioSink, uint32_t flags = 0, AwesomePlayer *audioObserver = NULL);

    void setSource(const sp<MediaSource> &source);
    status_t start(bool sourceAlreadyStarted = false);
    void pause(bool playPendingSamples = false);
    status_t resume();
    status_t seekTo(int64_t time_us);
    size_t fillBuffer(void *data, size_t size);
    void reset();
    bool allowDeepBuffering() const { return (mCreateFlags & ALLOW_DEEP_BUFFERING) != 0; }
    bool useOffload() const { return (mCreateFlags & USE_OFFLOAD) != 0; }
    static void AudioCallback(int event, void *user, void *info);
//有2个回调方法
void AudioCallback(int event, void *info); static size_t AudioSinkCallback( MediaPlayerBase::AudioSink *audioSink, void *data, size_t size, void *me, MediaPlayerBase::AudioSink::cb_event_t event); sp<MediaSource> mSource; sp<AudioTrack> mAudioTrack; MediaBuffer *mInputBuffer; int mSampleRate; int64_t mLatencyUs; size_t mFrameSize; bool mStarted; bool mIsFirstBuffer; status_t mFirstBufferResult; MediaBuffer *mFirstBuffer; sp<MediaPlayerBase::AudioSink> mAudioSink; AwesomePlayer *mObserver; };

 

 

图片出处:http://blog.chinaunix.net/uid-7318785-id-3323948.html


免责声明!

本站转载的文章为个人学习借鉴使用,本站对版权不负任何法律责任。如果侵犯了您的隐私权益,请联系本站邮箱yoyou2525@163.com删除。



 
粤ICP备18138465号  © 2018-2025 CODEPRJ.COM